/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that have the same pointer value but different hashes.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period to elapse before freeing removed objects.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 *   Lookups that are concurrent with writes to the same bucket will retry
 *   via a seqlock; iterators acquire all bucket locks and therefore can be
 *   concurrent with lookups and are serialized wrt writers.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized through a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold. Resizing is done concurrently with readers; writes
 *   are serialized with the resize operation.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash values and pointers; the u32 hash values are stored in
 * full so that resizing is fast. Having this structure instead of directly
 * chaining items has two advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
 * Note that the seqlock and spinlock of a head bucket apply to all buckets
 * chained to it; these two fields are unused in non-head buckets.
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
 *
 * Resizing is done by taking all bucket spinlocks (so that no other writers can
 * race with us) and then copying all entries into a new hash map. Then, the
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
 * Writers check for concurrent resizes by comparing ht->map before and after
 * acquiring their bucket lock. If they don't match, a resize has occurred
 * while the bucket spinlock was being acquired.
 *
 * Related Work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
 *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would take forever since RCU read critical
 *   sections in QEMU take quite a long time.
 *   More info on relativistic hash tables:
 *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
 *     Tables via Relativistic Programming", USENIX ATC'11.
 *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
 *     https://lwn.net/Articles/612021/
 */
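
/*
 * Example usage, as a sketch only -- see qemu/qht.h for the authoritative
 * prototypes and mode flags. my_cmp and my_hash are placeholders for
 * caller-provided comparison and hash functions:
 *
 *     static bool my_cmp(const void *obj, const void *userp)
 *     {
 *         return obj == userp;
 *     }
 *
 *     struct qht ht;
 *     void *existing;
 *
 *     qht_init(&ht, my_cmp, 1 << 16, QHT_MODE_AUTO_RESIZE);
 *     if (!qht_insert(&ht, elem, my_hash(elem), &existing)) {
 *         ... elem was already present; inspect existing ...
 *     }
 *
 *     rcu_read_lock();
 *     void *p = qht_lookup(&ht, elem, my_hash(elem));
 *     rcu_read_unlock();
 *
 *     qht_remove(&ht, elem, my_hash(elem));
 *     ... free elem only after an RCU grace period has elapsed ...
 */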
#include "qemu/osdep.h"
#include "qemu/qht.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

//#define QHT_DEBUG

/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines, so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64 bytes); systems with larger cache lines might suffer from
 * some false sharing.
 */
#define QHT_BUCKET_ALIGN 64

/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif
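
/*
 * A quick size check of the entry counts above, assuming 4-byte QemuSpin
 * and QemuSeqLock fields (see struct qht_bucket below):
 *   64-bit: 4 + 4 + 4 * 4 (hashes) + 4 * 8 (pointers) + 8 (next) = 64 bytes
 *   32-bit: 4 + 4 + 6 * 4 (hashes) + 6 * 4 (pointers) + 4 (next) = 60 bytes
 * The QEMU_BUILD_BUG_ON below the struct definition enforces the bound.
 */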

enum qht_iter_type {
    QHT_ITER_VOID,    /* do nothing; use retvoid */
    QHT_ITER_RM,      /* remove element if retbool returns true */
};

struct qht_iter {
    union {
        qht_iter_func_t retvoid;
        qht_iter_bool_func_t retbool;
    } f;
    enum qht_iter_type type;
};

/*
 * Do _not_ use qemu_mutex_[try]lock directly! Use these helpers, otherwise
 * the profiler (QSP) will deadlock.
 */
static inline void qht_lock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        qemu_mutex_lock__raw(&ht->lock);
    } else {
        qemu_mutex_lock(&ht->lock);
    }
}

static inline int qht_trylock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        return qemu_mutex_trylock__raw(&ht->lock);
    }
    return qemu_mutex_trylock(&ht->lock);
}

/* this inline is not really necessary, but it helps keep code consistent */
static inline void qht_unlock(struct qht *ht)
{
    qemu_mutex_unlock(&ht->lock);
}

/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with qatomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in qatomic_read, since otherwise the compiler
 * might refetch the pointer.
 * Calls to qatomic_read are of course not necessary when the bucket lock
 * is held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);

/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
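/* e.g. with 256 head buckets, resize once a 33rd bucket is chained */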

static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
                                bool reset);
static void qht_grow_maybe(struct qht *ht);

#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif /* QHT_DEBUG */
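
/*
 * For instance, with QHT_BUCKET_ENTRIES == 4 a request for 1000 elements
 * maps to pow2ceil(250) == 256 head buckets. n_buckets is therefore always
 * a power of two, which qht_map_to_bucket relies on for cheap masking.
 */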
static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}

static inline
struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}

/* acquire all bucket locks from a map */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}

static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}

/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(const struct qht *ht,
                                            const struct qht_map *map)
{
    return map != ht->map;
}

/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = qatomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    qht_map_lock_buckets(map);
    qht_unlock(ht);
    *pmap = map;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = qatomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qht_unlock(ht);
    *pmap = map;
    return b;
}

static inline bool qht_map_needs_resize(const struct qht_map *map)
{
    return qatomic_read(&map->n_added_buckets) >
           map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(const struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    qemu_spin_destroy(&head->lock);
    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
              unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    g_assert(cmp);
    ht->cmp = cmp;
    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    qatomic_rcu_set(&ht->map, map);
}

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            qatomic_set(&b->hashes[i], 0);
            qatomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}

static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, false);
}

static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, true);
}

bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new = NULL;
    struct qht_map *map;
    size_t n_buckets;

    n_buckets = qht_elems_to_buckets(n_elems);

    qht_lock(ht);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
    }
    qht_do_resize_and_reset(ht, new);
    qht_unlock(ht);

    return !!new;
}

static inline
void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    const struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (qatomic_read(&b->hashes[i]) == hash) {
                /* The pointer is dereferenced before seqlock_read_retry,
                 * so (unlike qht_insert__locked) we need to use
                 * qatomic_rcu_read here.
                 */
                void *p = qatomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = qatomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

static __attribute__((noinline))
void *qht_lookup__slowpath(const struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}

void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
                        qht_lookup_func_t func)
{
    const struct qht_bucket *b;
    const struct qht_map *map;
    unsigned int version;
    void *ret;

    map = qatomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fastpath gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}

void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash)
{
    return qht_lookup_custom(ht, userp, hash, ht->cmp);
}

/*
 * call with head->lock held
 * @ht is const since it is only used for ht->cmp()
 */
static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
                                struct qht_bucket *head, void *p, uint32_t hash,
                                bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    qatomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty slot: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        qatomic_rcu_set(&prev->next, b);
    }
    /* smp_wmb() implicit in seqlock_write_begin.  */
    qatomic_set(&b->hashes[i], hash);
    qatomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return NULL;
}

static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qht_trylock(ht)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_do_resize(ht, new);
    }
    qht_unlock(ht);
}

bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    void *prev;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    if (likely(prev == NULL)) {
        return true;
    }
    if (existing) {
        *existing = prev;
    }
    return false;
}

static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    qatomic_set(&to->hashes[i], from->hashes[j]);
    qatomic_set(&to->pointers[i], from->pointers[j]);

    qatomic_set(&from->hashes[j], 0);
    qatomic_set(&from->pointers[j], NULL);
}

/*
 * Find the last valid entry in @orig, and move it into @orig[pos], whose
 * entry is being removed.
 */
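/*
 * For example, with QHT_BUCKET_ENTRIES == 4 and a chain holding
 * [ A B C D ] -> [ E F _ _ ], removing B moves F into B's slot:
 * [ A F C D ] -> [ E _ _ _ ].
 */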
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        qatomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so move the last one into it */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}

/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}

static inline void qht_bucket_iter(struct qht_bucket *head,
                                   const struct qht_iter *iter, void *userp)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            switch (iter->type) {
            case QHT_ITER_VOID:
                iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
                break;
            case QHT_ITER_RM:
                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
                    /* replace i with the last valid element in the bucket */
                    seqlock_write_begin(&head->sequence);
                    qht_bucket_remove_entry(b, i);
                    seqlock_write_end(&head->sequence);
                    qht_bucket_debug__locked(b);
                    /* reevaluate i, since it just got replaced */
                    i--;
                    continue;
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht_map *map,
                                            const struct qht_iter *iter,
                                            void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(&map->buckets[i], iter, userp);
    }
}

static inline void
do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
{
    struct qht_map *map;

    map = qatomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    qht_map_iter__all_locked(map, iter, userp);
    qht_map_unlock_buckets(map);
}

void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retvoid = func,
        .type = QHT_ITER_VOID,
    };

    do_qht_iter(ht, &iter, userp);
}

void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retbool = func,
        .type = QHT_ITER_RM,
    };

    do_qht_iter(ht, &iter, userp);
}
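
/*
 * A sketch of qht_iter_remove usage; pred_remove is a placeholder for a
 * caller-provided predicate, here dropping entries whose hash has the low
 * bit set:
 *
 *     static bool pred_remove(void *p, uint32_t hash, void *userp)
 *     {
 *         return hash & 1;
 *     }
 *
 *     qht_iter_remove(&ht, pred_remove, NULL);
 *
 * As with qht_remove, removed objects may only be freed after an RCU grace
 * period has elapsed.
 */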

struct qht_map_copy_data {
    struct qht *ht;
    struct qht_map *new;
};

static void qht_map_copy(void *p, uint32_t hash, void *userp)
{
    struct qht_map_copy_data *data = userp;
    struct qht *ht = data->ht;
    struct qht_map *new = data->new;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}

/*
 * Atomically perform a resize and/or reset.
 * Call with ht->lock held.
 */
static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
{
    struct qht_map *old;
    const struct qht_iter iter = {
        .f.retvoid = qht_map_copy,
        .type = QHT_ITER_VOID,
    };
    struct qht_map_copy_data data;

    old = ht->map;
    qht_map_lock_buckets(old);

    if (reset) {
        qht_map_reset__all_locked(old);
    }

    if (new == NULL) {
        qht_map_unlock_buckets(old);
        return;
    }

    g_assert(new->n_buckets != old->n_buckets);
    data.ht = ht;
    data.new = new;
    qht_map_iter__all_locked(old, &iter, &data);
    qht_map_debug__all_locked(new);

    qatomic_rcu_set(&ht->map, new);
    qht_map_unlock_buckets(old);
    call_rcu(old, qht_map_destroy, rcu);
}

bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qht_lock(ht);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;

        new = qht_map_create(n_buckets);
        qht_do_resize(ht, new);
        ret = true;
    }
    qht_unlock(ht);

    return ret;
}

/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
{
    const struct qht_map *map;
    size_t i;

    map = qatomic_rcu_read(&ht->map);

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);
    /* bail out if the qht has not yet been initialized */
    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        const struct qht_bucket *head = &map->buckets[i];
        const struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (qatomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = qatomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}
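
/*
 * A sketch of consuming the statistics; qdist_avg is assumed to be
 * available from qemu/qdist.h:
 *
 *     struct qht_stats stats;
 *
 *     qht_statistics_init(&ht, &stats);
 *     printf("entries: %zu, avg buckets per chain: %f\n",
 *            stats.entries, qdist_avg(&stats.chain));
 *     qht_statistics_destroy(&stats);
 */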
964