xref: /qemu/util/hbitmap.c (revision b25f23e7)
/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"

/* HBitmap provides an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split in groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time on most current architectures.
 *
 * Setting or clearing a range of m bits on all levels costs
 * O(m + m/W + m/W^2 + ...), which is O(m) just like on a regular bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
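
/* As a concrete instance of the table above (a worked example, assuming
 * BITS_PER_LONG == 64 and therefore BITS_PER_LEVEL == 6): bit 1000 of the
 * last level lives at
 *
 *     word index: 1000 >> 6 = 15
 *     bit index:  1000 & 63 = 40    (mask 1UL << 40)
 *
 * Moving up one level, bit 15 of word 15 >> 6 = 0 in the 2nd-last level
 * records whether word 15 of the last level is nonzero.
 */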

struct HBitmap {
    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * actually represents a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group.  Here is an example of operations in a
     * size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=5)  00000100 (iter: 10)
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};
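
/* To make the sizing concrete (a sketch, assuming BITS_PER_LONG == 64):
 * for a bottom level of 1 << 20 bits, hbitmap_alloc computes sizes[] of
 * 16384, 256, 4, and then 1 for every remaining coarser level, since each
 * step divides by BITS_PER_LONG (rounding up) and clamps to at least one
 * word via MAX(..., 1).
 */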

/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        cur = hbi->cur[--i];
        pos >>= BITS_PER_LEVEL;
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}

void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}
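
/* A typical iteration loop over the set bits then looks like this (a
 * minimal sketch; hbitmap_iter_next is declared in qemu/hbitmap.h and
 * returns -1 once the bitmap is exhausted):
 *
 *     HBitmapIter hbi;
 *     int64_t item;
 *
 *     hbitmap_iter_init(&hbi, hb, 0);
 *     while ((item = hbitmap_iter_next(&hbi)) >= 0) {
 *         ...                 (item is the first element of a set group,
 *                              already scaled left by the granularity)
 *     }
 */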

bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}

/* Count the number of set bits between start and last, inclusive, not
 * accounting for the granularity.  Also an example of how to use
 * hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}

/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    unsigned long old;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

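    /* Build a mask with bits [start % BITS_PER_LONG, last % BITS_PER_LONG]
     * set.  For instance (assuming a 64-bit long), start = 2 and last = 5
     * give (2UL << 5) - (1UL << 2) = 64 - 4 = 0b00111100.
     */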
    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    old = *elem;
    *elem |= mask;
    return old != *elem;
}

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}

void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}

void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first;
    uint64_t last = start + count - 1;

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except that the arrays are zeroed with
     * memset() instead of being freshly allocated. */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}

bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_granularity() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *    64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is log2(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_granularity() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}

bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer.  */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}
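
/* Together with hbitmap_set, this illustrates the granularity rounding
 * described in struct HBitmap (a sketch; the numbers below assume
 * granularity 2, i.e. groups of four items):
 *
 *     HBitmap *map = hbitmap_alloc(1024, 2);
 *
 *     hbitmap_set(map, 5, 1);            dirties all of group 1 (items 4-7)
 *     assert(hbitmap_get(map, 4));
 *     assert(hbitmap_get(map, 7));
 *     assert(hbitmap_count(map) == 4);   the count is scaled back up
 *     hbitmap_free(map);
 */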

uint64_t hbitmap_serialization_granularity(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
     * hosts. */
    return UINT64_C(64) << hb->granularity;
}

/* The start position must be aligned to the serialization granularity, and
 * so must the chunk size, except for the final chunk.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_granularity(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}

uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}

void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}

void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* Restore the levels from the penultimate level down to level 0,
     * assuming that the last level is correct. */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
}
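
/* Putting the pieces together, a full serialize/deserialize round trip
 * might look as follows (a sketch only; src and dst must have been created
 * with identical size and granularity, and count must satisfy the
 * alignment rules of serialization_chunk above):
 *
 *     uint64_t len = hbitmap_serialization_size(src, 0, count);
 *     uint8_t *buf = g_malloc(len);
 *
 *     hbitmap_serialize_part(src, buf, 0, count);
 *     hbitmap_deserialize_part(dst, buf, 0, count, true);
 *     g_free(buf);
 */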

void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}

HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}
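
/* For example, hbitmap_alloc(1001, 2) rounds the size up to whole groups:
 * (1001 + 3) >> 2 = 251 bits in the bottom level, each covering a group of
 * four items.
 */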

void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    /* Size comes in as logical elements, adjust for granularity. */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* Bit sizes are identical; nothing to do. */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants. This helps keep the bitcount consistent, and will prevent
     * us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one. */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            break;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long));
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}

/**
 * Given HBitmaps A and B, let A := A (BITOR) B.
 * Bitmap B will not be modified.
 *
 * @return true if the merge was successful,
 *         false if it was not attempted.
 */
bool hbitmap_merge(HBitmap *a, const HBitmap *b)
{
    int i;
    uint64_t j;

    if ((a->size != b->size) || (a->granularity != b->granularity)) {
        return false;
    }

    if (hbitmap_count(b) == 0) {
        return true;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            a->levels[i][j] |= b->levels[i][j];
        }
    }

    /* The OR loop above does not maintain the cached population count,
     * so recompute it from the bottom level.
     */
    a->count = hb_count_between(a, 0, a->size - 1);

    return true;
}
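
/* A usage sketch (hypothetical caller code; both bitmaps must have been
 * allocated with the same size and granularity for the merge to proceed):
 *
 *     if (!hbitmap_merge(dst, src)) {
 *         error_report("bitmap size or granularity mismatch");
 *     }
 */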

HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
{
    assert(!(chunk_size & (chunk_size - 1)));
    assert(!hb->meta);
    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
                             hb->granularity + ctz32(chunk_size));
    return hb->meta;
}

void hbitmap_free_meta(HBitmap *hb)
{
    assert(hb->meta);
    hbitmap_free(hb->meta);
    hb->meta = NULL;
}