/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */
12 #include "qemu/osdep.h"
13 #include "qemu/hbitmap.h"
14 #include "qemu/host-utils.h"
/* HBitmap provides an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split into groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time on most current architectures.
 *
 * Setting or clearing a range of m bits on all levels, the work to perform
 * is O(m + m/W + m/W^2 + ...), which is O(m) like on a regular bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
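
/* For illustration, here is the index arithmetic described above written as
 * a hypothetical helper (hb_example_split is not part of the original API):
 * splitting a last-level bit index into its word index and bit-within-word
 * offset, per the table for the 64-bit case.
 */
static inline void hb_example_split(uint64_t index, size_t *word,
                                    unsigned *bit)
{
    *word = index >> BITS_PER_LEVEL;      /* move up: drop log2(BITS_PER_LONG) bits */
    *bit = index & (BITS_PER_LONG - 1);   /* low-order bits select within the word */
}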
struct HBitmap {
    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * actually represents a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group.  Here is an example of operations in a
     * size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=5)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};
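
/* A short usage sketch of the granularity scaling documented above,
 * mirroring the size-16, granularity-1 example (hb_example_granularity is
 * illustrative only; the functions it calls are this file's public API).
 */
static inline void hb_example_granularity(void)
{
    HBitmap *map = hbitmap_alloc(16, 1);   /* 16 elements, groups of 2 */

    hbitmap_set(map, 0, 9);                /* touches groups 0-4 */
    assert(hbitmap_count(map) == 10);      /* 5 groups << granularity 1 */
    hbitmap_free(map);
}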
/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        i--;
        pos >>= BITS_PER_LEVEL;
        cur = hbi->cur[i] & hb->levels[i][pos];
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */
    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}
int64_t hbitmap_iter_next(HBitmapIter *hbi)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
            hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
    int64_t item;

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            return -1;
        }
    }

    /* The next call will resume work from the next bit.  */
    hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
    item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);

    return item << hbi->granularity;
}
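
/* A minimal iteration sketch using the iterator API: walk every dirty
 * element from offset 0 until the iterator is exhausted (returns -1).
 * hb_example_iterate is illustrative only.
 */
static inline void hb_example_iterate(const HBitmap *map)
{
    HBitmapIter iter;
    int64_t item;

    hbitmap_iter_init(&iter, map, 0);
    while ((item = hbitmap_iter_next(&iter)) >= 0) {
        /* item is the first element of a dirty granularity group */
    }
}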
void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}
bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}
/* Count the number of set bits between start and end, not accounting for
 * the granularity.  Also an example of how to use hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}
/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    unsigned long old;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    old = *elem;
    *elem |= mask;
    return old != *elem;
}
/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}
void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}
/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}
/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}
void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first;
    uint64_t last = start + count - 1;

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}
void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except for memset() instead of malloc() */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}
bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_granularity() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *    64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_granularity() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}
bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer.  */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}
uint64_t hbitmap_serialization_granularity(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and 32
     * bit hosts. */
    return UINT64_C(64) << hb->granularity;
}
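
/* Concretely: with a granularity of 16, serialized chunks must be aligned
 * to UINT64_C(64) << 16 == 4194304 items.  A hypothetical alignment check
 * (hb_example_chunk_aligned is illustrative only):
 */
static inline bool hb_example_chunk_aligned(const HBitmap *hb, uint64_t start)
{
    return (start & (hbitmap_serialization_granularity(hb) - 1)) == 0;
}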
/* Start should be aligned to serialization granularity, chunk size should be
 * aligned to serialization granularity too, except for the last chunk.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_granularity(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}
uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}
void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}
void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}
void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}
void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* restore levels starting from penultimate to zero level, assuming
     * that the last level is ok */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
}
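
/* A round-trip sketch of the serialization API above, covering the whole
 * bitmap in one chunk (hb_example_serialize is illustrative only; a real
 * caller would transfer buf somewhere between the two calls).
 */
static inline void hb_example_serialize(void)
{
    uint64_t n = 4096;                     /* example element count */
    HBitmap *map = hbitmap_alloc(n, 0);
    uint64_t len = hbitmap_serialization_size(map, 0, n);
    uint8_t *buf = g_malloc(len);

    hbitmap_serialize_part(map, buf, 0, n);
    hbitmap_deserialize_part(map, buf, 0, n, true);   /* finish rebuilds levels */
    g_free(buf);
    hbitmap_free(map);
}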
void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}
HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}
void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    /* Size comes in as logical elements, adjust for granularity. */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* bit sizes are identical; nothing to do. */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants. This helps keep the bitcount consistent, and will prevent
     * us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one. */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            continue;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long));
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}
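
/* Usage sketch for hbitmap_truncate(): sizes are given in logical elements,
 * so granularity scaling is handled internally (hb_example_truncate is
 * illustrative only; the element counts are arbitrary example values).
 */
static inline void hb_example_truncate(void)
{
    HBitmap *map = hbitmap_alloc(1024, 0);

    hbitmap_set(map, 1000, 24);
    hbitmap_truncate(map, 512);          /* shrink: bits 512..1023 reset first */
    assert(hbitmap_count(map) == 0);
    hbitmap_truncate(map, 2048);         /* grow: new words arrive zero-filled */
    hbitmap_free(map);
}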
/**
 * Given HBitmaps A and B, let A := A (BITOR) B.
 * Bitmap B will not be modified.
 *
 * @return true if the merge was successful,
 *         false if it was not attempted.
 */
bool hbitmap_merge(HBitmap *a, const HBitmap *b)
{
    int i;
    uint64_t j;

    if ((a->size != b->size) || (a->granularity != b->granularity)) {
        return false;
    }

    if (hbitmap_count(b) == 0) {
        return true;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            a->levels[i][j] |= b->levels[i][j];
        }
    }

    return true;
}
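
/* Usage sketch for hbitmap_merge(): both maps must share size and
 * granularity, otherwise the merge is refused and a is left untouched
 * (hb_example_merge is illustrative only; values are examples).
 */
static inline void hb_example_merge(void)
{
    HBitmap *a = hbitmap_alloc(1024, 0);
    HBitmap *b = hbitmap_alloc(1024, 0);

    hbitmap_set(b, 100, 10);
    if (hbitmap_merge(a, b)) {
        assert(hbitmap_count(a) == 10);   /* a picked up b's dirty bits */
    }
    hbitmap_free(a);
    hbitmap_free(b);
}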
HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
{
    assert(!(chunk_size & (chunk_size - 1)));
    assert(!hb->meta);
    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
                             hb->granularity + ctz32(chunk_size));
    return hb->meta;
}

void hbitmap_free_meta(HBitmap *hb)
{
    assert(hb->meta);
    hbitmap_free(hb->meta);
    hb->meta = NULL;
}
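
/* A meta bitmap in use: one meta bit tracks a chunk_size-sized group of
 * parent bits, and is set whenever those bits change (hb_example_meta is
 * illustrative only; the chunk size of 16 is an example value).
 */
static inline void hb_example_meta(void)
{
    HBitmap *map = hbitmap_alloc(1024, 0);
    HBitmap *meta = hbitmap_create_meta(map, 16);

    hbitmap_set(map, 0, 1);             /* dirties the parent bitmap... */
    assert(hbitmap_count(meta) != 0);   /* ...and records it in the meta */
    hbitmap_free_meta(map);
    hbitmap_free(map);
}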