1 /*
2 * Copyright (c) 2015-2017, Intel Corporation
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * * Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Intel Corporation nor the names of its contributors
13 * may be used to endorse or promote products derived from this software
14 * without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include "config.h"
30
31 #include "gtest/gtest.h"
32 #include "ue2common.h"
33 #include "rose/rose_build_scatter.h"
34 #include "util/compile_error.h"
35 #include "util/make_unique.h"
36 #include "util/multibit.h"
37 #include "util/multibit_build.h"
38
39 #include <algorithm>
40 #include <memory>
41 #include <vector>
42
43 using namespace std;
44 using namespace testing;
45 using namespace ue2;
46
namespace {
// RAII owner of the backing storage for a multibit structure under test.
// The allocation is over-sized by 7 bytes and the conversion operators hand
// back a pointer 7 bytes past the start, deliberately misaligning the
// multibit — presumably to exercise unaligned access paths (TODO confirm).
class mmbit_holder {
public:
    mmbit_holder() {}
    // Allocate room for a multibit of num_bits bits, plus 'excess' spare
    // bytes at the end for tests that scribble past the structure.
    explicit mmbit_holder(u32 num_bits, u32 excess = 0)
        : data(ue2::make_unique<u8[]>(mmbit_size(num_bits) + 7 + excess)) {}
    // Deferred allocation for default-constructed holders (used by the
    // MultiBitTest fixture); must be called at most once.
    void init(u32 num_bits) {
        assert(!data);
        data = ue2::make_unique<u8[]>(mmbit_size(num_bits) + 7);
    }
    // Implicit conversions to the (intentionally misaligned) storage pointer.
    operator u8 *() {
        assert(data);
        return data.get() + 7;
    }
    operator const u8 *() const {
        assert(data);
        return data.get() + 7;
    }

private:
    unique_ptr<u8[]> data = nullptr;
};
} // namespace
70
71 static
fill_mmbit(u8 * ba,u32 test_size)72 void fill_mmbit(u8 *ba, u32 test_size) {
73 fill_n(ba, mmbit_size(test_size), 0xff);
74 }
75
// Basic set/isset/any behaviour on a small (32-bit) multibit.
TEST(MultiBit, Set1) {
    const u32 test_size = 32;
    mmbit_holder ba(test_size);

    // Dirty the store first so mmbit_clear has real work to do.
    fill_mmbit(ba, test_size);
    mmbit_clear(ba, test_size);

    ASSERT_FALSE(mmbit_any(ba, test_size));

    // First bit.
    ASSERT_FALSE(mmbit_isset(ba, test_size, 0));
    mmbit_set(ba, test_size, 0);
    ASSERT_TRUE(mmbit_isset(ba, test_size, 0));

    ASSERT_TRUE(mmbit_any(ba, test_size));

    // Last bit.
    ASSERT_FALSE(mmbit_isset(ba, test_size, 31));
    mmbit_set(ba, test_size, 31);
    ASSERT_TRUE(mmbit_isset(ba, test_size, 31));
}
95
// set/isset on a 64-bit multibit, with bits chosen to straddle the 32-bit
// word boundary.
TEST(MultiBit, Set2) {
    const u32 test_size = 64;
    mmbit_holder ba(test_size);

    fill_mmbit(ba, test_size);
    mmbit_clear(ba, test_size);

    ASSERT_FALSE(mmbit_any(ba, test_size));

    // Everything must be off after a clear.
    for (u32 bit = 0; bit < test_size; bit++) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
    }

    // Switch on bit 0 and verify no other bit was disturbed.
    ASSERT_FALSE(mmbit_isset(ba, test_size, 0));
    mmbit_set(ba, test_size, 0);
    ASSERT_TRUE(mmbit_isset(ba, test_size, 0));
    for (u32 bit = 1; bit < test_size; bit++) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
    }

    ASSERT_TRUE(mmbit_any(ba, test_size));

    // Bits around and past the 32-bit boundary.
    for (u32 bit : {31u, 32u, 48u}) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
        mmbit_set(ba, test_size, bit);
        ASSERT_TRUE(mmbit_isset(ba, test_size, bit));
    }
}
130
// set/isset on a multi-level multibit (1030 bits), probing word boundaries
// and the last valid bit.
TEST(MultiBit, Set3) {
    const u32 test_size = 1030;
    mmbit_holder ba(test_size);

    fill_mmbit(ba, test_size);
    mmbit_clear(ba, test_size);

    ASSERT_FALSE(mmbit_any(ba, test_size));

    ASSERT_FALSE(mmbit_isset(ba, test_size, 0));
    mmbit_set(ba, test_size, 0);
    ASSERT_TRUE(mmbit_isset(ba, test_size, 0));

    ASSERT_TRUE(mmbit_any(ba, test_size));

    // Positions around word boundaries plus the final bit of the structure.
    for (u32 bit : {31u, 32u, 48u, 60u, 1029u}) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
        mmbit_set(ba, test_size, bit);
        ASSERT_TRUE(mmbit_isset(ba, test_size, bit));
    }
}
166
// Walk every bit of a 1025-bit multibit: set it, verify it's the only bit
// visible to the iterator, then unset it again.
TEST(MultiBit, Set4) {
    const u32 test_size = 1025;
    mmbit_holder ba(test_size);

    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any(ba, test_size));

    for (u32 bit = 0; bit < test_size; bit++) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
        mmbit_set(ba, test_size, bit);
        ASSERT_TRUE(mmbit_isset(ba, test_size, bit));

        // Iteration must find exactly this one bit.
        ASSERT_EQ(bit, mmbit_iterate(ba, test_size, MMB_INVALID));
        ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, bit));

        mmbit_unset(ba, test_size, bit);
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
    }
}
187
// As Set4, but on a large (33000-bit) multibit, sampling every 8th bit to
// keep the runtime reasonable.
TEST(MultiBit, Set5) {
    const u32 test_size = 33000;
    mmbit_holder ba(test_size);

    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any(ba, test_size));

    for (u32 bit = 0; bit < test_size; bit += 8) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
        mmbit_set(ba, test_size, bit);
        ASSERT_TRUE(mmbit_isset(ba, test_size, bit));

        // Iteration must find exactly this one bit.
        ASSERT_EQ(bit, mmbit_iterate(ba, test_size, MMB_INVALID));
        ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, bit));

        mmbit_unset(ba, test_size, bit);
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
    }
}
208
// Walk every bit of the smallest flat multibit variant (8 bits).
TEST(MultiBit, Set8) {
    const u32 test_size = 8;
    mmbit_holder ba(test_size);

    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any(ba, test_size));

    for (u32 bit = 0; bit < test_size; bit++) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
        mmbit_set(ba, test_size, bit);
        ASSERT_TRUE(mmbit_isset(ba, test_size, bit));

        // Iteration must find exactly this one bit.
        ASSERT_EQ(bit, mmbit_iterate(ba, test_size, MMB_INVALID));
        ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, bit));

        mmbit_unset(ba, test_size, bit);
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
    }
}
229
// Walk every bit of a 16-bit multibit.
TEST(MultiBit, Set16) {
    const u32 test_size = 16;
    mmbit_holder ba(test_size);

    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any(ba, test_size));

    for (u32 bit = 0; bit < test_size; bit++) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
        mmbit_set(ba, test_size, bit);
        ASSERT_TRUE(mmbit_isset(ba, test_size, bit));

        // Iteration must find exactly this one bit.
        ASSERT_EQ(bit, mmbit_iterate(ba, test_size, MMB_INVALID));
        ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, bit));

        mmbit_unset(ba, test_size, bit);
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
    }
}
250
251
// Iteration over a 32-bit multibit with every third bit on, then all on.
TEST(MultiBit, It1) {
    const u32 test_size = 32;
    mmbit_holder ba(test_size);

    mmbit_clear(ba, test_size);

    // Nothing to find in an empty multibit.
    ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, MMB_INVALID));

    // Every third bit on.
    for (u32 bit = 0; bit < test_size; bit += 3) {
        mmbit_set(ba, test_size, bit);
    }

    ASSERT_EQ(0U, mmbit_iterate(ba, test_size, MMB_INVALID));
    for (u32 bit = 0; bit < 29; bit++) {
        // The next on-bit after 'bit' is the next multiple of three.
        ASSERT_EQ(bit / 3 * 3 + 3, mmbit_iterate(ba, test_size, bit));
    }

    // Now with every bit on: iteration is dense and sequential.
    for (u32 bit = 0; bit < test_size; bit++) {
        mmbit_set(ba, test_size, bit);
    }

    ASSERT_EQ(0U, mmbit_iterate(ba, test_size, MMB_INVALID));
    for (u32 bit = 1; bit < test_size; bit++) {
        ASSERT_EQ(bit, mmbit_iterate(ba, test_size, bit - 1));
    }
}
278
// As It1, but on a 64-bit multibit.
TEST(MultiBit, It2) {
    const u32 test_size = 64;
    mmbit_holder ba(test_size);

    mmbit_clear(ba, test_size);

    // Nothing to find in an empty multibit.
    ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, MMB_INVALID));

    // Every third bit on.
    for (u32 bit = 0; bit < test_size; bit += 3) {
        mmbit_set(ba, test_size, bit);
    }

    ASSERT_EQ(0U, mmbit_iterate(ba, test_size, MMB_INVALID));
    for (u32 bit = 0; bit < 62; bit++) {
        // The next on-bit after 'bit' is the next multiple of three.
        ASSERT_EQ(bit / 3 * 3 + 3, mmbit_iterate(ba, test_size, bit));
    }

    // Now with every bit on: iteration is dense and sequential.
    for (u32 bit = 0; bit < test_size; bit++) {
        mmbit_set(ba, test_size, bit);
    }

    ASSERT_EQ(0U, mmbit_iterate(ba, test_size, MMB_INVALID));
    for (u32 bit = 1; bit < test_size; bit++) {
        ASSERT_EQ(bit, mmbit_iterate(ba, test_size, bit - 1));
    }
}
304
// Iteration over a multibit sitting flush against the end of its allocation:
// the holder is given 4 excess bytes and the whole region (including the
// excess) is dirtied with 0xff before clearing, to expose any out-of-bounds
// reads or writes past the end of the structure.
TEST(MultiBit, It3) {
    // Was 'const size_t'; use u32 like every other test in this file so the
    // value matches the width of the mmbit_* size parameters.
    const u32 test_size = 60;
    mmbit_holder ba(test_size, 4);

    // Dirty the multibit region and the 4 excess bytes.
    fill_n((u8 *)ba, mmbit_size(test_size) + 4, 0xff);

    mmbit_clear(ba, test_size);

    ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, MMB_INVALID));

    // Every third bit on.
    for (u32 i = 0; i < test_size; i += 3) {
        mmbit_set(ba, test_size, i);
    }

    ASSERT_EQ(0U, mmbit_iterate(ba, test_size, MMB_INVALID));
    for (u32 i = 0; i < test_size - 3; i++) {
        // Next on-bit after 'i' is the next multiple of three.
        ASSERT_EQ(i / 3 * 3 + 3, mmbit_iterate(ba, test_size, i));
    }

    // Now with every bit on.
    for (u32 i = 0; i < test_size; i++) {
        mmbit_set(ba, test_size, i);
    }

    ASSERT_EQ(0U, mmbit_iterate(ba, test_size, MMB_INVALID));
    for (u32 i = 1; i < test_size; i++) {
        ASSERT_EQ(i, mmbit_iterate(ba, test_size, i - 1));
    }
}
333
334 // We provide both test size and stride so that larger tests don't take forever
335 // checking every single key.
struct MultiBitTestParam {
    u32 size;   // number of bits in the multibit under test
    u32 stride; // step between keys exercised (keeps large sizes fast)
};
340
341 // Parameterized test case for bounded iterator, rather that propagating
342 // copypasta. Allocates space as given.
class MultiBitTest : public TestWithParam<MultiBitTestParam> {
protected:
    // Allocates storage for the parameterized size and pre-fills it with
    // 0xff, so tests cannot accidentally rely on zeroed memory.
    virtual void SetUp() {
        const MultiBitTestParam &p = GetParam();
        test_size = p.size;
        stride = p.stride;
        ba.init(test_size);
        // blast with ones for the lulz
        fill_mmbit(ba, test_size);
    }

    virtual void TearDown() {
    }

    u32 test_size; // number of bits in the multibit
    u32 stride; // stride to use for scans
    mmbit_holder ba; // multibit storage
};
361
// Bounded iteration with exactly one bit on: probe ranges before, around and
// after the bit.
TEST_P(MultiBitTest, BoundedIteratorSingle) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    for (u64a bit = 0; bit < test_size; bit += stride) {
        SCOPED_TRACE(bit);

        mmbit_clear(ba, test_size);
        mmbit_set(ba, test_size, bit);

        // The half-open range [bit, bit) is empty: nothing to find.
        ASSERT_EQ(MMB_INVALID, mmbit_iterate_bounded(ba, test_size, bit, bit));

        // A full-range scan locates the bit.
        ASSERT_EQ(bit, mmbit_iterate_bounded(ba, test_size, 0, test_size));

        // So do scans starting exactly at the bit, narrow or open-ended.
        ASSERT_EQ(bit, mmbit_iterate_bounded(ba, test_size, bit, bit + 1));
        ASSERT_EQ(bit, mmbit_iterate_bounded(ba, test_size, bit, test_size));

        // Scans strictly after the bit find nothing.
        if (bit != test_size - 1) {
            // Ordinary iterator.
            ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, bit));

            // Bounded iterator.
            ASSERT_EQ(MMB_INVALID,
                      mmbit_iterate_bounded(ba, test_size, bit + 1, test_size));
        }
    }
}
396
// Bounded iteration with every bit on.
TEST_P(MultiBitTest, BoundedIteratorAll) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // Switch everything on.
    fill_mmbit(ba, test_size);

    for (u64a bit = 0; bit < test_size; bit += stride) {
        // A non-empty scan over [0, bit) always finds bit zero.
        if (bit != 0) {
            ASSERT_EQ(0U, mmbit_iterate_bounded(ba, test_size, 0, bit));
        }
        // Scans starting at 'bit' find 'bit' itself.
        ASSERT_EQ(bit, mmbit_iterate_bounded(ba, test_size, bit, bit + 1));
        ASSERT_EQ(bit, mmbit_iterate_bounded(ba, test_size, bit, test_size));
    }
}
412
// Bounded iteration with every even-numbered bit on.
TEST_P(MultiBitTest, BoundedIteratorEven) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    mmbit_clear(ba, test_size);
    for (u64a bit = 0; bit < test_size; bit += 2) {
        mmbit_set(ba, test_size, bit);
    }

    // Round the stride up to even so 'i' below always lands on an on-bit.
    u32 even_stride = stride % 2 ? stride + 1 : stride;

    for (u64a i = 0; i < test_size; i += even_stride) {
        // 'i' is even, hence on: scans starting at i return i.
        ASSERT_EQ(i, mmbit_iterate_bounded(ba, test_size, i, test_size));
        ASSERT_EQ(i, mmbit_iterate_bounded(ba, test_size, i, i + 1));

        // 'i+1' is odd, hence off: [i+1, i+2) is empty of on-bits.
        if (i + 1 < test_size) {
            ASSERT_EQ(MMB_INVALID,
                      mmbit_iterate_bounded(ba, test_size, i + 1, i + 2));
        }

        // The first on-bit at or after i+1 is i+2.
        if (i + 2 < test_size) {
            ASSERT_EQ(i + 2,
                      mmbit_iterate_bounded(ba, test_size, i + 1, test_size));
        }
    }
}
443
// Bounded iteration with every odd-numbered bit on.
TEST_P(MultiBitTest, BoundedIteratorOdd) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    mmbit_clear(ba, test_size);
    for (u64a bit = 1; bit < test_size; bit += 2) {
        mmbit_set(ba, test_size, bit);
    }

    // Round the stride up to even so 'i' below always lands on an off-bit.
    u32 even_stride = stride % 2 ? stride + 1 : stride;

    for (u64a i = 0; i < test_size; i += even_stride) {
        // 'i' is even, hence off: the range [i, i+1) is empty of on-bits.
        ASSERT_EQ(MMB_INVALID, mmbit_iterate_bounded(ba, test_size, i, i + 1));

        if (i + 1 < test_size) {
            // The odd bit i+1 is on; scans starting at i or i+1 — whether
            // narrow or running to the end — all report it.
            ASSERT_EQ(i + 1, mmbit_iterate_bounded(ba, test_size, i, test_size));
            ASSERT_EQ(i + 1, mmbit_iterate_bounded(ba, test_size, i + 1, i + 2));
            ASSERT_EQ(i + 1,
                      mmbit_iterate_bounded(ba, test_size, i + 1, test_size));
        }
    }
}
476
// mmbit_set's return value reports the bit's previous state; verify that
// contract across set/re-set/unset/set cycles.
TEST_P(MultiBitTest, Set) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any(ba, test_size));

    for (u64a bit = 0; bit < test_size; bit += stride) {
        SCOPED_TRACE(bit);

        // Setting a bit that was off returns false.
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
        ASSERT_FALSE(mmbit_set(ba, test_size, bit));
        ASSERT_TRUE(mmbit_isset(ba, test_size, bit));

        // Setting it again returns true.
        ASSERT_TRUE(mmbit_set(ba, test_size, bit));

        // Clear it and make sure it's gone.
        mmbit_unset(ba, test_size, bit);
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));

        // After an unset, a set reports it as previously off again.
        ASSERT_FALSE(mmbit_set(ba, test_size, bit));
    }
}
503
// With a single bit on, iteration finds that bit and then terminates.
TEST_P(MultiBitTest, Iter) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    mmbit_clear(ba, test_size);
    ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, MMB_INVALID));

    for (u64a bit = 0; bit < test_size; bit += stride) {
        SCOPED_TRACE(bit);
        mmbit_clear(ba, test_size);
        mmbit_set(ba, test_size, bit);
        ASSERT_EQ(bit, mmbit_iterate(ba, test_size, MMB_INVALID));
        ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, bit));
    }
}
519
// Iteration visits every set bit in ascending order.
TEST_P(MultiBitTest, IterAll) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    mmbit_clear(ba, test_size);
    ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, MMB_INVALID));

    // Set a bit at every stride point.
    for (u64a bit = 0; bit < test_size; bit += stride) {
        mmbit_set(ba, test_size, bit);
    }

    // Walk the iterator and check each bit comes back in turn.
    u32 prev = MMB_INVALID;
    for (u64a bit = 0; bit < test_size; bit += stride) {
        ASSERT_EQ(bit, mmbit_iterate(ba, test_size, prev));
        prev = bit;
    }
}
539
// mmbit_any_precise: false on an empty multibit, true once any one bit is on.
TEST_P(MultiBitTest, AnyPrecise) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any_precise(ba, test_size));

    for (u64a bit = 0; bit < test_size; bit += stride) {
        SCOPED_TRACE(bit);
        mmbit_clear(ba, test_size);
        mmbit_set(ba, test_size, bit);
        ASSERT_TRUE(mmbit_any_precise(ba, test_size));
    }
}
554
// mmbit_any: false on an empty multibit, true once any one bit is on.
TEST_P(MultiBitTest, Any) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any(ba, test_size));

    for (u64a bit = 0; bit < test_size; bit += stride) {
        SCOPED_TRACE(bit);
        mmbit_clear(ba, test_size);
        mmbit_set(ba, test_size, bit);
        ASSERT_TRUE(mmbit_any(ba, test_size));
    }
}
569
// mmbit_all is only true once every bit is on.
TEST_P(MultiBitTest, All) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_all(ba, test_size));

    // While the final bit remains off, mmbit_all must stay false.
    for (u64a bit = 0; bit < test_size - 1; bit += stride) {
        SCOPED_TRACE(bit);
        mmbit_set(ba, test_size, bit);
        ASSERT_FALSE(mmbit_all(ba, test_size));
    }

    // Set all bits.
    fill_mmbit(ba, test_size);
    ASSERT_TRUE(mmbit_all(ba, test_size));
}
587
// A width-one mmbit_unset_range clears exactly the targeted bit.
TEST_P(MultiBitTest, UnsetRange1) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // Start with everything on.
    fill_mmbit(ba, test_size);

    for (u64a bit = 0; bit < test_size; bit += stride) {
        SCOPED_TRACE(bit);
        ASSERT_TRUE(mmbit_isset(ba, test_size, bit));
        mmbit_unset_range(ba, test_size, bit, bit + 1);
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
    }
}
603
// A single full-width mmbit_unset_range clears the whole structure.
TEST_P(MultiBitTest, UnsetRange2) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // XXX: slow on large cases, so we skip those.
    if (stride > 1) {
        return;
    }

    // Start with everything on, then unset the whole range in one call.
    fill_mmbit(ba, test_size);
    mmbit_unset_range(ba, test_size, 0, test_size);

    // Every bit should now be off.
    for (u64a bit = 0; bit < test_size; bit += stride) {
        SCOPED_TRACE(bit);
        ASSERT_FALSE(mmbit_isset(ba, test_size, bit));
    }
}
624
// Use mmbit_unset_range to switch off bits in chunks of 3, checking that
// the neighbouring bits on either side of each chunk survive.
TEST_P(MultiBitTest, UnsetRange3) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // 'test_size - 3' below is computed in u32 and would wrap around for a
    // multibit smaller than 3 bits, turning the loop bound into a huge value
    // and driving out-of-range accesses. Guard against that.
    if (test_size < 3) {
        return;
    }

    // Use mmbit_unset_range to switch off bits in chunks of 3.
    for (u64a i = 0; i < test_size - 3; i += stride) {
        // Switch on the bit before, the bits in question, and the bit after.
        if (i > 0) {
            mmbit_set(ba, test_size, i - 1);
        }
        for (u64a j = i; j < min(i + 4, (u64a)test_size); j++) {
            mmbit_set(ba, test_size, j);
        }

        // Switch off the chunk.
        mmbit_unset_range(ba, test_size, i, i + 3);

        ASSERT_FALSE(mmbit_isset(ba, test_size, i));
        ASSERT_FALSE(mmbit_isset(ba, test_size, i + 1));
        ASSERT_FALSE(mmbit_isset(ba, test_size, i + 2));

        // Bits on either side should be intact.
        if (i > 0) {
            ASSERT_TRUE(mmbit_isset(ba, test_size, i - 1));
        }
        if (i + 3 < test_size) {
            ASSERT_TRUE(mmbit_isset(ba, test_size, i + 3));
        }
    }
}
655
// mmbit_init_range over the full range switches every bit on.
TEST_P(MultiBitTest, InitRangeAll) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    mmbit_init_range(ba, test_size, 0, test_size);

    // Sample bits at each stride point; all must be on.
    for (u64a bit = 0; bit < test_size; bit += stride) {
        SCOPED_TRACE(bit);
        ASSERT_TRUE(mmbit_isset(ba, test_size, bit));
    }
}
669
TEST_P(MultiBitTest, InitRangeNone) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // Use mmbit_init_range with an empty range: no bits should come on.
    mmbit_init_range(ba, test_size, 0, 0);

    // Make sure nothing's set.
    ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, MMB_INVALID));
}
680
// mmbit_init_range with a single-bit range turns on exactly that bit.
TEST_P(MultiBitTest, InitRangeOne) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    for (u64a bit = 0; bit < test_size; bit += stride) {
        mmbit_init_range(ba, test_size, bit, bit + 1);

        // Only 'bit' should be on.
        ASSERT_EQ(bit, mmbit_iterate(ba, test_size, MMB_INVALID));
        ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, bit));
    }
}
693
// Initialise the multibit range-by-range, splitting it into n equal chunks
// for n = 2..10, and verify each init produces exactly that chunk's bits.
// NOTE(review): for sizes not divisible by n, bits above n*chunk_size are
// never part of any chunk — presumably an accepted coverage gap; confirm.
TEST_P(MultiBitTest, InitRangeChunked) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // Init ranges chunk by chunk.

    for (u32 n = 2; n <= 10; n++) {
        u32 chunk_size = test_size / n;
        if (chunk_size == 0) {
            break;
        }

        for (u32 k = 0; k < n; k++) {
            u32 chunk_begin = k * chunk_size;
            u32 chunk_end = min(test_size, (k + 1) * chunk_size);

            // Each call re-initialises the whole structure to this range.
            mmbit_init_range(ba, test_size, chunk_begin, chunk_end);

            // First bit set should be chunk_begin.
            ASSERT_EQ(chunk_begin, mmbit_iterate(ba, test_size, MMB_INVALID));

            // All bits in the chunk should be on.
            for (u64a i = chunk_begin; i < chunk_end; i += stride) {
                SCOPED_TRACE(i);
                ASSERT_TRUE(mmbit_isset(ba, test_size, i));
            }

            // Last bit on is chunk_end - 1.
            if (chunk_end) {
                ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, chunk_end - 1));
            }
        }
    }
}
728
729 static
apply(const scatter_plan_raw & sp,u8 * out)730 void apply(const scatter_plan_raw &sp, u8 *out) {
731 for (const auto &e : sp.p_u64a) {
732 memcpy(out + e.offset, &e.val, sizeof(e.val));
733 }
734 for (const auto &e : sp.p_u32) {
735 memcpy(out + e.offset, &e.val, sizeof(e.val));
736 }
737 for (const auto &e : sp.p_u16) {
738 memcpy(out + e.offset, &e.val, sizeof(e.val));
739 }
740 for (const auto &e : sp.p_u8) {
741 memcpy(out + e.offset, &e.val, sizeof(e.val));
742 }
743 }
744
// As InitRangeChunked, but the range initialisation is done by building a
// scatter plan (mmbBuildInitRangePlan) and applying it over deliberately
// dirtied (0xaa) memory, checking the plan reproduces mmbit_init_range.
TEST_P(MultiBitTest, InitRangePlanChunked) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // Init ranges chunk by chunk.

    for (u32 n = 2; n <= 10; n++) {
        u32 chunk_size = test_size / n;
        if (chunk_size == 0) {
            break;
        }

        for (u32 k = 0; k < n; k++) {
            u32 chunk_begin = k * chunk_size;
            u32 chunk_end = min(test_size, (k + 1) * chunk_size);

            scatter_plan_raw sp;
            mmbBuildInitRangePlan(test_size, chunk_begin, chunk_end, &sp);
            // Dirty the region first so the plan must write everything the
            // structure needs, not rely on pre-existing zeroes.
            memset(ba, 0xaa, mmbit_size(test_size));
            apply(sp, ba);

            // First bit set should be chunk_begin.
            ASSERT_EQ(chunk_begin, mmbit_iterate(ba, test_size, MMB_INVALID));

            // All bits in the chunk should be on.
            for (u64a i = chunk_begin; i < chunk_end; i += stride) {
                SCOPED_TRACE(i);
                ASSERT_TRUE(mmbit_isset(ba, test_size, i));
            }

            // Last bit on is chunk_end - 1.
            if (chunk_end) {
                ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, chunk_end - 1));
            }
        }
    }
}
782
// Sparse iterator begin() on a small multibit: it must always report the
// lowest set bit among the iterator's keys, with the matching key index.
TEST(MultiBit, SparseIteratorBegin1) {
    const u32 test_size = 100;
    const vector<u32> bits = {1, 5, 27, 35, 68};

    auto it = mmbBuildSparseIterator(bits, test_size);
    //ASSERT_EQ(4U, it.size());

    // Trivial initial test: all bits in 'bits' are on, all others are off
    mmbit_holder ba(test_size);
    mmbit_clear(ba, test_size);
    for (u32 bit : bits) {
        mmbit_set(ba, test_size, bit);
    }

    vector<mmbit_sparse_state> state(mmbit_sparse_iter_state_size(test_size));

    // Peel bits off the front one at a time.
    for (u32 i = 0; i < bits.size(); i++) {
        u32 val, idx = 0xffffffff;
        val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
        mmbit_unset(ba, test_size, bits[i]);
        ASSERT_FALSE(mmbit_isset(ba, test_size, bits[i]));
    }

    // All clear, begin should be invalid
    u32 val, idx = 0xffffffff;
    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(MMB_INVALID, val);
}
819
// As SparseIteratorBegin1, on a large multibit with bits spread across
// several blocks.
TEST(MultiBit, SparseIteratorBegin2) {
    const u32 test_size = 40000;
    const vector<u32> bits = {1, 256, 257, 1024, 8920, 37000};

    auto it = mmbBuildSparseIterator(bits, test_size);
    //ASSERT_EQ(12U, it.size());

    // Trivial initial test: all bits in 'bits' are on, all others are off
    mmbit_holder ba(test_size);
    mmbit_clear(ba, test_size);
    for (u32 bit : bits) {
        mmbit_set(ba, test_size, bit);
    }

    vector<mmbit_sparse_state> state(mmbit_sparse_iter_state_size(test_size));

    // Peel bits off the front one at a time.
    for (u32 i = 0; i < bits.size(); i++) {
        u32 val, idx = 0xffffffff;
        val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
        mmbit_unset(ba, test_size, bits[i]);
        ASSERT_FALSE(mmbit_isset(ba, test_size, bits[i]));
    }

    // All clear, begin should be invalid
    u32 val, idx = 0xffffffff;
    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(MMB_INVALID, val);
}
857
// Sparse iterator begin()/next() walk over a small multibit in three
// configurations: only the keys set, all bits set, only the last half set.
TEST(MultiBit, SparseIteratorNext1) {
    const u32 test_size = 100;
    const vector<u32> bits = {1, 5, 27, 35, 68};

    auto it = mmbBuildSparseIterator(bits, test_size);

    // Phase 1: exactly the iterator's own bits are on.
    mmbit_holder ba(test_size);
    mmbit_clear(ba, test_size);
    for (u32 bit : bits) {
        mmbit_set(ba, test_size, bit);
    }

    vector<mmbit_sparse_state> state(mmbit_sparse_iter_state_size(test_size));
    u32 val, idx = 0xffffffff;
    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(bits[0], val);
    ASSERT_EQ(0U, idx);

    for (u32 i = 1; i < bits.size(); i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
    }

    // Phase 2: every bit on; the iterator must still visit only its keys.
    for (u32 bit = 0; bit < test_size; bit++) {
        mmbit_set(ba, test_size, bit);
    }

    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(bits[0], val);
    ASSERT_EQ(0U, idx);

    for (u32 i = 1; i < bits.size(); i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
    }

    // Phase 3: only the last half of the keys are on.
    mmbit_clear(ba, test_size);
    size_t last_half = bits.size() / 2;
    for (size_t i = last_half; i < bits.size(); i++) {
        mmbit_set(ba, test_size, bits[i]);
    }

    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(bits[last_half], val);
    ASSERT_EQ(last_half, idx);

    for (u32 i = last_half + 1; i < bits.size(); i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
    }
}
921
// As SparseIteratorNext1, on a large multibit with keys clustered around
// block boundaries.
TEST(MultiBit, SparseIteratorNext2) {
    const u32 test_size = 40000;
    const vector<u32> bits = {1,    256,  257,  1024, 1025,  4095,
                              4096, 4097, 8920, 37000, 39999};

    auto it = mmbBuildSparseIterator(bits, test_size);

    // Phase 1: exactly the iterator's own bits are on.
    mmbit_holder ba(test_size);
    mmbit_clear(ba, test_size);
    for (u32 bit : bits) {
        mmbit_set(ba, test_size, bit);
    }

    vector<mmbit_sparse_state> state(mmbit_sparse_iter_state_size(test_size));
    u32 val, idx = 0xffffffff;
    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(bits[0], val);
    ASSERT_EQ(0U, idx);

    for (u32 i = 1; i < bits.size(); i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
    }

    // Phase 2: every bit on; the iterator must still visit only its keys.
    for (u32 bit = 0; bit < test_size; bit++) {
        mmbit_set(ba, test_size, bit);
    }

    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(bits[0], val);
    ASSERT_EQ(0U, idx);

    for (u32 i = 1; i < bits.size(); i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
    }

    // Phase 3: only the last half of the keys are on.
    mmbit_clear(ba, test_size);
    size_t last_half = bits.size() / 2;
    for (size_t i = last_half; i < bits.size(); i++) {
        mmbit_set(ba, test_size, bits[i]);
    }

    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(bits[last_half], val);
    ASSERT_EQ(last_half, idx);

    for (u32 i = last_half + 1; i < bits.size(); i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
    }
}
991
// As SparseIteratorNext1, on a tiny (15-bit, single-word) multibit.
TEST(MultiBit, SparseIteratorNextSmall) {
    const u32 test_size = 15;
    const vector<u32> bits = {1, 3, 6, 7, 12, 14};

    auto it = mmbBuildSparseIterator(bits, test_size);

    // Phase 1: exactly the iterator's own bits are on.
    mmbit_holder ba(test_size);
    mmbit_clear(ba, test_size);
    for (u32 bit : bits) {
        mmbit_set(ba, test_size, bit);
    }

    vector<mmbit_sparse_state> state(mmbit_sparse_iter_state_size(test_size));
    u32 val, idx = 0xffffffff;
    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(bits[0], val);
    ASSERT_EQ(0U, idx);

    for (u32 i = 1; i < bits.size(); i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
    }

    // Phase 2: every bit on; the iterator must still visit only its keys.
    for (u32 bit = 0; bit < test_size; bit++) {
        mmbit_set(ba, test_size, bit);
    }

    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(bits[0], val);
    ASSERT_EQ(0U, idx);

    for (u32 i = 1; i < bits.size(); i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
    }

    // Phase 3: only the last half of the keys are on.
    mmbit_clear(ba, test_size);
    size_t last_half = bits.size() / 2;
    for (size_t i = last_half; i < bits.size(); i++) {
        mmbit_set(ba, test_size, bits[i]);
    }

    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(bits[last_half], val);
    ASSERT_EQ(last_half, idx);

    for (u32 i = last_half + 1; i < bits.size(); i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
    }
}
1056
// Parameterized begin() test: with all bits on, repeatedly clear the first
// reported bit; begin() must then report the following key.
TEST_P(MultiBitTest, SparseIteratorBeginAll) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // Build a sparse iterator over every stride-th bit.
    vector<u32> bits;
    bits.reserve(test_size / stride);
    for (u64a bit = 0; bit < test_size; bit += stride) {
        bits.push_back(bit);
    }
    auto it = mmbBuildSparseIterator(bits, test_size);

    // Switch all bits on in state.
    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any(ba, test_size));
    fill_mmbit(ba, test_size);

    vector<mmbit_sparse_state> state(mmbit_sparse_iter_state_size(test_size));
    u32 val, idx = 0xffffffff;

    for (size_t i = 0, num_keys = bits.size(); i < num_keys; i++) {
        val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
        ASSERT_EQ(bits[i], val);
        ASSERT_EQ(i, idx);
        mmbit_unset(ba, test_size, bits[i]);
    }

    // Cleared all bits, iterator should return invalid.
    val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
    ASSERT_EQ(MMB_INVALID, val);
}
1090
TEST_P(MultiBitTest, SparseIteratorBeginThirds) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // XXX: only run on stride=1 cases for speed.
    if (stride > 1) {
        return;
    }

    // Sparse iterator over every key in the multibit.
    vector<u32> keys(test_size);
    for (u32 k = 0; k < test_size; k++) {
        keys[k] = k;
    }
    auto iter = mmbBuildSparseIterator(keys, test_size);

    // Switch on every third bit in the state.
    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any(ba, test_size));
    for (u64a k = 0; k < test_size; k += 3) {
        mmbit_set(ba, test_size, k);
    }

    // Begin should find bit zero; after clearing each leading on-bit, the
    // next begin should report the following multiple of three.
    vector<mmbit_sparse_state> istate(mmbit_sparse_iter_state_size(test_size));
    u32 idx = 0xffffffff;
    u32 val = mmbit_sparse_iter_begin(ba, test_size, &idx, &iter[0],
                                      &istate[0]);
    ASSERT_EQ(0U, val);
    ASSERT_EQ(0U, idx);

    for (u64a k = 0; k + 3 < test_size; k += 3) {
        mmbit_unset(ba, test_size, k);
        val = mmbit_sparse_iter_begin(ba, test_size, &idx, &iter[0],
                                      &istate[0]);
        ASSERT_EQ(k + 3, val);
        ASSERT_EQ(k + 3, idx);
    }
}
1129
TEST_P(MultiBitTest, SparseIteratorNextAll) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // Build a sparse iterator covering one key per stride position.
    vector<u32> keys;
    keys.reserve(test_size / stride);
    for (u64a k = 0; k < test_size; k += stride) {
        keys.push_back(k);
    }
    auto iter = mmbBuildSparseIterator(keys, test_size);

    // Turn every bit on in the state.
    mmbit_clear(ba, test_size);
    ASSERT_FALSE(mmbit_any(ba, test_size));
    fill_mmbit(ba, test_size);

    // Walk the whole iterator with begin/next; keys must come back in order.
    vector<mmbit_sparse_state> istate(mmbit_sparse_iter_state_size(test_size));
    u32 idx = 0xffffffff;
    u32 val = mmbit_sparse_iter_begin(ba, test_size, &idx, &iter[0],
                                      &istate[0]);
    ASSERT_EQ(0U, val);
    ASSERT_EQ(0U, idx);

    const u32 num_keys = keys.size();
    for (u32 i = 1; i < num_keys; i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &iter[0],
                                     &istate[0]);
        ASSERT_EQ(keys[i], val);
        ASSERT_EQ(i, idx);
    }

    // Stepping past the final key must return the invalid sentinel.
    val = mmbit_sparse_iter_next(ba, test_size, keys.back(), &idx, &iter[0],
                                 &istate[0]);
    ASSERT_EQ(MMB_INVALID, val);
}
1164
TEST_P(MultiBitTest, SparseIteratorNextExactStrided) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    if (stride == 1) {
        // This test is the same as SparseIteratorNextAll for stride 1
        return;
    }

    // Collect one key per stride position, switching each on in the state as
    // we go, then build the sparse iterator over exactly those keys.
    mmbit_clear(ba, test_size);
    vector<u32> keys;
    keys.reserve(test_size / stride);
    for (u64a k = 0; k < test_size; k += stride) {
        keys.push_back(k);
        mmbit_set(ba, test_size, k);
    }
    auto iter = mmbBuildSparseIterator(keys, test_size);

    // Walk the whole iterator with begin/next; keys must come back in order.
    vector<mmbit_sparse_state> istate(mmbit_sparse_iter_state_size(test_size));
    u32 idx = 0xffffffff;
    u32 val = mmbit_sparse_iter_begin(ba, test_size, &idx, &iter[0],
                                      &istate[0]);
    ASSERT_EQ(0U, val);
    ASSERT_EQ(0U, idx);

    const u32 num_keys = keys.size();
    for (u32 i = 1; i < num_keys; i++) {
        val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &iter[0],
                                     &istate[0]);
        ASSERT_EQ(keys[i], val);
        ASSERT_EQ(i, idx);
    }

    // Stepping past the final key must return the invalid sentinel.
    val = mmbit_sparse_iter_next(ba, test_size, keys.back(), &idx, &iter[0],
                                 &istate[0]);
    ASSERT_EQ(MMB_INVALID, val);
}
1202
TEST_P(MultiBitTest, SparseIteratorNextNone) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // Build a sparse iterator covering one key per stride position.
    vector<u32> keys;
    keys.reserve(test_size / stride);
    for (u64a k = 0; k < test_size; k += stride) {
        keys.push_back(k);
    }
    auto iter = mmbBuildSparseIterator(keys, test_size);

    // Only bit zero is on in the state.
    mmbit_clear(ba, test_size);
    mmbit_set(ba, test_size, 0);

    // Begin must locate bit zero...
    vector<mmbit_sparse_state> istate(mmbit_sparse_iter_state_size(test_size));
    u32 idx = 0xffffffff;
    u32 val = mmbit_sparse_iter_begin(ba, test_size, &idx, &iter[0],
                                      &istate[0]);
    ASSERT_EQ(0U, val);
    ASSERT_EQ(0U, idx);

    // ...and next must immediately report the invalid sentinel.
    val = mmbit_sparse_iter_next(ba, test_size, val, &idx, &iter[0],
                                 &istate[0]);
    ASSERT_EQ(MMB_INVALID, val);
}
1230
TEST_P(MultiBitTest, SparseIteratorUnsetAll) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // Build a sparse iterator covering one key per stride position.
    vector<u32> keys;
    keys.reserve(test_size / stride);
    for (u64a k = 0; k < test_size; k += stride) {
        keys.push_back(k);
    }
    auto iter = mmbBuildSparseIterator(keys, test_size);

    // Turn every bit on in the state.
    mmbit_clear(ba, test_size);
    fill_mmbit(ba, test_size);

    // Unset through the iterator, which should clear every key it covers.
    struct mmbit_sparse_state istate[MAX_SPARSE_ITER_STATES];
    mmbit_sparse_iter_unset(ba, test_size, &iter[0], istate);

    // Each iterator key must now be off.
    for (const u32 key : keys) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, key));
    }
}
1256
TEST_P(MultiBitTest, SparseIteratorUnsetHalves) {
    SCOPED_TRACE(test_size);
    ASSERT_TRUE(ba != nullptr);

    // TODO: write useful striding version of this test.
    if (stride > 1) {
        return;
    }

    // Two sparse iterators: one over the even keys, one over the odd keys.
    vector<u32> even, odd;
    for (u64a k = 0; k < test_size; k += 2) {
        even.push_back(k);
    }
    for (u64a k = 1; k < test_size; k += 2) {
        odd.push_back(k);
    }

    auto it_even = mmbBuildSparseIterator(even, test_size);
    auto it_odd = mmbBuildSparseIterator(odd, test_size);

    // Turn every bit on, checking each as we go.
    mmbit_clear(ba, test_size);
    for (u32 k = 0; k < test_size; k++) {
        mmbit_set(ba, test_size, k);
        ASSERT_TRUE(mmbit_isset(ba, test_size, k));
    }

    // Unset through the even iterator, clearing all even bits.
    struct mmbit_sparse_state istate[MAX_SPARSE_ITER_STATES];
    mmbit_sparse_iter_unset(ba, test_size, &it_even[0], istate);

    // Even bits must be off; odd bits must remain on.
    for (u32 k = 0; k < test_size; k++) {
        if (k % 2) {
            ASSERT_TRUE(mmbit_isset(ba, test_size, k));
        } else {
            ASSERT_FALSE(mmbit_isset(ba, test_size, k));
        }
    }

    // Unset through the odd iterator, clearing the remaining bits.
    mmbit_sparse_iter_unset(ba, test_size, &it_odd[0], istate);

    // Nothing should be left on.
    for (u32 k = 0; k < test_size; k++) {
        ASSERT_FALSE(mmbit_isset(ba, test_size, k));
    }
}
1306
// Parameter table for all MultiBitTest cases: each entry is
// { test_size (number of bits), stride (gap between tested keys) }.
static const MultiBitTestParam multibitTests[] = {
    // We provide both test size and stride so that larger tests don't take
    // forever checking every single key.

    // Small cases, stride 1.
    { 4, 1 },
    { 7, 1 },
    { 8, 1 },
    { 13, 1 },
    { 16, 1 },
    { 17, 1 },
    { 32, 1 },
    { 33, 1 },
    { 57, 1 },
    { 64, 1 },
    { 65, 1 },
    { 100, 1 },
    { 128, 1 },
    { 200, 1 },
    { 256, 1 },
    { 302, 1 },
    { 1024, 1 },
    { 1025, 1 },
    { 2099, 1 },
    { 10000, 1 },
    { 32768, 1 },
    { 32769, 1 },
    { 200000, 1 },

    // Larger cases, bigger strides.
    // NOTE(review): strides here look like primes (3701, 7919, ...),
    // presumably to avoid aligning with the multibit's internal block
    // boundaries — not stated in this file, confirm against multibit.h.
    { 1U << 18, 3701 },
    { 1U << 19, 3701 },
    { 1U << 20, 3701 },
    { 1U << 21, 3701 },
    { 1U << 22, 3701 },
    { 1U << 23, 3701 },
    { 1U << 24, 3701 },
    { 1U << 25, 3701 },
    { 1U << 26, 3701 },
    { 1U << 27, 7919 },
    { 1U << 28, 15073 },
    { 1U << 29, 24413 },
    { 1U << 30, 50377 },
    { 1U << 31, 104729 },
};

// Instantiate every MultiBitTest TEST_P above once per entry in the table.
INSTANTIATE_TEST_CASE_P(MultiBit, MultiBitTest, ValuesIn(multibitTests));
1354
TEST(MultiBit, SizeTooBig) {
    // The largest supported multibit size must be accepted...
    ASSERT_NO_THROW(mmbit_size(MMB_MAX_BITS));
    // ...while one bit past the maximum must be rejected with a resource
    // limit error.
    ASSERT_THROW(mmbit_size(MMB_MAX_BITS + 1), ResourceLimitError);
}
1359