// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdint.h>

#include <algorithm>
#include <array>
#include <functional>
#include <map>
#include <numeric>
#include <random>
#include <set>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "absl/base/internal/raw_logging.h"
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/btree_test.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/internal/hashtable_debug.h"
#include "absl/flags/flag.h"
#include "absl/hash/hash.h"
#include "absl/memory/memory.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "benchmark/benchmark.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {

constexpr size_t kBenchmarkValues = 1 << 20;

// How many times we add and remove sub-batches in one batch of *AddRem
// benchmarks.
constexpr size_t kAddRemBatchSize = 1 << 2;
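// With kAddRemBatchSize == 4, each KeepRunningBatch() round in the *AddRem
// benchmarks performs four shuffled erase/insert passes, pausing the timer
// between passes.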

// Generates n values in the range [0, 4 * n].
template <typename V>
std::vector<V> GenerateValues(int n) {
  constexpr int kSeed = 23;
  return GenerateValuesWithSeed<V>(n, 4 * n, kSeed);
}

// Benchmark insertion of values into a container.
template <typename T>
void BM_InsertImpl(benchmark::State& state, bool sorted) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  if (sorted) {
    std::sort(values.begin(), values.end());
  }
  T container(values.begin(), values.end());

  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
    state.PauseTiming();
    const auto i = static_cast<int>(state.iterations());

    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.erase(key_of_value(values[x]));
    }

    state.ResumeTiming();

    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.insert(values[x]);
    }
  }
}

template <typename T>
void BM_Insert(benchmark::State& state) {
  BM_InsertImpl<T>(state, false);
}

template <typename T>
void BM_InsertSorted(benchmark::State& state) {
  BM_InsertImpl<T>(state, true);
}

// container::insert sometimes returns a pair<iterator, bool> and sometimes
// returns an iterator (for multi- containers).
template <typename Iter>
Iter GetIterFromInsert(const std::pair<Iter, bool>& pair) {
  return pair.first;
}
template <typename Iter>
Iter GetIterFromInsert(const Iter iter) {
  return iter;
}
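// For example, std::set<int>::insert returns std::pair<iterator, bool>, while
// std::multiset<int>::insert returns a plain iterator; each resolves to the
// matching overload above.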

// Benchmark insertion of values into a container at the end.
template <typename T>
void BM_InsertEnd(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  T container;
  const int kSize = 10000;
  for (int i = 0; i < kSize; ++i) {
    container.insert(Generator<V>(kSize)(i));
  }
  V v = Generator<V>(kSize)(kSize - 1);
  typename T::key_type k = key_of_value(v);

  auto it = container.find(k);
  while (state.KeepRunning()) {
    // Repeatedly removing then adding v.
    container.erase(it);
    it = GetIterFromInsert(container.insert(v));
  }
}

// Benchmark inserting the first few elements in a container. In a b-tree,
// this is when the root node grows.
template <typename T>
void BM_InsertSmall(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;

  const int kSize = 8;
  std::vector<V> values = GenerateValues<V>(kSize);
  T container;

  while (state.KeepRunningBatch(kSize)) {
    for (int i = 0; i < kSize; ++i) {
      benchmark::DoNotOptimize(container.insert(values[i]));
    }
    state.PauseTiming();
    // Do not measure the time it takes to clear the container.
    container.clear();
    state.ResumeTiming();
  }
}

template <typename T>
void BM_LookupImpl(benchmark::State& state, bool sorted) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  if (sorted) {
    std::sort(values.begin(), values.end());
  }
  T container(values.begin(), values.end());

  while (state.KeepRunning()) {
    int idx = state.iterations() % kBenchmarkValues;
    benchmark::DoNotOptimize(container.find(key_of_value(values[idx])));
  }
}

// Benchmark lookup of values in a container.
template <typename T>
void BM_Lookup(benchmark::State& state) {
  BM_LookupImpl<T>(state, false);
}

// Benchmark lookup of values in a full container, meaning that values
// are inserted in-order to take advantage of biased insertion, which
// yields a full tree.
template <typename T>
void BM_FullLookup(benchmark::State& state) {
  BM_LookupImpl<T>(state, true);
}

// Benchmark deletion of values from a container.
template <typename T>
void BM_Delete(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());

  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
    const int i = state.iterations();

    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.erase(key_of_value(values[x]));
    }

    state.PauseTiming();
    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.insert(values[x]);
    }
    state.ResumeTiming();
  }
}

// Benchmark deletion of multiple values from a container.
template <typename T>
void BM_DeleteRange(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());

  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
    const int i = state.iterations();

    const int start_index = i % kBenchmarkValues;

    state.PauseTiming();
    {
      std::vector<V> removed;
      removed.reserve(batch_size);
      auto itr = container.find(key_of_value(values[start_index]));
      auto start = itr;
      for (int j = 0; j < batch_size; j++) {
        if (itr == container.end()) {
          state.ResumeTiming();
          container.erase(start, itr);
          state.PauseTiming();
          itr = container.begin();
          start = itr;
        }
        removed.push_back(*itr++);
      }

      state.ResumeTiming();
      container.erase(start, itr);
      state.PauseTiming();

      container.insert(removed.begin(), removed.end());
    }
    state.ResumeTiming();
  }
}

// Benchmark steady-state insert (into the first half of the range) and remove
// (from the second half of the range), treating the container approximately
// like a queue with log-time access to all elements. This benchmark does not
// test the case where insertion and removal happen in the same region of the
// tree. This benchmark counts two value constructors per iteration.
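// Concretely, each timed iteration calls the generator twice: once to
// construct the value whose key is erased and once to construct the value
// being inserted, so value construction is included in the measured time.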
template <typename T>
void BM_QueueAddRem(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");

  T container;

  const size_t half = kBenchmarkValues / 2;
  std::vector<int> remove_keys(half);
  std::vector<int> add_keys(half);

  // We want to do the exact same work repeatedly, and the benchmark can end
  // after a different number of iterations depending on the speed of the
  // individual run, so we use a large batch size here and ensure that we do
  // deterministic work every batch.
  while (state.KeepRunningBatch(half * kAddRemBatchSize)) {
    state.PauseTiming();

    container.clear();

    for (size_t i = 0; i < half; ++i) {
      remove_keys[i] = i;
      add_keys[i] = i;
    }
    constexpr int kSeed = 5;
    std::mt19937_64 rand(kSeed);
    std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
    std::shuffle(add_keys.begin(), add_keys.end(), rand);

    // Note: this needs lazy generation of values since the index range is
    // large.
    Generator<V> g(kBenchmarkValues * kAddRemBatchSize);

    for (size_t i = 0; i < half; ++i) {
      container.insert(g(add_keys[i]));
      container.insert(g(half + remove_keys[i]));
    }

    // There are three parts each of size "half":
    // 1 is being deleted from  [offset - half, offset)
    // 2 is standing            [offset, offset + half)
    // 3 is being inserted into [offset + half, offset + 2 * half)
    size_t offset = 0;

    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
      std::shuffle(add_keys.begin(), add_keys.end(), rand);
      offset += half;

      state.ResumeTiming();
      for (size_t idx = 0; idx < half; ++idx) {
        container.erase(key_of_value(g(offset - half + remove_keys[idx])));
        container.insert(g(offset + half + add_keys[idx]));
      }
      state.PauseTiming();
    }
    state.ResumeTiming();
  }
}

// Mixed insertion and deletion in the same range using pre-constructed values.
template <typename T>
void BM_MixedAddRem(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");

  T container;

  // Create two random shuffles.
  std::vector<int> remove_keys(kBenchmarkValues);
  std::vector<int> add_keys(kBenchmarkValues);

  // We want to do the exact same work repeatedly, and the benchmark can end
  // after a different number of iterations depending on the speed of the
  // individual run, so we use a large batch size here and ensure that we do
  // deterministic work every batch.
  while (state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) {
    state.PauseTiming();

    container.clear();

    constexpr int kSeed = 7;
    std::mt19937_64 rand(kSeed);

    std::vector<V> values = GenerateValues<V>(kBenchmarkValues * 2);

    // Insert the first half of the values (already in random order).
    container.insert(values.begin(), values.begin() + kBenchmarkValues);

    // Set up the key vectors for the first round.
    for (size_t i = 0; i < kBenchmarkValues; ++i) {
      // remove_keys and add_keys will be swapped before each round,
      // therefore fill add_keys here w/ the keys being inserted, so
      // they'll be the first to be removed.
      remove_keys[i] = i + kBenchmarkValues;
      add_keys[i] = i;
    }

    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
      remove_keys.swap(add_keys);
      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
      std::shuffle(add_keys.begin(), add_keys.end(), rand);

      state.ResumeTiming();
      for (size_t idx = 0; idx < kBenchmarkValues; ++idx) {
        container.erase(key_of_value(values[remove_keys[idx]]));
        container.insert(values[add_keys[idx]]);
      }
      state.PauseTiming();
    }
    state.ResumeTiming();
  }
}

// Insertion at end, removal from the beginning.  This benchmark
// counts two value constructors.
// TODO(ezb): we could add a GenerateNext version of generator that could reduce
// noise for string-like types.
template <typename T>
void BM_Fifo(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;

  T container;
  // Need lazy generation of values as state.max_iterations is large.
  Generator<V> g(kBenchmarkValues + state.max_iterations);

  for (int i = 0; i < kBenchmarkValues; i++) {
    container.insert(g(i));
  }

  while (state.KeepRunning()) {
    container.erase(container.begin());
    container.insert(container.end(), g(state.iterations() + kBenchmarkValues));
  }
}

// Iteration (forward) through the tree
template <typename T>
void BM_FwdIter(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  using R = typename T::value_type const*;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());

  auto iter = container.end();

  R r = nullptr;

  while (state.KeepRunning()) {
    if (iter == container.end()) iter = container.begin();
    r = &(*iter);
    ++iter;
  }

  benchmark::DoNotOptimize(r);
}

// Benchmark random range-construction of a container.
template <typename T>
void BM_RangeConstructionImpl(benchmark::State& state, bool sorted) {
  using V = typename remove_pair_const<typename T::value_type>::type;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  if (sorted) {
    std::sort(values.begin(), values.end());
  }
  {
    T container(values.begin(), values.end());
  }

  while (state.KeepRunning()) {
    T container(values.begin(), values.end());
    benchmark::DoNotOptimize(container);
  }
}

template <typename T>
void BM_InsertRangeRandom(benchmark::State& state) {
  BM_RangeConstructionImpl<T>(state, false);
}

template <typename T>
void BM_InsertRangeSorted(benchmark::State& state) {
  BM_RangeConstructionImpl<T>(state, true);
}

#define STL_ORDERED_TYPES(value)                     \
  using stl_set_##value = std::set<value>;           \
  using stl_map_##value = std::map<value, intptr_t>; \
  using stl_multiset_##value = std::multiset<value>; \
  using stl_multimap_##value = std::multimap<value, intptr_t>
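// For example, STL_ORDERED_TYPES(int32_t) defines the aliases stl_set_int32_t,
// stl_map_int32_t, stl_multiset_int32_t, and stl_multimap_int32_t.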

using StdString = std::string;
STL_ORDERED_TYPES(int32_t);
STL_ORDERED_TYPES(int64_t);
STL_ORDERED_TYPES(StdString);
STL_ORDERED_TYPES(Cord);
STL_ORDERED_TYPES(Time);

#define STL_UNORDERED_TYPES(value)                                       \
  using stl_unordered_set_##value = std::unordered_set<value>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t>; \
  using flat_hash_set_##value = flat_hash_set<value>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value>; \
  using stl_unordered_multimap_##value =                                 \
      std::unordered_multimap<value, intptr_t>

#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash)                           \
  using stl_unordered_set_##value = std::unordered_set<value, hash>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t, hash>; \
  using flat_hash_set_##value = flat_hash_set<value, hash>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t, hash>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value, hash>; \
  using stl_unordered_multimap_##value =                                       \
      std::unordered_multimap<value, intptr_t, hash>

STL_UNORDERED_TYPES_CUSTOM_HASH(Cord, absl::Hash<absl::Cord>);

STL_UNORDERED_TYPES(int32_t);
STL_UNORDERED_TYPES(int64_t);
STL_UNORDERED_TYPES(StdString);
STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash<absl::Time>);

#define BTREE_TYPES(value)                                            \
  using btree_256_set_##value =                                       \
      btree_set<value, std::less<value>, std::allocator<value>>;      \
  using btree_256_map_##value =                                       \
      btree_map<value, intptr_t, std::less<value>,                    \
                std::allocator<std::pair<const value, intptr_t>>>;    \
  using btree_256_multiset_##value =                                  \
      btree_multiset<value, std::less<value>, std::allocator<value>>; \
  using btree_256_multimap_##value =                                  \
      btree_multimap<value, intptr_t, std::less<value>,               \
                     std::allocator<std::pair<const value, intptr_t>>>
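// Note: the "256" in these alias names reflects the b-tree containers'
// default target node size of 256 bytes, which these aliases instantiate
// unchanged.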

BTREE_TYPES(int32_t);
BTREE_TYPES(int64_t);
BTREE_TYPES(StdString);
BTREE_TYPES(Cord);
BTREE_TYPES(Time);

#define MY_BENCHMARK4(type, func)                                              \
  void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
  BENCHMARK(BM_##type##_##func)
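// For example, MY_BENCHMARK4(stl_set_int32_t, Insert) expands to:
//   void BM_stl_set_int32_t_Insert(benchmark::State& state) {
//     BM_Insert<stl_set_int32_t>(state);
//   }
//   BENCHMARK(BM_stl_set_int32_t_Insert);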

#define MY_BENCHMARK3(type)               \
  MY_BENCHMARK4(type, Insert);            \
  MY_BENCHMARK4(type, InsertSorted);      \
  MY_BENCHMARK4(type, InsertEnd);         \
  MY_BENCHMARK4(type, InsertSmall);       \
  MY_BENCHMARK4(type, Lookup);            \
  MY_BENCHMARK4(type, FullLookup);        \
  MY_BENCHMARK4(type, Delete);            \
  MY_BENCHMARK4(type, DeleteRange);       \
  MY_BENCHMARK4(type, QueueAddRem);       \
  MY_BENCHMARK4(type, MixedAddRem);       \
  MY_BENCHMARK4(type, Fifo);              \
  MY_BENCHMARK4(type, FwdIter);           \
  MY_BENCHMARK4(type, InsertRangeRandom); \
  MY_BENCHMARK4(type, InsertRangeSorted)

#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
  MY_BENCHMARK3(stl_##type);                    \
  MY_BENCHMARK3(stl_unordered_##type);          \
  MY_BENCHMARK3(btree_256_##type)

#define MY_BENCHMARK2(type)                \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \
  MY_BENCHMARK3(flat_hash_##type)

// Define MULTI_TESTING to see benchmarks for multi-containers also.
//
// You can use --copt=-DMULTI_TESTING.
#ifdef MULTI_TESTING
#define MY_BENCHMARK(type)                            \
  MY_BENCHMARK2(set_##type);                          \
  MY_BENCHMARK2(map_##type);                          \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type)
#else
#define MY_BENCHMARK(type)   \
  MY_BENCHMARK2(set_##type); \
  MY_BENCHMARK2(map_##type)
#endif

MY_BENCHMARK(int32_t);
MY_BENCHMARK(int64_t);
MY_BENCHMARK(StdString);
MY_BENCHMARK(Cord);
MY_BENCHMARK(Time);

// Define a type whose size and cost of moving are independently customizable.
// When sizeof(value_type) increases, we expect btree to no longer have as much
// cache-locality advantage over STL. When the cost of moving increases, we
// expect btree to actually do more work than STL because it has to move values
// around and STL doesn't have to.
template <int Size, int Copies>
struct BigType {
  BigType() : BigType(0) {}
  explicit BigType(int x) { std::iota(values.begin(), values.end(), x); }

  void Copy(const BigType& other) {
    for (int i = 0; i < Size && i < Copies; ++i) values[i] = other.values[i];
    // If Copies > Size, do extra copies.
    for (int i = Size, idx = 0; i < Copies; ++i) {
      int64_t tmp = other.values[idx];
      benchmark::DoNotOptimize(tmp);
      idx = idx + 1 == Size ? 0 : idx + 1;
    }
  }

  BigType(const BigType& other) { Copy(other); }
  BigType& operator=(const BigType& other) {
    Copy(other);
    return *this;
  }

  // Compare only the first Copies elements if Copies is less than Size.
  bool operator<(const BigType& other) const {
    return std::lexicographical_compare(
        values.begin(), values.begin() + std::min(Size, Copies),
        other.values.begin(), other.values.begin() + std::min(Size, Copies));
  }
  bool operator==(const BigType& other) const {
    return std::equal(values.begin(), values.begin() + std::min(Size, Copies),
                      other.values.begin());
  }

  // Support absl::Hash.
  template <typename State>
  friend State AbslHashValue(State h, const BigType& b) {
    for (int i = 0; i < Size && i < Copies; ++i)
      h = State::combine(std::move(h), b.values[i]);
    return h;
  }

  std::array<int64_t, Size> values;
};
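
// For example, BigType<32, 32> stores 32 int64_t values (256 bytes) and
// copies all of them on assignment, while BigType<32, 1> occupies the same
// 256 bytes but copies and compares only a single element.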

#define BIG_TYPE_BENCHMARKS(SIZE, COPIES)                                     \
  using stl_set_size##SIZE##copies##COPIES = std::set<BigType<SIZE, COPIES>>; \
  using stl_map_size##SIZE##copies##COPIES =                                  \
      std::map<BigType<SIZE, COPIES>, intptr_t>;                              \
  using stl_multiset_size##SIZE##copies##COPIES =                             \
      std::multiset<BigType<SIZE, COPIES>>;                                   \
  using stl_multimap_size##SIZE##copies##COPIES =                             \
      std::multimap<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_set_size##SIZE##copies##COPIES =                        \
      std::unordered_set<BigType<SIZE, COPIES>,                               \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using stl_unordered_map_size##SIZE##copies##COPIES =                        \
      std::unordered_map<BigType<SIZE, COPIES>, intptr_t,                     \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using flat_hash_set_size##SIZE##copies##COPIES =                            \
      flat_hash_set<BigType<SIZE, COPIES>>;                                   \
  using flat_hash_map_size##SIZE##copies##COPIES =                            \
      flat_hash_map<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_multiset_size##SIZE##copies##COPIES =                   \
      std::unordered_multiset<BigType<SIZE, COPIES>,                          \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using stl_unordered_multimap_size##SIZE##copies##COPIES =                   \
      std::unordered_multimap<BigType<SIZE, COPIES>, intptr_t,                \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using btree_256_set_size##SIZE##copies##COPIES =                            \
      btree_set<BigType<SIZE, COPIES>>;                                       \
  using btree_256_map_size##SIZE##copies##COPIES =                            \
      btree_map<BigType<SIZE, COPIES>, intptr_t>;                             \
  using btree_256_multiset_size##SIZE##copies##COPIES =                       \
      btree_multiset<BigType<SIZE, COPIES>>;                                  \
  using btree_256_multimap_size##SIZE##copies##COPIES =                       \
      btree_multimap<BigType<SIZE, COPIES>, intptr_t>;                        \
  MY_BENCHMARK(size##SIZE##copies##COPIES)

// Define BIG_TYPE_TESTING to see benchmarks for more big types.
//
// You can use --copt=-DBIG_TYPE_TESTING.
#ifndef NODESIZE_TESTING
#ifdef BIG_TYPE_TESTING
BIG_TYPE_BENCHMARKS(1, 4);
BIG_TYPE_BENCHMARKS(4, 1);
BIG_TYPE_BENCHMARKS(4, 4);
BIG_TYPE_BENCHMARKS(1, 8);
BIG_TYPE_BENCHMARKS(8, 1);
BIG_TYPE_BENCHMARKS(8, 8);
BIG_TYPE_BENCHMARKS(1, 16);
BIG_TYPE_BENCHMARKS(16, 1);
BIG_TYPE_BENCHMARKS(16, 16);
BIG_TYPE_BENCHMARKS(1, 32);
BIG_TYPE_BENCHMARKS(32, 1);
BIG_TYPE_BENCHMARKS(32, 32);
#else
BIG_TYPE_BENCHMARKS(32, 32);
#endif
#endif

// Benchmark using unique_ptrs to large value types. In order to be able to use
// the same benchmark code as the other types, use a type that holds a
// unique_ptr and has a copy constructor.
template <int Size>
struct BigTypePtr {
  BigTypePtr() : BigTypePtr(0) {}
  explicit BigTypePtr(int x) {
    ptr = absl::make_unique<BigType<Size, Size>>(x);
  }
  BigTypePtr(const BigTypePtr& other) {
    ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
  }
  BigTypePtr(BigTypePtr&& other) noexcept = default;
  BigTypePtr& operator=(const BigTypePtr& other) {
    ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
    return *this;
  }
  BigTypePtr& operator=(BigTypePtr&& other) noexcept = default;

  bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; }
  bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; }

  std::unique_ptr<BigType<Size, Size>> ptr;
};

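// Report approximate memory usage for b-tree containers of BigTypePtr.
// bytes_used() only accounts for node storage, so the out-of-line BigType
// allocations owned by each unique_ptr are added explicitly.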
template <int Size>
double ContainerInfo(const btree_set<BigTypePtr<Size>>& b) {
  const double bytes_used =
      b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
  const double bytes_per_value = bytes_used / b.size();
  BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
  return bytes_per_value;
}
template <int Size>
double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
  const double bytes_used =
      b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
  const double bytes_per_value = bytes_used / b.size();
  BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
  return bytes_per_value;
}

#define BIG_TYPE_PTR_BENCHMARKS(SIZE)                                          \
  using stl_set_size##SIZE##copies##SIZE##ptr = std::set<BigType<SIZE, SIZE>>; \
  using stl_map_size##SIZE##copies##SIZE##ptr =                                \
      std::map<int, BigType<SIZE, SIZE>>;                                      \
  using stl_unordered_set_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_set<BigType<SIZE, SIZE>,                                  \
                         absl::Hash<BigType<SIZE, SIZE>>>;                     \
  using stl_unordered_map_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_map<int, BigType<SIZE, SIZE>>;                            \
  using flat_hash_set_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_set<BigType<SIZE, SIZE>>;                                      \
  using flat_hash_map_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_map<int, BigTypePtr<SIZE>>;                                    \
  using btree_256_set_size##SIZE##copies##SIZE##ptr =                          \
      btree_set<BigTypePtr<SIZE>>;                                             \
  using btree_256_map_size##SIZE##copies##SIZE##ptr =                          \
      btree_map<int, BigTypePtr<SIZE>>;                                        \
  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr);                        \
  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr);              \
  MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr);                        \
  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr);              \
  MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)

BIG_TYPE_PTR_BENCHMARKS(32);

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl