//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
#else

#include <atomic>
#include <cstring>
#include <iostream>
#include <limits>
#include <memory>
#include <string>
#include <thread>
#include <type_traits>
#include <vector>

#include "db/dbformat.h"
#include "db/memtable.h"
#include "memory/arena.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/comparator.h"
#include "rocksdb/convenience.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/system_clock.h"
#include "rocksdb/write_buffer_manager.h"
#include "test_util/testutil.h"
#include "util/gflags_compat.h"
#include "util/mutexlock.h"
#include "util/stop_watch.h"

using GFLAGS_NAMESPACE::ParseCommandLineFlags;
using GFLAGS_NAMESPACE::RegisterFlagValidator;
using GFLAGS_NAMESPACE::SetUsageMessage;

DEFINE_string(benchmarks, "fillrandom",
              "Comma-separated list of benchmarks to run. Options:\n"
              "\tfillrandom             -- write N random values\n"
              "\tfillseq                -- write N values in sequential order\n"
              "\treadrandom             -- read N values in random order\n"
              "\treadseq                -- scan the DB\n"
              "\treadwrite              -- 1 thread writes while N - 1 threads "
              "do random\n"
              "\t                          reads\n"
              "\tseqreadwrite           -- 1 thread writes while N - 1 threads "
              "do scans\n");

DEFINE_string(memtablerep, "skiplist",
              "Which implementation of memtablerep to use. See "
              "include/rocksdb/memtablerep.h for\n"
              "  more details. Options:\n"
              "\tskiplist            -- backed by a skiplist\n"
              "\tvector              -- backed by an std::vector\n"
              "\thashskiplist        -- backed by a hash skip list\n"
              "\thashlinklist        -- backed by a hash linked list\n"
              "\tcuckoo              -- backed by a cuckoo hash table");

DEFINE_int64(bucket_count, 1000000,
             "bucket_count parameter to pass into NewHashSkiplistRepFactory or "
             "NewHashLinkListRepFactory");

DEFINE_int32(
    hashskiplist_height, 4,
    "skiplist_height parameter to pass into NewHashSkiplistRepFactory");

DEFINE_int32(
    hashskiplist_branching_factor, 4,
    "branching_factor parameter to pass into NewHashSkiplistRepFactory");

DEFINE_int32(
    huge_page_tlb_size, 0,
    "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");

DEFINE_int32(bucket_entries_logging_threshold, 4096,
             "bucket_entries_logging_threshold parameter to pass into "
             "NewHashLinkListRepFactory");

DEFINE_bool(if_log_bucket_dist_when_flash, true,
            "if_log_bucket_dist_when_flash parameter to pass into "
            "NewHashLinkListRepFactory");

DEFINE_int32(
    threshold_use_skiplist, 256,
    "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");

DEFINE_int64(write_buffer_size, 256,
             "write_buffer_size parameter to pass into WriteBufferManager");

DEFINE_int32(
    num_threads, 1,
    "Number of concurrent threads to run. If the benchmark includes writes,\n"
    "then at most one thread will be a writer");

DEFINE_int32(num_operations, 1000000,
             "Number of operations to do for write and random read benchmarks");

DEFINE_int32(num_scans, 10,
             "Number of times for each thread to scan the memtablerep for "
             "sequential read "
             "benchmarks");

DEFINE_int32(item_size, 100, "Number of bytes each item should be");

DEFINE_int32(prefix_length, 8,
             "Prefix length to pass into NewFixedPrefixTransform");

/* VectorRep settings */
DEFINE_int64(vectorrep_count, 0,
             "Number of entries to reserve on VectorRep initialization");

DEFINE_int64(seed, 0,
             "Seed base for random number generators. "
             "When 0 it is deterministic.");
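
// Example invocation (a sketch: the binary name and flag values below are
// illustrative; the flags themselves are the gflags defined above):
//
//   ./memtablerep_bench --benchmarks=fillrandom,readrandom \
//       --memtablerep=skiplist --num_operations=1000000 --num_threads=4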

namespace ROCKSDB_NAMESPACE {

namespace {
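// Arguments handed to the MemTableRep::Get() callback used by the read
// benchmarks: the lookup key being probed, the table and comparator in use,
// and a `found` flag the callback sets when the stored user key matches.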
struct CallbackVerifyArgs {
  bool found;
  LookupKey* key;
  MemTableRep* table;
  InternalKeyComparator* comparator;
};
}  // namespace

// Helper for quickly generating random data.
class RandomGenerator {
 private:
  std::string data_;
  unsigned int pos_;

 public:
  RandomGenerator() {
    Random rnd(301);
    auto size = (unsigned)std::max(1048576, FLAGS_item_size);
    data_ = rnd.RandomString(size);
    pos_ = 0;
  }

  Slice Generate(unsigned int len) {
    assert(len <= data_.size());
    if (pos_ + len > data_.size()) {
      pos_ = 0;
    }
    pos_ += len;
    return Slice(data_.data() + pos_ - len, len);
  }
};

enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM };

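// Produces the stream of keys for a benchmark: monotonically increasing keys
// (SEQUENTIAL), uniformly distributed keys that may repeat (RANDOM), or a
// pre-shuffled permutation of [0, num) so that each key appears exactly once
// (UNIQUE_RANDOM).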
class KeyGenerator {
 public:
  KeyGenerator(Random64* rand, WriteMode mode, uint64_t num)
      : rand_(rand), mode_(mode), num_(num), next_(0) {
    if (mode_ == UNIQUE_RANDOM) {
      // NOTE: if memory consumption of this approach becomes a concern,
      // we can either break it into pieces and only random shuffle a section
      // each time. Alternatively, use a bit map implementation
      // (https://reviews.facebook.net/differential/diff/54627/)
      values_.resize(num_);
      for (uint64_t i = 0; i < num_; ++i) {
        values_[i] = i;
      }
      RandomShuffle(values_.begin(), values_.end(),
                    static_cast<uint32_t>(FLAGS_seed));
    }
  }

  uint64_t Next() {
    switch (mode_) {
      case SEQUENTIAL:
        return next_++;
      case RANDOM:
        return rand_->Next() % num_;
      case UNIQUE_RANDOM:
        return values_[next_++];
    }
    assert(false);
    return std::numeric_limits<uint64_t>::max();
  }

 private:
  Random64* rand_;
  WriteMode mode_;
  const uint64_t num_;
  uint64_t next_;
  std::vector<uint64_t> values_;
};

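// Base class for a single benchmark worker. Each subclass implements
// operator() as the work loop of one thread; the pointed-to counters
// (bytes_written_, bytes_read_, read_hits_) are shared with the driver that
// spawned the thread.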
class BenchmarkThread {
 public:
  explicit BenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                           uint64_t* bytes_written, uint64_t* bytes_read,
                           uint64_t* sequence, uint64_t num_ops,
                           uint64_t* read_hits)
      : table_(table),
        key_gen_(key_gen),
        bytes_written_(bytes_written),
        bytes_read_(bytes_read),
        sequence_(sequence),
        num_ops_(num_ops),
        read_hits_(read_hits) {}

  virtual void operator()() = 0;
  virtual ~BenchmarkThread() {}

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* bytes_written_;
  uint64_t* bytes_read_;
  uint64_t* sequence_;
  uint64_t num_ops_;
  uint64_t* read_hits_;
  RandomGenerator generator_;
};

class FillBenchmarkThread : public BenchmarkThread {
 public:
  FillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

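  // Builds one entry directly in memory allocated from the memtable rep:
  // varint32 internal key length (always 16), an 8-byte fixed-width user key
  // from the key generator, an 8-byte sequence number, then FLAGS_item_size
  // bytes of value data.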
  void FillOne() {
    char* buf = nullptr;
    auto internal_key_size = 16;
    auto encoded_len =
        FLAGS_item_size + VarintLength(internal_key_size) + internal_key_size;
    KeyHandle handle = table_->Allocate(encoded_len, &buf);
    assert(buf != nullptr);
    char* p = EncodeVarint32(buf, internal_key_size);
    auto key = key_gen_->Next();
    EncodeFixed64(p, key);
    p += 8;
    EncodeFixed64(p, ++(*sequence_));
    p += 8;
    Slice bytes = generator_.Generate(FLAGS_item_size);
    memcpy(p, bytes.data(), FLAGS_item_size);
    p += FLAGS_item_size;
    assert(p == buf + encoded_len);
    table_->Insert(handle);
    *bytes_written_ += encoded_len;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      FillOne();
    }
  }
};

class ConcurrentFillBenchmarkThread : public FillBenchmarkThread {
 public:
  ConcurrentFillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : FillBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    // The number of read threads is the total thread count minus the single
    // write thread. Keep writing until all read threads have finished.
    while ((*threads_done_).load() < (FLAGS_num_threads - 1)) {
      FillOne();
    }
  }

 private:
  std::atomic_int* threads_done_;
};

class ReadBenchmarkThread : public BenchmarkThread {
 public:
  ReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

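  // Callback passed to MemTableRep::Get(). It decodes an entry in the format
  // written by FillOne() (varint32 key length, 8-byte user key, 8-byte
  // sequence) and sets `found` when the stored user key equals the lookup
  // key's user key. It always returns false, so the scan stops after the
  // first entry examined.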
  static bool callback(void* arg, const char* entry) {
    CallbackVerifyArgs* callback_args = static_cast<CallbackVerifyArgs*>(arg);
    assert(callback_args != nullptr);
    uint32_t key_length;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if ((callback_args->comparator)
            ->user_comparator()
            ->Equal(Slice(key_ptr, key_length - 8),
                    callback_args->key->user_key())) {
      callback_args->found = true;
    }
    return false;
  }

  void ReadOne() {
    std::string user_key;
    auto key = key_gen_->Next();
    PutFixed64(&user_key, key);
    LookupKey lookup_key(user_key, *sequence_);
    InternalKeyComparator internal_key_comp(BytewiseComparator());
    CallbackVerifyArgs verify_args;
    verify_args.found = false;
    verify_args.key = &lookup_key;
    verify_args.table = table_;
    verify_args.comparator = &internal_key_comp;
    table_->Get(lookup_key, &verify_args, callback);
    if (verify_args.found) {
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
      ++*read_hits_;
    }
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
  }
};

class SeqReadBenchmarkThread : public BenchmarkThread {
 public:
  SeqReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* bytes_written, uint64_t* bytes_read,
                         uint64_t* sequence, uint64_t num_ops,
                         uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

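  // One full forward scan of the memtable rep; bytes_read_ is credited per
  // entry as if the value were consumed, and read_hits_ counts completed
  // scans.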
  void ReadOneSeq() {
    std::unique_ptr<MemTableRep::Iterator> iter(table_->GetIterator());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      // pretend to read the value
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
    }
    ++*read_hits_;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
  }
};

class ConcurrentReadBenchmarkThread : public ReadBenchmarkThread {
 public:
  ConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : ReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

class SeqConcurrentReadBenchmarkThread : public SeqReadBenchmarkThread {
 public:
  SeqConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                   uint64_t* bytes_written,
                                   uint64_t* bytes_read, uint64_t* sequence,
                                   uint64_t num_ops, uint64_t* read_hits,
                                   std::atomic_int* threads_done)
      : SeqReadBenchmarkThread(table, key_gen, bytes_written, bytes_read,
                               sequence, num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

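// Driver for one benchmark run. RunThreads(), implemented by each subclass,
// spawns the worker threads; Run() times the whole pass with StopWatchNano
// and reports bytes written/read in MiB, throughput in MiB/s, and
// microseconds per operation.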
class Benchmark {
 public:
  explicit Benchmark(MemTableRep* table, KeyGenerator* key_gen,
                     uint64_t* sequence, uint32_t num_threads)
      : table_(table),
        key_gen_(key_gen),
        sequence_(sequence),
        num_threads_(num_threads) {}

  virtual ~Benchmark() {}

  virtual void Run() {
    std::cout << "Number of threads: " << num_threads_ << std::endl;
    std::vector<port::Thread> threads;
    uint64_t bytes_written = 0;
    uint64_t bytes_read = 0;
    uint64_t read_hits = 0;
    StopWatchNano timer(SystemClock::Default().get(), true);
    RunThreads(&threads, &bytes_written, &bytes_read, true, &read_hits);
    auto elapsed_time = static_cast<double>(timer.ElapsedNanos() / 1000);
    std::cout << "Elapsed time: " << static_cast<int>(elapsed_time) << " us"
              << std::endl;

    if (bytes_written > 0) {
      auto MiB_written = static_cast<double>(bytes_written) / (1 << 20);
      auto write_throughput = MiB_written / (elapsed_time / 1000000);
      std::cout << "Total bytes written: " << MiB_written << " MiB"
                << std::endl;
      std::cout << "Write throughput: " << write_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_write_ops_per_thread_;
      std::cout << "write us/op: " << us_per_op << std::endl;
    }
    if (bytes_read > 0) {
      auto MiB_read = static_cast<double>(bytes_read) / (1 << 20);
      auto read_throughput = MiB_read / (elapsed_time / 1000000);
      std::cout << "Total bytes read: " << MiB_read << " MiB" << std::endl;
      std::cout << "Read throughput: " << read_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_read_ops_per_thread_;
      std::cout << "read us/op: " << us_per_op << std::endl;
    }
  }

  virtual void RunThreads(std::vector<port::Thread>* threads,
                          uint64_t* bytes_written, uint64_t* bytes_read,
                          bool write, uint64_t* read_hits) = 0;

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* sequence_;
  uint64_t num_write_ops_per_thread_ = 0;
  uint64_t num_read_ops_per_thread_ = 0;
  const uint32_t num_threads_;
};

class FillBenchmark : public Benchmark {
 public:
  explicit FillBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, 1) {
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* /*threads*/,
                  uint64_t* bytes_written, uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
                        num_write_ops_per_thread_, read_hits)();
  }
};

class ReadBenchmark : public Benchmark {
 public:
  explicit ReadBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_operations / FLAGS_num_threads;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadBenchmarkThread(table_, key_gen_, bytes_written, bytes_read,
                              sequence_, num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
    std::cout << "read hit%: "
              << (static_cast<double>(*read_hits) / FLAGS_num_operations) * 100
              << std::endl;
  }
};

class SeqReadBenchmark : public Benchmark {
 public:
  explicit SeqReadBenchmark(MemTableRep* table, uint64_t* sequence)
      : Benchmark(table, nullptr, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_scans;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(SeqReadBenchmarkThread(
          table_, key_gen_, bytes_written, bytes_read, sequence_,
          num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

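// Mixed workload: one ConcurrentFillBenchmarkThread writer plus
// (FLAGS_num_threads - 1) readers of ReadThreadType; the writer keeps
// inserting until every reader has finished.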
template <class ReadThreadType>
class ReadWriteBenchmark : public Benchmark {
 public:
  explicit ReadWriteBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                              uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ =
        FLAGS_num_threads <= 1
            ? 0
            : (FLAGS_num_operations / (FLAGS_num_threads - 1));
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    std::atomic_int threads_done;
    threads_done.store(0);
    threads->emplace_back(ConcurrentFillBenchmarkThread(
        table_, key_gen_, bytes_written, bytes_read, sequence_,
        num_write_ops_per_thread_, read_hits, &threads_done));
    for (int i = 1; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadThreadType(table_, key_gen_, bytes_written, bytes_read, sequence_,
                         num_read_ops_per_thread_, read_hits, &threads_done));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

}  // namespace ROCKSDB_NAMESPACE

void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  fprintf(stdout,
          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  fprintf(stdout,
          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
                  " [OPTIONS]...");
  ParseCommandLineFlags(&argc, &argv, true);

  PrintWarnings();

  ROCKSDB_NAMESPACE::Options options;

  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRepFactory> factory;
  if (FLAGS_memtablerep == "skiplist") {
    factory.reset(new ROCKSDB_NAMESPACE::SkipListFactory);
#ifndef ROCKSDB_LITE
  } else if (FLAGS_memtablerep == "vector") {
    factory.reset(new ROCKSDB_NAMESPACE::VectorRepFactory);
  } else if (FLAGS_memtablerep == "hashskiplist" ||
             FLAGS_memtablerep == "prefix_hash") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
        FLAGS_bucket_count, FLAGS_hashskiplist_height,
        FLAGS_hashskiplist_branching_factor));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
  } else if (FLAGS_memtablerep == "hashlinklist" ||
             FLAGS_memtablerep == "hash_linkedlist") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(
        FLAGS_bucket_count, FLAGS_huge_page_tlb_size,
        FLAGS_bucket_entries_logging_threshold,
        FLAGS_if_log_bucket_dist_when_flash, FLAGS_threshold_use_skiplist));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
#endif  // ROCKSDB_LITE
  } else {
    ROCKSDB_NAMESPACE::ConfigOptions config_options;
    config_options.ignore_unsupported_options = false;

    ROCKSDB_NAMESPACE::Status s =
        ROCKSDB_NAMESPACE::MemTableRepFactory::CreateFromString(
            config_options, FLAGS_memtablerep, &factory);
    if (!s.ok()) {
      fprintf(stdout, "Unknown memtablerep: %s\n", s.ToString().c_str());
      exit(1);
    }
  }

  ROCKSDB_NAMESPACE::InternalKeyComparator internal_key_comp(
      ROCKSDB_NAMESPACE::BytewiseComparator());
  ROCKSDB_NAMESPACE::MemTable::KeyComparator key_comp(internal_key_comp);
  ROCKSDB_NAMESPACE::Arena arena;
  ROCKSDB_NAMESPACE::WriteBufferManager wb(FLAGS_write_buffer_size);
  uint64_t sequence;
  auto createMemtableRep = [&] {
    sequence = 0;
    return factory->CreateMemTableRep(key_comp, &arena,
                                      options.prefix_extractor.get(),
                                      options.info_log.get());
  };
  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRep> memtablerep;
  ROCKSDB_NAMESPACE::Random64 rng(FLAGS_seed);
  const char* benchmarks = FLAGS_benchmarks.c_str();
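  // Parse the comma-separated --benchmarks list and run each benchmark in
  // turn. Fill benchmarks and the read/write mixes create a fresh memtable
  // rep via createMemtableRep(), which also resets `sequence`; pure read
  // benchmarks reuse the memtable populated by the preceding fill benchmark.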
  while (benchmarks != nullptr) {
    std::unique_ptr<ROCKSDB_NAMESPACE::KeyGenerator> key_gen;
    const char* sep = strchr(benchmarks, ',');
    ROCKSDB_NAMESPACE::Slice name;
    if (sep == nullptr) {
      name = benchmarks;
      benchmarks = nullptr;
    } else {
      name = ROCKSDB_NAMESPACE::Slice(benchmarks, sep - benchmarks);
      benchmarks = sep + 1;
    }
    std::unique_ptr<ROCKSDB_NAMESPACE::Benchmark> benchmark;
    if (name == ROCKSDB_NAMESPACE::Slice("fillseq")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("fillrandom")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::UNIQUE_RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readrandom")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readseq")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::SeqReadBenchmark(memtablerep.get(),
                                                              &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::ConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("seqreadwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::SeqConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else {
      std::cout << "WARNING: skipping unknown benchmark '" << name.ToString()
                << "'" << std::endl;
      continue;
    }
    std::cout << "Running " << name.ToString() << std::endl;
    benchmark->Run();
  }

  return 0;
}

#endif  // GFLAGS