// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
#else

#include <stdio.h>
#include <sys/types.h>
#include <cinttypes>

#include "port/port.h"
#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "util/gflags_compat.h"
#include "util/mutexlock.h"
#include "util/random.h"

using GFLAGS_NAMESPACE::ParseCommandLineFlags;

static const uint32_t KB = 1024;
DEFINE_int32(threads, 16, "Number of concurrent threads to run.");
DEFINE_int64(cache_size, 8 * KB * KB,
             "Number of bytes to use as a cache of uncompressed data.");
DEFINE_int32(num_shard_bits, 4, "Number of shard bits for the cache.");

DEFINE_int64(max_key, 1 * KB * KB * KB,
             "Max number of keys to place in cache");
DEFINE_uint64(ops_per_thread, 1200000, "Number of operations per thread.");

DEFINE_bool(populate_cache, false, "Populate cache before operations");
DEFINE_int32(insert_percent, 40,
             "Ratio of insert to total workload (expressed as a percentage)");
DEFINE_int32(lookup_percent, 50,
             "Ratio of lookup to total workload (expressed as a percentage)");
DEFINE_int32(erase_percent, 10,
             "Ratio of erase to total workload (expressed as a percentage)");

DEFINE_bool(use_clock_cache, false, "Use ClockCache instead of LRUCache.");
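
// Illustrative invocation, assuming the binary is built as "cache_bench";
// the flag values below are just the defaults spelled out, not tuned
// recommendations:
//
//   ./cache_bench --threads=16 --cache_size=8388608 --num_shard_bits=4 \
//       --max_key=1073741824 --ops_per_thread=1200000 --populate_cache \
//       --insert_percent=40 --lookup_percent=50 --erase_percent=10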

namespace rocksdb {

class CacheBench;
namespace {
// Values are allocated with new char[10], so the deleter must use the array
// form delete[] to match.
void deleter(const Slice& /*key*/, void* value) {
  delete[] reinterpret_cast<char*>(value);
}

// State shared by all concurrent executions of the same benchmark.
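// The mutex/condvar pair below doubles as a simple two-phase barrier: each
// worker increments num_initialized_ and then blocks until SetStart() is
// called, and the main thread in turn blocks until num_done_ reaches
// num_threads_.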
class SharedState {
 public:
  explicit SharedState(CacheBench* cache_bench)
      : cv_(&mu_),
        num_threads_(FLAGS_threads),
        num_initialized_(0),
        start_(false),
        num_done_(0),
        cache_bench_(cache_bench) {
  }

  ~SharedState() {}

  port::Mutex* GetMutex() {
    return &mu_;
  }

  port::CondVar* GetCondVar() {
    return &cv_;
  }

  CacheBench* GetCacheBench() const {
    return cache_bench_;
  }

  void IncInitialized() {
    num_initialized_++;
  }

  void IncDone() {
    num_done_++;
  }

  bool AllInitialized() const {
    return num_initialized_ >= num_threads_;
  }

  bool AllDone() const {
    return num_done_ >= num_threads_;
  }

  void SetStart() {
    start_ = true;
  }

  bool Started() const {
    return start_;
  }

 private:
  port::Mutex mu_;
  port::CondVar cv_;

  const uint64_t num_threads_;
  uint64_t num_initialized_;
  bool start_;
  uint64_t num_done_;

  CacheBench* cache_bench_;
};

// Per-thread state for concurrent executions of the same benchmark.
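// Each thread seeds its Random with 1000 + tid, so runs are reproducible
// while threads still draw distinct key sequences.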
struct ThreadState {
  uint32_t tid;
  Random rnd;
  SharedState* shared;

  ThreadState(uint32_t index, SharedState* _shared)
      : tid(index), rnd(1000 + index), shared(_shared) {}
};
}  // namespace

class CacheBench {
 public:
  CacheBench() : num_threads_(FLAGS_threads) {
    if (FLAGS_use_clock_cache) {
      cache_ = NewClockCache(FLAGS_cache_size, FLAGS_num_shard_bits);
      if (!cache_) {
        fprintf(stderr, "Clock cache not supported.\n");
        exit(1);
      }
    } else {
      cache_ = NewLRUCache(FLAGS_cache_size, FLAGS_num_shard_bits);
    }
  }

  ~CacheBench() {}

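  // Each entry is inserted with a charge of 1, so FLAGS_cache_size
  // insertions fill the cache to its capacity regardless of the 10-byte
  // values actually stored.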
  void PopulateCache() {
    Random rnd(1);
    for (int64_t i = 0; i < FLAGS_cache_size; i++) {
      uint64_t rand_key = rnd.Next() % FLAGS_max_key;
      // Cast the uint64_t* to char*; the cache copies the key bytes on
      // insert.
      Slice key(reinterpret_cast<char*>(&rand_key), 8);
      // do insert
      cache_->Insert(key, new char[10], 1, &deleter);
    }
  }
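  // Spawns num_threads_ workers, releases them simultaneously, and reports
  // aggregate throughput over the wall-clock window between the start signal
  // and the last worker finishing.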
  bool Run() {
    rocksdb::Env* env = rocksdb::Env::Default();

    PrintEnv();
    SharedState shared(this);
    std::vector<ThreadState*> threads(num_threads_);
    for (uint32_t i = 0; i < num_threads_; i++) {
      threads[i] = new ThreadState(i, &shared);
      env->StartThread(ThreadBody, threads[i]);
    }
    {
      MutexLock l(shared.GetMutex());
      while (!shared.AllInitialized()) {
        shared.GetCondVar()->Wait();
      }
      // Record start time
      uint64_t start_time = env->NowMicros();

      // Start all threads
      shared.SetStart();
      shared.GetCondVar()->SignalAll();

      // Wait for threads to complete
      while (!shared.AllDone()) {
        shared.GetCondVar()->Wait();
      }

      // Record end time
      uint64_t end_time = env->NowMicros();
      double elapsed = static_cast<double>(end_time - start_time) * 1e-6;
      uint32_t qps = static_cast<uint32_t>(
          static_cast<double>(FLAGS_threads * FLAGS_ops_per_thread) / elapsed);
      fprintf(stdout, "Complete in %.3f s; QPS = %u\n", elapsed, qps);
    }
    return true;
  }

 private:
  std::shared_ptr<Cache> cache_;
  uint32_t num_threads_;

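  // Worker entry point: check in at the start barrier, wait for the main
  // thread's start signal, run the workload, then check out so the main
  // thread can stop the clock once the last worker is done.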
  static void ThreadBody(void* v) {
    ThreadState* thread = reinterpret_cast<ThreadState*>(v);
    SharedState* shared = thread->shared;

    {
      MutexLock l(shared->GetMutex());
      shared->IncInitialized();
      if (shared->AllInitialized()) {
        shared->GetCondVar()->SignalAll();
      }
      while (!shared->Started()) {
        shared->GetCondVar()->Wait();
      }
    }
    thread->shared->GetCacheBench()->OperateCache(thread);

    {
      MutexLock l(shared->GetMutex());
      shared->IncDone();
      if (shared->AllDone()) {
        shared->GetCondVar()->SignalAll();
      }
    }
  }

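  // prob_op is uniform in [0, 100). The three *_percent flags carve that
  // range into consecutive buckets: [0, insert), [insert, insert + lookup),
  // and [insert + lookup, insert + lookup + erase); any draw beyond the last
  // bucket is a no-op. With the defaults (40/50/10), every draw performs an
  // operation.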
  void OperateCache(ThreadState* thread) {
    for (uint64_t i = 0; i < FLAGS_ops_per_thread; i++) {
      uint64_t rand_key = thread->rnd.Next() % FLAGS_max_key;
      // Cast the uint64_t* to char*; the cache copies the key bytes on
      // insert.
      Slice key(reinterpret_cast<char*>(&rand_key), 8);
      int32_t prob_op = thread->rnd.Uniform(100);
      if (prob_op < FLAGS_insert_percent) {
        // do insert
        cache_->Insert(key, new char[10], 1, &deleter);
      } else if (prob_op < FLAGS_insert_percent + FLAGS_lookup_percent) {
        // do lookup
        auto handle = cache_->Lookup(key);
        if (handle) {
          cache_->Release(handle);
        }
      } else if (prob_op < FLAGS_insert_percent + FLAGS_lookup_percent +
                               FLAGS_erase_percent) {
        // do erase
        cache_->Erase(key);
      }
    }
  }

  void PrintEnv() const {
    printf("RocksDB version : %d.%d\n", kMajorVersion, kMinorVersion);
    printf("Number of threads : %d\n", FLAGS_threads);
    printf("Ops per thread : %" PRIu64 "\n", FLAGS_ops_per_thread);
    printf("Cache size : %" PRId64 "\n", FLAGS_cache_size);
    printf("Num shard bits : %d\n", FLAGS_num_shard_bits);
    printf("Max key : %" PRId64 "\n", FLAGS_max_key);
    printf("Populate cache : %d\n", FLAGS_populate_cache);
    printf("Insert percentage : %d%%\n", FLAGS_insert_percent);
    printf("Lookup percentage : %d%%\n", FLAGS_lookup_percent);
    printf("Erase percentage : %d%%\n", FLAGS_erase_percent);
    printf("----------------------------\n");
  }
};
}  // namespace rocksdb

int main(int argc, char** argv) {
  ParseCommandLineFlags(&argc, &argv, true);

  if (FLAGS_threads <= 0) {
    fprintf(stderr, "Number of threads must be positive\n");
    exit(1);
  }

  rocksdb::CacheBench bench;
  if (FLAGS_populate_cache) {
    bench.PopulateCache();
  }
  if (bench.Run()) {
    return 0;
  } else {
    return 1;
  }
}

#endif  // GFLAGS