1 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. See the AUTHORS file for names of contributors.
4
#include "leveldb/db.h"

#include <algorithm>
#include <atomic>
#include <cinttypes>
#include <string>

#include "gtest/gtest.h"
#include "benchmark/benchmark.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/cache.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
#include "leveldb/table.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/testutil.h"
27
28 namespace leveldb {
29
RandomString(Random * rnd,int len)30 static std::string RandomString(Random* rnd, int len) {
31 std::string r;
32 test::RandomString(rnd, len, &r);
33 return r;
34 }
35
RandomKey(Random * rnd)36 static std::string RandomKey(Random* rnd) {
37 int len =
38 (rnd->OneIn(3) ? 1 // Short sometimes to encourage collisions
39 : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
40 return test::RandomKey(rnd, len);
41 }
42
namespace {
// Thread-safe integer counter used by the tests to observe Env activity
// (e.g. counting random reads) from multiple threads.
class AtomicCounter {
 public:
  AtomicCounter() : count_(0) {}
  void Increment() { IncrementBy(1); }
  // Adds |count| to the counter under the mutex.
  void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
    MutexLock l(&mu_);
    count_ += count;
  }
  // Returns the current count.
  int Read() LOCKS_EXCLUDED(mu_) {
    MutexLock l(&mu_);
    return count_;
  }
  // Resets the count to zero.
  void Reset() LOCKS_EXCLUDED(mu_) {
    MutexLock l(&mu_);
    count_ = 0;
  }

 private:
  port::Mutex mu_;
  int count_ GUARDED_BY(mu_);
};

// Sleeps the calling thread for |millis| milliseconds.
void DelayMilliseconds(int millis) {
  Env::Default()->SleepForMicroseconds(millis * 1000);
}
}  // namespace
70
71 // Test Env to override default Env behavior for testing.
72 class TestEnv : public EnvWrapper {
73 public:
TestEnv(Env * base)74 explicit TestEnv(Env* base) : EnvWrapper(base), ignore_dot_files_(false) {}
75
SetIgnoreDotFiles(bool ignored)76 void SetIgnoreDotFiles(bool ignored) { ignore_dot_files_ = ignored; }
77
GetChildren(const std::string & dir,std::vector<std::string> * result)78 Status GetChildren(const std::string& dir,
79 std::vector<std::string>* result) override {
80 Status s = target()->GetChildren(dir, result);
81 if (!s.ok() || !ignore_dot_files_) {
82 return s;
83 }
84
85 std::vector<std::string>::iterator it = result->begin();
86 while (it != result->end()) {
87 if ((*it == ".") || (*it == "..")) {
88 it = result->erase(it);
89 } else {
90 ++it;
91 }
92 }
93
94 return s;
95 }
96
97 private:
98 bool ignore_dot_files_;
99 };
100
101 // Special Env used to delay background operations.
102 class SpecialEnv : public EnvWrapper {
103 public:
104 // sstable/log Sync() calls are blocked while this pointer is non-null.
105 std::atomic<bool> delay_data_sync_;
106
107 // sstable/log Sync() calls return an error.
108 std::atomic<bool> data_sync_error_;
109
110 // Simulate no-space errors while this pointer is non-null.
111 std::atomic<bool> no_space_;
112
113 // Simulate non-writable file system while this pointer is non-null.
114 std::atomic<bool> non_writable_;
115
116 // Force sync of manifest files to fail while this pointer is non-null.
117 std::atomic<bool> manifest_sync_error_;
118
119 // Force write to manifest files to fail while this pointer is non-null.
120 std::atomic<bool> manifest_write_error_;
121
122 bool count_random_reads_;
123 AtomicCounter random_read_counter_;
124
SpecialEnv(Env * base)125 explicit SpecialEnv(Env* base)
126 : EnvWrapper(base),
127 delay_data_sync_(false),
128 data_sync_error_(false),
129 no_space_(false),
130 non_writable_(false),
131 manifest_sync_error_(false),
132 manifest_write_error_(false),
133 count_random_reads_(false) {}
134
NewWritableFile(const std::string & f,WritableFile ** r)135 Status NewWritableFile(const std::string& f, WritableFile** r) {
136 class DataFile : public WritableFile {
137 private:
138 SpecialEnv* const env_;
139 WritableFile* const base_;
140
141 public:
142 DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {}
143 ~DataFile() { delete base_; }
144 Status Append(const Slice& data) {
145 if (env_->no_space_.load(std::memory_order_acquire)) {
146 // Drop writes on the floor
147 return Status::OK();
148 } else {
149 return base_->Append(data);
150 }
151 }
152 Status Close() { return base_->Close(); }
153 Status Flush() { return base_->Flush(); }
154 Status Sync() {
155 if (env_->data_sync_error_.load(std::memory_order_acquire)) {
156 return Status::IOError("simulated data sync error");
157 }
158 while (env_->delay_data_sync_.load(std::memory_order_acquire)) {
159 DelayMilliseconds(100);
160 }
161 return base_->Sync();
162 }
163 };
164 class ManifestFile : public WritableFile {
165 private:
166 SpecialEnv* env_;
167 WritableFile* base_;
168
169 public:
170 ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {}
171 ~ManifestFile() { delete base_; }
172 Status Append(const Slice& data) {
173 if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
174 return Status::IOError("simulated writer error");
175 } else {
176 return base_->Append(data);
177 }
178 }
179 Status Close() { return base_->Close(); }
180 Status Flush() { return base_->Flush(); }
181 Status Sync() {
182 if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
183 return Status::IOError("simulated sync error");
184 } else {
185 return base_->Sync();
186 }
187 }
188 };
189
190 if (non_writable_.load(std::memory_order_acquire)) {
191 return Status::IOError("simulated write error");
192 }
193
194 Status s = target()->NewWritableFile(f, r);
195 if (s.ok()) {
196 if (strstr(f.c_str(), ".ldb") != nullptr ||
197 strstr(f.c_str(), ".log") != nullptr) {
198 *r = new DataFile(this, *r);
199 } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
200 *r = new ManifestFile(this, *r);
201 }
202 }
203 return s;
204 }
205
NewRandomAccessFile(const std::string & f,RandomAccessFile ** r)206 Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
207 class CountingFile : public RandomAccessFile {
208 private:
209 RandomAccessFile* target_;
210 AtomicCounter* counter_;
211
212 public:
213 CountingFile(RandomAccessFile* target, AtomicCounter* counter)
214 : target_(target), counter_(counter) {}
215 ~CountingFile() override { delete target_; }
216 Status Read(uint64_t offset, size_t n, Slice* result,
217 char* scratch) const override {
218 counter_->Increment();
219 return target_->Read(offset, n, result, scratch);
220 }
221 };
222
223 Status s = target()->NewRandomAccessFile(f, r);
224 if (s.ok() && count_random_reads_) {
225 *r = new CountingFile(*r, &random_read_counter_);
226 }
227 return s;
228 }
229 };
230
// Test fixture: opens a fresh database in a temp directory through a
// SpecialEnv, and cycles through several option configurations via
// ChangeOptions().
class DBTest : public testing::Test {
 public:
  std::string dbname_;  // Directory holding the test database.
  SpecialEnv* env_;     // Failure-injecting Env wrapping Env::Default().
  DB* db_;              // Currently open database handle (may be nullptr).

  Options last_options_;  // Options used by the most recent (Try)Reopen().

  DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
    filter_policy_ = NewBloomFilterPolicy(10);
    dbname_ = testing::TempDir() + "db_test";
    DestroyDB(dbname_, Options());
    db_ = nullptr;
    Reopen();
  }

  ~DBTest() {
    delete db_;
    DestroyDB(dbname_, Options());
    delete env_;
    delete filter_policy_;
  }

  // Switch to a fresh database with the next option configuration to
  // test. Return false if there are no more configurations to test.
  bool ChangeOptions() {
    option_config_++;
    if (option_config_ >= kEnd) {
      return false;
    } else {
      DestroyAndReopen();
      return true;
    }
  }

  // Return the current option configuration.
  Options CurrentOptions() {
    Options options;
    options.reuse_logs = false;
    switch (option_config_) {
      case kReuse:
        options.reuse_logs = true;
        break;
      case kFilter:
        options.filter_policy = filter_policy_;
        break;
      case kUncompressed:
        options.compression = kNoCompression;
        break;
      default:
        break;
    }
    return options;
  }

  // Access the concrete implementation behind the public DB handle so the
  // tests can call TEST_* hooks.
  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }

  // Reopen the database, asserting success. nullptr => CurrentOptions().
  void Reopen(Options* options = nullptr) {
    ASSERT_LEVELDB_OK(TryReopen(options));
  }

  // Close the database without destroying its files.
  void Close() {
    delete db_;
    db_ = nullptr;
  }

  // Destroy all database files, then reopen with the given options.
  void DestroyAndReopen(Options* options = nullptr) {
    delete db_;
    db_ = nullptr;
    DestroyDB(dbname_, Options());
    ASSERT_LEVELDB_OK(TryReopen(options));
  }

  // Close and reopen the database, returning the resulting status.
  Status TryReopen(Options* options) {
    delete db_;
    db_ = nullptr;
    Options opts;
    if (options != nullptr) {
      opts = *options;
    } else {
      opts = CurrentOptions();
      opts.create_if_missing = true;
    }
    last_options_ = opts;

    return DB::Open(opts, dbname_, &db_);
  }

  // Convenience wrapper: write k->v with default WriteOptions.
  Status Put(const std::string& k, const std::string& v) {
    return db_->Put(WriteOptions(), k, v);
  }

  // Convenience wrapper: delete k with default WriteOptions.
  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }

  // Read k (optionally at a snapshot). Returns "NOT_FOUND" for a missing
  // key, or the status string on error.
  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  // Return a string that contains all key,value pairs in order,
  // formatted like "(k1->v1)(k2->v2)".
  std::string Contents() {
    std::vector<std::string> forward;
    std::string result;
    Iterator* iter = db_->NewIterator(ReadOptions());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      std::string s = IterStatus(iter);
      result.push_back('(');
      result.append(s);
      result.push_back(')');
      forward.push_back(s);
    }

    // Check reverse iteration results are the reverse of forward results
    size_t matched = 0;
    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
      EXPECT_LT(matched, forward.size());
      EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
      matched++;
    }
    EXPECT_EQ(matched, forward.size());

    delete iter;
    return result;
  }

  // Dump all internal entries (values and deletion markers) for user_key,
  // e.g. "[ v2, DEL, v1 ]". Used to verify compaction drops obsolete entries.
  std::string AllEntriesFor(const Slice& user_key) {
    Iterator* iter = dbfull()->TEST_NewInternalIterator();
    InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
    iter->Seek(target.Encode());
    std::string result;
    if (!iter->status().ok()) {
      result = iter->status().ToString();
    } else {
      result = "[ ";
      bool first = true;
      while (iter->Valid()) {
        ParsedInternalKey ikey;
        if (!ParseInternalKey(iter->key(), &ikey)) {
          result += "CORRUPTED";
        } else {
          // Stop once we leave the entries for user_key.
          if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
            break;
          }
          if (!first) {
            result += ", ";
          }
          first = false;
          switch (ikey.type) {
            case kTypeValue:
              result += iter->value().ToString();
              break;
            case kTypeDeletion:
              result += "DEL";
              break;
          }
        }
        iter->Next();
      }
      if (!first) {
        result += " ";
      }
      result += "]";
    }
    delete iter;
    return result;
  }

  // Number of sstable files currently at the given level.
  int NumTableFilesAtLevel(int level) {
    std::string property;
    EXPECT_TRUE(db_->GetProperty(
        "leveldb.num-files-at-level" + NumberToString(level), &property));
    return std::stoi(property);
  }

  // Total number of sstable files across all levels.
  int TotalTableFiles() {
    int result = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      result += NumTableFilesAtLevel(level);
    }
    return result;
  }

  // Return spread of files per level
  std::string FilesPerLevel() {
    std::string result;
    int last_non_zero_offset = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      int f = NumTableFilesAtLevel(level);
      char buf[100];
      std::snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
      result += buf;
      if (f > 0) {
        last_non_zero_offset = result.size();
      }
    }
    // Trim trailing ",0,0,..." so e.g. "1,1" rather than "1,1,0,0,0,0,0".
    result.resize(last_non_zero_offset);
    return result;
  }

  // Number of files (of any kind) in the database directory.
  int CountFiles() {
    std::vector<std::string> files;
    env_->GetChildren(dbname_, &files);
    return static_cast<int>(files.size());
  }

  // Approximate on-disk size of the key range [start,limit).
  uint64_t Size(const Slice& start, const Slice& limit) {
    Range r(start, limit);
    uint64_t size;
    db_->GetApproximateSizes(&r, 1, &size);
    return size;
  }

  // Compact the given user-key range across all levels.
  void Compact(const Slice& start, const Slice& limit) {
    db_->CompactRange(&start, &limit);
  }

  // Do n memtable compactions, each of which produces an sstable
  // covering the range [small_key,large_key].
  void MakeTables(int n, const std::string& small_key,
                  const std::string& large_key) {
    for (int i = 0; i < n; i++) {
      Put(small_key, "begin");
      Put(large_key, "end");
      dbfull()->TEST_CompactMemTable();
    }
  }

  // Prevent pushing of new sstables into deeper levels by adding
  // tables that cover a specified range to all levels.
  void FillLevels(const std::string& smallest, const std::string& largest) {
    MakeTables(config::kNumLevels, smallest, largest);
  }

  // Debugging aid: print per-level file counts and max overlap to stderr.
  void DumpFileCounts(const char* label) {
    std::fprintf(stderr, "---\n%s:\n", label);
    std::fprintf(
        stderr, "maxoverlap: %lld\n",
        static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
    for (int level = 0; level < config::kNumLevels; level++) {
      int num = NumTableFilesAtLevel(level);
      if (num > 0) {
        std::fprintf(stderr, "  level %3d : %d files\n", level, num);
      }
    }
  }

  // Value of the "leveldb.sstables" property (human-readable table list).
  std::string DumpSSTableList() {
    std::string property;
    db_->GetProperty("leveldb.sstables", &property);
    return property;
  }

  // Render an iterator's position as "key->value", or "(invalid)".
  std::string IterStatus(Iterator* iter) {
    std::string result;
    if (iter->Valid()) {
      result = iter->key().ToString() + "->" + iter->value().ToString();
    } else {
      result = "(invalid)";
    }
    return result;
  }

  // Delete one sstable file, if any exists. Returns true if one was deleted.
  bool DeleteAnSSTFile() {
    std::vector<std::string> filenames;
    EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
        EXPECT_LEVELDB_OK(env_->RemoveFile(TableFileName(dbname_, number)));
        return true;
      }
    }
    return false;
  }

  // Returns number of files renamed.
  int RenameLDBToSST() {
    std::vector<std::string> filenames;
    EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
    uint64_t number;
    FileType type;
    int files_renamed = 0;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
        const std::string from = TableFileName(dbname_, number);
        const std::string to = SSTTableFileName(dbname_, number);
        EXPECT_LEVELDB_OK(env_->RenameFile(from, to));
        files_renamed++;
      }
    }
    return files_renamed;
  }

 private:
  // Sequence of option configurations to try
  enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };

  const FilterPolicy* filter_policy_;
  int option_config_;
};
541
TEST_F(DBTest, Empty) {
  // A freshly created database contains no keys under any option config.
  do {
    ASSERT_TRUE(db_ != nullptr);
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
548
TEST_F(DBTest, EmptyKey) {
  // The empty string is a valid key: it can be written and overwritten.
  do {
    ASSERT_LEVELDB_OK(Put("", "v1"));
    ASSERT_EQ("v1", Get(""));
    ASSERT_LEVELDB_OK(Put("", "v2"));
    ASSERT_EQ("v2", Get(""));
  } while (ChangeOptions());
}
557
TEST_F(DBTest, EmptyValue) {
  // An empty value is stored and returned as "", distinct from NOT_FOUND.
  do {
    ASSERT_LEVELDB_OK(Put("key", "v1"));
    ASSERT_EQ("v1", Get("key"));
    ASSERT_LEVELDB_OK(Put("key", ""));
    ASSERT_EQ("", Get("key"));
    ASSERT_LEVELDB_OK(Put("key", "v2"));
    ASSERT_EQ("v2", Get("key"));
  } while (ChangeOptions());
}
568
TEST_F(DBTest, ReadWrite) {
  // Basic put/get and overwrite semantics across keys.
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_LEVELDB_OK(Put("bar", "v2"));
    ASSERT_LEVELDB_OK(Put("foo", "v3"));
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
  } while (ChangeOptions());
}
579
TEST_F(DBTest, PutDeleteGet) {
  // A deleted key reads back as NOT_FOUND.
  do {
    ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), "foo"));
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
590
TEST_F(DBTest, GetFromImmutableLayer) {
  // Reads must still be served while the immutable memtable is being
  // flushed (sync is stalled via delay_data_sync_).
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    Reopen(&options);

    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));

    // Block sync calls.
    env_->delay_data_sync_.store(true, std::memory_order_release);
    Put("k1", std::string(100000, 'x'));  // Fill memtable.
    Put("k2", std::string(100000, 'y'));  // Trigger compaction.
    ASSERT_EQ("v1", Get("foo"));
    // Release sync calls.
    env_->delay_data_sync_.store(false, std::memory_order_release);
  } while (ChangeOptions());
}
610
TEST_F(DBTest, GetFromVersions) {
  // Reads hit sstables once the memtable has been compacted away.
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v1", Get("foo"));
  } while (ChangeOptions());
}
618
TEST_F(DBTest, GetMemUsage) {
  // The approximate-memory-usage property is positive and sane (< 5 MiB
  // for this tiny workload).
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    std::string val;
    ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
    int mem_usage = std::stoi(val);
    ASSERT_GT(mem_usage, 0);
    ASSERT_LT(mem_usage, 5 * 1024 * 1024);
  } while (ChangeOptions());
}
629
TEST_F(DBTest, GetSnapshot) {
  // A snapshot keeps observing the old value across writes and memtable
  // compaction.
  do {
    // Try with both a short key and a long key
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_LEVELDB_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      ASSERT_LEVELDB_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      dbfull()->TEST_CompactMemTable();
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      db_->ReleaseSnapshot(s1);
    }
  } while (ChangeOptions());
}
647
TEST_F(DBTest, GetIdenticalSnapshots) {
  // Multiple snapshots taken at the same sequence number behave
  // independently: releasing one must not invalidate the others.
  do {
    // Try with both a short key and a long key
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_LEVELDB_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      const Snapshot* s2 = db_->GetSnapshot();
      const Snapshot* s3 = db_->GetSnapshot();
      ASSERT_LEVELDB_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      ASSERT_EQ("v1", Get(key, s2));
      ASSERT_EQ("v1", Get(key, s3));
      db_->ReleaseSnapshot(s1);
      dbfull()->TEST_CompactMemTable();
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s2));
      db_->ReleaseSnapshot(s2);
      ASSERT_EQ("v1", Get(key, s3));
      db_->ReleaseSnapshot(s3);
    }
  } while (ChangeOptions());
}
672
TEST_F(DBTest, IterateOverEmptySnapshot) {
  // An iterator over a snapshot taken before any writes sees nothing,
  // both before and after the memtable is compacted to an sstable.
  do {
    const Snapshot* snapshot = db_->GetSnapshot();
    ReadOptions read_options;
    read_options.snapshot = snapshot;
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_LEVELDB_OK(Put("foo", "v2"));

    Iterator* iterator1 = db_->NewIterator(read_options);
    iterator1->SeekToFirst();
    ASSERT_TRUE(!iterator1->Valid());
    delete iterator1;

    dbfull()->TEST_CompactMemTable();

    Iterator* iterator2 = db_->NewIterator(read_options);
    iterator2->SeekToFirst();
    ASSERT_TRUE(!iterator2->Valid());
    delete iterator2;

    db_->ReleaseSnapshot(snapshot);
  } while (ChangeOptions());
}
696
TEST_F(DBTest, GetLevel0Ordering) {
  do {
    // Check that we process level-0 files in correct order.  The code
    // below generates two level-0 files where the earlier one comes
    // before the later one in the level-0 file list since the earlier
    // one has a smaller "smallest" key.
    ASSERT_LEVELDB_OK(Put("bar", "b"));
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    dbfull()->TEST_CompactMemTable();
    // The newer file must win even though it sorts later in the list.
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
711
TEST_F(DBTest, GetOrderedByLevels) {
  // A newer value in the memtable/level-0 must shadow an older value that
  // was compacted into a deeper level.
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    Compact("a", "z");
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
723
TEST_F(DBTest, GetPicksCorrectFile) {
  do {
    // Arrange to have multiple files in a non-level-0 level.
    ASSERT_LEVELDB_OK(Put("a", "va"));
    Compact("a", "b");
    ASSERT_LEVELDB_OK(Put("x", "vx"));
    Compact("x", "y");
    ASSERT_LEVELDB_OK(Put("f", "vf"));
    Compact("f", "g");
    // Each lookup must be routed to the file whose range covers the key.
    ASSERT_EQ("va", Get("a"));
    ASSERT_EQ("vf", Get("f"));
    ASSERT_EQ("vx", Get("x"));
  } while (ChangeOptions());
}
738
TEST_F(DBTest, GetEncountersEmptyLevel) {
  do {
    // Arrange for the following to happen:
    //   * sstable A in level 0
    //   * nothing in level 1
    //   * sstable B in level 2
    // Then do enough Get() calls to arrange for an automatic compaction
    // of sstable A.  A bug would cause the compaction to be marked as
    // occurring at level 1 (instead of the correct level 0).

    // Step 1: First place sstables in levels 0 and 2
    int compaction_count = 0;
    while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) {
      ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
      compaction_count++;
      Put("a", "begin");
      Put("z", "end");
      dbfull()->TEST_CompactMemTable();
    }

    // Step 2: clear level 1 if necessary.
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
    ASSERT_EQ(NumTableFilesAtLevel(1), 0);
    ASSERT_EQ(NumTableFilesAtLevel(2), 1);

    // Step 3: read a bunch of times to trigger a seek-based compaction.
    for (int i = 0; i < 1000; i++) {
      ASSERT_EQ("NOT_FOUND", Get("missing"));
    }

    // Step 4: Wait for compaction to finish
    DelayMilliseconds(1000);

    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  } while (ChangeOptions());
}
776
TEST_F(DBTest, IterEmpty) {
  // All positioning operations on an empty database leave the iterator
  // invalid.
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("foo");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
791
TEST_F(DBTest, IterSingle) {
  // Exhaustive iterator positioning over a database with exactly one entry.
  ASSERT_LEVELDB_OK(Put("a", "va"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Seek targets before, at, and past the single key.
  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
829
TEST_F(DBTest, IterMulti) {
  // Iterator positioning, direction switches, and snapshot isolation over
  // three entries.
  ASSERT_LEVELDB_OK(Put("a", "va"));
  ASSERT_LEVELDB_OK(Put("b", "vb"));
  ASSERT_LEVELDB_OK(Put("c", "vc"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Seek lands on the first key >= target.
  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("ax");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("z");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Switch from reverse to forward
  iter->SeekToLast();
  iter->Prev();
  iter->Prev();
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Switch from forward to reverse
  iter->SeekToFirst();
  iter->Next();
  iter->Next();
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Make sure iter stays at snapshot: writes after iterator creation are
  // not visible to it.
  ASSERT_LEVELDB_OK(Put("a", "va2"));
  ASSERT_LEVELDB_OK(Put("a2", "va3"));
  ASSERT_LEVELDB_OK(Put("b", "vb2"));
  ASSERT_LEVELDB_OK(Put("c", "vc2"));
  ASSERT_LEVELDB_OK(Delete("b"));
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
912
TEST_F(DBTest, IterSmallAndLargeMix) {
  // Forward and reverse iteration over a mix of tiny and ~100KB values.
  ASSERT_LEVELDB_OK(Put("a", "va"));
  ASSERT_LEVELDB_OK(Put("b", std::string(100000, 'b')));
  ASSERT_LEVELDB_OK(Put("c", "vc"));
  ASSERT_LEVELDB_OK(Put("d", std::string(100000, 'd')));
  ASSERT_LEVELDB_OK(Put("e", std::string(100000, 'e')));

  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
950
TEST_F(DBTest, IterMultiWithDelete) {
  // Reverse iteration skips over a deleted key.
  do {
    ASSERT_LEVELDB_OK(Put("a", "va"));
    ASSERT_LEVELDB_OK(Put("b", "vb"));
    ASSERT_LEVELDB_OK(Put("c", "vc"));
    ASSERT_LEVELDB_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));

    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    delete iter;
  } while (ChangeOptions());
}
967
TEST_F(DBTest, IterMultiWithDeleteAndCompaction) {
  // Like IterMultiWithDelete, but the deletion marker for "b" sits above
  // a compacted sstable entry; iteration must still skip it.
  do {
    ASSERT_LEVELDB_OK(Put("b", "vb"));
    ASSERT_LEVELDB_OK(Put("c", "vc"));
    ASSERT_LEVELDB_OK(Put("a", "va"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));

    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    // Seeking to the deleted key lands on the next live key.
    iter->Seek("b");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    delete iter;
  } while (ChangeOptions());
}
987
TEST_F(DBTest, Recover) {
  // Data written before a reopen (including overwrites) survives recovery.
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_LEVELDB_OK(Put("baz", "v5"));

    Reopen();
    ASSERT_EQ("v1", Get("foo"));

    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v5", Get("baz"));
    ASSERT_LEVELDB_OK(Put("bar", "v2"));
    ASSERT_LEVELDB_OK(Put("foo", "v3"));

    Reopen();
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_LEVELDB_OK(Put("foo", "v4"));
    ASSERT_EQ("v4", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v5", Get("baz"));
  } while (ChangeOptions());
}
1009
TEST_F(DBTest, RecoveryWithEmptyLog) {
  // Reopening twice in a row leaves an empty log file; recovery must
  // handle it and later writes must still persist.
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    Reopen();
    Reopen();
    ASSERT_LEVELDB_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
  } while (ChangeOptions());
}
1021
// Check that writes done during a memtable compaction are recovered
// if the database is shutdown during the memtable compaction.
TEST_F(DBTest, RecoverDuringMemtableCompaction) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 1000000;
    Reopen(&options);

    // Trigger a long memtable compaction and reopen the database during it
    ASSERT_LEVELDB_OK(Put("foo", "v1"));  // Goes to 1st log file
    ASSERT_LEVELDB_OK(
        Put("big1", std::string(10000000, 'x')));  // Fills memtable
    ASSERT_LEVELDB_OK(
        Put("big2", std::string(1000, 'y')));  // Triggers compaction
    ASSERT_LEVELDB_OK(Put("bar", "v2"));       // Goes to new log file

    Reopen(&options);
    // All four writes must survive, regardless of which log they went to.
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
    ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
  } while (ChangeOptions());
}
1046
// Builds the canonical test key for index i: "key" followed by the
// zero-padded six-digit (or wider, for large i) decimal value.
static std::string Key(int i) {
  char formatted[100];
  std::snprintf(formatted, sizeof(formatted), "key%06d", i);
  return formatted;
}
1052
// With a tiny (10KB) write buffer, writing 500 values of ~1KB each must
// flush several memtables to table files (minor compactions); all data
// must stay readable before and after a reopen.
TEST_F(DBTest, MinorCompactionsHappen) {
  Options options = CurrentOptions();
  options.write_buffer_size = 10000;
  Reopen(&options);

  const int N = 500;

  int starting_num_tables = TotalTableFiles();
  for (int i = 0; i < N; i++) {
    ASSERT_LEVELDB_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
  }
  int ending_num_tables = TotalTableFiles();
  ASSERT_GT(ending_num_tables, starting_num_tables);  // At least one flush occurred

  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }

  Reopen();

  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }
}
1077
// Recovery of a log that is much larger than the write buffer must flush
// multiple table files while replaying the log.
TEST_F(DBTest, RecoverWithLargeLog) {
  {
    Options options = CurrentOptions();
    Reopen(&options);
    ASSERT_LEVELDB_OK(Put("big1", std::string(200000, '1')));
    ASSERT_LEVELDB_OK(Put("big2", std::string(200000, '2')));
    ASSERT_LEVELDB_OK(Put("small3", std::string(10, '3')));
    ASSERT_LEVELDB_OK(Put("small4", std::string(10, '4')));
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);  // Everything still in the log/memtable
  }

  // Make sure that if we re-open with a small write buffer size that
  // we flush table files in the middle of a large log file.
  Options options = CurrentOptions();
  options.write_buffer_size = 100000;
  Reopen(&options);
  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
  ASSERT_EQ(std::string(200000, '1'), Get("big1"));
  ASSERT_EQ(std::string(200000, '2'), Get("big2"));
  ASSERT_EQ(std::string(10, '3'), Get("small3"));
  ASSERT_EQ(std::string(10, '4'), Get("small4"));
  ASSERT_GT(NumTableFilesAtLevel(0), 1);
}
1101
// Compacting 8MB from level-0 must split the output into multiple
// level-1 files rather than producing one oversized file.
TEST_F(DBTest, CompactionsGenerateMultipleFiles) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100000000;  // Large write buffer
  Reopen(&options);

  Random rnd(301);

  // Write 8MB (80 values, each 100K)
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  std::vector<std::string> values;
  for (int i = 0; i < 80; i++) {
    values.push_back(RandomString(&rnd, 100000));
    ASSERT_LEVELDB_OK(Put(Key(i), values[i]));
  }

  // Reopening moves updates to level-0
  Reopen(&options);
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);

  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_GT(NumTableFilesAtLevel(1), 1);  // Output was split across files
  for (int i = 0; i < 80; i++) {
    ASSERT_EQ(Get(Key(i)), values[i]);
  }
}
1127
// Repeatedly overwriting one key must not grow the file count without
// bound: obsolete versions are compacted away.
TEST_F(DBTest, RepeatedWritesToSameKey) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer
  Reopen(&options);

  // We must have at most one file per level except for level-0,
  // which may have up to kL0_StopWritesTrigger files.
  const int kMaxFiles = config::kNumLevels + config::kL0_StopWritesTrigger;

  Random rnd(301);
  std::string value = RandomString(&rnd, 2 * options.write_buffer_size);
  for (int i = 0; i < 5 * kMaxFiles; i++) {
    Put("key", value);  // Each write alone exceeds the write buffer
    ASSERT_LE(TotalTableFiles(), kMaxFiles);
    std::fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
  }
}
1146
// After sparse updates touching a huge key range, compactions must not
// create a file that overlaps too much data in the next level (which
// would force enormous future compactions).
TEST_F(DBTest, SparseMerge) {
  Options options = CurrentOptions();
  options.compression = kNoCompression;  // Keep on-disk sizes predictable
  Reopen(&options);

  FillLevels("A", "Z");

  // Suppose there is:
  //    small amount of data with prefix A
  //    large amount of data with prefix B
  //    small amount of data with prefix C
  // and that recent updates have made small changes to all three prefixes.
  // Check that we do not do a compaction that merges all of B in one shot.
  const std::string value(1000, 'x');
  Put("A", "va");
  // Write approximately 100MB of "B" values
  for (int i = 0; i < 100000; i++) {
    char key[100];
    std::snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  Put("C", "vc");
  dbfull()->TEST_CompactMemTable();
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);

  // Make sparse update
  Put("A", "va2");
  Put("B100", "bvalue2");
  Put("C", "vc2");
  dbfull()->TEST_CompactMemTable();

  // Compactions should not cause us to create a situation where
  // a file overlaps too much data at the next level.
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
}
1186
// Returns true iff val lies in the inclusive range [low, high]; on
// failure, logs the offending values to stderr to aid test debugging.
// Uses PRIu64 (the file already includes <cinttypes>) instead of
// casting each argument to unsigned long long.
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  bool result = (val >= low) && (val <= high);
  if (!result) {
    std::fprintf(stderr, "Value %" PRIu64 " is not in range [%" PRIu64 ", %" PRIu64 "]\n",
                 val, low, high);
  }
  return result;
}
1196
// GetApproximateSizes() (via Size()) must report sizes proportional to
// the data actually stored on disk, and remain consistent across
// reopens and partial compactions. Memtable contents are not counted.
TEST_F(DBTest, ApproximateSizes) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;  // Large write buffer
    options.compression = kNoCompression;
    DestroyAndReopen();

    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));  // Empty DB has size 0
    Reopen(&options);
    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    const int N = 80;
    static const int S1 = 100000;
    static const int S2 = 105000;  // Allow some expansion from metadata
    Random rnd(301);
    for (int i = 0; i < N; i++) {
      ASSERT_LEVELDB_OK(Put(Key(i), RandomString(&rnd, S1)));
    }

    // 0 because GetApproximateSizes() does not account for memtable space
    ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));

    if (options.reuse_logs) {
      // Recovery will reuse memtable, and GetApproximateSizes() does not
      // account for memtable usage;
      Reopen(&options);
      ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
      continue;
    }

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);

      // Compact a different 10-key slice each pass; size estimates must
      // stay accurate regardless of which files have been compacted.
      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        for (int i = 0; i < N; i += 10) {
          ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i));
          ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1),
                              S2 * (i + 1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10));
        }
        ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50));
        ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50));

        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
        Slice cstart = cstart_str;
        Slice cend = cend_str;
        dbfull()->TEST_CompactRange(0, &cstart, &cend);
      }

      ASSERT_EQ(NumTableFilesAtLevel(0), 0);
      ASSERT_GT(NumTableFilesAtLevel(1), 0);
    }
  } while (ChangeOptions());
}
1255
// Size estimates must be accurate when file sizes vary widely (mix of
// 10K, 100K, and 300K values), across several reopen/compact cycles.
TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;  // Keep on-disk sizes predictable
    Reopen();

    Random rnd(301);
    std::string big1 = RandomString(&rnd, 100000);
    ASSERT_LEVELDB_OK(Put(Key(0), RandomString(&rnd, 10000)));
    ASSERT_LEVELDB_OK(Put(Key(1), RandomString(&rnd, 10000)));
    ASSERT_LEVELDB_OK(Put(Key(2), big1));
    ASSERT_LEVELDB_OK(Put(Key(3), RandomString(&rnd, 10000)));
    ASSERT_LEVELDB_OK(Put(Key(4), big1));  // big1 stored twice, under two keys
    ASSERT_LEVELDB_OK(Put(Key(5), RandomString(&rnd, 10000)));
    ASSERT_LEVELDB_OK(Put(Key(6), RandomString(&rnd, 300000)));
    ASSERT_LEVELDB_OK(Put(Key(7), RandomString(&rnd, 10000)));

    if (options.reuse_logs) {
      // Need to force a memtable compaction since recovery does not do so.
      ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
    }

    // Check sizes across recovery by reopening a few times.
    // Bounds are cumulative: Size("", Key(i)) covers keys 0..i-1.
    for (int run = 0; run < 3; run++) {
      Reopen(&options);

      ASSERT_TRUE(Between(Size("", Key(0)), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000));
      ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
      ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
      ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
      ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));

      ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));

      dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    }
  } while (ChangeOptions());
}
1298
// An iterator must keep pinning the DB state it was created against,
// even after heavy subsequent writes trigger compactions.
TEST_F(DBTest, IteratorPinsRef) {
  Put("foo", "hello");

  // Get iterator that will yield the current contents of the DB.
  Iterator* iter = db_->NewIterator(ReadOptions());

  // Write to force compactions
  Put("foo", "newvalue1");
  for (int i = 0; i < 100; i++) {
    ASSERT_LEVELDB_OK(
        Put(Key(i), Key(i) + std::string(100000, 'v')));  // 100K values
  }
  Put("foo", "newvalue2");

  // The iterator still sees only the single pre-existing entry.
  iter->SeekToFirst();
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("foo", iter->key().ToString());
  ASSERT_EQ("hello", iter->value().ToString());
  iter->Next();
  ASSERT_TRUE(!iter->Valid());
  delete iter;
}
1321
// Each snapshot must observe the value current when it was taken, and
// releasing one snapshot must not disturb the others.
TEST_F(DBTest, Snapshot) {
  do {
    Put("foo", "v1");
    const Snapshot* s1 = db_->GetSnapshot();
    Put("foo", "v2");
    const Snapshot* s2 = db_->GetSnapshot();
    Put("foo", "v3");
    const Snapshot* s3 = db_->GetSnapshot();

    Put("foo", "v4");
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v3", Get("foo", s3));
    ASSERT_EQ("v4", Get("foo"));

    // Release snapshots out of order; survivors must be unaffected.
    db_->ReleaseSnapshot(s3);
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));

    db_->ReleaseSnapshot(s1);
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));

    db_->ReleaseSnapshot(s2);
    ASSERT_EQ("v4", Get("foo"));
  } while (ChangeOptions());
}
1350
// Old values hidden by newer writes are retained only while a snapshot
// can still see them; after the snapshot is released, compaction must
// drop them and reclaim the space.
TEST_F(DBTest, HiddenValuesAreRemoved) {
  do {
    Random rnd(301);
    FillLevels("a", "z");

    std::string big = RandomString(&rnd, 50000);
    Put("foo", big);
    Put("pastfoo", "v");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put("foo", "tiny");
    Put("pastfoo2", "v2");  // Advance sequence number one more

    ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
    ASSERT_GT(NumTableFilesAtLevel(0), 0);

    ASSERT_EQ(big, Get("foo", snapshot));  // Snapshot still sees old value
    ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000));
    db_->ReleaseSnapshot(snapshot);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
    Slice x("x");
    dbfull()->TEST_CompactRange(0, nullptr, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");  // big version compacted away
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    ASSERT_GE(NumTableFilesAtLevel(1), 1);
    dbfull()->TEST_CompactRange(1, nullptr, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");

    ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));  // Space reclaimed
  } while (ChangeOptions());
}
1381
// A deletion marker that is hidden by a newer value may be dropped by a
// compaction even when the deleted value still exists at a deeper level.
TEST_F(DBTest, DeletionMarkers1) {
  Put("foo", "v1");
  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level

  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);

  Delete("foo");
  Put("foo", "v2");
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  Slice z("z");
  dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
  // DEL eliminated, but v1 remains because we aren't compacting that level
  // (DEL can be eliminated because v2 hides v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
}
1410
// Unlike DeletionMarkers1, here no newer value hides the deletion, so
// the DEL marker must be kept until compaction reaches the base level
// for the key (otherwise the deleted value could resurface).
TEST_F(DBTest, DeletionMarkers2) {
  Put("foo", "v1");
  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level

  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);

  Delete("foo");
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
  // DEL kept: "last" file overlaps
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
}
1436
// Regression test: a memtable compaction must detect overlap with
// existing level-0 files instead of pushing its output (here, a
// deletion) to a deeper level where it would be shadowed.
TEST_F(DBTest, OverlapInLevel0) {
  do {
    ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";

    // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
    // 0.
    ASSERT_LEVELDB_OK(Put("100", "v100"));
    ASSERT_LEVELDB_OK(Put("999", "v999"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Delete("100"));
    ASSERT_LEVELDB_OK(Delete("999"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("0,1,1", FilesPerLevel());

    // Make files spanning the following ranges in level-0:
    //  files[0]  200 .. 900
    //  files[1]  300 .. 500
    // Note that files are sorted by smallest key.
    ASSERT_LEVELDB_OK(Put("300", "v300"));
    ASSERT_LEVELDB_OK(Put("500", "v500"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Put("200", "v200"));
    ASSERT_LEVELDB_OK(Put("600", "v600"));
    ASSERT_LEVELDB_OK(Put("900", "v900"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("2,1,1", FilesPerLevel());

    // Compact away the placeholder files we created initially
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    dbfull()->TEST_CompactRange(2, nullptr, nullptr);
    ASSERT_EQ("2", FilesPerLevel());

    // Do a memtable compaction.  Before bug-fix, the compaction would
    // not detect the overlap with level-0 files and would incorrectly place
    // the deletion in a deeper level.
    ASSERT_LEVELDB_OK(Delete("600"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("3", FilesPerLevel());
    ASSERT_EQ("NOT_FOUND", Get("600"));
  } while (ChangeOptions());
}
1478
// Regression test for leveldb issue 44 (case a): a specific sequence of
// puts, deletes, and reopens must not let background compaction
// resurrect or lose data.
TEST_F(DBTest, L0_CompactionBug_Issue44_a) {
  Reopen();
  ASSERT_LEVELDB_OK(Put("b", "v"));
  Reopen();
  ASSERT_LEVELDB_OK(Delete("b"));
  ASSERT_LEVELDB_OK(Delete("a"));
  Reopen();
  ASSERT_LEVELDB_OK(Delete("a"));
  Reopen();
  ASSERT_LEVELDB_OK(Put("a", "v"));
  Reopen();
  Reopen();
  ASSERT_EQ("(a->v)", Contents());
  DelayMilliseconds(1000);  // Wait for compaction to finish
  ASSERT_EQ("(a->v)", Contents());  // Contents unchanged by compaction
}
1495
// Regression test for leveldb issue 44 (case b): same class of bug as
// case a, exercised with empty keys and interleaved deletes/reopens.
TEST_F(DBTest, L0_CompactionBug_Issue44_b) {
  Reopen();
  Put("", "");
  Reopen();
  Delete("e");
  Put("", "");
  Reopen();
  Put("c", "cv");
  Reopen();
  Put("", "");
  Reopen();
  Put("", "");
  DelayMilliseconds(1000);  // Wait for compaction to finish
  Reopen();
  Put("d", "dv");
  Reopen();
  Put("", "");
  Reopen();
  Delete("d");
  Delete("b");
  Reopen();
  ASSERT_EQ("(->)(c->cv)", Contents());
  DelayMilliseconds(1000);  // Wait for compaction to finish
  ASSERT_EQ("(->)(c->cv)", Contents());  // Contents unchanged by compaction
}
1521
// Regression test for issue 474: interleave std::fflush(nullptr) (which
// flushes all open output streams) with writes; presumably this once
// interfered with the DB's files — every Put must still succeed.
TEST_F(DBTest, Fflush_Issue474) {
  static const int kNum = 100000;
  Random rnd(test::RandomSeed());
  for (int i = 0; i < kNum; i++) {
    std::fflush(nullptr);
    ASSERT_LEVELDB_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
  }
}
1530
// Opening an existing DB with a comparator whose Name() differs from
// the one the DB was created with must fail, and the error message must
// mention the comparator mismatch.
TEST_F(DBTest, ComparatorCheck) {
  // Same ordering as the default comparator, but a different Name().
  class NewComparator : public Comparator {
   public:
    const char* Name() const override { return "leveldb.NewComparator"; }
    int Compare(const Slice& a, const Slice& b) const override {
      return BytewiseComparator()->Compare(a, b);
    }
    void FindShortestSeparator(std::string* s, const Slice& l) const override {
      BytewiseComparator()->FindShortestSeparator(s, l);
    }
    void FindShortSuccessor(std::string* key) const override {
      BytewiseComparator()->FindShortSuccessor(key);
    }
  };
  NewComparator cmp;
  Options new_options = CurrentOptions();
  new_options.comparator = &cmp;
  Status s = TryReopen(&new_options);
  ASSERT_TRUE(!s.ok());
  ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
      << s.ToString();
}
1553
// A user-supplied comparator (numeric ordering over "[<number>]" keys)
// must be honored for lookups and compactions; keys with distinct
// spellings but equal numeric value ("[10]" vs "[0xa]") must collide.
TEST_F(DBTest, CustomComparator) {
  class NumberComparator : public Comparator {
   public:
    const char* Name() const override { return "test.NumberComparator"; }
    int Compare(const Slice& a, const Slice& b) const override {
      return ToNumber(a) - ToNumber(b);
    }
    void FindShortestSeparator(std::string* s, const Slice& l) const override {
      ToNumber(*s);  // Check format
      ToNumber(l);   // Check format
    }
    void FindShortSuccessor(std::string* key) const override {
      ToNumber(*key);  // Check format
    }

   private:
    // Parses "[<number>]" (decimal or 0x-hex via %i); EXPECTs on any
    // malformed key so format violations surface as test failures.
    static int ToNumber(const Slice& x) {
      // Check that there are no extra characters.
      EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
          << EscapeString(x);
      int val;
      char ignored;
      EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
          << EscapeString(x);
      return val;
    }
  };
  NumberComparator cmp;
  Options new_options = CurrentOptions();
  new_options.create_if_missing = true;
  new_options.comparator = &cmp;
  new_options.filter_policy = nullptr;   // Cannot use bloom filters
  new_options.write_buffer_size = 1000;  // Compact more often
  DestroyAndReopen(&new_options);
  ASSERT_LEVELDB_OK(Put("[10]", "ten"));
  ASSERT_LEVELDB_OK(Put("[0x14]", "twenty"));
  for (int i = 0; i < 2; i++) {
    ASSERT_EQ("ten", Get("[10]"));
    ASSERT_EQ("ten", Get("[0xa]"));
    ASSERT_EQ("twenty", Get("[20]"));
    ASSERT_EQ("twenty", Get("[0x14]"));
    ASSERT_EQ("NOT_FOUND", Get("[15]"));
    ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
    Compact("[0]", "[9999]");
  }

  for (int run = 0; run < 2; run++) {
    for (int i = 0; i < 1000; i++) {
      char buf[100];
      std::snprintf(buf, sizeof(buf), "[%d]", i * 10);
      ASSERT_LEVELDB_OK(Put(buf, buf));
    }
    Compact("[0]", "[1000000]");
  }
}
1609
// Manual CompactRange: ranges that miss all files are no-ops, ranges
// that overlap push files to the bottom level, and a full-range
// compaction collapses everything into a single bottom-level file.
TEST_F(DBTest, ManualCompaction) {
  ASSERT_EQ(config::kMaxMemCompactLevel, 2)
      << "Need to update this test to match kMaxMemCompactLevel";

  MakeTables(3, "p", "q");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range falls before files
  Compact("", "c");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range falls after files
  Compact("r", "z");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range overlaps files
  Compact("p1", "p9");
  ASSERT_EQ("0,0,1", FilesPerLevel());

  // Populate a different range
  MakeTables(3, "c", "e");
  ASSERT_EQ("1,1,2", FilesPerLevel());

  // Compact just the new range
  Compact("b", "f");
  ASSERT_EQ("0,0,2", FilesPerLevel());

  // Compact all
  MakeTables(1, "a", "z");
  ASSERT_EQ("0,1,2", FilesPerLevel());
  db_->CompactRange(nullptr, nullptr);  // nullptr endpoints = whole key space
  ASSERT_EQ("0,0,1", FilesPerLevel());
}
1643
// DB::Open must honor create_if_missing and error_if_exists in all four
// combinations of (db exists, flag set).
TEST_F(DBTest, DBOpen_Options) {
  std::string dbname = testing::TempDir() + "db_options_test";
  DestroyDB(dbname, Options());

  // Does not exist, and create_if_missing == false: error
  DB* db = nullptr;
  Options opts;
  opts.create_if_missing = false;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
  ASSERT_TRUE(db == nullptr);

  // Does not exist, and create_if_missing == true: OK
  opts.create_if_missing = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_LEVELDB_OK(s);
  ASSERT_TRUE(db != nullptr);

  delete db;
  db = nullptr;

  // Does exist, and error_if_exists == true: error
  opts.create_if_missing = false;
  opts.error_if_exists = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
  ASSERT_TRUE(db == nullptr);

  // Does exist, and error_if_exists == false: OK
  opts.create_if_missing = true;
  opts.error_if_exists = false;
  s = DB::Open(opts, dbname, &db);
  ASSERT_LEVELDB_OK(s);
  ASSERT_TRUE(db != nullptr);

  delete db;
  db = nullptr;
}
1682
// DestroyDB must remove an empty database directory, both when the Env
// reports '.'/'..' in directory listings and when it filters them out.
TEST_F(DBTest, DestroyEmptyDir) {
  std::string dbname = testing::TempDir() + "db_empty_dir";
  TestEnv env(Env::Default());
  env.RemoveDir(dbname);
  ASSERT_TRUE(!env.FileExists(dbname));

  Options opts;
  opts.env = &env;

  ASSERT_LEVELDB_OK(env.CreateDir(dbname));
  ASSERT_TRUE(env.FileExists(dbname));
  std::vector<std::string> children;
  ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
  // The stock Env's do not filter out '.' and '..' special files.
  ASSERT_EQ(2, children.size());
  ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
  ASSERT_TRUE(!env.FileExists(dbname));

  // Should also be destroyed if Env is filtering out dot files.
  env.SetIgnoreDotFiles(true);
  ASSERT_LEVELDB_OK(env.CreateDir(dbname));
  ASSERT_TRUE(env.FileExists(dbname));
  ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
  ASSERT_EQ(0, children.size());
  ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
  ASSERT_TRUE(!env.FileExists(dbname));
}
1710
// DestroyDB must refuse to destroy a database that is currently open
// (the open DB holds the lock), but succeed once the DB is closed.
TEST_F(DBTest, DestroyOpenDB) {
  std::string dbname = testing::TempDir() + "open_db_dir";
  env_->RemoveDir(dbname);
  ASSERT_TRUE(!env_->FileExists(dbname));

  Options opts;
  opts.create_if_missing = true;
  DB* db = nullptr;
  ASSERT_LEVELDB_OK(DB::Open(opts, dbname, &db));
  ASSERT_TRUE(db != nullptr);

  // Must fail to destroy an open db.
  ASSERT_TRUE(env_->FileExists(dbname));
  ASSERT_TRUE(!DestroyDB(dbname, Options()).ok());
  ASSERT_TRUE(env_->FileExists(dbname));

  delete db;
  db = nullptr;

  // Should succeed destroying a closed db.
  ASSERT_LEVELDB_OK(DestroyDB(dbname, Options()));
  ASSERT_TRUE(!env_->FileExists(dbname));
}
1734
// The fixture's DB is already open; a second DB::Open on the same
// directory must fail because of the file lock.
TEST_F(DBTest, Locking) {
  DB* db2 = nullptr;
  Status s = DB::Open(CurrentOptions(), dbname_, &db2);
  ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}
1740
// Check that number of files does not grow when we are out of space
TEST_F(DBTest, NoSpace) {
  Options options = CurrentOptions();
  options.env = env_;
  Reopen(&options);

  ASSERT_LEVELDB_OK(Put("foo", "v1"));
  ASSERT_EQ("v1", Get("foo"));
  Compact("a", "z");
  const int num_files = CountFiles();
  // Force out-of-space errors.
  env_->no_space_.store(true, std::memory_order_release);
  // Repeatedly request compactions at every level while writes fail.
  for (int i = 0; i < 10; i++) {
    for (int level = 0; level < config::kNumLevels - 1; level++) {
      dbfull()->TEST_CompactRange(level, nullptr, nullptr);
    }
  }
  env_->no_space_.store(false, std::memory_order_release);
  ASSERT_LT(CountFiles(), num_files + 3);  // Failed compactions left little debris
}
1761
// When new-file creation fails, writes must eventually return errors
// rather than hanging or silently succeeding.
TEST_F(DBTest, NonWritableFileSystem) {
  Options options = CurrentOptions();
  options.write_buffer_size = 1000;  // Small buffer so Puts need new files
  options.env = env_;
  Reopen(&options);
  ASSERT_LEVELDB_OK(Put("foo", "v1"));
  // Force errors for new files.
  env_->non_writable_.store(true, std::memory_order_release);
  std::string big(100000, 'x');
  int errors = 0;
  for (int i = 0; i < 20; i++) {
    std::fprintf(stderr, "iter %d; errors %d\n", i, errors);
    if (!Put("foo", big).ok()) {
      errors++;
      DelayMilliseconds(100);
    }
  }
  ASSERT_GT(errors, 0);  // At least one write observed the failure
  env_->non_writable_.store(false, std::memory_order_release);
}
1782
// Check that log sync errors cause the DB to disallow future writes.
TEST_F(DBTest, WriteSyncError) {
  // (a) Cause log sync calls to fail
  Options options = CurrentOptions();
  options.env = env_;
  Reopen(&options);
  env_->data_sync_error_.store(true, std::memory_order_release);

  // (b) Normal write should succeed
  WriteOptions w;
  ASSERT_LEVELDB_OK(db_->Put(w, "k1", "v1"));
  ASSERT_EQ("v1", Get("k1"));

  // (c) Do a sync write; should fail
  w.sync = true;
  ASSERT_TRUE(!db_->Put(w, "k2", "v2").ok());
  ASSERT_EQ("v1", Get("k1"));
  ASSERT_EQ("NOT_FOUND", Get("k2"));

  // (d) make sync behave normally
  env_->data_sync_error_.store(false, std::memory_order_release);

  // (e) Do a non-sync write; should fail because the earlier sync
  // failure has put the DB into a permanent error state.
  w.sync = false;
  ASSERT_TRUE(!db_->Put(w, "k3", "v3").ok());
  ASSERT_EQ("v1", Get("k1"));
  ASSERT_EQ("NOT_FOUND", Get("k2"));
  ASSERT_EQ("NOT_FOUND", Get("k3"));
}
1813
// Test for the following problem:
// (a) Compaction produces file F
// (b) Log record containing F is written to MANIFEST file, but Sync() fails
// (c) GC deletes F
// (d) After reopening DB, reads fail since deleted F is named in log record
TEST_F(DBTest, ManifestWriteError) {
  // We iterate twice.  In the second iteration, everything is the
  // same except the log record never makes it to the MANIFEST file.
  for (int iter = 0; iter < 2; iter++) {
    // iter 0: MANIFEST Sync() fails; iter 1: MANIFEST write itself fails.
    std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
                                                : &env_->manifest_write_error_;

    // Insert foo=>bar mapping
    Options options = CurrentOptions();
    options.env = env_;
    options.create_if_missing = true;
    options.error_if_exists = false;
    DestroyAndReopen(&options);
    ASSERT_LEVELDB_OK(Put("foo", "bar"));
    ASSERT_EQ("bar", Get("foo"));

    // Memtable compaction (will succeed)
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("bar", Get("foo"));
    const int last = config::kMaxMemCompactLevel;
    ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level

    // Merging compaction (will fail)
    error_type->store(true, std::memory_order_release);
    dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
    ASSERT_EQ("bar", Get("foo"));

    // Recovery: should not lose data
    error_type->store(false, std::memory_order_release);
    Reopen(&options);
    ASSERT_EQ("bar", Get("foo"));
  }
}
1853
// With paranoid_checks enabled, reopening a DB whose SST file has been
// deleted must fail with an error mentioning the missing file.
TEST_F(DBTest, MissingSSTFile) {
  ASSERT_LEVELDB_OK(Put("foo", "bar"));
  ASSERT_EQ("bar", Get("foo"));

  // Dump the memtable to disk.
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ("bar", Get("foo"));

  Close();
  ASSERT_TRUE(DeleteAnSSTFile());
  Options options = CurrentOptions();
  options.paranoid_checks = true;
  Status s = TryReopen(&options);
  ASSERT_TRUE(!s.ok());
  // Matches "missing"/"Missing" regardless of capitalization.
  ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
}
1870
// Backward compatibility: table files renamed from the newer .ldb
// extension to the legacy .sst extension must still be readable.
TEST_F(DBTest, StillReadSST) {
  ASSERT_LEVELDB_OK(Put("foo", "bar"));
  ASSERT_EQ("bar", Get("foo"));

  // Dump the memtable to disk.
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ("bar", Get("foo"));
  Close();
  ASSERT_GT(RenameLDBToSST(), 0);  // At least one file was renamed
  Options options = CurrentOptions();
  options.paranoid_checks = true;
  Status s = TryReopen(&options);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ("bar", Get("foo"));
}
1886
// Repeated overwrite-and-compact cycles must garbage-collect obsolete
// files: the total file count stays constant.
TEST_F(DBTest, FilesDeletedAfterCompaction) {
  ASSERT_LEVELDB_OK(Put("foo", "v2"));
  Compact("a", "z");
  const int num_files = CountFiles();
  for (int i = 0; i < 10; i++) {
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    Compact("a", "z");
  }
  ASSERT_EQ(CountFiles(), num_files);
}
1897
// With a 10-bits-per-key bloom filter and a disabled block cache, reads
// of present keys hit roughly one sstable each, and reads of missing
// keys rarely touch disk at all.
TEST_F(DBTest, BloomFilter) {
  env_->count_random_reads_ = true;
  Options options = CurrentOptions();
  options.env = env_;
  options.block_cache = NewLRUCache(0);  // Prevent cache hits
  options.filter_policy = NewBloomFilterPolicy(10);
  Reopen(&options);

  // Populate multiple layers
  const int N = 10000;
  for (int i = 0; i < N; i++) {
    ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
  }
  Compact("a", "z");
  for (int i = 0; i < N; i += 100) {
    ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
  }
  dbfull()->TEST_CompactMemTable();

  // Prevent auto compactions triggered by seeks
  env_->delay_data_sync_.store(true, std::memory_order_release);

  // Lookup present keys.  Should rarely read from small sstable.
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i), Get(Key(i)));
  }
  int reads = env_->random_read_counter_.Read();
  std::fprintf(stderr, "%d present => %d reads\n", N, reads);
  ASSERT_GE(reads, N);
  ASSERT_LE(reads, N + 2 * N / 100);  // Allow ~2% extra reads

  // Lookup missing keys.  Should rarely read from either sstable.
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
  }
  reads = env_->random_read_counter_.Read();
  std::fprintf(stderr, "%d missing => %d reads\n", N, reads);
  ASSERT_LE(reads, 3 * N / 100);  // Bloom filter skips almost all reads

  env_->delay_data_sync_.store(false, std::memory_order_release);
  Close();
  delete options.block_cache;
  delete options.filter_policy;
}
1944
1945 // Multi-threaded test:
1946 namespace {
1947
1948 static const int kNumThreads = 4;
1949 static const int kTestSeconds = 10;
1950 static const int kNumKeys = 1000;
1951
// State shared by all threads in the MultiThreaded test.
struct MTState {
  DBTest* test;            // Owning fixture; provides the DB under test.
  std::atomic<bool> stop;  // Set by the main thread to ask workers to exit.
  std::atomic<int> counter[kNumThreads];       // Per-thread op counters.
  std::atomic<bool> thread_done[kNumThreads];  // Set by each worker on exit.
};
1958
// Per-thread argument passed to MTThreadBody.
struct MTThread {
  MTState* state;  // Shared test state.
  int id;          // Index of this thread in [0, kNumThreads).
};
1963
// Worker body: loops until `stop` is set, randomly either writing the value
// "<key>.<thread id>.<counter>" for a random key, or reading a random key and
// checking its value against the writing thread's published counter.
static void MTThreadBody(void* arg) {
  MTThread* t = reinterpret_cast<MTThread*>(arg);
  int id = t->id;
  DB* db = t->state->test->db_;
  int counter = 0;
  std::fprintf(stderr, "... starting thread %d\n", id);
  Random rnd(1000 + id);  // Distinct deterministic seed per thread.
  std::string value;
  char valbuf[1500];
  while (!t->state->stop.load(std::memory_order_acquire)) {
    // Publish our progress so readers can bound counters seen in values.
    t->state->counter[id].store(counter, std::memory_order_release);

    int key = rnd.Uniform(kNumKeys);
    char keybuf[20];
    std::snprintf(keybuf, sizeof(keybuf), "%016d", key);

    if (rnd.OneIn(2)) {
      // Write values of the form <key, my id, counter>.
      // We add some padding to force compactions.
      std::snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
                    static_cast<int>(counter));
      ASSERT_LEVELDB_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
    } else {
      // Read a value and verify that it matches the pattern written above.
      Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
      if (s.IsNotFound()) {
        // Key has not yet been written
      } else {
        // Check that the writer thread counter is >= the counter in the value
        ASSERT_LEVELDB_OK(s);
        int k, w, c;
        ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
        ASSERT_EQ(k, key);
        ASSERT_GE(w, 0);
        ASSERT_LT(w, kNumThreads);
        ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
      }
    }
    counter++;
  }
  // Signal completion and report how many operations were performed.
  t->state->thread_done[id].store(true, std::memory_order_release);
  std::fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
}
2007
2008 } // namespace
2009
// Runs kNumThreads concurrent reader/writer threads against the DB for
// kTestSeconds under every option configuration, then waits for all of them
// to acknowledge shutdown.
TEST_F(DBTest, MultiThreaded) {
  do {
    // Initialize state
    MTState mt;
    mt.test = this;
    mt.stop.store(false, std::memory_order_release);
    for (int id = 0; id < kNumThreads; id++) {
      // The per-thread counters are ints, so initialize with 0 (the original
      // stored the bool literal `false`, which only worked via implicit
      // bool->int conversion).
      mt.counter[id].store(0, std::memory_order_release);
      mt.thread_done[id].store(false, std::memory_order_release);
    }

    // Start threads
    MTThread thread[kNumThreads];
    for (int id = 0; id < kNumThreads; id++) {
      thread[id].state = &mt;
      thread[id].id = id;
      env_->StartThread(MTThreadBody, &thread[id]);
    }

    // Let them run for a while
    DelayMilliseconds(kTestSeconds * 1000);

    // Stop the threads and wait for them to finish
    mt.stop.store(true, std::memory_order_release);
    for (int id = 0; id < kNumThreads; id++) {
      while (!mt.thread_done[id].load(std::memory_order_acquire)) {
        DelayMilliseconds(100);
      }
    }
  } while (ChangeOptions());
}
2041
namespace {
// In-memory reference model of DB contents: ordered key -> value map.
// `using` alias instead of `typedef`, matching the C++11 style used
// throughout this file.
using KVMap = std::map<std::string, std::string>;
}  // namespace
2045
// A trivially-correct reference implementation of the DB interface backed by
// an in-memory ordered map.  The Randomized test applies identical operation
// streams to a ModelDB and a real DB and compares their iterator contents.
class ModelDB : public DB {
 public:
  // A snapshot is simply a full copy of the map at GetSnapshot() time.
  class ModelSnapshot : public Snapshot {
   public:
    KVMap map_;
  };

  explicit ModelDB(const Options& options) : options_(options) {}
  ~ModelDB() override = default;
  // Put/Delete route through the default DB implementations, which package
  // the operation into a WriteBatch and call Write() below.
  Status Put(const WriteOptions& o, const Slice& k, const Slice& v) override {
    return DB::Put(o, k, v);
  }
  Status Delete(const WriteOptions& o, const Slice& key) override {
    return DB::Delete(o, key);
  }
  // Point lookups are not exercised by the Randomized test.
  Status Get(const ReadOptions& options, const Slice& key,
             std::string* value) override {
    assert(false);  // Not implemented
    return Status::NotFound(key);
  }
  // With no snapshot, iterate over a private copy of the current map (the
  // iterator owns and frees the copy).  With a snapshot, iterate over the
  // snapshot's map without taking ownership.
  Iterator* NewIterator(const ReadOptions& options) override {
    if (options.snapshot == nullptr) {
      KVMap* saved = new KVMap;
      *saved = map_;
      return new ModelIter(saved, true);
    } else {
      const KVMap* snapshot_state =
          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
      return new ModelIter(snapshot_state, false);
    }
  }
  const Snapshot* GetSnapshot() override {
    ModelSnapshot* snapshot = new ModelSnapshot;
    snapshot->map_ = map_;
    return snapshot;
  }

  void ReleaseSnapshot(const Snapshot* snapshot) override {
    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
  }
  // Applies the batch to map_ by replaying it through a handler.
  Status Write(const WriteOptions& options, WriteBatch* batch) override {
    class Handler : public WriteBatch::Handler {
     public:
      KVMap* map_;
      void Put(const Slice& key, const Slice& value) override {
        (*map_)[key.ToString()] = value.ToString();
      }
      void Delete(const Slice& key) override { map_->erase(key.ToString()); }
    };
    Handler handler;
    handler.map_ = &map_;
    return batch->Iterate(&handler);
  }

  // Properties/sizes/compaction are irrelevant to the model: report nothing.
  bool GetProperty(const Slice& property, std::string* value) override {
    return false;
  }
  void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) override {
    for (int i = 0; i < n; i++) {
      sizes[i] = 0;
    }
  }
  void CompactRange(const Slice* start, const Slice* end) override {}

 private:
  // Iterator over a KVMap; optionally owns (and deletes) the map.
  class ModelIter : public Iterator {
   public:
    ModelIter(const KVMap* map, bool owned)
        : map_(map), owned_(owned), iter_(map_->end()) {}
    ~ModelIter() override {
      if (owned_) delete map_;
    }
    bool Valid() const override { return iter_ != map_->end(); }
    void SeekToFirst() override { iter_ = map_->begin(); }
    void SeekToLast() override {
      if (map_->empty()) {
        iter_ = map_->end();
      } else {
        // Position on the largest key (rbegin's key looked up forward).
        iter_ = map_->find(map_->rbegin()->first);
      }
    }
    void Seek(const Slice& k) override {
      iter_ = map_->lower_bound(k.ToString());
    }
    void Next() override { ++iter_; }
    // NOTE(review): Prev() on the first entry decrements begin(), which is
    // undefined for std::map iterators -- callers must not do that.
    void Prev() override { --iter_; }
    Slice key() const override { return iter_->first; }
    Slice value() const override { return iter_->second; }
    Status status() const override { return Status::OK(); }

   private:
    const KVMap* const map_;
    const bool owned_;  // Do we own map_
    KVMap::const_iterator iter_;
  };
  const Options options_;
  KVMap map_;
};
2144
CompareIterators(int step,DB * model,DB * db,const Snapshot * model_snap,const Snapshot * db_snap)2145 static bool CompareIterators(int step, DB* model, DB* db,
2146 const Snapshot* model_snap,
2147 const Snapshot* db_snap) {
2148 ReadOptions options;
2149 options.snapshot = model_snap;
2150 Iterator* miter = model->NewIterator(options);
2151 options.snapshot = db_snap;
2152 Iterator* dbiter = db->NewIterator(options);
2153 bool ok = true;
2154 int count = 0;
2155 std::vector<std::string> seek_keys;
2156 // Compare equality of all elements using Next(). Save some of the keys for
2157 // comparing Seek equality.
2158 for (miter->SeekToFirst(), dbiter->SeekToFirst();
2159 ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
2160 count++;
2161 if (miter->key().compare(dbiter->key()) != 0) {
2162 std::fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
2163 EscapeString(miter->key()).c_str(),
2164 EscapeString(dbiter->key()).c_str());
2165 ok = false;
2166 break;
2167 }
2168
2169 if (miter->value().compare(dbiter->value()) != 0) {
2170 std::fprintf(stderr,
2171 "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
2172 step, EscapeString(miter->key()).c_str(),
2173 EscapeString(miter->value()).c_str(),
2174 EscapeString(miter->value()).c_str());
2175 ok = false;
2176 break;
2177 }
2178
2179 if (count % 10 == 0) {
2180 seek_keys.push_back(miter->key().ToString());
2181 }
2182 }
2183
2184 if (ok) {
2185 if (miter->Valid() != dbiter->Valid()) {
2186 std::fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
2187 step, miter->Valid(), dbiter->Valid());
2188 ok = false;
2189 }
2190 }
2191
2192 if (ok) {
2193 // Validate iterator equality when performing seeks.
2194 for (auto kiter = seek_keys.begin(); ok && kiter != seek_keys.end();
2195 ++kiter) {
2196 miter->Seek(*kiter);
2197 dbiter->Seek(*kiter);
2198 if (!miter->Valid() || !dbiter->Valid()) {
2199 std::fprintf(stderr, "step %d: Seek iterators invalid: %d vs. %d\n",
2200 step, miter->Valid(), dbiter->Valid());
2201 ok = false;
2202 }
2203 if (miter->key().compare(dbiter->key()) != 0) {
2204 std::fprintf(stderr, "step %d: Seek key mismatch: '%s' vs. '%s'\n",
2205 step, EscapeString(miter->key()).c_str(),
2206 EscapeString(dbiter->key()).c_str());
2207 ok = false;
2208 break;
2209 }
2210
2211 if (miter->value().compare(dbiter->value()) != 0) {
2212 std::fprintf(
2213 stderr,
2214 "step %d: Seek value mismatch for key '%s': '%s' vs. '%s'\n", step,
2215 EscapeString(miter->key()).c_str(),
2216 EscapeString(miter->value()).c_str(),
2217 EscapeString(miter->value()).c_str());
2218 ok = false;
2219 break;
2220 }
2221 }
2222 }
2223
2224 std::fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
2225 delete miter;
2226 delete dbiter;
2227 return ok;
2228 }
2229
// Randomized cross-check: applies an identical random stream of puts,
// deletes, and multi-entry batches to both ModelDB (in-memory reference) and
// the real DB, periodically comparing full iterator contents -- including
// contents under previously taken snapshots and across a Reopen().
TEST_F(DBTest, Randomized) {
  Random rnd(test::RandomSeed());
  do {
    ModelDB model(CurrentOptions());
    const int N = 10000;
    const Snapshot* model_snap = nullptr;
    const Snapshot* db_snap = nullptr;
    std::string k, v;
    for (int step = 0; step < N; step++) {
      if (step % 100 == 0) {
        std::fprintf(stderr, "Step %d of %d\n", step, N);
      }
      // TODO(sanjay): Test Get() works
      int p = rnd.Uniform(100);
      if (p < 45) {  // Put
        k = RandomKey(&rnd);
        // Mostly small values; occasionally a value of 100-199 bytes.
        v = RandomString(
            &rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
        ASSERT_LEVELDB_OK(model.Put(WriteOptions(), k, v));
        ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), k, v));

      } else if (p < 90) {  // Delete
        k = RandomKey(&rnd);
        ASSERT_LEVELDB_OK(model.Delete(WriteOptions(), k));
        ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), k));

      } else {  // Multi-element batch
        WriteBatch b;
        const int num = rnd.Uniform(8);
        for (int i = 0; i < num; i++) {
          if (i == 0 || !rnd.OneIn(10)) {
            k = RandomKey(&rnd);
          } else {
            // Periodically re-use the same key from the previous iter, so
            // we have multiple entries in the write batch for the same key
          }
          if (rnd.OneIn(2)) {
            v = RandomString(&rnd, rnd.Uniform(10));
            b.Put(k, v);
          } else {
            b.Delete(k);
          }
        }
        ASSERT_LEVELDB_OK(model.Write(WriteOptions(), &b));
        ASSERT_LEVELDB_OK(db_->Write(WriteOptions(), &b));
      }

      if ((step % 100) == 0) {
        // Compare current contents, and contents under the snapshots taken
        // at the previous checkpoint.
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
        // Save a snapshot from each DB this time that we'll use next
        // time we compare things, to make sure the current state is
        // preserved with the snapshot
        if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
        if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);

        // Also verify the state survives a reopen.
        Reopen();
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));

        model_snap = model.GetSnapshot();
        db_snap = db_->GetSnapshot();
      }
    }
    if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
    if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
  } while (ChangeOptions());
}
2297
// Renders `num` as a fixed-width key: a 16-digit, zero-padded unsigned
// decimal string (e.g. 42 -> "0000000000000042").
std::string MakeKey(unsigned int num) {
  char key_buf[30];
  const int written = std::snprintf(key_buf, sizeof(key_buf), "%016u", num);
  return std::string(key_buf, static_cast<size_t>(written));
}
2303
// Micro-benchmark for VersionSet::LogAndApply with state.range(0) files
// already installed in the base version.  Besides the standard benchmark
// output, prints a us/iter summary line to stderr.
static void BM_LogAndApply(benchmark::State& state) {
  const int num_base_files = state.range(0);

  // Start from a freshly created database directory.
  std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
  DestroyDB(dbname, Options());

  DB* db = nullptr;
  Options opts;
  opts.create_if_missing = true;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_LEVELDB_OK(s);
  ASSERT_TRUE(db != nullptr);

  // Close the DB: the benchmark drives a VersionSet directly.
  delete db;
  db = nullptr;

  Env* env = Env::Default();

  // LogAndApply expects its mutex to be held by the caller.
  port::Mutex mu;
  MutexLock l(&mu);

  InternalKeyComparator cmp(BytewiseComparator());
  Options options;
  VersionSet vset(dbname, &options, nullptr, &cmp);
  bool save_manifest;
  ASSERT_LEVELDB_OK(vset.Recover(&save_manifest));
  // Install num_base_files one-byte files at level 2 as the baseline version.
  VersionEdit vbase;
  uint64_t fnum = 1;
  for (int i = 0; i < num_base_files; i++) {
    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
    vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
  }
  ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu));

  uint64_t start_micros = env->NowMicros();

  // Each iteration applies one edit that removes a file and adds a new one.
  for (auto st : state) {
    VersionEdit vedit;
    vedit.RemoveFile(2, fnum);
    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
    vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
    // NOTE(review): the Status returned here is ignored, so an I/O failure
    // would silently skew the timing -- confirm this is intentional.
    vset.LogAndApply(&vedit, &mu);
  }
  uint64_t stop_micros = env->NowMicros();
  unsigned int us = stop_micros - start_micros;
  char buf[16];
  std::snprintf(buf, sizeof(buf), "%d", num_base_files);
  std::fprintf(stderr,
               "BM_LogAndApply/%-6s %8" PRIu64
               " iters : %9u us (%7.0f us / iter)\n",
               buf, state.iterations(), us, ((float)us) / state.iterations());
}

// Benchmark over a range of base-version sizes.
BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
2360 } // namespace leveldb
2361
main(int argc,char ** argv)2362 int main(int argc, char** argv) {
2363 testing::InitGoogleTest(&argc, argv);
2364 benchmark::RunSpecifiedBenchmarks();
2365 return RUN_ALL_TESTS();
2366 }
2367