//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "env/mock_env.h"
#include <algorithm>
#include <chrono>
#include "port/sys_time.h"
#include "util/cast_util.h"
#include "util/murmurhash.h"
#include "util/random.h"
#include "util/rate_limiter.h"

namespace ROCKSDB_NAMESPACE {

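// An in-memory file. MemFile is reference counted: each open handle calls
// Ref() on construction and Unref() on destruction, and the object deletes
// itself when the last reference is dropped. Bytes written before the last
// Fsync() are treated as persistent; CorruptBuffer() overwrites up to 512
// bytes of the un-synced tail with random data.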
class MemFile {
 public:
  explicit MemFile(Env* env, const std::string& fn, bool _is_lock_file = false)
      : env_(env),
        fn_(fn),
        refs_(0),
        is_lock_file_(_is_lock_file),
        locked_(false),
        size_(0),
        modified_time_(Now()),
        rnd_(static_cast<uint32_t>(
            MurmurHash(fn.data(), static_cast<int>(fn.size()), 0))),
        fsynced_bytes_(0) {}
  // No copying allowed.
  MemFile(const MemFile&) = delete;
  void operator=(const MemFile&) = delete;

  void Ref() {
    MutexLock lock(&mutex_);
    ++refs_;
  }

  bool is_lock_file() const { return is_lock_file_; }

  bool Lock() {
    assert(is_lock_file_);
    MutexLock lock(&mutex_);
    if (locked_) {
      return false;
    } else {
      locked_ = true;
      return true;
    }
  }

  void Unlock() {
    assert(is_lock_file_);
    MutexLock lock(&mutex_);
    locked_ = false;
  }

  void Unref() {
    bool do_delete = false;
    {
      MutexLock lock(&mutex_);
      --refs_;
      assert(refs_ >= 0);
      if (refs_ <= 0) {
        do_delete = true;
      }
    }

    if (do_delete) {
      delete this;
    }
  }

  uint64_t Size() const { return size_; }

  void Truncate(size_t size) {
    MutexLock lock(&mutex_);
    if (size < size_) {
      data_.resize(size);
      size_ = size;
    }
  }

  void CorruptBuffer() {
    if (fsynced_bytes_ >= size_) {
      return;
    }
    uint64_t buffered_bytes = size_ - fsynced_bytes_;
    uint64_t start =
        fsynced_bytes_ + rnd_.Uniform(static_cast<int>(buffered_bytes));
    uint64_t end = std::min(start + 512, size_.load());
    MutexLock lock(&mutex_);
    for (uint64_t pos = start; pos < end; ++pos) {
      data_[static_cast<size_t>(pos)] = static_cast<char>(rnd_.Uniform(256));
    }
  }

  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
    MutexLock lock(&mutex_);
    const uint64_t available = Size() - std::min(Size(), offset);
    size_t offset_ = static_cast<size_t>(offset);
    if (n > available) {
      n = static_cast<size_t>(available);
    }
    if (n == 0) {
      *result = Slice();
      return Status::OK();
    }
    if (scratch) {
      memcpy(scratch, &(data_[offset_]), n);
      *result = Slice(scratch, n);
    } else {
      *result = Slice(&(data_[offset_]), n);
    }
    return Status::OK();
  }

  Status Write(uint64_t offset, const Slice& data) {
    MutexLock lock(&mutex_);
    size_t offset_ = static_cast<size_t>(offset);
    if (offset + data.size() > data_.size()) {
      data_.resize(offset_ + data.size());
    }
    data_.replace(offset_, data.size(), data.data(), data.size());
    size_ = data_.size();
    modified_time_ = Now();
    return Status::OK();
  }

  Status Append(const Slice& data) {
    MutexLock lock(&mutex_);
    data_.append(data.data(), data.size());
    size_ = data_.size();
    modified_time_ = Now();
    return Status::OK();
  }

  Status Fsync() {
    fsynced_bytes_ = size_.load();
    return Status::OK();
  }

  uint64_t ModifiedTime() const { return modified_time_; }

 private:
  uint64_t Now() {
    int64_t unix_time = 0;
    auto s = env_->GetCurrentTime(&unix_time);
    assert(s.ok());
    return static_cast<uint64_t>(unix_time);
  }

  // Private since only Unref() should be used to delete it.
  ~MemFile() { assert(refs_ == 0); }

  Env* env_;
  const std::string fn_;
  mutable port::Mutex mutex_;
  int refs_;
  bool is_lock_file_;
  bool locked_;

  // Data written into this file; all bytes before fsynced_bytes_ are
  // persistent.
  std::string data_;
  std::atomic<uint64_t> size_;
  std::atomic<uint64_t> modified_time_;

  Random rnd_;
  std::atomic<uint64_t> fsynced_bytes_;
};

namespace {

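// The Mock*File classes below adapt a MemFile to the SequentialFile,
// RandomAccessFile, RandomRWFile and WritableFile interfaces. Each wrapper
// takes a reference on the underlying MemFile in its constructor and releases
// it in its destructor, so the in-memory data outlives any open handle.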
class MockSequentialFile : public SequentialFile {
 public:
  explicit MockSequentialFile(MemFile* file) : file_(file), pos_(0) {
    file_->Ref();
  }

  ~MockSequentialFile() override { file_->Unref(); }

  Status Read(size_t n, Slice* result, char* scratch) override {
    Status s = file_->Read(pos_, n, result, scratch);
    if (s.ok()) {
      pos_ += result->size();
    }
    return s;
  }

  Status Skip(uint64_t n) override {
    if (pos_ > file_->Size()) {
      return Status::IOError("pos_ > file_->Size()");
    }
    const uint64_t available = file_->Size() - pos_;
    if (n > available) {
      n = available;
    }
    pos_ += static_cast<size_t>(n);
    return Status::OK();
  }

 private:
  MemFile* file_;
  size_t pos_;
};

class MockRandomAccessFile : public RandomAccessFile {
 public:
  explicit MockRandomAccessFile(MemFile* file) : file_(file) { file_->Ref(); }

  ~MockRandomAccessFile() override { file_->Unref(); }

  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    return file_->Read(offset, n, result, scratch);
  }

 private:
  MemFile* file_;
};

class MockRandomRWFile : public RandomRWFile {
 public:
  explicit MockRandomRWFile(MemFile* file) : file_(file) { file_->Ref(); }

  ~MockRandomRWFile() override { file_->Unref(); }

  Status Write(uint64_t offset, const Slice& data) override {
    return file_->Write(offset, data);
  }

  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    return file_->Read(offset, n, result, scratch);
  }

  Status Close() override { return file_->Fsync(); }

  Status Flush() override { return Status::OK(); }

  Status Sync() override { return file_->Fsync(); }

 private:
  MemFile* file_;
};

class MockWritableFile : public WritableFile {
 public:
  MockWritableFile(MemFile* file, RateLimiter* rate_limiter)
      : file_(file), rate_limiter_(rate_limiter) {
    file_->Ref();
  }

  ~MockWritableFile() override { file_->Unref(); }

  Status Append(const Slice& data) override {
    size_t bytes_written = 0;
    while (bytes_written < data.size()) {
      auto bytes = RequestToken(data.size() - bytes_written);
      Status s = file_->Append(Slice(data.data() + bytes_written, bytes));
      if (!s.ok()) {
        return s;
      }
      bytes_written += bytes;
    }
    return Status::OK();
  }
  Status Truncate(uint64_t size) override {
    file_->Truncate(static_cast<size_t>(size));
    return Status::OK();
  }
  Status Close() override { return file_->Fsync(); }

  Status Flush() override { return Status::OK(); }

  Status Sync() override { return file_->Fsync(); }

  uint64_t GetFileSize() override { return file_->Size(); }

 private:
  inline size_t RequestToken(size_t bytes) {
    if (rate_limiter_ && io_priority_ < Env::IO_TOTAL) {
      bytes = std::min(
          bytes, static_cast<size_t>(rate_limiter_->GetSingleBurstBytes()));
      rate_limiter_->Request(bytes, io_priority_);
    }
    return bytes;
  }

  MemFile* file_;
  RateLimiter* rate_limiter_;
};

class MockEnvDirectory : public Directory {
 public:
  Status Fsync() override { return Status::OK(); }
};

class MockEnvFileLock : public FileLock {
 public:
  explicit MockEnvFileLock(const std::string& fname) : fname_(fname) {}

  std::string FileName() const { return fname_; }

 private:
  const std::string fname_;
};

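// A Logger that writes log lines into a MockEnv file. Each Logv() call formats
// a timestamp header followed by the message, appends the result to the
// underlying WritableFile and marks a flush as pending; the pending flag is
// cleared by Flush() or once flush_every_seconds_ have elapsed since the last
// flush.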
class TestMemLogger : public Logger {
 private:
  std::unique_ptr<WritableFile> file_;
  std::atomic_size_t log_size_;
  static const uint64_t flush_every_seconds_ = 5;
  std::atomic_uint_fast64_t last_flush_micros_;
  Env* env_;
  std::atomic<bool> flush_pending_;

 public:
  TestMemLogger(std::unique_ptr<WritableFile> f, Env* env,
                const InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL)
      : Logger(log_level),
        file_(std::move(f)),
        log_size_(0),
        last_flush_micros_(0),
        env_(env),
        flush_pending_(false) {}
  ~TestMemLogger() override {}

  void Flush() override {
    if (flush_pending_) {
      flush_pending_ = false;
    }
    last_flush_micros_ = env_->NowMicros();
  }

  using Logger::Logv;
  void Logv(const char* format, va_list ap) override {
    // We try twice: the first time with a fixed-size stack allocated buffer,
    // and the second time with a much larger dynamically allocated buffer.
    char buffer[500];
    for (int iter = 0; iter < 2; iter++) {
      char* base;
      int bufsize;
      if (iter == 0) {
        bufsize = sizeof(buffer);
        base = buffer;
      } else {
        bufsize = 30000;
        base = new char[bufsize];
      }
      char* p = base;
      char* limit = base + bufsize;

      struct timeval now_tv;
      gettimeofday(&now_tv, nullptr);
      const time_t seconds = now_tv.tv_sec;
      struct tm t;
      memset(&t, 0, sizeof(t));
      struct tm* ret __attribute__((__unused__));
      ret = localtime_r(&seconds, &t);
      assert(ret);
      p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d ",
                    t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
                    t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec));

      // Print the message
      if (p < limit) {
        va_list backup_ap;
        va_copy(backup_ap, ap);
        p += vsnprintf(p, limit - p, format, backup_ap);
        va_end(backup_ap);
      }

      // Truncate to available space if necessary
      if (p >= limit) {
        if (iter == 0) {
          continue;  // Try again with larger buffer
        } else {
          p = limit - 1;
        }
      }

      // Add newline if necessary
      if (p == base || p[-1] != '\n') {
        *p++ = '\n';
      }

      assert(p <= limit);
      const size_t write_size = p - base;

      file_->Append(Slice(base, write_size));
      flush_pending_ = true;
      log_size_ += write_size;
      uint64_t now_micros =
          static_cast<uint64_t>(now_tv.tv_sec) * 1000000 + now_tv.tv_usec;
      if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
        flush_pending_ = false;
        last_flush_micros_ = now_micros;
      }
      if (base != buffer) {
        delete[] base;
      }
      break;
    }
  }
  size_t GetLogFileSize() const override { return log_size_; }
};

}  // Anonymous namespace

MockEnv::MockEnv(Env* base_env) : EnvWrapper(base_env), fake_sleep_micros_(0) {}

MockEnv::~MockEnv() {
  for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i) {
    i->second->Unref();
  }
}

// Partial implementation of the Env interface.
Status MockEnv::NewSequentialFile(const std::string& fname,
                                  std::unique_ptr<SequentialFile>* result,
                                  const EnvOptions& /*soptions*/) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) == file_map_.end()) {
    *result = nullptr;
    return Status::IOError(fn, "File not found");
  }
  auto* f = file_map_[fn];
  if (f->is_lock_file()) {
    return Status::InvalidArgument(fn, "Cannot open a lock file.");
  }
  result->reset(new MockSequentialFile(f));
  return Status::OK();
}

Status MockEnv::NewRandomAccessFile(const std::string& fname,
                                    std::unique_ptr<RandomAccessFile>* result,
                                    const EnvOptions& /*soptions*/) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) == file_map_.end()) {
    *result = nullptr;
    return Status::IOError(fn, "File not found");
  }
  auto* f = file_map_[fn];
  if (f->is_lock_file()) {
    return Status::InvalidArgument(fn, "Cannot open a lock file.");
  }
  result->reset(new MockRandomAccessFile(f));
  return Status::OK();
}

Status MockEnv::NewRandomRWFile(const std::string& fname,
                                std::unique_ptr<RandomRWFile>* result,
                                const EnvOptions& /*soptions*/) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) == file_map_.end()) {
    *result = nullptr;
    return Status::IOError(fn, "File not found");
  }
  auto* f = file_map_[fn];
  if (f->is_lock_file()) {
    return Status::InvalidArgument(fn, "Cannot open a lock file.");
  }
  result->reset(new MockRandomRWFile(f));
  return Status::OK();
}

Status MockEnv::ReuseWritableFile(const std::string& fname,
                                  const std::string& old_fname,
                                  std::unique_ptr<WritableFile>* result,
                                  const EnvOptions& options) {
  auto s = RenameFile(old_fname, fname);
  if (!s.ok()) {
    return s;
  }
  result->reset();
  return NewWritableFile(fname, result, options);
}

Status MockEnv::NewWritableFile(const std::string& fname,
                                std::unique_ptr<WritableFile>* result,
                                const EnvOptions& env_options) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) != file_map_.end()) {
    DeleteFileInternal(fn);
  }
  MemFile* file = new MemFile(this, fn, false);
  file->Ref();
  file_map_[fn] = file;

  result->reset(new MockWritableFile(file, env_options.rate_limiter));
  return Status::OK();
}

Status MockEnv::NewDirectory(const std::string& /*name*/,
                             std::unique_ptr<Directory>* result) {
  result->reset(new MockEnvDirectory());
  return Status::OK();
}

Status MockEnv::FileExists(const std::string& fname) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) != file_map_.end()) {
    // File exists
    return Status::OK();
  }
  // Now also check if fn exists as a dir
  for (const auto& iter : file_map_) {
    const std::string& filename = iter.first;
    if (filename.size() >= fn.size() + 1 && filename[fn.size()] == '/' &&
        Slice(filename).starts_with(Slice(fn))) {
      return Status::OK();
    }
  }
  return Status::NotFound();
}

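// file_map_ is a flat map keyed by normalized path; there are no real
// directory entries. GetChildren() therefore infers children by scanning for
// keys that extend `dir` with a '/' and reporting the first path component
// after that slash, removing consecutive duplicates at the end.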
Status MockEnv::GetChildren(const std::string& dir,
                            std::vector<std::string>* result) {
  auto d = NormalizePath(dir);
  bool found_dir = false;
  {
    MutexLock lock(&mutex_);
    result->clear();
    for (const auto& iter : file_map_) {
      const std::string& filename = iter.first;

      if (filename == d) {
        found_dir = true;
      } else if (filename.size() >= d.size() + 1 && filename[d.size()] == '/' &&
                 Slice(filename).starts_with(Slice(d))) {
        found_dir = true;
        size_t next_slash = filename.find('/', d.size() + 1);
        if (next_slash != std::string::npos) {
          result->push_back(
              filename.substr(d.size() + 1, next_slash - d.size() - 1));
        } else {
          result->push_back(filename.substr(d.size() + 1));
        }
      }
    }
  }
  result->erase(std::unique(result->begin(), result->end()), result->end());
  return found_dir ? Status::OK() : Status::NotFound();
}

void MockEnv::DeleteFileInternal(const std::string& fname) {
  assert(fname == NormalizePath(fname));
  const auto& pair = file_map_.find(fname);
  if (pair != file_map_.end()) {
    pair->second->Unref();
    file_map_.erase(fname);
  }
}

Status MockEnv::DeleteFile(const std::string& fname) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  if (file_map_.find(fn) == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }

  DeleteFileInternal(fn);
  return Status::OK();
}

Status MockEnv::Truncate(const std::string& fname, size_t size) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  if (iter == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }
  iter->second->Truncate(size);
  return Status::OK();
}

Status MockEnv::CreateDir(const std::string& dirname) {
  auto dn = NormalizePath(dirname);
  if (file_map_.find(dn) == file_map_.end()) {
    MemFile* file = new MemFile(this, dn, false);
    file->Ref();
    file_map_[dn] = file;
  } else {
    return Status::IOError();
  }
  return Status::OK();
}

Status MockEnv::CreateDirIfMissing(const std::string& dirname) {
  CreateDir(dirname);
  return Status::OK();
}

Status MockEnv::DeleteDir(const std::string& dirname) {
  return DeleteFile(dirname);
}

Status MockEnv::GetFileSize(const std::string& fname, uint64_t* file_size) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  if (iter == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }

  *file_size = iter->second->Size();
  return Status::OK();
}

Status MockEnv::GetFileModificationTime(const std::string& fname,
                                        uint64_t* time) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  if (iter == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }
  *time = iter->second->ModifiedTime();
  return Status::OK();
}

Status MockEnv::RenameFile(const std::string& src, const std::string& dest) {
  auto s = NormalizePath(src);
  auto t = NormalizePath(dest);
  MutexLock lock(&mutex_);
  if (file_map_.find(s) == file_map_.end()) {
    return Status::IOError(s, "File not found");
  }

  DeleteFileInternal(t);
  file_map_[t] = file_map_[s];
  file_map_.erase(s);
  return Status::OK();
}

Status MockEnv::LinkFile(const std::string& src, const std::string& dest) {
  auto s = NormalizePath(src);
  auto t = NormalizePath(dest);
  MutexLock lock(&mutex_);
  if (file_map_.find(s) == file_map_.end()) {
    return Status::IOError(s, "File not found");
  }

  DeleteFileInternal(t);
  file_map_[t] = file_map_[s];
  file_map_[t]->Ref();  // Otherwise it might get deleted when no one uses s
  return Status::OK();
}

Status MockEnv::NewLogger(const std::string& fname,
                          std::shared_ptr<Logger>* result) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  MemFile* file = nullptr;
  if (iter == file_map_.end()) {
    file = new MemFile(this, fn, false);
    file->Ref();
    file_map_[fn] = file;
  } else {
    file = iter->second;
  }
  std::unique_ptr<WritableFile> f(new MockWritableFile(file, nullptr));
  result->reset(new TestMemLogger(std::move(f), this));
  return Status::OK();
}

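// Lock files are ordinary file_map_ entries created with is_lock_file_ set.
// Locking is purely advisory and only coordinates callers that go through
// this same MockEnv instance.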
Status MockEnv::LockFile(const std::string& fname, FileLock** flock) {
  auto fn = NormalizePath(fname);
  {
    MutexLock lock(&mutex_);
    if (file_map_.find(fn) != file_map_.end()) {
      if (!file_map_[fn]->is_lock_file()) {
        return Status::InvalidArgument(fname, "Not a lock file.");
      }
      if (!file_map_[fn]->Lock()) {
        return Status::IOError(fn, "Lock is already held.");
      }
    } else {
      auto* file = new MemFile(this, fn, true);
      file->Ref();
      file->Lock();
      file_map_[fn] = file;
    }
  }
  *flock = new MockEnvFileLock(fn);
  return Status::OK();
}

Status MockEnv::UnlockFile(FileLock* flock) {
  std::string fn =
      static_cast_with_check<MockEnvFileLock, FileLock>(flock)->FileName();
  {
    MutexLock lock(&mutex_);
    if (file_map_.find(fn) != file_map_.end()) {
      if (!file_map_[fn]->is_lock_file()) {
        return Status::InvalidArgument(fn, "Not a lock file.");
      }
      file_map_[fn]->Unlock();
    }
  }
  delete flock;
  return Status::OK();
}

Status MockEnv::GetTestDirectory(std::string* path) {
  *path = "/test";
  return Status::OK();
}

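// The clock accessors below add the accumulated fake sleep time (see
// FakeSleepForMicroseconds()) to the wrapped Env's clock, which lets tests
// advance time without actually sleeping.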
Status MockEnv::GetCurrentTime(int64_t* unix_time) {
  auto s = EnvWrapper::GetCurrentTime(unix_time);
  if (s.ok()) {
    *unix_time += fake_sleep_micros_.load() / (1000 * 1000);
  }
  return s;
}

uint64_t MockEnv::NowMicros() {
  return EnvWrapper::NowMicros() + fake_sleep_micros_.load();
}

uint64_t MockEnv::NowNanos() {
  return EnvWrapper::NowNanos() + fake_sleep_micros_.load() * 1000;
}

Status MockEnv::CorruptBuffer(const std::string& fname) {
  auto fn = NormalizePath(fname);
  MutexLock lock(&mutex_);
  auto iter = file_map_.find(fn);
  if (iter == file_map_.end()) {
    return Status::IOError(fn, "File not found");
  }
  iter->second->CorruptBuffer();
  return Status::OK();
}

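// Collapses repeated '/' characters so that equivalent spellings of a path
// map to the same file_map_ key.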
std::string MockEnv::NormalizePath(const std::string path) {
  std::string dst;
  for (auto c : path) {
    if (!dst.empty() && c == '/' && dst.back() == '/') {
      continue;
    }
    dst.push_back(c);
  }
  return dst;
}

void MockEnv::FakeSleepForMicroseconds(int64_t micros) {
  fake_sleep_micros_.fetch_add(micros);
}

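// Illustrative test usage (a sketch only; the caller must keep the returned
// Env alive for as long as the DB uses it):
//
//   std::unique_ptr<Env> mem_env(NewMemEnv(Env::Default()));
//   Options options;
//   options.env = mem_env.get();
//   options.create_if_missing = true;
//   DB* db = nullptr;
//   Status s = DB::Open(options, "/dir/db", &db);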
#ifndef ROCKSDB_LITE
// This is to maintain the behavior before switching from InMemoryEnv to
// MockEnv.
Env* NewMemEnv(Env* base_env) { return new MockEnv(base_env); }

#else  // ROCKSDB_LITE

Env* NewMemEnv(Env* /*base_env*/) { return nullptr; }

#endif  // !ROCKSDB_LITE

}  // namespace ROCKSDB_NAMESPACE