1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/http/mock_http_cache.h"
6
7 #include <algorithm>
8 #include <limits>
9 #include <memory>
10 #include <utility>
11
12 #include "base/bind.h"
13 #include "base/callback_helpers.h"
14 #include "base/feature_list.h"
15 #include "base/location.h"
16 #include "base/single_thread_task_runner.h"
17 #include "base/threading/thread_task_runner_handle.h"
18 #include "net/base/features.h"
19 #include "net/base/net_errors.h"
20 #include "net/disk_cache/disk_cache_test_util.h"
21 #include "net/http/http_cache_writers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
23
24 namespace net {
25
26 namespace {
27
// During testing, we are going to limit the size of a cache entry to this many
// bytes using DCHECKs in order to prevent a test from causing unbounded memory
// growth. In practice cache entry shouldn't come anywhere near this limit for
// tests that use the mock cache. If they do, that's likely a problem with the
// test. If a test requires using massive cache entries, they should use a real
// cache backend instead.
const int kMaxMockCacheEntrySize = 100 * 1000 * 1000;

// We can override the test mode for a given operation by setting this global
// variable. Zero means "no override"; see MockHttpCache::GetTestMode().
int g_test_mode = 0;
39
GetTestModeForEntry(const std::string & key)40 int GetTestModeForEntry(const std::string& key) {
41 std::string url = key;
42
43 // 'key' is prefixed with an identifier if it corresponds to a cached POST.
44 // Skip past that to locate the actual URL.
45 //
46 // TODO(darin): It breaks the abstraction a bit that we assume 'key' is an
47 // URL corresponding to a registered MockTransaction. It would be good to
48 // have another way to access the test_mode.
49 if (isdigit(key[0])) {
50 size_t slash = key.find('/');
51 DCHECK(slash != std::string::npos);
52 url = url.substr(slash + 1);
53 }
54
55 // If we split the cache by top frame origin, then the origin is prepended to
56 // the key. Skip to the second url in the key.
57 if (base::StartsWith(url, "_dk_", base::CompareCase::SENSITIVE)) {
58 auto const pos = url.find(" http");
59 url = url.substr(pos + 1);
60 if (base::FeatureList::IsEnabled(
61 net::features::kAppendFrameOriginToNetworkIsolationKey)) {
62 auto const pos = url.find(" http");
63 url = url.substr(pos + 1);
64 }
65 }
66
67 const MockTransaction* t = FindMockTransaction(GURL(url));
68 DCHECK(t);
69 return t->test_mode;
70 }
71
72 } // namespace
73
74 //-----------------------------------------------------------------------------
75
// Bookkeeping for a completion deferred by IgnoreCallbacks(): holds a
// reference to the entry so it stays alive until the callback is delivered.
struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  net::CompletionOnceCallback callback;
  int result;
};
81
MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key),
      in_memory_data_(0),
      max_file_size_(std::numeric_limits<int>::max()),
      doomed_(false),
      sparse_(false),
      fail_requests_(0),
      fail_sparse_requests_(false),
      busy_(false),
      delayed_(false),
      cancel_(false),
      defer_op_(DEFER_NONE),
      resume_return_code_(0) {
  // Sync/async behavior of this entry is taken from the MockTransaction
  // registered for |key|'s URL.
  test_mode_ = GetTestModeForEntry(key);
}
97
// Marks the entry doomed; it is actually dropped the next time the cache
// touches it (e.g. in OpenEntry or DoomEntry).
void MockDiskEntry::Doom() {
  doomed_ = true;
}
101
// Balances the AddRef() taken when the entry was handed out; the entry dies
// once the cache's own reference is also released.
void MockDiskEntry::Close() {
  Release();
}
105
std::string MockDiskEntry::GetKey() const {
  return key_;
}
109
// The mock does not track access times; always reports "now".
base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::Now();
}
113
// The mock does not track modification times; always reports "now".
base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::Now();
}
117
int32_t MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32_t>(data_[index].size());
}
122
ReadData(int index,int offset,IOBuffer * buf,int buf_len,CompletionOnceCallback callback)123 int MockDiskEntry::ReadData(int index,
124 int offset,
125 IOBuffer* buf,
126 int buf_len,
127 CompletionOnceCallback callback) {
128 DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
129 DCHECK(!callback.is_null());
130
131 if (fail_requests_ & FAIL_READ)
132 return ERR_CACHE_READ_FAILURE;
133
134 if (offset < 0 || offset > static_cast<int>(data_[index].size()))
135 return ERR_FAILED;
136 if (static_cast<size_t>(offset) == data_[index].size())
137 return 0;
138
139 int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
140 memcpy(buf->data(), &data_[index][offset], num);
141
142 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
143 return num;
144
145 // Pause and resume.
146 if (defer_op_ == DEFER_READ) {
147 defer_op_ = DEFER_NONE;
148 resume_callback_ = std::move(callback);
149 resume_return_code_ = num;
150 return ERR_IO_PENDING;
151 }
152
153 CallbackLater(std::move(callback), num);
154 return ERR_IO_PENDING;
155 }
156
// Delivers the completion that was parked by a DEFER_READ/DEFER_WRITE
// operation.
void MockDiskEntry::ResumeDiskEntryOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_), resume_return_code_);
  resume_return_code_ = 0;
}
162
// Writes |buf_len| bytes at |offset| into stream |index|. The mock only
// supports truncating writes. Completion is synchronous, deferred, or posted
// depending on the entry's test mode and defer state.
int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_ & FAIL_WRITE) {
    // NOTE(review): a simulated *write* failure reports
    // ERR_CACHE_READ_FAILURE; existing tests may depend on this exact code —
    // confirm before changing it.
    CallbackLater(std::move(callback), ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;

  // Guard against tests growing an entry without bound (see
  // kMaxMockCacheEntrySize at the top of this file).
  DCHECK_LT(offset + buf_len, kMaxMockCacheEntrySize);
  // Only stream 1 (the response body) honors the max file size.
  if (offset + buf_len > max_file_size_ && index == 1)
    return net::ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  // Pause and resume: stash the completion for ResumeDiskEntryOperation().
  if (defer_op_ == DEFER_WRITE) {
    defer_op_ = DEFER_NONE;
    resume_callback_ = std::move(callback);
    resume_return_code_ = buf_len;
    return ERR_IO_PENDING;
  }

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}
202
// Reads from the sparse stream (stream 1). Fails unless the entry is already
// sparse and no sparse operation is in flight or canceled.
int MockDiskEntry::ReadSparseData(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_ & FAIL_READ_SPARSE)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  // NOTE(review): nothing clamps |real_offset| to the stored size, so |num|
  // can go negative if a test reads past the end — presumably callers never
  // do; confirm.
  int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
                     buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  // Async delivery: mark the entry busy so RunCallback() re-posts once (async
  // sparse operations take two message-loop trips, see RunCallback()).
  CallbackLater(std::move(callback), num);
  busy_ = true;
  delayed_ = false;
  return ERR_IO_PENDING;
}
235
// Writes to the sparse stream (stream 1). The first sparse write converts the
// entry to sparse, but only if stream 1 is still empty.
int MockDiskEntry::WriteSparseData(int64_t offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    // Can't mix sparse and non-sparse data in stream 1.
    if (data_[1].size())
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_ & FAIL_WRITE_SPARSE)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);

  // Grow the backing store as needed (bounded by kMaxMockCacheEntrySize).
  if (static_cast<int>(data_[1].size()) < real_offset + buf_len) {
    DCHECK_LT(real_offset + buf_len, kMaxMockCacheEntrySize);
    data_[1].resize(real_offset + buf_len);
  }

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}
273
// Reports the length of the first contiguous run of nonzero bytes at or after
// |offset| in the sparse stream, writing its start position to |start|.
int MockDiskEntry::GetAvailableRange(int64_t offset,
                                     int len,
                                     int64_t* start,
                                     CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_ & FAIL_GET_AVAILABLE_RANGE)
    return ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  // Scan forward: a zero byte is treated as a hole, a nonzero byte as data.
  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  // NOTE(review): this read-side operation keys off the WRITE sync bit —
  // looks intentional given how tests configure sparse ops, but confirm.
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(std::move(callback), count);
  return ERR_IO_PENDING;
}
313
CouldBeSparse() const314 bool MockDiskEntry::CouldBeSparse() const {
315 if (fail_sparse_requests_)
316 return false;
317 return sparse_;
318 }
319
// Flags the entry as canceled; sparse operations fail until
// ReadyForSparseIO() clears the flag.
void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}
323
// Clears a pending cancellation. Returns OK immediately when there is nothing
// to wait for (or in synchronous test mode); otherwise completes async.
net::Error MockDiskEntry::ReadyForSparseIO(CompletionOnceCallback callback) {
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass). Just notify the caller that it finished.
  CallbackLater(std::move(callback), 0);
  return ERR_IO_PENDING;
}
340
// Not supported by the mock; hitting this indicates a test is exercising a
// code path the mock was never meant to cover.
void MockDiskEntry::SetLastUsedTimeForTest(base::Time time) {
  NOTREACHED();
}
344
// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false. Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  // Re-enabling flushes everything queued while callbacks were ignored.
  if (!value)
    StoreAndDeliverCallbacks(false, nullptr, CompletionOnceCallback(), 0);
}
356
MockDiskEntry::~MockDiskEntry() = default;
358
// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry. We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(CompletionOnceCallback callback, int result) {
  // While callbacks are globally ignored, queue instead of posting.
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, std::move(callback), result);
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::BindOnce(&MockDiskEntry::RunCallback, this,
                                std::move(callback), result));
}
369
// Delivers a posted completion. Sparse operations that marked the entry busy
// get re-posted once, so they take two message-loop trips before completing.
void MockDiskEntry::RunCallback(CompletionOnceCallback callback, int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated. What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots). So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(std::move(callback), result);
    }
  }
  busy_ = false;
  std::move(callback).Run(result);
}
388
389 // When |store| is true, stores the callback to be delivered later; otherwise
390 // delivers any callback previously stored.
391 // Static.
StoreAndDeliverCallbacks(bool store,MockDiskEntry * entry,CompletionOnceCallback callback,int result)392 void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
393 MockDiskEntry* entry,
394 CompletionOnceCallback callback,
395 int result) {
396 static std::vector<CallbackInfo> callback_list;
397 if (store) {
398 CallbackInfo c = {entry, std::move(callback), result};
399 callback_list.push_back(std::move(c));
400 } else {
401 for (size_t i = 0; i < callback_list.size(); i++) {
402 CallbackInfo& c = callback_list[i];
403 c.entry->CallbackLater(std::move(c.callback), c.result);
404 }
405 callback_list.clear();
406 }
407 }
408
// Statics.
// When true, completion callbacks are queued by StoreAndDeliverCallbacks()
// instead of being posted; shared by every MockDiskEntry.
bool MockDiskEntry::ignore_callbacks_ = false;
411
412 //-----------------------------------------------------------------------------
413
MockDiskCache::MockDiskCache()
    : Backend(DISK_CACHE),
      open_count_(0),
      create_count_(0),
      doomed_count_(0),
      max_file_size_(std::numeric_limits<int>::max()),
      fail_requests_(false),
      soft_failures_(0),
      soft_failures_one_instance_(0),
      double_create_check_(true),
      fail_sparse_requests_(false),
      support_in_memory_entry_data_(true),
      force_fail_callback_later_(false),
      defer_op_(MockDiskEntry::DEFER_NONE) {}
428
// Drops the cache's reference on every live entry.
MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}
432
int32_t MockDiskCache::GetEntryCount() const {
  return static_cast<int32_t>(entries_.size());
}
436
// Tries OpenEntry first, then falls back to CreateEntry. |callback| is split
// into a repeating copy so both attempts can receive it.
disk_cache::EntryResult MockDiskCache::OpenOrCreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  // NOTE(review): the null check below is redundant with the DCHECK above —
  // presumably defensive; confirm before simplifying.
  base::RepeatingCallback<void(EntryResult)> copyable_callback;
  if (callback)
    copyable_callback = base::AdaptCallbackForRepeating(std::move(callback));

  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(
        copyable_callback,
        EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_)
    return EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE);

  EntryResult result;

  // First try opening the entry.
  result = OpenEntry(key, request_priority, copyable_callback);
  if (result.net_error() == OK || result.net_error() == ERR_IO_PENDING)
    return result;

  // Unable to open, try creating the entry.
  result = CreateEntry(key, request_priority, copyable_callback);
  if (result.net_error() == OK || result.net_error() == ERR_IO_PENDING)
    return result;

  return EntryResult::MakeError(ERR_CACHE_OPEN_OR_CREATE_FAILURE);
}
470
// Opens an existing entry. The map keeps its own reference; an extra AddRef()
// is taken for the caller, released when the caller calls Close().
disk_cache::EntryResult MockDiskCache::OpenEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(
        std::move(callback), EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_)
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);

  auto it = entries_.find(key);
  if (it == entries_.end())
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);

  // Doomed entries are lazily evicted here.
  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return EntryResult::MakeError(ERR_CACHE_OPEN_FAILURE);
  }

  open_count_++;

  MockDiskEntry* entry = it->second;
  entry->AddRef();

  // Soft failures configured on the cache propagate to the opened entry;
  // one-instance failures apply only to this open.
  if (soft_failures_ || soft_failures_one_instance_) {
    entry->set_fail_requests(soft_failures_ | soft_failures_one_instance_);
    soft_failures_one_instance_ = 0;
  }

  entry->set_max_file_size(max_file_size_);

  EntryResult result = EntryResult::MakeOpened(entry);
  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return result;

  CallbackLater(base::BindOnce(std::move(callback), std::move(result)));
  return EntryResult::MakeError(ERR_IO_PENDING);
}
514
// Creates a new entry for |key|. Two references are taken: one owned by the
// entries_ map, one handed to the caller via the EntryResult.
disk_cache::EntryResult MockDiskCache::CreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(
        std::move(callback), EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE)));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  if (fail_requests_)
    return EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE);

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    // Creating over a live (non-doomed) entry is a test bug unless the test
    // explicitly disabled the double-create check.
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return EntryResult::MakeError(ERR_CACHE_CREATE_FAILURE);
    }
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  // Reference owned by the entries_ map.
  new_entry->AddRef();
  entries_[key] = new_entry;

  // Reference handed to the caller (released via Close()).
  new_entry->AddRef();

  if (soft_failures_ || soft_failures_one_instance_) {
    new_entry->set_fail_requests(soft_failures_ | soft_failures_one_instance_);
    soft_failures_one_instance_ = 0;
  }

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  new_entry->set_max_file_size(max_file_size_);

  EntryResult result = EntryResult::MakeCreated(new_entry);
  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return result;

  // Pause and resume.
  if (defer_op_ == MockDiskEntry::DEFER_CREATE) {
    defer_op_ = MockDiskEntry::DEFER_NONE;
    resume_callback_ = base::BindOnce(std::move(callback), std::move(result));
    return EntryResult::MakeError(ERR_IO_PENDING);
  }

  CallbackLater(base::BindOnce(std::move(callback), std::move(result)));
  return EntryResult::MakeError(ERR_IO_PENDING);
}
574
// Removes |key| from the cache if present. Succeeds (OK) even when the key
// does not exist.
net::Error MockDiskCache::DoomEntry(const std::string& key,
                                    net::RequestPriority request_priority,
                                    CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(base::BindOnce(std::move(callback), ERR_CACHE_DOOM_FAILURE));
    return ERR_IO_PENDING;
  }

  if (fail_requests_)
    return ERR_CACHE_DOOM_FAILURE;

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
    doomed_count_++;
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(base::BindOnce(std::move(callback), OK));
  return ERR_IO_PENDING;
}
600
// Not implemented by the mock backend.
net::Error MockDiskCache::DoomAllEntries(CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
604
// Not implemented by the mock backend.
net::Error MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                             const base::Time end_time,
                                             CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
610
// Not implemented by the mock backend.
net::Error MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                           CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
615
// Not implemented by the mock backend.
int64_t MockDiskCache::CalculateSizeOfAllEntries(
    Int64CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}
620
// Iterator stub: enumeration is not supported by the mock backend.
class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  EntryResult OpenNextEntry(EntryResultCallback callback) override {
    return EntryResult::MakeError(ERR_NOT_IMPLEMENTED);
  }
};
627
CreateIterator()628 std::unique_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
629 return std::unique_ptr<Iterator>(new NotImplementedIterator());
630 }
631
// The mock reports no statistics.
void MockDiskCache::GetStats(base::StringPairs* stats) {
}
634
// Records the key so tests can inspect external-hit notifications via
// GetExternalCacheHits().
void MockDiskCache::OnExternalCacheHit(const std::string& key) {
  external_cache_hits_.push_back(key);
}
638
// The mock reports no memory usage.
size_t MockDiskCache::DumpMemoryStats(
    base::trace_event::ProcessMemoryDump* pmd,
    const std::string& parent_absolute_name) const {
  return 0u;
}
644
GetEntryInMemoryData(const std::string & key)645 uint8_t MockDiskCache::GetEntryInMemoryData(const std::string& key) {
646 if (!support_in_memory_entry_data_)
647 return 0;
648
649 auto it = entries_.find(key);
650 if (it != entries_.end())
651 return it->second->in_memory_data();
652 return 0;
653 }
654
SetEntryInMemoryData(const std::string & key,uint8_t data)655 void MockDiskCache::SetEntryInMemoryData(const std::string& key, uint8_t data) {
656 auto it = entries_.find(key);
657 if (it != entries_.end())
658 it->second->set_in_memory_data(data);
659 }
660
int64_t MockDiskCache::MaxFileSize() const {
  return max_file_size_;
}
664
ReleaseAll()665 void MockDiskCache::ReleaseAll() {
666 for (auto entry : entries_)
667 entry.second->Release();
668 entries_.clear();
669 }
670
// Posts |callback| to the current task runner so it runs asynchronously.
void MockDiskCache::CallbackLater(base::OnceClosure callback) {
  base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, std::move(callback));
}
674
IsDiskEntryDoomed(const std::string & key)675 bool MockDiskCache::IsDiskEntryDoomed(const std::string& key) {
676 auto it = entries_.find(key);
677 if (it != entries_.end())
678 return it->second->is_doomed();
679
680 return false;
681 }
682
// Delivers the create-entry completion that was parked by DEFER_CREATE.
void MockDiskCache::ResumeCacheOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_));
}
687
// Returns an owning reference to |key|'s entry, or null when absent.
scoped_refptr<MockDiskEntry> MockDiskCache::GetDiskEntryRef(
    const std::string& key) {
  auto it = entries_.find(key);
  if (it == entries_.end())
    return nullptr;
  return it->second;
}
695
// Keys recorded by OnExternalCacheHit(), in arrival order.
const std::vector<std::string>& MockDiskCache::GetExternalCacheHits() const {
  return external_cache_hits_;
}
699
700 //-----------------------------------------------------------------------------
701
CreateBackend(NetLog * net_log,std::unique_ptr<disk_cache::Backend> * backend,CompletionOnceCallback callback)702 int MockBackendFactory::CreateBackend(
703 NetLog* net_log,
704 std::unique_ptr<disk_cache::Backend>* backend,
705 CompletionOnceCallback callback) {
706 backend->reset(new MockDiskCache());
707 return OK;
708 }
709
710 //-----------------------------------------------------------------------------
711
// Default: mock backend, not acting as the main cache.
MockHttpCache::MockHttpCache() : MockHttpCache(false) {}
713
// Custom backend factory, not acting as the main cache.
MockHttpCache::MockHttpCache(
    std::unique_ptr<HttpCache::BackendFactory> disk_cache_factory)
    : MockHttpCache(std::move(disk_cache_factory), false) {}
717
// Mock backend with an explicit main-cache flag.
MockHttpCache::MockHttpCache(bool is_main_cache)
    : MockHttpCache(std::make_unique<MockBackendFactory>(), is_main_cache) {}
720
// Delegating target: wires a MockNetworkLayer and the given backend factory
// into the real HttpCache under test.
MockHttpCache::MockHttpCache(
    std::unique_ptr<HttpCache::BackendFactory> disk_cache_factory,
    bool is_main_cache)
    : http_cache_(std::make_unique<MockNetworkLayer>(),
                  std::move(disk_cache_factory),
                  is_main_cache) {}
727
// Synchronously resolves the cache's backend (waiting for async creation if
// needed). Returns nullptr when backend creation failed.
disk_cache::Backend* MockHttpCache::backend() {
  TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == OK) ? backend : nullptr;
}
735
// Convenience downcast: only valid when the backend really is a
// MockDiskCache (the default factory guarantees this).
MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}
739
int MockHttpCache::CreateTransaction(std::unique_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}
743
void MockHttpCache::SimulateCacheLockTimeout() {
  http_cache_.SimulateCacheLockTimeoutForTesting();
}
747
void MockHttpCache::SimulateCacheLockTimeoutAfterHeaders() {
  http_cache_.SimulateCacheLockTimeoutAfterHeadersForTesting();
}
751
void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}
755
// Reads the whole of stream 0 (response headers) from |disk_entry| and parses
// it into |response_info| / |response_truncated|. Returns the parse result.
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  scoped_refptr<IOBuffer> buffer = base::MakeRefCounted<IOBuffer>(size);
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->data(), size, response_info,
                                      response_truncated);
}
770
// Serializes |response_info| (via Pickle) into stream 0 of |disk_entry| with
// a truncating write. Returns true when the whole pickle was written.
bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  base::Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  // WrappedIOBuffer does not own the bytes; |pickle| stays alive until the
  // synchronous GetResult() below completes.
  scoped_refptr<WrappedIOBuffer> data = base::MakeRefCounted<WrappedIOBuffer>(
      reinterpret_cast<const char*>(pickle.data()));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}
788
OpenBackendEntry(const std::string & key,disk_cache::Entry ** entry)789 bool MockHttpCache::OpenBackendEntry(const std::string& key,
790 disk_cache::Entry** entry) {
791 TestEntryResultCompletionCallback cb;
792 disk_cache::EntryResult result =
793 backend()->OpenEntry(key, net::HIGHEST, cb.callback());
794 result = cb.GetResult(std::move(result));
795 if (result.net_error() == OK) {
796 *entry = result.ReleaseEntry();
797 return true;
798 } else {
799 return false;
800 }
801 }
802
CreateBackendEntry(const std::string & key,disk_cache::Entry ** entry,NetLog * net_log)803 bool MockHttpCache::CreateBackendEntry(const std::string& key,
804 disk_cache::Entry** entry,
805 NetLog* net_log) {
806 TestEntryResultCompletionCallback cb;
807 disk_cache::EntryResult result =
808 backend()->CreateEntry(key, net::HIGHEST, cb.callback());
809 result = cb.GetResult(std::move(result));
810 if (result.net_error() == OK) {
811 *entry = result.ReleaseEntry();
812 return true;
813 } else {
814 return false;
815 }
816 }
817
818 // Static.
GetTestMode(int test_mode)819 int MockHttpCache::GetTestMode(int test_mode) {
820 if (!g_test_mode)
821 return test_mode;
822
823 return g_test_mode;
824 }
825
// Static. Installs a global test-mode override; pass 0 to clear it.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}
830
// True when the active entry for |key| has at least one writer transaction.
bool MockHttpCache::IsWriterPresent(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry && entry->writers && !entry->writers->IsEmpty();
}
835
// True when the active entry for |key| has a headers-phase transaction.
bool MockHttpCache::IsHeadersTransactionPresent(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry && entry->headers_transaction;
}
840
// Number of reader transactions on |key|'s active entry (0 when inactive).
int MockHttpCache::GetCountReaders(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry ? entry->readers.size() : 0;
}
845
// Number of transactions queued to join |key|'s active entry (0 when
// inactive).
int MockHttpCache::GetCountAddToEntryQueue(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry ? entry->add_to_entry_queue.size() : 0;
}
850
// Number of transactions in the done-headers queue of |key|'s active entry
// (0 when inactive).
int MockHttpCache::GetCountDoneHeadersQueue(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry ? entry->done_headers_queue.size() : 0;
}
855
// Number of writer transactions on |key|'s active entry (0 when inactive or
// no writers object exists).
int MockHttpCache::GetCountWriterTransactions(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry && entry->writers ? entry->writers->GetTransactionsCount() : 0;
}
860
861 //-----------------------------------------------------------------------------
862
// Hangs forever: reports ERR_IO_PENDING but never invokes |callback|. Used by
// tests that need a backend create that never completes.
disk_cache::EntryResult MockDiskCacheNoCB::CreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  return EntryResult::MakeError(ERR_IO_PENDING);
}
869
870 //-----------------------------------------------------------------------------
871
CreateBackend(NetLog * net_log,std::unique_ptr<disk_cache::Backend> * backend,CompletionOnceCallback callback)872 int MockBackendNoCbFactory::CreateBackend(
873 NetLog* net_log,
874 std::unique_ptr<disk_cache::Backend>* backend,
875 CompletionOnceCallback callback) {
876 backend->reset(new MockDiskCacheNoCB());
877 return OK;
878 }
879
880 //-----------------------------------------------------------------------------
881
// Starts in blocking mode: CreateBackend() stalls until FinishCreation().
MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(nullptr), block_(true), fail_(false) {}
884
MockBlockingBackendFactory::~MockBlockingBackendFactory() = default;
886
// While |block_| is set, parks |backend| and |callback| until
// FinishCreation() runs; otherwise completes synchronously (honoring
// |fail_|).
int MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    std::unique_ptr<disk_cache::Backend>* backend,
    CompletionOnceCallback callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  // |backend_| points at the caller's slot; it must stay valid until
  // FinishCreation() fills it in.
  backend_ = backend;
  callback_ = std::move(callback);
  return ERR_IO_PENDING;
}
901
// Unblocks a parked CreateBackend(): fills in the backend (unless |fail_|)
// and delivers the stored completion.
void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    // Running the callback might delete |this|.
    std::move(callback_).Run(Result());
  }
}
911
912 } // namespace net
913