1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/browser/cache_storage/legacy/legacy_cache_storage_cache.h"
6
7 #include <stddef.h>
8 #include <algorithm>
9 #include <functional>
10 #include <limits>
11 #include <memory>
12 #include <string>
13 #include <utility>
14
15 #include "base/barrier_closure.h"
16 #include "base/bind.h"
17 #include "base/callback_helpers.h"
18 #include "base/containers/flat_map.h"
19 #include "base/files/file_path.h"
20 #include "base/guid.h"
21 #include "base/macros.h"
22 #include "base/memory/ptr_util.h"
23 #include "base/metrics/histogram_functions.h"
24 #include "base/metrics/histogram_macros.h"
25 #include "base/numerics/checked_math.h"
26 #include "base/strings/string_split.h"
27 #include "base/strings/string_util.h"
28 #include "base/strings/stringprintf.h"
29 #include "base/trace_event/trace_event.h"
30 #include "base/trace_event/traced_value.h"
31 #include "content/browser/cache_storage/cache_storage.pb.h"
32 #include "content/browser/cache_storage/cache_storage_blob_to_disk_cache.h"
33 #include "content/browser/cache_storage/cache_storage_cache_entry_handler.h"
34 #include "content/browser/cache_storage/cache_storage_cache_handle.h"
35 #include "content/browser/cache_storage/cache_storage_cache_observer.h"
36 #include "content/browser/cache_storage/cache_storage_histogram_utils.h"
37 #include "content/browser/cache_storage/cache_storage_manager.h"
38 #include "content/browser/cache_storage/cache_storage_quota_client.h"
39 #include "content/browser/cache_storage/cache_storage_scheduler.h"
40 #include "content/browser/cache_storage/cache_storage_trace_utils.h"
41 #include "content/browser/cache_storage/legacy/legacy_cache_storage.h"
42 #include "content/common/background_fetch/background_fetch_types.h"
43 #include "crypto/hmac.h"
44 #include "crypto/symmetric_key.h"
45 #include "mojo/public/cpp/bindings/remote.h"
46 #include "net/base/completion_repeating_callback.h"
47 #include "net/base/io_buffer.h"
48 #include "net/base/net_errors.h"
49 #include "net/disk_cache/disk_cache.h"
50 #include "net/http/http_request_headers.h"
51 #include "net/http/http_response_headers.h"
52 #include "net/http/http_status_code.h"
53 #include "services/network/public/mojom/fetch_api.mojom.h"
54 #include "storage/browser/blob/blob_storage_context.h"
55 #include "storage/browser/quota/padding_key.h"
56 #include "storage/browser/quota/quota_manager_proxy.h"
57 #include "third_party/blink/public/common/cache_storage/cache_storage_utils.h"
58 #include "third_party/blink/public/common/fetch/fetch_api_request_headers_map.h"
59 #include "third_party/blink/public/mojom/loader/referrer.mojom.h"
60 #include "third_party/blink/public/mojom/quota/quota_types.mojom.h"
61
using blink::mojom::CacheStorageError;
using blink::mojom::CacheStorageVerboseError;

namespace content {

namespace {

// Shorthand for the header-name -> header-value map used by serialized
// responses.
using ResponseHeaderMap = base::flat_map<std::string, std::string>;

// Upper bound on the estimated in-memory size of the results accumulated by
// a single cache query before the query is aborted.
const size_t kMaxQueryCacheResultBytes =
    1024 * 1024 * 10;  // 10MB query cache limit

// If the way that a cache's padding is calculated changes increment this
// version.
//
// History:
//
// 1: Uniform random 400K.
// 2: Uniform random 14,431K.
const int32_t kCachePaddingAlgorithmVersion = 2;

// Maximum number of recursive QueryCacheOpenNextEntry() calls we permit
// before forcing an asynchronous task.
const int kMaxQueryCacheRecursiveDepth = 20;

// Invoked with the parsed metadata for a disk_cache entry, or nullptr when
// the metadata could not be read or parsed.
using MetadataCallback =
    base::OnceCallback<void(std::unique_ptr<proto::CacheMetadata>)>;
89
ProtoResponseTypeToFetchResponseType(proto::CacheResponse::ResponseType response_type)90 network::mojom::FetchResponseType ProtoResponseTypeToFetchResponseType(
91 proto::CacheResponse::ResponseType response_type) {
92 switch (response_type) {
93 case proto::CacheResponse::BASIC_TYPE:
94 return network::mojom::FetchResponseType::kBasic;
95 case proto::CacheResponse::CORS_TYPE:
96 return network::mojom::FetchResponseType::kCors;
97 case proto::CacheResponse::DEFAULT_TYPE:
98 return network::mojom::FetchResponseType::kDefault;
99 case proto::CacheResponse::ERROR_TYPE:
100 return network::mojom::FetchResponseType::kError;
101 case proto::CacheResponse::OPAQUE_TYPE:
102 return network::mojom::FetchResponseType::kOpaque;
103 case proto::CacheResponse::OPAQUE_REDIRECT_TYPE:
104 return network::mojom::FetchResponseType::kOpaqueRedirect;
105 }
106 NOTREACHED();
107 return network::mojom::FetchResponseType::kOpaque;
108 }
109
FetchResponseTypeToProtoResponseType(network::mojom::FetchResponseType response_type)110 proto::CacheResponse::ResponseType FetchResponseTypeToProtoResponseType(
111 network::mojom::FetchResponseType response_type) {
112 switch (response_type) {
113 case network::mojom::FetchResponseType::kBasic:
114 return proto::CacheResponse::BASIC_TYPE;
115 case network::mojom::FetchResponseType::kCors:
116 return proto::CacheResponse::CORS_TYPE;
117 case network::mojom::FetchResponseType::kDefault:
118 return proto::CacheResponse::DEFAULT_TYPE;
119 case network::mojom::FetchResponseType::kError:
120 return proto::CacheResponse::ERROR_TYPE;
121 case network::mojom::FetchResponseType::kOpaque:
122 return proto::CacheResponse::OPAQUE_TYPE;
123 case network::mojom::FetchResponseType::kOpaqueRedirect:
124 return proto::CacheResponse::OPAQUE_REDIRECT_TYPE;
125 }
126 NOTREACHED();
127 return proto::CacheResponse::OPAQUE_TYPE;
128 }
129
// Assert that ConnectionInfo does not change since we cast it to
// an integer in order to serialize it to disk.
//
// If a new ConnectionInfo value is added, append a matching static_assert
// here; existing values must never be renumbered or entries already written
// to disk would be reinterpreted.
static_assert(net::HttpResponseInfo::CONNECTION_INFO_UNKNOWN == 0,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_HTTP1_1 == 1,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_DEPRECATED_SPDY2 == 2,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_DEPRECATED_SPDY3 == 3,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_HTTP2 == 4,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_UNKNOWN_VERSION == 5,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_DEPRECATED_HTTP2_14 == 6,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_DEPRECATED_HTTP2_15 == 7,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_HTTP0_9 == 8,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_HTTP1_0 == 9,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_32 == 10,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_33 == 11,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_34 == 12,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_35 == 13,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_36 == 14,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_37 == 15,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_38 == 16,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_39 == 17,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_40 == 18,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_41 == 19,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_42 == 20,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_43 == 21,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_Q099 == 22,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_44 == 23,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_45 == 24,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_46 == 25,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_47 == 26,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_999 == 27,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_Q048 == 28,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_Q049 == 29,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_Q050 == 30,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_T048 == 31,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_T049 == 32,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_T050 == 33,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_T099 == 34,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_DRAFT_25 == 35,
              "ConnectionInfo enum is stable");
static_assert(net::HttpResponseInfo::CONNECTION_INFO_QUIC_DRAFT_27 == 36,
              "ConnectionInfo enum is stable");
206
// Copy headers out of a cache entry and into a protobuf. The callback is
// guaranteed to be run.
void ReadMetadata(disk_cache::Entry* entry, MetadataCallback callback);
// Completion handler for the ReadData() issued by ReadMetadata(); defined
// below.
void ReadMetadataDidReadMetadata(disk_cache::Entry* entry,
                                 MetadataCallback callback,
                                 scoped_refptr<net::IOBufferWithSize> buffer,
                                 int rv);
214
VaryMatches(const blink::FetchAPIRequestHeadersMap & request,const blink::FetchAPIRequestHeadersMap & cached_request,const ResponseHeaderMap & response)215 bool VaryMatches(const blink::FetchAPIRequestHeadersMap& request,
216 const blink::FetchAPIRequestHeadersMap& cached_request,
217 const ResponseHeaderMap& response) {
218 auto vary_iter = std::find_if(
219 response.begin(), response.end(),
220 [](const ResponseHeaderMap::value_type& pair) -> bool {
221 return base::CompareCaseInsensitiveASCII(pair.first, "vary") == 0;
222 });
223 if (vary_iter == response.end())
224 return true;
225
226 for (const std::string& trimmed :
227 base::SplitString(vary_iter->second, ",", base::TRIM_WHITESPACE,
228 base::SPLIT_WANT_NONEMPTY)) {
229 if (trimmed == "*")
230 return false;
231
232 auto request_iter = request.find(trimmed);
233 auto cached_request_iter = cached_request.find(trimmed);
234
235 // If the header exists in one but not the other, no match.
236 if ((request_iter == request.end()) !=
237 (cached_request_iter == cached_request.end()))
238 return false;
239
240 // If the header exists in one, it exists in both. Verify that the values
241 // are equal.
242 if (request_iter != request.end() &&
243 request_iter->second != cached_request_iter->second)
244 return false;
245 }
246
247 return true;
248 }
249
// Check a batch operation list for duplicate entries. Any duplicate URL
// strings found are appended to |duplicate_url_list_out|, which must be
// non-null. Returns true if any duplicates were found.
bool FindDuplicateOperations(
    const std::vector<blink::mojom::BatchOperationPtr>& operations,
    std::vector<std::string>* duplicate_url_list_out) {
  using blink::mojom::BatchOperation;
  DCHECK(duplicate_url_list_out);

  // A list of zero or one operations cannot contain duplicates.
  if (operations.size() < 2) {
    return false;
  }

  // Create a temporary sorted vector of the operations to support quickly
  // finding potentially duplicate entries. Multiple entries may have the
  // same URL, but differ by VARY header, so a sorted list is easier to
  // work with than a map.
  //
  // Note, this will use 512 bytes of stack space on 64-bit devices. The
  // static size attempts to accommodate most typical Cache.addAll() uses in
  // service worker install events while not blowing up the stack too much.
  base::StackVector<BatchOperation*, 64> sorted;
  sorted->reserve(operations.size());
  for (const auto& op : operations) {
    sorted->push_back(op.get());
  }
  std::sort(sorted->begin(), sorted->end(),
            [](BatchOperation* left, BatchOperation* right) {
              return left->request->url < right->request->url;
            });

  // Check each entry in the sorted vector for any duplicates. Since the
  // list is sorted we only need to inspect the immediate neighbors that
  // have the same URL. This results in an average complexity of O(n log n).
  // If the entire list has entries with the same URL and different VARY
  // headers then this devolves into O(n^2).
  for (auto outer = sorted->cbegin(); outer != sorted->cend(); ++outer) {
    const BatchOperation* outer_op = *outer;

    // Note, the spec checks CacheQueryOptions like ignoreSearch, etc, but
    // currently there is no way for script to trigger a batch operation with
    // multiple entries and non-default options. The only exposed API that
    // supports multiple operations is addAll() and it does not allow options
    // to be passed. Therefore we assume we do not need to take any options
    // into account here.
    DCHECK(!outer_op->match_options);

    // If this entry already matches a duplicate we found, then just skip
    // ahead to find any remaining duplicates.
    if (!duplicate_url_list_out->empty() &&
        outer_op->request->url.spec() == duplicate_url_list_out->back()) {
      continue;
    }

    for (auto inner = std::next(outer); inner != sorted->cend(); ++inner) {
      const BatchOperation* inner_op = *inner;
      // Since the list is sorted we can stop looking at neighbors after
      // the first different URL.
      if (outer_op->request->url != inner_op->request->url) {
        break;
      }

      // VaryMatches() is asymmetric since the operation depends on the VARY
      // header in the target response. Since we only visit each pair of
      // entries once we need to perform the VaryMatches() call in both
      // directions.
      if (VaryMatches(outer_op->request->headers, inner_op->request->headers,
                      inner_op->response->headers) ||
          VaryMatches(outer_op->request->headers, inner_op->request->headers,
                      outer_op->response->headers)) {
        duplicate_url_list_out->push_back(inner_op->request->url.spec());
        break;
      }
    }
  }

  return !duplicate_url_list_out->empty();
}
328
// Returns |url| with its query string removed; all other components are
// left untouched.
GURL RemoveQueryParam(const GURL& url) {
  url::Replacements<char> strip_query;
  strip_query.ClearQuery();
  return url.ReplaceComponents(strip_query);
}
334
// Reads the serialized CacheMetadata header block out of |entry| and hands
// the parsed result (or nullptr on failure) to |callback|. The callback is
// always run, either asynchronously by the disk cache or synchronously
// below.
void ReadMetadata(disk_cache::Entry* entry, MetadataCallback callback) {
  DCHECK(entry);

  // Size the buffer to hold the entire headers stream of the entry.
  scoped_refptr<net::IOBufferWithSize> buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(
          entry->GetDataSize(LegacyCacheStorageCache::INDEX_HEADERS));

  // Create a callback that is copyable, even though it can only be called once.
  // BindRepeating() cannot be used directly because |callback| is not
  // copyable.
  net::CompletionRepeatingCallback read_header_callback =
      base::AdaptCallbackForRepeating(base::BindOnce(
          ReadMetadataDidReadMetadata, entry, std::move(callback), buffer));

  int read_rv =
      entry->ReadData(LegacyCacheStorageCache::INDEX_HEADERS, 0, buffer.get(),
                      buffer->size(), read_header_callback);

  // ReadData() may complete synchronously, in which case the disk cache does
  // not invoke the callback; run it here with the result.
  if (read_rv != net::ERR_IO_PENDING)
    std::move(read_header_callback).Run(read_rv);
}
356
ReadMetadataDidReadMetadata(disk_cache::Entry * entry,MetadataCallback callback,scoped_refptr<net::IOBufferWithSize> buffer,int rv)357 void ReadMetadataDidReadMetadata(disk_cache::Entry* entry,
358 MetadataCallback callback,
359 scoped_refptr<net::IOBufferWithSize> buffer,
360 int rv) {
361 if (rv != buffer->size()) {
362 std::move(callback).Run(nullptr);
363 return;
364 }
365
366 std::unique_ptr<proto::CacheMetadata> metadata(new proto::CacheMetadata());
367
368 if (!metadata->ParseFromArray(buffer->data(), buffer->size())) {
369 std::move(callback).Run(nullptr);
370 return;
371 }
372
373 std::move(callback).Run(std::move(metadata));
374 }
375
CreateRequest(const proto::CacheMetadata & metadata,const GURL & request_url)376 blink::mojom::FetchAPIRequestPtr CreateRequest(
377 const proto::CacheMetadata& metadata,
378 const GURL& request_url) {
379 auto request = blink::mojom::FetchAPIRequest::New();
380 request->url = request_url;
381 request->method = metadata.request().method();
382 request->is_reload = false;
383 request->referrer = blink::mojom::Referrer::New();
384 request->headers = {};
385
386 for (int i = 0; i < metadata.request().headers_size(); ++i) {
387 const proto::CacheHeaderMap header = metadata.request().headers(i);
388 DCHECK_EQ(std::string::npos, header.name().find('\0'));
389 DCHECK_EQ(std::string::npos, header.value().find('\0'));
390 request->headers.insert(std::make_pair(header.name(), header.value()));
391 }
392 return request;
393 }
394
CreateResponse(const proto::CacheMetadata & metadata,const std::string & cache_name)395 blink::mojom::FetchAPIResponsePtr CreateResponse(
396 const proto::CacheMetadata& metadata,
397 const std::string& cache_name) {
398 // We no longer support Responses with only a single URL entry. This field
399 // was deprecated in M57.
400 if (metadata.response().has_url())
401 return nullptr;
402
403 std::vector<GURL> url_list;
404 url_list.reserve(metadata.response().url_list_size());
405 for (int i = 0; i < metadata.response().url_list_size(); ++i)
406 url_list.push_back(GURL(metadata.response().url_list(i)));
407
408 ResponseHeaderMap headers;
409 for (int i = 0; i < metadata.response().headers_size(); ++i) {
410 const proto::CacheHeaderMap header = metadata.response().headers(i);
411 DCHECK_EQ(std::string::npos, header.name().find('\0'));
412 DCHECK_EQ(std::string::npos, header.value().find('\0'));
413 headers.insert(std::make_pair(header.name(), header.value()));
414 }
415
416 std::string alpn_negotiated_protocol =
417 metadata.response().has_alpn_negotiated_protocol()
418 ? metadata.response().alpn_negotiated_protocol()
419 : "unknown";
420
421 base::Optional<std::string> mime_type;
422 if (metadata.response().has_mime_type())
423 mime_type = metadata.response().mime_type();
424
425 base::Optional<std::string> request_method;
426 if (metadata.response().has_request_method())
427 request_method = metadata.response().request_method();
428
429 // Note that |has_range_requested| can be safely set to false since it only
430 // affects HTTP 206 (Partial) responses, which are blocked from cache storage.
431 // See https://fetch.spec.whatwg.org/#main-fetch for usage of
432 // |has_range_requested|.
433 return blink::mojom::FetchAPIResponse::New(
434 url_list, metadata.response().status_code(),
435 metadata.response().status_text(),
436 ProtoResponseTypeToFetchResponseType(metadata.response().response_type()),
437 network::mojom::FetchResponseSource::kCacheStorage, headers, mime_type,
438 request_method, nullptr /* blob */,
439 blink::mojom::ServiceWorkerResponseError::kUnknown,
440 base::Time::FromInternalValue(metadata.response().response_time()),
441 cache_name,
442 std::vector<std::string>(
443 metadata.response().cors_exposed_header_names().begin(),
444 metadata.response().cors_exposed_header_names().end()),
445 nullptr /* side_data_blob */, nullptr /* side_data_blob_for_cache_put */,
446 network::mojom::ParsedHeaders::New(),
447 // Default proto value of 0 maps to CONNECTION_INFO_UNKNOWN.
448 static_cast<net::HttpResponseInfo::ConnectionInfo>(
449 metadata.response().connection_info()),
450 alpn_negotiated_protocol, metadata.response().loaded_with_credentials(),
451 metadata.response().was_fetched_via_spdy(),
452 /* has_range_requested */ false);
453 }
454
455 // The size of opaque (non-cors) resource responses are padded in order
456 // to obfuscate their actual size.
ShouldPadResponseType(network::mojom::FetchResponseType response_type,bool has_urls)457 bool ShouldPadResponseType(network::mojom::FetchResponseType response_type,
458 bool has_urls) {
459 switch (response_type) {
460 case network::mojom::FetchResponseType::kBasic:
461 case network::mojom::FetchResponseType::kCors:
462 case network::mojom::FetchResponseType::kDefault:
463 case network::mojom::FetchResponseType::kError:
464 return false;
465 case network::mojom::FetchResponseType::kOpaque:
466 case network::mojom::FetchResponseType::kOpaqueRedirect:
467 return has_urls;
468 }
469 NOTREACHED();
470 return false;
471 }
472
ShouldPadResourceSize(const content::proto::CacheResponse * response)473 bool ShouldPadResourceSize(const content::proto::CacheResponse* response) {
474 return ShouldPadResponseType(
475 ProtoResponseTypeToFetchResponseType(response->response_type()),
476 response->url_list_size());
477 }
478
ShouldPadResourceSize(const blink::mojom::FetchAPIResponse & response)479 bool ShouldPadResourceSize(const blink::mojom::FetchAPIResponse& response) {
480 return ShouldPadResponseType(response.response_type,
481 !response.url_list.empty());
482 }
483
CalculateResponsePaddingInternal(const::content::proto::CacheResponse * response,const crypto::SymmetricKey * padding_key,int side_data_size)484 int64_t CalculateResponsePaddingInternal(
485 const ::content::proto::CacheResponse* response,
486 const crypto::SymmetricKey* padding_key,
487 int side_data_size) {
488 DCHECK(ShouldPadResourceSize(response));
489 DCHECK_GE(side_data_size, 0);
490 const std::string& url = response->url_list(response->url_list_size() - 1);
491 bool loaded_with_credentials = response->has_loaded_with_credentials() &&
492 response->loaded_with_credentials();
493 const std::string& request_method = response->has_request_method()
494 ? response->request_method()
495 : net::HttpRequestHeaders::kGetMethod;
496 return storage::ComputeResponsePadding(url, padding_key, side_data_size > 0,
497 loaded_with_credentials,
498 request_method);
499 }
500
GetDiskCachePriority(CacheStorageSchedulerPriority priority)501 net::RequestPriority GetDiskCachePriority(
502 CacheStorageSchedulerPriority priority) {
503 return priority == CacheStorageSchedulerPriority::kHigh ? net::HIGHEST
504 : net::MEDIUM;
505 }
506
507 } // namespace
508
// A single match produced by QueryCache(): the deserialized request and
// response for one disk_cache entry, plus the entry itself and its
// timestamp.
struct LegacyCacheStorageCache::QueryCacheResult {
  explicit QueryCacheResult(base::Time entry_time) : entry_time(entry_time) {}

  // Deserialized request and response for the matched entry.
  blink::mojom::FetchAPIRequestPtr request;
  blink::mojom::FetchAPIResponsePtr response;
  // Owns the underlying disk_cache entry while the result is alive.
  disk_cache::ScopedEntryPtr entry;
  // Timestamp supplied at construction for this entry.
  base::Time entry_time;
};
517
// State carried across the asynchronous steps of a single QueryCache()
// pass: the query inputs, the backend iteration cursor, and the matches
// accumulated so far.
struct LegacyCacheStorageCache::QueryCacheContext {
  QueryCacheContext(blink::mojom::FetchAPIRequestPtr request,
                    blink::mojom::CacheQueryOptionsPtr options,
                    QueryCacheCallback callback,
                    QueryTypes query_types)
      : request(std::move(request)),
        options(std::move(options)),
        callback(std::move(callback)),
        query_types(query_types),
        matches(std::make_unique<QueryCacheResults>()) {}

  ~QueryCacheContext() = default;

  // Input to QueryCache
  blink::mojom::FetchAPIRequestPtr request;
  blink::mojom::CacheQueryOptionsPtr options;
  QueryCacheCallback callback;
  QueryTypes query_types = 0;
  // Running estimate of the bytes held by |matches|, checked against
  // kMaxQueryCacheResultBytes.
  size_t estimated_out_bytes = 0;

  // Iteration state
  std::unique_ptr<disk_cache::Backend::Iterator> backend_iterator;

  // Output of QueryCache
  std::unique_ptr<std::vector<QueryCacheResult>> matches;

 private:
  DISALLOW_COPY_AND_ASSIGN(QueryCacheContext);
};
547
// static
// Creates an in-memory (non-persistent) cache. An empty path selects the
// memory backend, and size/padding start at zero since nothing is on disk.
std::unique_ptr<LegacyCacheStorageCache>
LegacyCacheStorageCache::CreateMemoryCache(
    const url::Origin& origin,
    CacheStorageOwner owner,
    const std::string& cache_name,
    LegacyCacheStorage* cache_storage,
    scoped_refptr<base::SequencedTaskRunner> scheduler_task_runner,
    scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
    scoped_refptr<BlobStorageContextWrapper> blob_storage_context,
    std::unique_ptr<crypto::SymmetricKey> cache_padding_key) {
  LegacyCacheStorageCache* cache = new LegacyCacheStorageCache(
      origin, owner, cache_name, base::FilePath(), cache_storage,
      std::move(scheduler_task_runner), std::move(quota_manager_proxy),
      std::move(blob_storage_context), 0 /* cache_size */,
      0 /* cache_padding */, std::move(cache_padding_key));
  // The owning CacheStorage observes size changes, and the backend is
  // initialized eagerly.
  cache->SetObserver(cache_storage);
  cache->InitBackend();
  return base::WrapUnique(cache);
}
568
// static
// Creates a disk-backed cache rooted at |path|. |cache_size| and
// |cache_padding| carry the previously recorded values for the existing
// on-disk data.
std::unique_ptr<LegacyCacheStorageCache>
LegacyCacheStorageCache::CreatePersistentCache(
    const url::Origin& origin,
    CacheStorageOwner owner,
    const std::string& cache_name,
    LegacyCacheStorage* cache_storage,
    const base::FilePath& path,
    scoped_refptr<base::SequencedTaskRunner> scheduler_task_runner,
    scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
    scoped_refptr<BlobStorageContextWrapper> blob_storage_context,
    int64_t cache_size,
    int64_t cache_padding,
    std::unique_ptr<crypto::SymmetricKey> cache_padding_key) {
  LegacyCacheStorageCache* cache = new LegacyCacheStorageCache(
      origin, owner, cache_name, path, cache_storage,
      std::move(scheduler_task_runner), std::move(quota_manager_proxy),
      std::move(blob_storage_context), cache_size, cache_padding,
      std::move(cache_padding_key));
  // The owning CacheStorage observes size changes, and the backend is
  // initialized eagerly.
  cache->SetObserver(cache_storage);
  cache->InitBackend();
  return base::WrapUnique(cache);
}
592
// Returns a weak pointer to this cache; it is invalidated when the cache is
// destroyed.
base::WeakPtr<LegacyCacheStorageCache> LegacyCacheStorageCache::AsWeakPtr() {
  return weak_ptr_factory_.GetWeakPtr();
}
596
// Creates a handle wrapping a weak pointer to this cache. Handle
// construction participates in the AddHandleRef()/DropHandleRef()
// ref-counting below.
CacheStorageCacheHandle LegacyCacheStorageCache::CreateHandle() {
  return CacheStorageCacheHandle(weak_ptr_factory_.GetWeakPtr());
}
600
// Increments the handle ref-count. On the 0 -> 1 transition a handle to the
// parent CacheStorage is also taken, pinning it alive while this cache is
// referenced.
void LegacyCacheStorageCache::AddHandleRef() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  handle_ref_count_ += 1;
  // Reference the parent CacheStorage while the Cache is referenced. Some
  // code may only directly reference the Cache and we don't want to let the
  // CacheStorage cleanup if it becomes unreferenced in these cases.
  if (handle_ref_count_ == 1 && cache_storage_)
    cache_storage_handle_ = cache_storage_->CreateHandle();
}
610
// Decrements the handle ref-count. On the 1 -> 0 transition the parent
// CacheStorage handle is released and CacheUnreferenced() is invoked, which
// may delete |this|.
void LegacyCacheStorageCache::DropHandleRef() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_GT(handle_ref_count_, 0U);
  handle_ref_count_ -= 1;
  // Dropping the last reference may result in the parent CacheStorage
  // deleting itself or this Cache object. Be careful not to touch the
  // `this` pointer in this method after the following code.
  if (handle_ref_count_ == 0 && cache_storage_) {
    // Move the storage handle to a local so it is released after
    // CacheUnreferenced() returns, not before the call is made.
    CacheStorageHandle handle = std::move(cache_storage_handle_);
    cache_storage_->CacheUnreferenced(this);
  }
}
623
IsUnreferenced() const624 bool LegacyCacheStorageCache::IsUnreferenced() const {
625 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
626 return !handle_ref_count_;
627 }
628
// Finds the first cached response matching |request| under |match_options|
// and reports it through |callback|. Fails immediately if the backend is
// closed; otherwise the work is queued on the scheduler.
void LegacyCacheStorageCache::Match(
    blink::mojom::FetchAPIRequestPtr request,
    blink::mojom::CacheQueryOptionsPtr match_options,
    CacheStorageSchedulerPriority priority,
    int64_t trace_id,
    ResponseCallback callback) {
  if (backend_state_ == BACKEND_CLOSED) {
    std::move(callback).Run(
        MakeErrorStorage(ErrorStorageType::kMatchBackendClosed), nullptr);
    return;
  }

  // Schedule MatchImpl in kShared mode; WrapCallbackToRunNext() releases
  // the scheduler slot when the wrapped callback runs.
  auto id = scheduler_->CreateId();
  scheduler_->ScheduleOperation(
      id, CacheStorageSchedulerMode::kShared, CacheStorageSchedulerOp::kMatch,
      priority,
      base::BindOnce(
          &LegacyCacheStorageCache::MatchImpl, weak_ptr_factory_.GetWeakPtr(),
          std::move(request), std::move(match_options), trace_id, priority,
          scheduler_->WrapCallbackToRunNext(id, std::move(callback))));
}
650
// Finds all cached responses matching |request| under |match_options| and
// reports them through |callback|. Unlike Match(), MatchAll always runs at
// normal scheduler priority.
void LegacyCacheStorageCache::MatchAll(
    blink::mojom::FetchAPIRequestPtr request,
    blink::mojom::CacheQueryOptionsPtr match_options,
    int64_t trace_id,
    ResponsesCallback callback) {
  if (backend_state_ == BACKEND_CLOSED) {
    std::move(callback).Run(
        MakeErrorStorage(ErrorStorageType::kMatchAllBackendClosed),
        std::vector<blink::mojom::FetchAPIResponsePtr>());
    return;
  }

  // Schedule MatchAllImpl in kShared mode; WrapCallbackToRunNext() releases
  // the scheduler slot when the wrapped callback runs.
  auto id = scheduler_->CreateId();
  scheduler_->ScheduleOperation(
      id, CacheStorageSchedulerMode::kShared,
      CacheStorageSchedulerOp::kMatchAll,
      CacheStorageSchedulerPriority::kNormal,
      base::BindOnce(
          &LegacyCacheStorageCache::MatchAllImpl,
          weak_ptr_factory_.GetWeakPtr(), std::move(request),
          std::move(match_options), trace_id,
          CacheStorageSchedulerPriority::kNormal,
          scheduler_->WrapCallbackToRunNext(id, std::move(callback))));
}
675
// Writes |buf_len| bytes of side data from |buffer| for the entry whose URL
// is |url| and whose response time matches |expected_response_time|. Usage
// and quota are fetched first, before any scheduled operation begins.
void LegacyCacheStorageCache::WriteSideData(ErrorCallback callback,
                                            const GURL& url,
                                            base::Time expected_response_time,
                                            int64_t trace_id,
                                            scoped_refptr<net::IOBuffer> buffer,
                                            int buf_len) {
  if (backend_state_ == BACKEND_CLOSED) {
    // Report the failure asynchronously to keep the callback contract
    // consistent with the success path.
    scheduler_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(
            std::move(callback),
            MakeErrorStorage(ErrorStorageType::kWriteSideDataBackendClosed)));
    return;
  }

  // GetUsageAndQuota is called before entering a scheduled operation since it
  // can call Size, another scheduled operation.
  quota_manager_proxy_->GetUsageAndQuota(
      scheduler_task_runner_.get(), origin_,
      blink::mojom::StorageType::kTemporary,
      base::BindOnce(&LegacyCacheStorageCache::WriteSideDataDidGetQuota,
                     weak_ptr_factory_.GetWeakPtr(), std::move(callback), url,
                     expected_response_time, trace_id, buffer, buf_len));
}
700
// Entry point for Cache.batch(): validates the batch of put/delete
// operations, accounts for quota when anything will be written, then hands
// off to BatchDidGetUsageAndQuota() to dispatch the individual operations.
void LegacyCacheStorageCache::BatchOperation(
    std::vector<blink::mojom::BatchOperationPtr> operations,
    int64_t trace_id,
    VerboseErrorCallback callback,
    BadMessageCallback bad_message_callback) {
  // This method may produce a warning message that should be returned in the
  // final VerboseErrorCallback. A message may be present in both the failure
  // and success paths.
  base::Optional<std::string> message;

  if (backend_state_ == BACKEND_CLOSED) {
    scheduler_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(
            std::move(callback),
            CacheStorageVerboseError::New(
                MakeErrorStorage(ErrorStorageType::kBatchBackendClosed),
                std::move(message))));
    return;
  }

  // From BatchCacheOperations:
  //
  // https://w3c.github.io/ServiceWorker/#batch-cache-operations-algorithm
  //
  // "If the result of running Query Cache with operation’s request,
  // operation’s options, and addedItems is not empty, throw an
  // InvalidStateError DOMException."
  std::vector<std::string> duplicate_url_list;
  if (FindDuplicateOperations(operations, &duplicate_url_list)) {
    // If we found any duplicates we need to at least warn the user. Format
    // the URL list into a comma-separated list.
    std::string url_list_string = base::JoinString(duplicate_url_list, ", ");

    // Place the duplicate list into an error message.
    message.emplace(
        base::StringPrintf("duplicate requests (%s)", url_list_string.c_str()));

    scheduler_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(std::move(callback),
                       CacheStorageVerboseError::New(
                           CacheStorageError::kErrorDuplicateOperation,
                           std::move(message))));
    return;
  }

  // Estimate the required size of the put operations. The size of the deletes
  // is unknown and not considered.
  base::CheckedNumeric<uint64_t> safe_space_required = 0;
  base::CheckedNumeric<uint64_t> safe_side_data_size = 0;
  for (const auto& operation : operations) {
    if (operation->operation_type == blink::mojom::OperationType::kPut) {
      safe_space_required += CalculateRequiredSafeSpaceForPut(operation);
      safe_side_data_size +=
          (operation->response->side_data_blob_for_cache_put
               ? operation->response->side_data_blob_for_cache_put->size
               : 0);
    }
  }
  // Size overflow implies nonsensical renderer-supplied values, so report a
  // bad IPC message in addition to failing the batch.
  if (!safe_space_required.IsValid() || !safe_side_data_size.IsValid()) {
    scheduler_task_runner_->PostTask(FROM_HERE,
                                     std::move(bad_message_callback));
    scheduler_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(
            std::move(callback),
            CacheStorageVerboseError::New(
                MakeErrorStorage(ErrorStorageType::kBatchInvalidSpace),
                std::move(message))));
    return;
  }
  uint64_t space_required = safe_space_required.ValueOrDie();
  uint64_t side_data_size = safe_side_data_size.ValueOrDie();
  if (space_required || side_data_size) {
    // GetUsageAndQuota is called before entering a scheduled operation since it
    // can call Size, another scheduled operation. This is racy. The decision
    // to commit is made before the scheduled Put operation runs. By the time
    // Put runs, the cache might already be full and the origin will be larger
    // than it's supposed to be.
    quota_manager_proxy_->GetUsageAndQuota(
        scheduler_task_runner_.get(), origin_,
        blink::mojom::StorageType::kTemporary,
        base::BindOnce(&LegacyCacheStorageCache::BatchDidGetUsageAndQuota,
                       weak_ptr_factory_.GetWeakPtr(), std::move(operations),
                       trace_id, std::move(callback),
                       std::move(bad_message_callback), std::move(message),
                       space_required, side_data_size));
    return;
  }

  // Nothing will be written, so skip the quota lookup and proceed directly
  // with zeroed usage/quota values.
  BatchDidGetUsageAndQuota(std::move(operations), trace_id, std::move(callback),
                           std::move(bad_message_callback), std::move(message),
                           0 /* space_required */, 0 /* side_data_size */,
                           blink::mojom::QuotaStatusCode::kOk, 0 /* usage */,
                           0 /* quota */);
}
798
BatchDidGetUsageAndQuota(std::vector<blink::mojom::BatchOperationPtr> operations,int64_t trace_id,VerboseErrorCallback callback,BadMessageCallback bad_message_callback,base::Optional<std::string> message,uint64_t space_required,uint64_t side_data_size,blink::mojom::QuotaStatusCode status_code,int64_t usage,int64_t quota)799 void LegacyCacheStorageCache::BatchDidGetUsageAndQuota(
800 std::vector<blink::mojom::BatchOperationPtr> operations,
801 int64_t trace_id,
802 VerboseErrorCallback callback,
803 BadMessageCallback bad_message_callback,
804 base::Optional<std::string> message,
805 uint64_t space_required,
806 uint64_t side_data_size,
807 blink::mojom::QuotaStatusCode status_code,
808 int64_t usage,
809 int64_t quota) {
810 TRACE_EVENT_WITH_FLOW1("CacheStorage",
811 "LegacyCacheStorageCache::BatchDidGetUsageAndQuota",
812 TRACE_ID_GLOBAL(trace_id),
813 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
814 "operations", CacheStorageTracedValue(operations));
815 base::CheckedNumeric<uint64_t> safe_space_required = space_required;
816 base::CheckedNumeric<uint64_t> safe_space_required_with_side_data;
817 safe_space_required += usage;
818 safe_space_required_with_side_data = safe_space_required + side_data_size;
819 if (!safe_space_required.IsValid() ||
820 !safe_space_required_with_side_data.IsValid()) {
821 scheduler_task_runner_->PostTask(FROM_HERE,
822 std::move(bad_message_callback));
823 scheduler_task_runner_->PostTask(
824 FROM_HERE,
825 base::BindOnce(
826 std::move(callback),
827 CacheStorageVerboseError::New(
828 MakeErrorStorage(
829 ErrorStorageType::kBatchDidGetUsageAndQuotaInvalidSpace),
830 std::move(message))));
831 return;
832 }
833 if (status_code != blink::mojom::QuotaStatusCode::kOk ||
834 safe_space_required.ValueOrDie() > quota) {
835 scheduler_task_runner_->PostTask(
836 FROM_HERE, base::BindOnce(std::move(callback),
837 CacheStorageVerboseError::New(
838 CacheStorageError::kErrorQuotaExceeded,
839 std::move(message))));
840 return;
841 }
842 bool skip_side_data = safe_space_required_with_side_data.ValueOrDie() > quota;
843
844 // The following relies on the guarantee that the RepeatingCallback returned
845 // from AdaptCallbackForRepeating invokes the original callback on the first
846 // invocation, and (critically) that subsequent invocations are ignored.
847 // TODO(jsbell): Replace AdaptCallbackForRepeating with ...? crbug.com/730593
848 auto callback_copy = base::AdaptCallbackForRepeating(std::move(callback));
849 auto barrier_closure = base::BarrierClosure(
850 operations.size(),
851 base::BindOnce(&LegacyCacheStorageCache::BatchDidAllOperations,
852 weak_ptr_factory_.GetWeakPtr(), callback_copy, message,
853 trace_id));
854 auto completion_callback = base::BindRepeating(
855 &LegacyCacheStorageCache::BatchDidOneOperation,
856 weak_ptr_factory_.GetWeakPtr(), std::move(barrier_closure),
857 std::move(callback_copy), std::move(message), trace_id);
858
859 // Operations may synchronously invoke |callback| which could release the
860 // last reference to this instance. Hold a handle for the duration of this
861 // loop. (Asynchronous tasks scheduled by the operations use weak ptrs which
862 // will no-op automatically.)
863 CacheStorageCacheHandle handle = CreateHandle();
864
865 for (auto& operation : operations) {
866 switch (operation->operation_type) {
867 case blink::mojom::OperationType::kPut:
868 if (skip_side_data) {
869 operation->response->side_data_blob_for_cache_put = nullptr;
870 Put(std::move(operation), trace_id, completion_callback);
871 } else {
872 Put(std::move(operation), trace_id, completion_callback);
873 }
874 break;
875 case blink::mojom::OperationType::kDelete:
876 DCHECK_EQ(1u, operations.size());
877 Delete(std::move(operation), completion_callback);
878 break;
879 case blink::mojom::OperationType::kUndefined:
880 NOTREACHED();
881 // TODO(nhiroki): This should return "TypeError".
882 // http://crbug.com/425505
883 completion_callback.Run(MakeErrorStorage(
884 ErrorStorageType::kBatchDidGetUsageAndQuotaUndefinedOp));
885 break;
886 }
887 }
888 }
889
BatchDidOneOperation(base::OnceClosure completion_closure,VerboseErrorCallback error_callback,base::Optional<std::string> message,int64_t trace_id,CacheStorageError error)890 void LegacyCacheStorageCache::BatchDidOneOperation(
891 base::OnceClosure completion_closure,
892 VerboseErrorCallback error_callback,
893 base::Optional<std::string> message,
894 int64_t trace_id,
895 CacheStorageError error) {
896 TRACE_EVENT_WITH_FLOW0("CacheStorage",
897 "LegacyCacheStorageCache::BatchDidOneOperation",
898 TRACE_ID_GLOBAL(trace_id),
899 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
900 if (error != CacheStorageError::kSuccess) {
901 // This relies on |callback| being created by AdaptCallbackForRepeating
902 // and ignoring anything but the first invocation.
903 std::move(error_callback)
904 .Run(CacheStorageVerboseError::New(error, std::move(message)));
905 }
906
907 std::move(completion_closure).Run();
908 }
909
BatchDidAllOperations(VerboseErrorCallback callback,base::Optional<std::string> message,int64_t trace_id)910 void LegacyCacheStorageCache::BatchDidAllOperations(
911 VerboseErrorCallback callback,
912 base::Optional<std::string> message,
913 int64_t trace_id) {
914 TRACE_EVENT_WITH_FLOW0("CacheStorage",
915 "LegacyCacheStorageCache::BatchDidAllOperations",
916 TRACE_ID_GLOBAL(trace_id),
917 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
918 // This relies on |callback| being created by AdaptCallbackForRepeating
919 // and ignoring anything but the first invocation.
920 std::move(callback).Run(CacheStorageVerboseError::New(
921 CacheStorageError::kSuccess, std::move(message)));
922 }
923
Keys(blink::mojom::FetchAPIRequestPtr request,blink::mojom::CacheQueryOptionsPtr options,int64_t trace_id,RequestsCallback callback)924 void LegacyCacheStorageCache::Keys(blink::mojom::FetchAPIRequestPtr request,
925 blink::mojom::CacheQueryOptionsPtr options,
926 int64_t trace_id,
927 RequestsCallback callback) {
928 if (backend_state_ == BACKEND_CLOSED) {
929 std::move(callback).Run(
930 MakeErrorStorage(ErrorStorageType::kKeysBackendClosed), nullptr);
931 return;
932 }
933
934 auto id = scheduler_->CreateId();
935 scheduler_->ScheduleOperation(
936 id, CacheStorageSchedulerMode::kShared, CacheStorageSchedulerOp::kKeys,
937 CacheStorageSchedulerPriority::kNormal,
938 base::BindOnce(
939 &LegacyCacheStorageCache::KeysImpl, weak_ptr_factory_.GetWeakPtr(),
940 std::move(request), std::move(options), trace_id,
941 scheduler_->WrapCallbackToRunNext(id, std::move(callback))));
942 }
943
Close(base::OnceClosure callback)944 void LegacyCacheStorageCache::Close(base::OnceClosure callback) {
945 DCHECK_NE(BACKEND_CLOSED, backend_state_)
946 << "Was LegacyCacheStorageCache::Close() called twice?";
947
948 auto id = scheduler_->CreateId();
949 scheduler_->ScheduleOperation(
950 id, CacheStorageSchedulerMode::kExclusive,
951 CacheStorageSchedulerOp::kClose, CacheStorageSchedulerPriority::kNormal,
952 base::BindOnce(
953 &LegacyCacheStorageCache::CloseImpl, weak_ptr_factory_.GetWeakPtr(),
954 scheduler_->WrapCallbackToRunNext(id, std::move(callback))));
955 }
956
Size(SizeCallback callback)957 void LegacyCacheStorageCache::Size(SizeCallback callback) {
958 if (backend_state_ == BACKEND_CLOSED) {
959 // TODO(jkarlin): Delete caches that can't be initialized.
960 scheduler_task_runner_->PostTask(FROM_HERE,
961 base::BindOnce(std::move(callback), 0));
962 return;
963 }
964
965 auto id = scheduler_->CreateId();
966 scheduler_->ScheduleOperation(
967 id, CacheStorageSchedulerMode::kShared, CacheStorageSchedulerOp::kSize,
968 CacheStorageSchedulerPriority::kNormal,
969 base::BindOnce(
970 &LegacyCacheStorageCache::SizeImpl, weak_ptr_factory_.GetWeakPtr(),
971 scheduler_->WrapCallbackToRunNext(id, std::move(callback))));
972 }
973
GetSizeThenClose(SizeCallback callback)974 void LegacyCacheStorageCache::GetSizeThenClose(SizeCallback callback) {
975 if (backend_state_ == BACKEND_CLOSED) {
976 scheduler_task_runner_->PostTask(FROM_HERE,
977 base::BindOnce(std::move(callback), 0));
978 return;
979 }
980
981 auto id = scheduler_->CreateId();
982 scheduler_->ScheduleOperation(
983 id, CacheStorageSchedulerMode::kExclusive,
984 CacheStorageSchedulerOp::kSizeThenClose,
985 CacheStorageSchedulerPriority::kNormal,
986 base::BindOnce(
987 &LegacyCacheStorageCache::SizeImpl, weak_ptr_factory_.GetWeakPtr(),
988 base::BindOnce(
989 &LegacyCacheStorageCache::GetSizeThenCloseDidGetSize,
990 weak_ptr_factory_.GetWeakPtr(),
991 scheduler_->WrapCallbackToRunNext(id, std::move(callback)))));
992 }
993
SetObserver(CacheStorageCacheObserver * observer)994 void LegacyCacheStorageCache::SetObserver(CacheStorageCacheObserver* observer) {
995 DCHECK((observer == nullptr) ^ (cache_observer_ == nullptr));
996 cache_observer_ = observer;
997 }
998
999 // static
EstimatedStructSize(const blink::mojom::FetchAPIRequestPtr & request)1000 size_t LegacyCacheStorageCache::EstimatedStructSize(
1001 const blink::mojom::FetchAPIRequestPtr& request) {
1002 size_t size = sizeof(*request);
1003 size += request->url.spec().size();
1004
1005 for (const auto& key_and_value : request->headers) {
1006 size += key_and_value.first.size();
1007 size += key_and_value.second.size();
1008 }
1009
1010 return size;
1011 }
1012
LegacyCacheStorageCache::~LegacyCacheStorageCache() {
  // Balances the NotifyOriginInUse() call made in the constructor.
  quota_manager_proxy_->NotifyOriginNoLongerInUse(origin_);
}
1016
// Replaces the operation scheduler; test-only. Swapping while operations are
// still queued would silently drop them, hence the DCHECK.
void LegacyCacheStorageCache::SetSchedulerForTesting(
    std::unique_ptr<CacheStorageScheduler> scheduler) {
  DCHECK(!scheduler_->ScheduledOperations());
  scheduler_ = std::move(scheduler);
}
1022
LegacyCacheStorageCache::LegacyCacheStorageCache(
    const url::Origin& origin,
    CacheStorageOwner owner,
    const std::string& cache_name,
    const base::FilePath& path,
    LegacyCacheStorage* cache_storage,
    scoped_refptr<base::SequencedTaskRunner> scheduler_task_runner,
    scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
    scoped_refptr<BlobStorageContextWrapper> blob_storage_context,
    int64_t cache_size,
    int64_t cache_padding,
    std::unique_ptr<crypto::SymmetricKey> cache_padding_key)
    : origin_(origin),
      owner_(owner),
      cache_name_(cache_name),
      path_(path),
      cache_storage_(cache_storage),
      scheduler_task_runner_(std::move(scheduler_task_runner)),
      quota_manager_proxy_(std::move(quota_manager_proxy)),
      scheduler_(new CacheStorageScheduler(CacheStorageSchedulerClient::kCache,
                                           scheduler_task_runner_)),
      cache_size_(cache_size),
      cache_padding_(cache_padding),
      cache_padding_key_(std::move(cache_padding_key)),
      max_query_size_bytes_(kMaxQueryCacheResultBytes),
      cache_observer_(nullptr),
      cache_entry_handler_(
          CacheStorageCacheEntryHandler::CreateCacheEntryHandler(
              owner,
              std::move(blob_storage_context))),
      // An empty |path| selects an in-memory (non-persistent) backend.
      memory_only_(path.empty()) {
  DCHECK(!origin_.opaque());
  DCHECK(quota_manager_proxy_.get());
  DCHECK(cache_padding_key_.get());

  // |cache_size|/|cache_padding| may each be kSizeUnknown when the cache has
  // not been measured yet; only a fully known size counts as reported.
  if (cache_size_ != CacheStorage::kSizeUnknown &&
      cache_padding_ != CacheStorage::kSizeUnknown) {
    // The size of this cache has already been reported to the QuotaManager.
    last_reported_size_ = cache_size_ + cache_padding_;
  }

  // Balanced by NotifyOriginNoLongerInUse() in the destructor.
  quota_manager_proxy_->NotifyOriginInUse(origin_);
}
1066
// Finds entries matching |request| under |options| and delivers them via
// |callback|. |query_types| selects which pieces (disk_cache entries,
// requests, response bodies) are populated in each QueryCacheResult.
void LegacyCacheStorageCache::QueryCache(
    blink::mojom::FetchAPIRequestPtr request,
    blink::mojom::CacheQueryOptionsPtr options,
    QueryTypes query_types,
    CacheStorageSchedulerPriority priority,
    QueryCacheCallback callback) {
  // Callers may request raw entries or responses-with-bodies, but never both
  // at once.
  DCHECK_NE(
      QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES,
      query_types & (QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES));
  if (backend_state_ == BACKEND_CLOSED) {
    std::move(callback).Run(
        MakeErrorStorage(ErrorStorageType::kQueryCacheBackendClosed), nullptr);
    return;
  }

  // Non-GET requests never match unless ignore_method is set; background
  // fetch is exempt from this restriction. Report success with no results.
  if (owner_ != CacheStorageOwner::kBackgroundFetch &&
      (!options || !options->ignore_method) && request &&
      !request->method.empty() &&
      request->method != net::HttpRequestHeaders::kGetMethod) {
    std::move(callback).Run(CacheStorageError::kSuccess,
                            std::make_unique<QueryCacheResults>());
    return;
  }

  // Copy the URL before |request| is moved into the context below.
  std::string request_url;
  if (request)
    request_url = request->url.spec();

  std::unique_ptr<QueryCacheContext> query_cache_context(
      new QueryCacheContext(std::move(request), std::move(options),
                            std::move(callback), query_types));
  if (query_cache_context->request &&
      !query_cache_context->request->url.is_empty() &&
      (!query_cache_context->options ||
       !query_cache_context->options->ignore_search)) {
    // There is no need to scan the entire backend, just open the exact
    // URL.

    // Create a callback that is copyable, even though it can only be called
    // once. BindRepeating() cannot be used directly because
    // |query_cache_context| is not copyable.
    auto open_entry_callback = base::AdaptCallbackForRepeating(base::BindOnce(
        &LegacyCacheStorageCache::QueryCacheDidOpenFastPath,
        weak_ptr_factory_.GetWeakPtr(), std::move(query_cache_context)));

    // OpenEntry() may complete synchronously, in which case disk_cache will
    // not invoke the callback; run it ourselves.
    disk_cache::EntryResult result = backend_->OpenEntry(
        request_url, GetDiskCachePriority(priority), open_entry_callback);
    if (result.net_error() != net::ERR_IO_PENDING)
      std::move(open_entry_callback).Run(std::move(result));
    return;
  }

  // Slow path: iterate every backend entry and filter them one at a time.
  query_cache_context->backend_iterator = backend_->CreateIterator();
  QueryCacheOpenNextEntry(std::move(query_cache_context));
}
1122
QueryCacheDidOpenFastPath(std::unique_ptr<QueryCacheContext> query_cache_context,disk_cache::EntryResult result)1123 void LegacyCacheStorageCache::QueryCacheDidOpenFastPath(
1124 std::unique_ptr<QueryCacheContext> query_cache_context,
1125 disk_cache::EntryResult result) {
1126 if (result.net_error() != net::OK) {
1127 QueryCacheContext* results = query_cache_context.get();
1128 std::move(results->callback)
1129 .Run(CacheStorageError::kSuccess,
1130 std::move(query_cache_context->matches));
1131 return;
1132 }
1133 QueryCacheFilterEntry(std::move(query_cache_context), std::move(result));
1134 }
1135
// Advances backend iteration by one entry, dispatching each opened entry to
// QueryCacheFilterEntry(). Finishes the query (sort + callback) once the
// iterator has been reset.
void LegacyCacheStorageCache::QueryCacheOpenNextEntry(
    std::unique_ptr<QueryCacheContext> query_cache_context) {
  // Track how deeply synchronous completions have recursed so we can switch
  // to a posted task before the stack grows too large (see below).
  query_cache_recursive_depth_ += 1;
  // Undo the increment on every exit path. The lambda holds a cache handle
  // instead of |this| and re-resolves it via From(), which yields null if the
  // cache has been released by the time the closure runs.
  auto cleanup = base::ScopedClosureRunner(base::BindOnce(
      [](CacheStorageCacheHandle handle) {
        LegacyCacheStorageCache* self = From(handle);
        if (!self)
          return;
        DCHECK_GT(self->query_cache_recursive_depth_, 0);
        self->query_cache_recursive_depth_ -= 1;
      },
      CreateHandle()));

  if (!query_cache_context->backend_iterator) {
    // Iteration is complete.
    std::sort(query_cache_context->matches->begin(),
              query_cache_context->matches->end(), QueryCacheResultCompare);

    std::move(query_cache_context->callback)
        .Run(CacheStorageError::kSuccess,
             std::move(query_cache_context->matches));
    return;
  }

  disk_cache::Backend::Iterator& iterator =
      *query_cache_context->backend_iterator;

  // Create a callback that is copyable, even though it can only be called once.
  // BindRepeating() cannot be used directly because |query_cache_context| is
  // not copyable.
  auto open_entry_callback = base::AdaptCallbackForRepeating(base::BindOnce(
      &LegacyCacheStorageCache::QueryCacheFilterEntry,
      weak_ptr_factory_.GetWeakPtr(), std::move(query_cache_context)));

  disk_cache::EntryResult result = iterator.OpenNextEntry(open_entry_callback);

  // Asynchronous completion: disk_cache will run the callback itself.
  if (result.net_error() == net::ERR_IO_PENDING)
    return;

  // In most cases we can immediately invoke the callback when there is no
  // pending IO. We must be careful, however, to avoid blowing out the stack
  // when iterating a large cache. Only invoke the callback synchronously
  // if we have not recursed past a threshold depth.
  if (query_cache_recursive_depth_ <= kMaxQueryCacheRecursiveDepth) {
    std::move(open_entry_callback).Run(std::move(result));
    return;
  }

  // Too deep: break the recursion by bouncing through the task runner.
  scheduler_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(std::move(open_entry_callback), std::move(result)));
}
1188
// Receives one opened entry during iteration, filters it by URL (dropping the
// query string when ignore_search is set), and forwards surviving entries to
// metadata reading for the remaining match criteria.
void LegacyCacheStorageCache::QueryCacheFilterEntry(
    std::unique_ptr<QueryCacheContext> query_cache_context,
    disk_cache::EntryResult result) {
  if (result.net_error() == net::ERR_FAILED) {
    // This is the indicator that iteration is complete.
    query_cache_context->backend_iterator.reset();
    QueryCacheOpenNextEntry(std::move(query_cache_context));
    return;
  }

  // Any other error aborts the whole query with a storage error.
  if (result.net_error() < 0) {
    std::move(query_cache_context->callback)
        .Run(MakeErrorStorage(ErrorStorageType::kQueryCacheFilterEntryFailed),
             std::move(query_cache_context->matches));
    return;
  }

  disk_cache::ScopedEntryPtr entry(result.ReleaseEntry());

  // The backend may have closed while the entry open was in flight.
  if (backend_state_ == BACKEND_CLOSED) {
    std::move(query_cache_context->callback)
        .Run(CacheStorageError::kErrorNotFound,
             std::move(query_cache_context->matches));
    return;
  }

  if (query_cache_context->request &&
      !query_cache_context->request->url.is_empty()) {
    GURL requestURL = query_cache_context->request->url;
    GURL cachedURL = GURL(entry->GetKey());

    // With ignore_search, compare URLs with their query strings stripped.
    if (query_cache_context->options &&
        query_cache_context->options->ignore_search) {
      requestURL = RemoveQueryParam(requestURL);
      cachedURL = RemoveQueryParam(cachedURL);
    }

    if (cachedURL != requestURL) {
      QueryCacheOpenNextEntry(std::move(query_cache_context));
      return;
    }
  }

  // URL matched; load the entry's metadata for the vary-header check and to
  // build the result.
  disk_cache::Entry* entry_ptr = entry.get();
  ReadMetadata(
      entry_ptr,
      base::BindOnce(&LegacyCacheStorageCache::QueryCacheDidReadMetadata,
                     weak_ptr_factory_.GetWeakPtr(),
                     std::move(query_cache_context), std::move(entry)));
}
1239
// Receives the parsed metadata for a URL-matched entry, applies the remaining
// match criteria (Vary headers), fills in the pieces selected by
// |query_types|, and resumes iteration.
void LegacyCacheStorageCache::QueryCacheDidReadMetadata(
    std::unique_ptr<QueryCacheContext> query_cache_context,
    disk_cache::ScopedEntryPtr entry,
    std::unique_ptr<proto::CacheMetadata> metadata) {
  // Unparseable metadata means a corrupt entry: evict it and move on.
  if (!metadata) {
    entry->Doom();
    QueryCacheOpenNextEntry(std::move(query_cache_context));
    return;
  }

  // If the entry was created before we started adding entry times, then
  // default to using the Response object's time for sorting purposes.
  int64_t entry_time = metadata->has_entry_time()
                           ? metadata->entry_time()
                           : metadata->response().response_time();

  // Tentatively add a match; the checks below may pop it again.
  query_cache_context->matches->push_back(
      QueryCacheResult(base::Time::FromInternalValue(entry_time)));
  QueryCacheResult* match = &query_cache_context->matches->back();
  match->request = CreateRequest(*metadata, GURL(entry->GetKey()));
  match->response = CreateResponse(*metadata, cache_name_);

  // A response that cannot be reconstructed is treated as corruption too.
  if (!match->response) {
    entry->Doom();
    query_cache_context->matches->pop_back();
    QueryCacheOpenNextEntry(std::move(query_cache_context));
    return;
  }

  // Drop the tentative match if the Vary headers rule it out (unless the
  // caller asked to ignore them).
  if (query_cache_context->request &&
      (!query_cache_context->options ||
       !query_cache_context->options->ignore_vary) &&
      !VaryMatches(query_cache_context->request->headers,
                   match->request->headers, match->response->headers)) {
    query_cache_context->matches->pop_back();
    QueryCacheOpenNextEntry(std::move(query_cache_context));
    return;
  }

  auto blob_entry = cache_entry_handler_->CreateDiskCacheBlobEntry(
      CreateHandle(), std::move(entry));

  if (query_cache_context->query_types & QUERY_CACHE_ENTRIES)
    match->entry = std::move(blob_entry->disk_cache_entry());

  if (query_cache_context->query_types & QUERY_CACHE_REQUESTS) {
    // Enforce an overall cap on the estimated size of the result set.
    query_cache_context->estimated_out_bytes +=
        EstimatedStructSize(match->request);
    if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
      std::move(query_cache_context->callback)
          .Run(CacheStorageError::kErrorQueryTooLarge, nullptr);
      return;
    }

    cache_entry_handler_->PopulateRequestBody(blob_entry, match->request.get());
  } else {
    match->request.reset();
  }

  if (query_cache_context->query_types & QUERY_CACHE_RESPONSES_WITH_BODIES) {
    query_cache_context->estimated_out_bytes +=
        EstimatedResponseSizeWithoutBlob(*match->response);
    if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
      std::move(query_cache_context->callback)
          .Run(CacheStorageError::kErrorQueryTooLarge, nullptr);
      return;
    }
    // No stored body data: keep the match but skip blob creation.
    if (blob_entry->disk_cache_entry()->GetDataSize(INDEX_RESPONSE_BODY) == 0) {
      QueryCacheOpenNextEntry(std::move(query_cache_context));
      return;
    }

    cache_entry_handler_->PopulateResponseBody(blob_entry,
                                               match->response.get());
  } else if (!(query_cache_context->query_types &
               QUERY_CACHE_RESPONSES_NO_BODIES)) {
    // Neither response flavor was requested; drop the response entirely.
    match->response.reset();
  }

  QueryCacheOpenNextEntry(std::move(query_cache_context));
}
1321
// static
// Sort comparator used when iteration completes: orders results by their
// entry time, oldest first.
bool LegacyCacheStorageCache::QueryCacheResultCompare(
    const QueryCacheResult& lhs,
    const QueryCacheResult& rhs) {
  return lhs.entry_time < rhs.entry_time;
}
1328
1329 // static
EstimatedResponseSizeWithoutBlob(const blink::mojom::FetchAPIResponse & response)1330 size_t LegacyCacheStorageCache::EstimatedResponseSizeWithoutBlob(
1331 const blink::mojom::FetchAPIResponse& response) {
1332 size_t size = sizeof(blink::mojom::FetchAPIResponse);
1333 for (const auto& url : response.url_list)
1334 size += url.spec().size();
1335 size += response.status_text.size();
1336 if (response.cache_storage_cache_name)
1337 size += response.cache_storage_cache_name->size();
1338 for (const auto& key_and_value : response.headers) {
1339 size += key_and_value.first.size();
1340 size += key_and_value.second.size();
1341 }
1342 for (const auto& header : response.cors_exposed_header_names)
1343 size += header.size();
1344 return size;
1345 }
1346
1347 // static
CalculateResponsePadding(const blink::mojom::FetchAPIResponse & response,const crypto::SymmetricKey * padding_key,int side_data_size)1348 int64_t LegacyCacheStorageCache::CalculateResponsePadding(
1349 const blink::mojom::FetchAPIResponse& response,
1350 const crypto::SymmetricKey* padding_key,
1351 int side_data_size) {
1352 DCHECK_GE(side_data_size, 0);
1353 if (!ShouldPadResourceSize(response))
1354 return 0;
1355 // Going forward we should always have a request method here since its
1356 // impossible to create a no-cors Response via the constructor. We must
1357 // handle a missing method, however, since we may get a Response loaded
1358 // from an old cache_storage instance without the data.
1359 std::string request_method = response.request_method.has_value()
1360 ? response.request_method.value()
1361 : net::HttpRequestHeaders::kGetMethod;
1362 return storage::ComputeResponsePadding(
1363 response.url_list.back().spec(), padding_key, side_data_size > 0,
1364 response.loaded_with_credentials, request_method);
1365 }
1366
// static
// Exposes the version constant of the padding algorithm used by
// CalculateResponsePadding().
int32_t LegacyCacheStorageCache::GetResponsePaddingVersion() {
  return kCachePaddingAlgorithmVersion;
}
1371
MatchImpl(blink::mojom::FetchAPIRequestPtr request,blink::mojom::CacheQueryOptionsPtr match_options,int64_t trace_id,CacheStorageSchedulerPriority priority,ResponseCallback callback)1372 void LegacyCacheStorageCache::MatchImpl(
1373 blink::mojom::FetchAPIRequestPtr request,
1374 blink::mojom::CacheQueryOptionsPtr match_options,
1375 int64_t trace_id,
1376 CacheStorageSchedulerPriority priority,
1377 ResponseCallback callback) {
1378 MatchAllImpl(
1379 std::move(request), std::move(match_options), trace_id, priority,
1380 base::BindOnce(&LegacyCacheStorageCache::MatchDidMatchAll,
1381 weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
1382 }
1383
MatchDidMatchAll(ResponseCallback callback,CacheStorageError match_all_error,std::vector<blink::mojom::FetchAPIResponsePtr> match_all_responses)1384 void LegacyCacheStorageCache::MatchDidMatchAll(
1385 ResponseCallback callback,
1386 CacheStorageError match_all_error,
1387 std::vector<blink::mojom::FetchAPIResponsePtr> match_all_responses) {
1388 if (match_all_error != CacheStorageError::kSuccess) {
1389 std::move(callback).Run(match_all_error, nullptr);
1390 return;
1391 }
1392
1393 if (match_all_responses.empty()) {
1394 std::move(callback).Run(CacheStorageError::kErrorNotFound, nullptr);
1395 return;
1396 }
1397
1398 std::move(callback).Run(CacheStorageError::kSuccess,
1399 std::move(match_all_responses[0]));
1400 }
1401
MatchAllImpl(blink::mojom::FetchAPIRequestPtr request,blink::mojom::CacheQueryOptionsPtr options,int64_t trace_id,CacheStorageSchedulerPriority priority,ResponsesCallback callback)1402 void LegacyCacheStorageCache::MatchAllImpl(
1403 blink::mojom::FetchAPIRequestPtr request,
1404 blink::mojom::CacheQueryOptionsPtr options,
1405 int64_t trace_id,
1406 CacheStorageSchedulerPriority priority,
1407 ResponsesCallback callback) {
1408 DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
1409 TRACE_EVENT_WITH_FLOW2("CacheStorage",
1410 "LegacyCacheStorageCache::MatchAllImpl",
1411 TRACE_ID_GLOBAL(trace_id),
1412 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
1413 "request", CacheStorageTracedValue(request), "options",
1414 CacheStorageTracedValue(options));
1415 if (backend_state_ != BACKEND_OPEN) {
1416 std::move(callback).Run(
1417 MakeErrorStorage(ErrorStorageType::kStorageMatchAllBackendClosed),
1418 std::vector<blink::mojom::FetchAPIResponsePtr>());
1419 return;
1420 }
1421
1422 // Hold the cache alive while performing any operation touching the
1423 // disk_cache backend.
1424 callback = WrapCallbackWithHandle(std::move(callback));
1425
1426 QueryCache(std::move(request), std::move(options),
1427 QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES, priority,
1428 base::BindOnce(&LegacyCacheStorageCache::MatchAllDidQueryCache,
1429 weak_ptr_factory_.GetWeakPtr(), std::move(callback),
1430 trace_id));
1431 }
1432
MatchAllDidQueryCache(ResponsesCallback callback,int64_t trace_id,CacheStorageError error,std::unique_ptr<QueryCacheResults> query_cache_results)1433 void LegacyCacheStorageCache::MatchAllDidQueryCache(
1434 ResponsesCallback callback,
1435 int64_t trace_id,
1436 CacheStorageError error,
1437 std::unique_ptr<QueryCacheResults> query_cache_results) {
1438 TRACE_EVENT_WITH_FLOW0("CacheStorage",
1439 "LegacyCacheStorageCache::MatchAllDidQueryCache",
1440 TRACE_ID_GLOBAL(trace_id),
1441 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
1442
1443 if (error != CacheStorageError::kSuccess) {
1444 std::move(callback).Run(error,
1445 std::vector<blink::mojom::FetchAPIResponsePtr>());
1446 return;
1447 }
1448
1449 std::vector<blink::mojom::FetchAPIResponsePtr> out_responses;
1450 out_responses.reserve(query_cache_results->size());
1451
1452 for (auto& result : *query_cache_results) {
1453 out_responses.push_back(std::move(result.response));
1454 }
1455
1456 std::move(callback).Run(CacheStorageError::kSuccess,
1457 std::move(out_responses));
1458 }
1459
// Continuation of WriteSideData() after the quota manager reports the
// origin's current usage and quota.  Rejects the write if it would exceed
// quota; otherwise schedules the actual disk write as an exclusive scheduler
// operation.
void LegacyCacheStorageCache::WriteSideDataDidGetQuota(
    ErrorCallback callback,
    const GURL& url,
    base::Time expected_response_time,
    int64_t trace_id,
    scoped_refptr<net::IOBuffer> buffer,
    int buf_len,
    blink::mojom::QuotaStatusCode status_code,
    int64_t usage,
    int64_t quota) {
  TRACE_EVENT_WITH_FLOW0("CacheStorage",
                         "LegacyCacheStorageCache::WriteSideDataDidGetQuota",
                         TRACE_ID_GLOBAL(trace_id),
                         TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);

  // Fail if the quota lookup itself failed or if writing |buf_len| more bytes
  // would push usage past the quota.  The error is delivered asynchronously
  // via the scheduler task runner so callers observe consistent ordering with
  // the success path.
  if (status_code != blink::mojom::QuotaStatusCode::kOk ||
      (buf_len > quota - usage)) {
    scheduler_task_runner_->PostTask(
        FROM_HERE, base::BindOnce(std::move(callback),
                                  CacheStorageError::kErrorQuotaExceeded));
    return;
  }

  // Side-data writes mutate the entry, so run them in exclusive mode: no
  // other cache operation may touch the backend concurrently.
  auto id = scheduler_->CreateId();
  scheduler_->ScheduleOperation(
      id, CacheStorageSchedulerMode::kExclusive,
      CacheStorageSchedulerOp::kWriteSideData,
      CacheStorageSchedulerPriority::kNormal,
      base::BindOnce(&LegacyCacheStorageCache::WriteSideDataImpl,
                     weak_ptr_factory_.GetWeakPtr(),
                     scheduler_->WrapCallbackToRunNext(id, std::move(callback)),
                     url, expected_response_time, trace_id, buffer, buf_len));
}
1493
// Exclusive-scheduler entry point for WriteSideData().  Opens the disk_cache
// entry for |url| so the side data stream can be written to it.
void LegacyCacheStorageCache::WriteSideDataImpl(
    ErrorCallback callback,
    const GURL& url,
    base::Time expected_response_time,
    int64_t trace_id,
    scoped_refptr<net::IOBuffer> buffer,
    int buf_len) {
  DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
  TRACE_EVENT_WITH_FLOW1(
      "CacheStorage", "LegacyCacheStorageCache::WriteSideDataImpl",
      TRACE_ID_GLOBAL(trace_id),
      TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, "url", url.spec());
  if (backend_state_ != BACKEND_OPEN) {
    std::move(callback).Run(
        MakeErrorStorage(ErrorStorageType::kWriteSideDataImplBackendClosed));
    return;
  }

  // Hold the cache alive while performing any operation touching the
  // disk_cache backend.
  callback = WrapCallbackWithHandle(std::move(callback));

  // Create a callback that is copyable, even though it can only be called once.
  // BindRepeating() cannot be used directly because |callback| is not copyable.
  auto open_entry_callback = base::AdaptCallbackForRepeating(
      base::BindOnce(&LegacyCacheStorageCache::WriteSideDataDidOpenEntry,
                     weak_ptr_factory_.GetWeakPtr(), std::move(callback),
                     expected_response_time, trace_id, buffer, buf_len));

  // Note, the simple disk_cache priority is not important here because we
  // only allow one write operation at a time. Therefore there will be no
  // competing operations in the disk_cache queue.
  disk_cache::EntryResult result =
      backend_->OpenEntry(url.spec(), net::MEDIUM, open_entry_callback);
  // OpenEntry() may complete synchronously; in that case the backend will
  // not run the callback, so invoke the continuation directly.
  if (result.net_error() != net::ERR_IO_PENDING)
    std::move(open_entry_callback).Run(std::move(result));
}
1531
// Continuation of WriteSideDataImpl() once the disk_cache entry open
// completes.  A missing entry maps to kErrorNotFound; otherwise the entry's
// stored metadata is read so the response time can be validated.
void LegacyCacheStorageCache::WriteSideDataDidOpenEntry(
    ErrorCallback callback,
    base::Time expected_response_time,
    int64_t trace_id,
    scoped_refptr<net::IOBuffer> buffer,
    int buf_len,
    disk_cache::EntryResult result) {
  TRACE_EVENT_WITH_FLOW0("CacheStorage",
                         "LegacyCacheStorageCache::WriteSideDataDidOpenEntry",
                         TRACE_ID_GLOBAL(trace_id),
                         TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);

  if (result.net_error() != net::OK) {
    std::move(callback).Run(CacheStorageError::kErrorNotFound);
    return;
  }

  // Give the ownership of entry to a ScopedWritableEntry which will doom the
  // entry before closing unless we tell it that writing has successfully
  // completed via WritingCompleted.
  ScopedWritableEntry entry(result.ReleaseEntry());
  disk_cache::Entry* entry_ptr = entry.get();

  // |entry_ptr| stays valid because |entry| (the owner) is moved into the
  // bound callback.
  ReadMetadata(
      entry_ptr,
      base::BindOnce(&LegacyCacheStorageCache::WriteSideDataDidReadMetaData,
                     weak_ptr_factory_.GetWeakPtr(), std::move(callback),
                     expected_response_time, trace_id, buffer, buf_len,
                     std::move(entry)));
}
1562
// Continuation after the entry's metadata has been read.  Validates that the
// stored response time matches |expected_response_time| (i.e. the entry was
// not replaced since the caller observed it), then writes the side data to
// the INDEX_SIDE_DATA stream.
void LegacyCacheStorageCache::WriteSideDataDidReadMetaData(
    ErrorCallback callback,
    base::Time expected_response_time,
    int64_t trace_id,
    scoped_refptr<net::IOBuffer> buffer,
    int buf_len,
    ScopedWritableEntry entry,
    std::unique_ptr<proto::CacheMetadata> headers) {
  TRACE_EVENT_WITH_FLOW0(
      "CacheStorage", "LegacyCacheStorageCache::WriteSideDataDidReadMetaData",
      TRACE_ID_GLOBAL(trace_id),
      TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
  // Unreadable metadata or a response-time mismatch means the response the
  // caller targeted is gone; treat it as not found.
  if (!headers || headers->response().response_time() !=
                      expected_response_time.ToInternalValue()) {
    WriteSideDataComplete(std::move(callback), std::move(entry),
                          CacheStorageError::kErrorNotFound);
    return;
  }
  // Get a temporary copy of the entry pointer before passing it in base::Bind.
  disk_cache::Entry* temp_entry_ptr = entry.get();

  std::unique_ptr<content::proto::CacheResponse> response(
      headers->release_response());

  // For opaque responses the padding depends on the side data size; remember
  // the pre-write size so the padding can be adjusted after the write.
  int side_data_size_before_write = 0;
  if (ShouldPadResourceSize(response.get()))
    side_data_size_before_write = entry->GetDataSize(INDEX_SIDE_DATA);

  // Create a callback that is copyable, even though it can only be called once.
  // BindRepeating() cannot be used directly because |callback|, |entry| and
  // |response| are not copyable.
  net::CompletionRepeatingCallback write_side_data_callback =
      base::AdaptCallbackForRepeating(base::BindOnce(
          &LegacyCacheStorageCache::WriteSideDataDidWrite,
          weak_ptr_factory_.GetWeakPtr(), std::move(callback), std::move(entry),
          buf_len, std::move(response), side_data_size_before_write, trace_id));

  DCHECK(scheduler_->IsRunningExclusiveOperation());
  // truncate == true so any longer pre-existing side data is discarded.
  int rv = temp_entry_ptr->WriteData(
      INDEX_SIDE_DATA, 0 /* offset */, buffer.get(), buf_len,
      write_side_data_callback, true /* truncate */);

  // Handle synchronous completion; the callback is only run by the backend
  // when the write is pending.
  if (rv != net::ERR_IO_PENDING)
    std::move(write_side_data_callback).Run(rv);
}
1608
// Continuation after the side data bytes have been written.  |rv| is the
// number of bytes written (or a net error code).
void LegacyCacheStorageCache::WriteSideDataDidWrite(
    ErrorCallback callback,
    ScopedWritableEntry entry,
    int expected_bytes,
    std::unique_ptr<::content::proto::CacheResponse> response,
    int side_data_size_before_write,
    int64_t trace_id,
    int rv) {
  TRACE_EVENT_WITH_FLOW0("CacheStorage",
                         "LegacyCacheStorageCache::WriteSideDataDidWrite",
                         TRACE_ID_GLOBAL(trace_id), TRACE_EVENT_FLAG_FLOW_IN);
  // A short or failed write is a storage error; WriteSideDataComplete() will
  // let the ScopedWritableEntry doom the (possibly partially written) entry.
  if (rv != expected_bytes) {
    WriteSideDataComplete(std::move(callback), std::move(entry),
                          CacheStorageError::kErrorStorage);
    return;
  }

  // Opaque responses carry padding derived from the side data size.  Remove
  // the padding computed with the old size and add back padding computed
  // with the new size (|rv| bytes were just written).
  if (ShouldPadResourceSize(response.get())) {
    cache_padding_ -= CalculateResponsePaddingInternal(
        response.get(), cache_padding_key_.get(), side_data_size_before_write);

    cache_padding_ += CalculateResponsePaddingInternal(
        response.get(), cache_padding_key_.get(), rv);
  }

  WriteSideDataComplete(std::move(callback), std::move(entry),
                        CacheStorageError::kSuccess);
}
1637
// Final step of a WriteSideData() operation.  Decides whether the entry is
// kept or doomed, and whether the cache size must be recomputed before
// running |callback|.
void LegacyCacheStorageCache::WriteSideDataComplete(
    ErrorCallback callback,
    ScopedWritableEntry entry,
    blink::mojom::CacheStorageError error) {
  if (error != CacheStorageError::kSuccess) {
    // If we found the entry, then we possibly wrote something and now we're
    // dooming the entry, causing a change in size, so update the size before
    // returning.  (|entry| is destroyed without WritingCompleted(), which
    // dooms it.)
    if (error != CacheStorageError::kErrorNotFound) {
      UpdateCacheSize(base::BindOnce(std::move(callback), error));
      return;
    }

    // Entry was never found, so nothing on disk changed; keep the entry.
    entry.get_deleter().WritingCompleted();
    std::move(callback).Run(error);
    return;
  }

  // The write succeeded: tell the deleter to keep (not doom) the entry, then
  // refresh the cache size since the side data stream may have changed it.
  entry.get_deleter().WritingCompleted();
  UpdateCacheSize(base::BindOnce(std::move(callback), error));
}
1660
// Convenience overload: unpacks a kPut batch operation and forwards to the
// request/response Put() overload below.
void LegacyCacheStorageCache::Put(blink::mojom::BatchOperationPtr operation,
                                  int64_t trace_id,
                                  ErrorCallback callback) {
  DCHECK(BACKEND_OPEN == backend_state_ || initializing_);
  DCHECK_EQ(blink::mojom::OperationType::kPut, operation->operation_type);
  Put(std::move(operation->request), std::move(operation->response), trace_id,
      std::move(callback));
}
1669
Put(blink::mojom::FetchAPIRequestPtr request,blink::mojom::FetchAPIResponsePtr response,int64_t trace_id,ErrorCallback callback)1670 void LegacyCacheStorageCache::Put(blink::mojom::FetchAPIRequestPtr request,
1671 blink::mojom::FetchAPIResponsePtr response,
1672 int64_t trace_id,
1673 ErrorCallback callback) {
1674 DCHECK(BACKEND_OPEN == backend_state_ || initializing_);
1675
1676 UMA_HISTOGRAM_ENUMERATION("ServiceWorkerCache.Cache.AllWritesResponseType",
1677 response->response_type);
1678
1679 auto put_context = cache_entry_handler_->CreatePutContext(
1680 std::move(request), std::move(response), trace_id);
1681 auto id = scheduler_->CreateId();
1682 put_context->callback =
1683 scheduler_->WrapCallbackToRunNext(id, std::move(callback));
1684
1685 scheduler_->ScheduleOperation(
1686 id, CacheStorageSchedulerMode::kExclusive, CacheStorageSchedulerOp::kPut,
1687 CacheStorageSchedulerPriority::kNormal,
1688 base::BindOnce(&LegacyCacheStorageCache::PutImpl,
1689 weak_ptr_factory_.GetWeakPtr(), std::move(put_context)));
1690 }
1691
// Exclusive-scheduler entry point for Put().  Deletes any incumbent entry
// for the request URL before creating the new one.
void LegacyCacheStorageCache::PutImpl(std::unique_ptr<PutContext> put_context) {
  DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
  TRACE_EVENT_WITH_FLOW2(
      "CacheStorage", "LegacyCacheStorageCache::PutImpl",
      TRACE_ID_GLOBAL(put_context->trace_id),
      TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, "request",
      CacheStorageTracedValue(put_context->request), "response",
      CacheStorageTracedValue(put_context->response));
  if (backend_state_ != BACKEND_OPEN) {
    PutComplete(std::move(put_context),
                MakeErrorStorage(ErrorStorageType::kPutImplBackendClosed));
    return;
  }

  // Hold the cache alive while performing any operation touching the
  // disk_cache backend.
  put_context->callback =
      WrapCallbackWithHandle(std::move(put_context->callback));

  // Explicitly delete the incumbent resource (which may not exist). This is
  // only done so that its padding will be decremented from the calculated
  // cache padding.
  // TODO(cmumford): Research alternatives to this explicit delete as it
  // seriously impacts put performance.
  auto delete_request = blink::mojom::FetchAPIRequest::New();
  delete_request->url = put_context->request->url;
  delete_request->method = "";
  delete_request->is_reload = false;
  delete_request->referrer = blink::mojom::Referrer::New();
  delete_request->headers = {};

  // Match the incumbent by URL alone: ignore both the method and any Vary
  // headers.
  blink::mojom::CacheQueryOptionsPtr query_options =
      blink::mojom::CacheQueryOptions::New();
  query_options->ignore_method = true;
  query_options->ignore_vary = true;
  DeleteImpl(
      std::move(delete_request), std::move(query_options),
      base::BindOnce(&LegacyCacheStorageCache::PutDidDeleteEntry,
                     weak_ptr_factory_.GetWeakPtr(), std::move(put_context)));
}
1732
// Continuation of PutImpl() after the incumbent entry (if any) has been
// deleted.  Opens-or-creates the disk_cache entry that will hold the new
// response.
void LegacyCacheStorageCache::PutDidDeleteEntry(
    std::unique_ptr<PutContext> put_context,
    CacheStorageError error) {
  TRACE_EVENT_WITH_FLOW0("CacheStorage",
                         "LegacyCacheStorageCache::PutDidDeleteEntry",
                         TRACE_ID_GLOBAL(put_context->trace_id),
                         TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
  if (backend_state_ != BACKEND_OPEN) {
    PutComplete(
        std::move(put_context),
        MakeErrorStorage(ErrorStorageType::kPutDidDeleteEntryBackendClosed));
    return;
  }

  // kErrorNotFound just means there was no incumbent entry; any other failure
  // aborts the put.
  if (error != CacheStorageError::kSuccess &&
      error != CacheStorageError::kErrorNotFound) {
    PutComplete(std::move(put_context), error);
    return;
  }

  // Take raw aliases before |put_context| is moved into the callback below.
  const blink::mojom::FetchAPIRequest& request_ = *(put_context->request);
  disk_cache::Backend* backend_ptr = backend_.get();

  // Create a callback that is copyable, even though it can only be called once.
  // BindRepeating() cannot be used directly because |put_context| is not
  // copyable.
  auto create_entry_callback = base::AdaptCallbackForRepeating(
      base::BindOnce(&LegacyCacheStorageCache::PutDidCreateEntry,
                     weak_ptr_factory_.GetWeakPtr(), std::move(put_context)));

  DCHECK(scheduler_->IsRunningExclusiveOperation());
  disk_cache::EntryResult result = backend_ptr->OpenOrCreateEntry(
      request_.url.spec(), net::MEDIUM, create_entry_callback);

  // Handle synchronous completion; the backend only runs the callback when
  // the operation is pending.
  if (result.net_error() != net::ERR_IO_PENDING)
    std::move(create_entry_callback).Run(std::move(result));
}
1770
// Continuation of PutDidDeleteEntry() once the disk_cache entry has been
// opened or created.  Serializes the request/response metadata to a
// proto::CacheMetadata message and writes it to the INDEX_HEADERS stream.
void LegacyCacheStorageCache::PutDidCreateEntry(
    std::unique_ptr<PutContext> put_context,
    disk_cache::EntryResult result) {
  TRACE_EVENT_WITH_FLOW0("CacheStorage",
                         "LegacyCacheStorageCache::PutDidCreateEntry",
                         TRACE_ID_GLOBAL(put_context->trace_id),
                         TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);

  int rv = result.net_error();

  // Moving the entry into a ScopedWritableEntry which will doom the entry
  // before closing unless we tell it that writing has successfully completed
  // via WritingCompleted.
  put_context->cache_entry.reset(result.ReleaseEntry());

  if (rv != net::OK) {
    quota_manager_proxy_->NotifyWriteFailed(origin_);
    PutComplete(std::move(put_context), CacheStorageError::kErrorExists);
    return;
  }

  // Build the metadata protobuf: first the request side...
  proto::CacheMetadata metadata;
  metadata.set_entry_time(base::Time::Now().ToInternalValue());
  proto::CacheRequest* request_metadata = metadata.mutable_request();
  request_metadata->set_method(put_context->request->method);
  for (const auto& header : put_context->request->headers) {
    // Header names/values must not contain embedded NULs.
    DCHECK_EQ(std::string::npos, header.first.find('\0'));
    DCHECK_EQ(std::string::npos, header.second.find('\0'));
    proto::CacheHeaderMap* header_map = request_metadata->add_headers();
    header_map->set_name(header.first);
    header_map->set_value(header.second);
  }

  // ...then the response side.
  proto::CacheResponse* response_metadata = metadata.mutable_response();
  // Partial (206) responses are rejected before reaching this point.
  DCHECK_NE(put_context->response->status_code, net::HTTP_PARTIAL_CONTENT);
  response_metadata->set_status_code(put_context->response->status_code);
  response_metadata->set_status_text(put_context->response->status_text);
  response_metadata->set_response_type(FetchResponseTypeToProtoResponseType(
      put_context->response->response_type));
  for (const auto& url : put_context->response->url_list)
    response_metadata->add_url_list(url.spec());
  response_metadata->set_loaded_with_credentials(
      put_context->response->loaded_with_credentials);
  response_metadata->set_connection_info(
      put_context->response->connection_info);
  response_metadata->set_alpn_negotiated_protocol(
      put_context->response->alpn_negotiated_protocol);
  response_metadata->set_was_fetched_via_spdy(
      put_context->response->was_fetched_via_spdy);
  // Optional fields are only serialized when present.
  if (put_context->response->mime_type.has_value())
    response_metadata->set_mime_type(put_context->response->mime_type.value());
  if (put_context->response->request_method.has_value()) {
    response_metadata->set_request_method(
        put_context->response->request_method.value());
  }
  response_metadata->set_response_time(
      put_context->response->response_time.ToInternalValue());
  for (ResponseHeaderMap::const_iterator it =
           put_context->response->headers.begin();
       it != put_context->response->headers.end(); ++it) {
    DCHECK_EQ(std::string::npos, it->first.find('\0'));
    DCHECK_EQ(std::string::npos, it->second.find('\0'));
    proto::CacheHeaderMap* header_map = response_metadata->add_headers();
    header_map->set_name(it->first);
    header_map->set_value(it->second);
  }
  for (const auto& header : put_context->response->cors_exposed_header_names)
    response_metadata->add_cors_exposed_header_names(header);

  // Serialize the metadata into a buffer the disk_cache can consume.
  std::unique_ptr<std::string> serialized(new std::string());
  if (!metadata.SerializeToString(serialized.get())) {
    PutComplete(
        std::move(put_context),
        MakeErrorStorage(ErrorStorageType::kMetadataSerializationFailed));
    return;
  }

  scoped_refptr<net::StringIOBuffer> buffer =
      base::MakeRefCounted<net::StringIOBuffer>(std::move(serialized));

  // Get a temporary copy of the entry pointer before passing it in base::Bind.
  disk_cache::Entry* temp_entry_ptr = put_context->cache_entry.get();

  // Create a callback that is copyable, even though it can only be called once.
  // BindRepeating() cannot be used directly because |put_context| is not
  // copyable.
  net::CompletionRepeatingCallback write_headers_callback =
      base::AdaptCallbackForRepeating(
          base::BindOnce(&LegacyCacheStorageCache::PutDidWriteHeaders,
                         weak_ptr_factory_.GetWeakPtr(), std::move(put_context),
                         buffer->size()));

  DCHECK(scheduler_->IsRunningExclusiveOperation());
  rv = temp_entry_ptr->WriteData(INDEX_HEADERS, 0 /* offset */, buffer.get(),
                                 buffer->size(), write_headers_callback,
                                 true /* truncate */);

  // Handle synchronous completion.
  if (rv != net::ERR_IO_PENDING)
    std::move(write_headers_callback).Run(rv);
}
1871
PutDidWriteHeaders(std::unique_ptr<PutContext> put_context,int expected_bytes,int rv)1872 void LegacyCacheStorageCache::PutDidWriteHeaders(
1873 std::unique_ptr<PutContext> put_context,
1874 int expected_bytes,
1875 int rv) {
1876 TRACE_EVENT_WITH_FLOW0("CacheStorage",
1877 "LegacyCacheStorageCache::PutDidWriteHeaders",
1878 TRACE_ID_GLOBAL(put_context->trace_id),
1879 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
1880
1881 if (rv != expected_bytes) {
1882 quota_manager_proxy_->NotifyWriteFailed(origin_);
1883 PutComplete(
1884 std::move(put_context),
1885 MakeErrorStorage(ErrorStorageType::kPutDidWriteHeadersWrongBytes));
1886 return;
1887 }
1888
1889 if (ShouldPadResourceSize(*put_context->response)) {
1890 cache_padding_ += CalculateResponsePadding(*put_context->response,
1891 cache_padding_key_.get(),
1892 0 /* side_data_size */);
1893 }
1894
1895 PutWriteBlobToCache(std::move(put_context), INDEX_RESPONSE_BODY);
1896 }
1897
PutWriteBlobToCache(std::unique_ptr<PutContext> put_context,int disk_cache_body_index)1898 void LegacyCacheStorageCache::PutWriteBlobToCache(
1899 std::unique_ptr<PutContext> put_context,
1900 int disk_cache_body_index) {
1901 DCHECK(disk_cache_body_index == INDEX_RESPONSE_BODY ||
1902 disk_cache_body_index == INDEX_SIDE_DATA);
1903
1904 TRACE_EVENT_WITH_FLOW0("CacheStorage",
1905 "LegacyCacheStorageCache::PutWriteBlobToCache",
1906 TRACE_ID_GLOBAL(put_context->trace_id),
1907 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
1908
1909 mojo::PendingRemote<blink::mojom::Blob> blob;
1910 int64_t blob_size = 0;
1911
1912 switch (disk_cache_body_index) {
1913 case INDEX_RESPONSE_BODY: {
1914 blob = std::move(put_context->blob);
1915 put_context->blob.reset();
1916 blob_size = put_context->blob_size;
1917 break;
1918 }
1919 case INDEX_SIDE_DATA: {
1920 blob = std::move(put_context->side_data_blob);
1921 put_context->side_data_blob.reset();
1922 blob_size = put_context->side_data_blob_size;
1923 break;
1924 }
1925 case INDEX_HEADERS:
1926 NOTREACHED();
1927 }
1928
1929 ScopedWritableEntry entry(put_context->cache_entry.release());
1930
1931 // If there isn't blob data for this index, then we may need to clear any
1932 // pre-existing data. This can happen under rare circumstances if a stale
1933 // file is present and accepted by OpenOrCreateEntry().
1934 if (!blob) {
1935 disk_cache::Entry* temp_entry_ptr = entry.get();
1936
1937 net::CompletionRepeatingCallback clear_callback =
1938 base::AdaptCallbackForRepeating(base::BindOnce(
1939 &LegacyCacheStorageCache::PutWriteBlobToCacheComplete,
1940 weak_ptr_factory_.GetWeakPtr(), std::move(put_context),
1941 disk_cache_body_index, std::move(entry)));
1942
1943 // If there is no pre-existing data, then proceed to the next
1944 // step immediately.
1945 if (temp_entry_ptr->GetDataSize(disk_cache_body_index) != 0) {
1946 std::move(clear_callback).Run(net::OK);
1947 return;
1948 }
1949
1950 // There is pre-existing data and we need to truncate it.
1951 int rv = temp_entry_ptr->WriteData(
1952 disk_cache_body_index, /* offset = */ 0, /* buf = */ nullptr,
1953 /* buf_len = */ 0, clear_callback, /* truncate = */ true);
1954
1955 if (rv != net::ERR_IO_PENDING)
1956 std::move(clear_callback).Run(rv);
1957
1958 return;
1959 }
1960
1961 // We have real data, so stream it into the entry. This will overwrite
1962 // any existing data.
1963 auto blob_to_cache = std::make_unique<CacheStorageBlobToDiskCache>(
1964 quota_manager_proxy_, origin_);
1965 CacheStorageBlobToDiskCache* blob_to_cache_raw = blob_to_cache.get();
1966 BlobToDiskCacheIDMap::KeyType blob_to_cache_key =
1967 active_blob_to_disk_cache_writers_.Add(std::move(blob_to_cache));
1968
1969 blob_to_cache_raw->StreamBlobToCache(
1970 std::move(entry), disk_cache_body_index, std::move(blob), blob_size,
1971 base::BindOnce(&LegacyCacheStorageCache::PutDidWriteBlobToCache,
1972 weak_ptr_factory_.GetWeakPtr(), std::move(put_context),
1973 blob_to_cache_key, disk_cache_body_index));
1974 }
1975
PutDidWriteBlobToCache(std::unique_ptr<PutContext> put_context,BlobToDiskCacheIDMap::KeyType blob_to_cache_key,int disk_cache_body_index,ScopedWritableEntry entry,bool success)1976 void LegacyCacheStorageCache::PutDidWriteBlobToCache(
1977 std::unique_ptr<PutContext> put_context,
1978 BlobToDiskCacheIDMap::KeyType blob_to_cache_key,
1979 int disk_cache_body_index,
1980 ScopedWritableEntry entry,
1981 bool success) {
1982 DCHECK(entry);
1983 TRACE_EVENT_WITH_FLOW0("CacheStorage",
1984 "LegacyCacheStorageCache::PutDidWriteBlobToCache",
1985 TRACE_ID_GLOBAL(put_context->trace_id),
1986 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
1987
1988 active_blob_to_disk_cache_writers_.Remove(blob_to_cache_key);
1989
1990 PutWriteBlobToCacheComplete(std::move(put_context), disk_cache_body_index,
1991 std::move(entry),
1992 (success ? net::OK : net::ERR_FAILED));
1993 }
1994
PutWriteBlobToCacheComplete(std::unique_ptr<PutContext> put_context,int disk_cache_body_index,ScopedWritableEntry entry,int rv)1995 void LegacyCacheStorageCache::PutWriteBlobToCacheComplete(
1996 std::unique_ptr<PutContext> put_context,
1997 int disk_cache_body_index,
1998 ScopedWritableEntry entry,
1999 int rv) {
2000 DCHECK(entry);
2001
2002 put_context->cache_entry = std::move(entry);
2003
2004 if (rv != net::OK) {
2005 PutComplete(
2006 std::move(put_context),
2007 MakeErrorStorage(ErrorStorageType::kPutDidWriteBlobToCacheFailed));
2008 return;
2009 }
2010
2011 if (disk_cache_body_index == INDEX_RESPONSE_BODY) {
2012 PutWriteBlobToCache(std::move(put_context), INDEX_SIDE_DATA);
2013 return;
2014 }
2015
2016 PutComplete(std::move(put_context), CacheStorageError::kSuccess);
2017 }
2018
PutComplete(std::unique_ptr<PutContext> put_context,blink::mojom::CacheStorageError error)2019 void LegacyCacheStorageCache::PutComplete(
2020 std::unique_ptr<PutContext> put_context,
2021 blink::mojom::CacheStorageError error) {
2022 if (error == CacheStorageError::kSuccess) {
2023 // Make sure we've written everything.
2024 DCHECK(put_context->cache_entry);
2025 DCHECK(!put_context->blob);
2026 DCHECK(!put_context->side_data_blob);
2027
2028 // Tell the WritableScopedEntry not to doom the entry since it was a
2029 // successful operation.
2030 put_context->cache_entry.get_deleter().WritingCompleted();
2031 }
2032
2033 UpdateCacheSize(base::BindOnce(std::move(put_context->callback), error));
2034 }
2035
// Computes both the raw cache size and the padding that must be added for
// opaque responses.  Both values are delivered via |got_sizes_callback|.
void LegacyCacheStorageCache::CalculateCacheSizePadding(
    SizePaddingCallback got_sizes_callback) {
  // Create a callback that is copyable, even though it can only be called once.
  // BindRepeating() cannot be used directly because |got_sizes_callback| is not
  // copyable.
  net::Int64CompletionRepeatingCallback got_size_callback =
      base::AdaptCallbackForRepeating(base::BindOnce(
          &LegacyCacheStorageCache::CalculateCacheSizePaddingGotSize,
          weak_ptr_factory_.GetWeakPtr(), std::move(got_sizes_callback)));

  // CalculateSizeOfAllEntries() may complete synchronously (returning the
  // size) or asynchronously (returning net::ERR_IO_PENDING and running the
  // callback later).
  int64_t rv = backend_->CalculateSizeOfAllEntries(got_size_callback);
  if (rv != net::ERR_IO_PENDING)
    std::move(got_size_callback).Run(rv);
}
2050
// Second phase of CalculateCacheSizePadding(): with the raw |cache_size| in
// hand, query every entry (metadata only) so the padding for opaque
// responses can be recomputed.
void LegacyCacheStorageCache::CalculateCacheSizePaddingGotSize(
    SizePaddingCallback callback,
    int64_t cache_size) {
  // Enumerating entries is only done during cache initialization and only if
  // necessary.
  DCHECK_EQ(backend_state_, BACKEND_UNINITIALIZED);
  // An empty request with ignore_search matches every entry in the cache.
  auto request = blink::mojom::FetchAPIRequest::New();
  blink::mojom::CacheQueryOptionsPtr options =
      blink::mojom::CacheQueryOptions::New();
  options->ignore_search = true;
  QueryCache(std::move(request), std::move(options),
             QUERY_CACHE_RESPONSES_NO_BODIES,
             CacheStorageSchedulerPriority::kNormal,
             base::BindOnce(&LegacyCacheStorageCache::PaddingDidQueryCache,
                            weak_ptr_factory_.GetWeakPtr(), std::move(callback),
                            cache_size));
}
2068
PaddingDidQueryCache(SizePaddingCallback callback,int64_t cache_size,CacheStorageError error,std::unique_ptr<QueryCacheResults> query_cache_results)2069 void LegacyCacheStorageCache::PaddingDidQueryCache(
2070 SizePaddingCallback callback,
2071 int64_t cache_size,
2072 CacheStorageError error,
2073 std::unique_ptr<QueryCacheResults> query_cache_results) {
2074 int64_t cache_padding = 0;
2075 if (error == CacheStorageError::kSuccess) {
2076 for (const auto& result : *query_cache_results) {
2077 if (ShouldPadResourceSize(*result.response)) {
2078 int32_t side_data_size =
2079 result.entry ? result.entry->GetDataSize(INDEX_SIDE_DATA) : 0;
2080 cache_padding += CalculateResponsePadding(
2081 *result.response, cache_padding_key_.get(), side_data_size);
2082 }
2083 }
2084 }
2085
2086 std::move(callback).Run(cache_size, cache_padding);
2087 }
2088
CalculateCacheSize(net::Int64CompletionOnceCallback callback)2089 void LegacyCacheStorageCache::CalculateCacheSize(
2090 net::Int64CompletionOnceCallback callback) {
2091 net::Int64CompletionRepeatingCallback got_size_callback =
2092 AdaptCallbackForRepeating(std::move(callback));
2093 int64_t rv = backend_->CalculateSizeOfAllEntries(got_size_callback);
2094 if (rv != net::ERR_IO_PENDING)
2095 got_size_callback.Run(rv);
2096 }
2097
// Recomputes the cache's size from the backend and reports the delta to the
// quota system.  |callback| is run once the size has been refreshed.
void LegacyCacheStorageCache::UpdateCacheSize(base::OnceClosure callback) {
  // NOTE(review): when the backend is not open, |callback| is dropped without
  // being run — confirm all callers tolerate a never-invoked callback here.
  if (backend_state_ != BACKEND_OPEN)
    return;

  // Note that the callback holds a cache handle to keep the cache alive during
  // the operation since this UpdateCacheSize is often run after an operation
  // completes and runs its callback.
  CalculateCacheSize(base::AdaptCallbackForRepeating(base::BindOnce(
      &LegacyCacheStorageCache::UpdateCacheSizeGotSize,
      weak_ptr_factory_.GetWeakPtr(), CreateHandle(), std::move(callback))));
}
2109
// Records the freshly computed cache size, notifies the quota system of the
// change in padded size, and pings observers before running |callback|.
// |cache_handle| keeps this cache alive across the async size computation.
void LegacyCacheStorageCache::UpdateCacheSizeGotSize(
    CacheStorageCacheHandle cache_handle,
    base::OnceClosure callback,
    int64_t current_cache_size) {
  DCHECK_NE(current_cache_size, CacheStorage::kSizeUnknown);
  cache_size_ = current_cache_size;
  // Quota is tracked against the padded size; report only the change since
  // the last notification.
  int64_t size_delta = PaddedCacheSize() - last_reported_size_;
  last_reported_size_ = PaddedCacheSize();

  quota_manager_proxy_->NotifyStorageModified(
      CacheStorageQuotaClient::GetClientTypeFromOwner(owner_), origin_,
      blink::mojom::StorageType::kTemporary, size_delta);

  if (cache_storage_)
    cache_storage_->NotifyCacheContentChanged(cache_name_);

  if (cache_observer_)
    cache_observer_->CacheSizeUpdated(this);

  std::move(callback).Run();
}
2131
GetAllMatchedEntries(blink::mojom::FetchAPIRequestPtr request,blink::mojom::CacheQueryOptionsPtr options,int64_t trace_id,CacheEntriesCallback callback)2132 void LegacyCacheStorageCache::GetAllMatchedEntries(
2133 blink::mojom::FetchAPIRequestPtr request,
2134 blink::mojom::CacheQueryOptionsPtr options,
2135 int64_t trace_id,
2136 CacheEntriesCallback callback) {
2137 if (backend_state_ == BACKEND_CLOSED) {
2138 std::move(callback).Run(
2139 MakeErrorStorage(ErrorStorageType::kKeysBackendClosed), {});
2140 return;
2141 }
2142
2143 auto id = scheduler_->CreateId();
2144 scheduler_->ScheduleOperation(
2145 id, CacheStorageSchedulerMode::kShared,
2146 CacheStorageSchedulerOp::kGetAllMatched,
2147 CacheStorageSchedulerPriority::kNormal,
2148 base::BindOnce(
2149 &LegacyCacheStorageCache::GetAllMatchedEntriesImpl,
2150 weak_ptr_factory_.GetWeakPtr(), std::move(request),
2151 std::move(options), trace_id,
2152 scheduler_->WrapCallbackToRunNext(id, std::move(callback))));
2153 }
2154
// Scheduler entry point for GetAllMatchedEntries().  Queries the cache for
// all matching request/response pairs, including response bodies.
void LegacyCacheStorageCache::GetAllMatchedEntriesImpl(
    blink::mojom::FetchAPIRequestPtr request,
    blink::mojom::CacheQueryOptionsPtr options,
    int64_t trace_id,
    CacheEntriesCallback callback) {
  DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
  TRACE_EVENT_WITH_FLOW2("CacheStorage",
                         "LegacyCacheStorageCache::GetAllMatchedEntriesImpl",
                         TRACE_ID_GLOBAL(trace_id),
                         TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
                         "request", CacheStorageTracedValue(request), "options",
                         CacheStorageTracedValue(options));
  if (backend_state_ != BACKEND_OPEN) {
    std::move(callback).Run(
        MakeErrorStorage(
            ErrorStorageType::kStorageGetAllMatchedEntriesBackendClosed),
        {});
    return;
  }

  // Hold the cache alive while performing any operation touching the
  // disk_cache backend.
  callback = WrapCallbackWithHandle(std::move(callback));

  QueryCache(
      std::move(request), std::move(options),
      QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES,
      CacheStorageSchedulerPriority::kNormal,
      base::BindOnce(
          &LegacyCacheStorageCache::GetAllMatchedEntriesDidQueryCache,
          weak_ptr_factory_.GetWeakPtr(), trace_id, std::move(callback)));
}
2187
GetAllMatchedEntriesDidQueryCache(int64_t trace_id,CacheEntriesCallback callback,blink::mojom::CacheStorageError error,std::unique_ptr<QueryCacheResults> query_cache_results)2188 void LegacyCacheStorageCache::GetAllMatchedEntriesDidQueryCache(
2189 int64_t trace_id,
2190 CacheEntriesCallback callback,
2191 blink::mojom::CacheStorageError error,
2192 std::unique_ptr<QueryCacheResults> query_cache_results) {
2193 TRACE_EVENT_WITH_FLOW0(
2194 "CacheStorage",
2195 "LegacyCacheStorageCache::GetAllMatchedEntriesDidQueryCache",
2196 TRACE_ID_GLOBAL(trace_id),
2197 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
2198
2199 if (error != CacheStorageError::kSuccess) {
2200 std::move(callback).Run(error, {});
2201 return;
2202 }
2203
2204 std::vector<CacheEntry> entries;
2205 entries.reserve(query_cache_results->size());
2206
2207 for (auto& result : *query_cache_results) {
2208 entries.emplace_back(std::move(result.request), std::move(result.response));
2209 }
2210
2211 std::move(callback).Run(CacheStorageError::kSuccess, std::move(entries));
2212 }
2213
GetInitState() const2214 CacheStorageCache::InitState LegacyCacheStorageCache::GetInitState() const {
2215 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
2216 return initializing_ ? InitState::Initializing : InitState::Initialized;
2217 }
2218
// Handles a batch kDelete operation.  Copies the matching-relevant fields of
// the operation's request into a fresh FetchAPIRequest, then schedules
// DeleteImpl exclusively (deletes mutate the backend, so nothing else may run
// concurrently).
void LegacyCacheStorageCache::Delete(blink::mojom::BatchOperationPtr operation,
                                     ErrorCallback callback) {
  DCHECK(BACKEND_OPEN == backend_state_ || initializing_);
  DCHECK_EQ(blink::mojom::OperationType::kDelete, operation->operation_type);

  // Copy the request fields used by the query; match_options is moved out of
  // |operation| below.
  auto request = blink::mojom::FetchAPIRequest::New();
  request->url = operation->request->url;
  request->method = operation->request->method;
  request->is_reload = operation->request->is_reload;
  request->referrer = operation->request->referrer.Clone();
  request->headers = operation->request->headers;

  auto id = scheduler_->CreateId();
  scheduler_->ScheduleOperation(
      id, CacheStorageSchedulerMode::kExclusive,
      CacheStorageSchedulerOp::kDelete, CacheStorageSchedulerPriority::kNormal,
      base::BindOnce(
          &LegacyCacheStorageCache::DeleteImpl, weak_ptr_factory_.GetWeakPtr(),
          std::move(request), std::move(operation->match_options),
          scheduler_->WrapCallbackToRunNext(id, std::move(callback))));
}
2240
// Scheduler-side implementation of Delete().  Finds matching entries and
// dooms them in DeleteDidQueryCache.
void LegacyCacheStorageCache::DeleteImpl(
    blink::mojom::FetchAPIRequestPtr request,
    blink::mojom::CacheQueryOptionsPtr match_options,
    ErrorCallback callback) {
  DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
  // The backend may have closed between scheduling and execution.
  if (backend_state_ != BACKEND_OPEN) {
    std::move(callback).Run(
        MakeErrorStorage(ErrorStorageType::kDeleteImplBackendClosed));
    return;
  }

  // Hold the cache alive while performing any operation touching the
  // disk_cache backend.
  callback = WrapCallbackWithHandle(std::move(callback));

  // QUERY_CACHE_ENTRIES so DeleteDidQueryCache receives the live disk_cache
  // entries it must doom; response bodies are not needed for deletion.
  QueryCache(
      std::move(request), std::move(match_options),
      QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_NO_BODIES,
      CacheStorageSchedulerPriority::kNormal,
      base::BindOnce(&LegacyCacheStorageCache::DeleteDidQueryCache,
                     weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
}
2263
// Completion callback for DeleteImpl's query.  Dooms every matched entry,
// keeping the cached padding total in sync as entries disappear.
void LegacyCacheStorageCache::DeleteDidQueryCache(
    ErrorCallback callback,
    CacheStorageError error,
    std::unique_ptr<QueryCacheResults> query_cache_results) {
  if (error != CacheStorageError::kSuccess) {
    std::move(callback).Run(error);
    return;
  }

  // Deleting with no matches reports kErrorNotFound.
  if (query_cache_results->empty()) {
    std::move(callback).Run(CacheStorageError::kErrorNotFound);
    return;
  }

  DCHECK(scheduler_->IsRunningExclusiveOperation());

  for (auto& result : *query_cache_results) {
    disk_cache::ScopedEntryPtr entry = std::move(result.entry);
    // Subtract this entry's padding contribution before it is doomed so
    // cache_padding_ continues to reflect the surviving entries.
    if (ShouldPadResourceSize(*result.response)) {
      cache_padding_ -=
          CalculateResponsePadding(*result.response, cache_padding_key_.get(),
                                   entry->GetDataSize(INDEX_SIDE_DATA));
    }
    entry->Doom();
  }

  // Refresh the cache-size bookkeeping before reporting success.
  UpdateCacheSize(
      base::BindOnce(std::move(callback), CacheStorageError::kSuccess));
}
2293
// Scheduler-side implementation of Keys().  Collects the requests (keys) of
// all entries matching |request|/|options|; no response data is loaded.
void LegacyCacheStorageCache::KeysImpl(
    blink::mojom::FetchAPIRequestPtr request,
    blink::mojom::CacheQueryOptionsPtr options,
    int64_t trace_id,
    RequestsCallback callback) {
  DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
  TRACE_EVENT_WITH_FLOW2("CacheStorage", "LegacyCacheStorageCache::KeysImpl",
                         TRACE_ID_GLOBAL(trace_id),
                         TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
                         "request", CacheStorageTracedValue(request), "options",
                         CacheStorageTracedValue(options));

  // The backend may have closed between scheduling and execution.
  if (backend_state_ != BACKEND_OPEN) {
    std::move(callback).Run(
        MakeErrorStorage(ErrorStorageType::kKeysImplBackendClosed), nullptr);
    return;
  }

  // Hold the cache alive while performing any operation touching the
  // disk_cache backend.
  callback = WrapCallbackWithHandle(std::move(callback));

  QueryCache(std::move(request), std::move(options), QUERY_CACHE_REQUESTS,
             CacheStorageSchedulerPriority::kNormal,
             base::BindOnce(&LegacyCacheStorageCache::KeysDidQueryCache,
                            weak_ptr_factory_.GetWeakPtr(), std::move(callback),
                            trace_id));
}
2322
KeysDidQueryCache(RequestsCallback callback,int64_t trace_id,CacheStorageError error,std::unique_ptr<QueryCacheResults> query_cache_results)2323 void LegacyCacheStorageCache::KeysDidQueryCache(
2324 RequestsCallback callback,
2325 int64_t trace_id,
2326 CacheStorageError error,
2327 std::unique_ptr<QueryCacheResults> query_cache_results) {
2328 TRACE_EVENT_WITH_FLOW0("CacheStorage",
2329 "LegacyCacheStorageCache::KeysDidQueryCache",
2330 TRACE_ID_GLOBAL(trace_id),
2331 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
2332
2333 if (error != CacheStorageError::kSuccess) {
2334 std::move(callback).Run(error, nullptr);
2335 return;
2336 }
2337
2338 std::unique_ptr<Requests> out_requests = std::make_unique<Requests>();
2339 out_requests->reserve(query_cache_results->size());
2340 for (auto& result : *query_cache_results)
2341 out_requests->push_back(std::move(result.request));
2342 std::move(callback).Run(CacheStorageError::kSuccess, std::move(out_requests));
2343 }
2344
// Destroys the disk_cache backend.  Must run as an exclusive scheduler
// operation.  |callback| is stashed and invoked from
// DeleteBackendCompletedIO() once backend teardown completes.
void LegacyCacheStorageCache::CloseImpl(base::OnceClosure callback) {
  DCHECK_EQ(BACKEND_OPEN, backend_state_);

  DCHECK(scheduler_->IsRunningExclusiveOperation());
  backend_.reset();
  post_backend_closed_callback_ = std::move(callback);
}
2352
DeleteBackendCompletedIO()2353 void LegacyCacheStorageCache::DeleteBackendCompletedIO() {
2354 if (!post_backend_closed_callback_.is_null()) {
2355 DCHECK_NE(BACKEND_CLOSED, backend_state_);
2356 backend_state_ = BACKEND_CLOSED;
2357 std::move(post_backend_closed_callback_).Run();
2358 }
2359 }
2360
SizeImpl(SizeCallback callback)2361 void LegacyCacheStorageCache::SizeImpl(SizeCallback callback) {
2362 DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
2363
2364 // TODO(cmumford): Can CacheStorage::kSizeUnknown be returned instead of zero?
2365 if (backend_state_ != BACKEND_OPEN) {
2366 scheduler_task_runner_->PostTask(FROM_HERE,
2367 base::BindOnce(std::move(callback), 0));
2368 return;
2369 }
2370
2371 int64_t size = backend_state_ == BACKEND_OPEN ? PaddedCacheSize() : 0;
2372 scheduler_task_runner_->PostTask(FROM_HERE,
2373 base::BindOnce(std::move(callback), size));
2374 }
2375
// Intermediate step of "get size then close": invalidates the entry handler's
// disk_cache blob entries, then closes the backend, forwarding |cache_size|
// to |callback| once the close completes.
void LegacyCacheStorageCache::GetSizeThenCloseDidGetSize(SizeCallback callback,
                                                         int64_t cache_size) {
  cache_entry_handler_->InvalidateDiskCacheBlobEntrys();
  CloseImpl(base::BindOnce(std::move(callback), cache_size));
}
2381
// Creates the disk_cache backend (SimpleCache) that stores this cache's
// entries.  Must run as an exclusive scheduler operation.
void LegacyCacheStorageCache::CreateBackend(ErrorCallback callback) {
  DCHECK(!backend_);

  // Use APP_CACHE as opposed to DISK_CACHE to prevent cache eviction.
  net::CacheType cache_type = memory_only_ ? net::MEMORY_CACHE : net::APP_CACHE;

  // The maximum size of each cache. Ultimately, cache size
  // is controlled per-origin by the QuotaManager.
  // NOTE(review): memory-only caches are capped at INT_MAX — presumably
  // because the in-memory backend uses int sizes; confirm.
  int64_t max_bytes = memory_only_ ? std::numeric_limits<int>::max()
                                   : std::numeric_limits<int64_t>::max();

  std::unique_ptr<ScopedBackendPtr> backend_ptr(new ScopedBackendPtr());

  // Temporary pointer so that backend_ptr can be Pass()'d in Bind below.
  ScopedBackendPtr* backend = backend_ptr.get();

  // Create a callback that is copyable, even though it can only be called once.
  // BindRepeating() cannot be used directly because |callback| and
  // |backend_ptr| are not copyable.
  net::CompletionRepeatingCallback create_cache_callback =
      base::AdaptCallbackForRepeating(
          base::BindOnce(&LegacyCacheStorageCache::CreateBackendDidCreate,
                         weak_ptr_factory_.GetWeakPtr(), std::move(callback),
                         std::move(backend_ptr)));

  DCHECK(scheduler_->IsRunningExclusiveOperation());
  int rv = disk_cache::CreateCacheBackend(
      cache_type, net::CACHE_BACKEND_SIMPLE, path_, max_bytes,
      disk_cache::ResetHandling::kNeverReset, nullptr, backend,
      // Post-cleanup hook: lets DeleteBackendCompletedIO() finish a pending
      // CloseImpl() once the backend's files are released.
      base::BindOnce(&LegacyCacheStorageCache::DeleteBackendCompletedIO,
                     weak_ptr_factory_.GetWeakPtr()),
      create_cache_callback);
  // net-style completion: a synchronous result must be delivered manually.
  if (rv != net::ERR_IO_PENDING)
    std::move(create_cache_callback).Run(rv);
}
2417
CreateBackendDidCreate(LegacyCacheStorageCache::ErrorCallback callback,std::unique_ptr<ScopedBackendPtr> backend_ptr,int rv)2418 void LegacyCacheStorageCache::CreateBackendDidCreate(
2419 LegacyCacheStorageCache::ErrorCallback callback,
2420 std::unique_ptr<ScopedBackendPtr> backend_ptr,
2421 int rv) {
2422 if (rv != net::OK) {
2423 std::move(callback).Run(
2424 MakeErrorStorage(ErrorStorageType::kCreateBackendDidCreateFailed));
2425 return;
2426 }
2427
2428 backend_ = std::move(*backend_ptr);
2429 std::move(callback).Run(CacheStorageError::kSuccess);
2430 }
2431
// Starts lazy initialization: schedules an exclusive operation that creates
// the backend (CreateBackend) and then measures its size
// (InitDidCreateBackend and friends).
void LegacyCacheStorageCache::InitBackend() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(BACKEND_UNINITIALIZED, backend_state_);
  DCHECK(!initializing_);
  DCHECK(!scheduler_->ScheduledOperations());
  initializing_ = true;

  auto id = scheduler_->CreateId();
  scheduler_->ScheduleOperation(
      id, CacheStorageSchedulerMode::kExclusive, CacheStorageSchedulerOp::kInit,
      CacheStorageSchedulerPriority::kNormal,
      base::BindOnce(
          &LegacyCacheStorageCache::CreateBackend,
          weak_ptr_factory_.GetWeakPtr(),
          base::BindOnce(
              &LegacyCacheStorageCache::InitDidCreateBackend,
              weak_ptr_factory_.GetWeakPtr(),
              // No caller to notify — the wrapped no-op exists solely so the
              // scheduler advances to the next operation when init finishes.
              scheduler_->WrapCallbackToRunNext(id, base::DoNothing::Once()))));
}
2451
// Continues initialization after backend creation by measuring the total
// size of all entries in the backend.
void LegacyCacheStorageCache::InitDidCreateBackend(
    base::OnceClosure callback,
    CacheStorageError cache_create_error) {
  if (cache_create_error != CacheStorageError::kSuccess) {
    // Skip the size query; InitGotCacheSize forwards the error.
    InitGotCacheSize(std::move(callback), cache_create_error, 0);
    return;
  }

  // |callback| may be consumed on either the async path (bound below) or the
  // sync path (rv available immediately), so adapt it to a copyable form.
  // Under the net-style completion contract only one of the two paths runs.
  auto calculate_size_callback =
      base::AdaptCallbackForRepeating(std::move(callback));
  int64_t rv = backend_->CalculateSizeOfAllEntries(
      base::BindOnce(&LegacyCacheStorageCache::InitGotCacheSize,
                     weak_ptr_factory_.GetWeakPtr(), calculate_size_callback,
                     cache_create_error));

  if (rv != net::ERR_IO_PENDING)
    InitGotCacheSize(std::move(calculate_size_callback), cache_create_error,
                     rv);
}
2471
// Receives the measured cache size and decides whether the size/padding
// recorded in the index can be trusted or the padding must be recalculated.
void LegacyCacheStorageCache::InitGotCacheSize(
    base::OnceClosure callback,
    CacheStorageError cache_create_error,
    int64_t cache_size) {
  if (cache_create_error != CacheStorageError::kSuccess) {
    InitGotCacheSizeAndPadding(std::move(callback), cache_create_error, 0, 0);
    return;
  }

  // Now that we know the cache size either 1) the cache size should be unknown
  // (which is why the size was calculated), or 2) it must match the current
  // size. If the sizes aren't equal then there is a bug in how the cache size
  // is saved in the store's index.
  if (cache_size_ != CacheStorage::kSizeUnknown) {
    DLOG_IF(ERROR, cache_size_ != cache_size)
        << "Cache size: " << cache_size
        << " does not match size from index: " << cache_size_;
    if (cache_size_ != cache_size) {
      // We assume that if the sizes match then the cached padding is still
      // correct. If not then we recalculate the padding.
      CalculateCacheSizePaddingGotSize(
          base::BindOnce(&LegacyCacheStorageCache::InitGotCacheSizeAndPadding,
                         weak_ptr_factory_.GetWeakPtr(), std::move(callback),
                         cache_create_error),
          cache_size);
      return;
    }
  }

  // A missing or negative padding value also forces recalculation.
  if (cache_padding_ == CacheStorage::kSizeUnknown || cache_padding_ < 0) {
    CalculateCacheSizePaddingGotSize(
        base::BindOnce(&LegacyCacheStorageCache::InitGotCacheSizeAndPadding,
                       weak_ptr_factory_.GetWeakPtr(), std::move(callback),
                       cache_create_error),
        cache_size);
    return;
  }

  // If cached size matches actual size then assume cached padding is still
  // correct.
  InitGotCacheSizeAndPadding(std::move(callback), cache_create_error,
                             cache_size, cache_padding_);
}
2515
// Final initialization step: records size/padding, transitions the backend
// state, logs the result, and notifies the cache observer.
void LegacyCacheStorageCache::InitGotCacheSizeAndPadding(
    base::OnceClosure callback,
    CacheStorageError cache_create_error,
    int64_t cache_size,
    int64_t cache_padding) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  cache_size_ = cache_size;
  cache_padding_ = cache_padding;

  initializing_ = false;
  // Only transition to OPEN if creation succeeded, a backend exists, and
  // nothing else (e.g. an intervening close) already changed the state.
  backend_state_ = (cache_create_error == CacheStorageError::kSuccess &&
                    backend_ && backend_state_ == BACKEND_UNINITIALIZED)
                       ? BACKEND_OPEN
                       : BACKEND_CLOSED;

  UMA_HISTOGRAM_ENUMERATION("ServiceWorkerCache.InitBackendResult",
                            cache_create_error);

  if (cache_observer_)
    cache_observer_->CacheSizeUpdated(this);

  std::move(callback).Run();
}
2539
PaddedCacheSize() const2540 int64_t LegacyCacheStorageCache::PaddedCacheSize() const {
2541 DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
2542 if (cache_size_ == CacheStorage::kSizeUnknown ||
2543 cache_padding_ == CacheStorage::kSizeUnknown) {
2544 return CacheStorage::kSizeUnknown;
2545 }
2546 return cache_size_ + cache_padding_;
2547 }
2548
2549 base::CheckedNumeric<uint64_t>
CalculateRequiredSafeSpaceForPut(const blink::mojom::BatchOperationPtr & operation)2550 LegacyCacheStorageCache::CalculateRequiredSafeSpaceForPut(
2551 const blink::mojom::BatchOperationPtr& operation) {
2552 DCHECK_EQ(blink::mojom::OperationType::kPut, operation->operation_type);
2553 base::CheckedNumeric<uint64_t> safe_space_required = 0;
2554 safe_space_required +=
2555 CalculateRequiredSafeSpaceForResponse(operation->response);
2556 safe_space_required +=
2557 CalculateRequiredSafeSpaceForRequest(operation->request);
2558
2559 return safe_space_required;
2560 }
2561
2562 base::CheckedNumeric<uint64_t>
CalculateRequiredSafeSpaceForRequest(const blink::mojom::FetchAPIRequestPtr & request)2563 LegacyCacheStorageCache::CalculateRequiredSafeSpaceForRequest(
2564 const blink::mojom::FetchAPIRequestPtr& request) {
2565 base::CheckedNumeric<uint64_t> safe_space_required = 0;
2566 safe_space_required += request->method.size();
2567
2568 safe_space_required += request->url.spec().size();
2569
2570 for (const auto& header : request->headers) {
2571 safe_space_required += header.first.size();
2572 safe_space_required += header.second.size();
2573 }
2574
2575 return safe_space_required;
2576 }
2577
2578 base::CheckedNumeric<uint64_t>
CalculateRequiredSafeSpaceForResponse(const blink::mojom::FetchAPIResponsePtr & response)2579 LegacyCacheStorageCache::CalculateRequiredSafeSpaceForResponse(
2580 const blink::mojom::FetchAPIResponsePtr& response) {
2581 base::CheckedNumeric<uint64_t> safe_space_required = 0;
2582 safe_space_required += (response->blob ? response->blob->size : 0);
2583 safe_space_required += response->status_text.size();
2584
2585 for (const auto& header : response->headers) {
2586 safe_space_required += header.first.size();
2587 safe_space_required += header.second.size();
2588 }
2589
2590 for (const auto& header : response->cors_exposed_header_names) {
2591 safe_space_required += header.size();
2592 }
2593
2594 for (const auto& url : response->url_list) {
2595 safe_space_required += url.spec().size();
2596 }
2597
2598 return safe_space_required;
2599 }
2600
2601 } // namespace content
2602