1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5 #include "CacheFile.h"
6
7 #include <algorithm>
8 #include <utility>
9
10 #include "CacheFileChunk.h"
11 #include "CacheFileInputStream.h"
12 #include "CacheFileOutputStream.h"
13 #include "CacheFileUtils.h"
14 #include "CacheIndex.h"
15 #include "CacheLog.h"
16 #include "mozilla/DebugOnly.h"
17 #include "mozilla/Telemetry.h"
18 #include "mozilla/TelemetryHistogramEnums.h"
19 #include "nsComponentManagerUtils.h"
20 #include "nsICacheEntry.h"
21 #include "nsProxyRelease.h"
22 #include "nsThreadUtils.h"
23
24 // When CACHE_CHUNKS is defined we always cache unused chunks in mCacheChunks.
25 // When it is not defined, we always release the chunks ASAP, i.e. we cache
26 // unused chunks only when:
27 // - CacheFile is memory-only
28 // - CacheFile is still waiting for the handle
29 // - the chunk is preloaded
30
31 //#define CACHE_CHUNKS
32
33 namespace mozilla::net {
34
// Runnable used to notify a CacheFileListener asynchronously (via
// OnFileReady) when CacheFile::Init() must report its result from an event
// rather than from the synchronous call path.
class NotifyCacheFileListenerEvent : public Runnable {
 public:
  NotifyCacheFileListenerEvent(CacheFileListener* aCallback, nsresult aResult,
                               bool aIsNew)
      : Runnable("net::NotifyCacheFileListenerEvent"),
        mCallback(aCallback),
        mRV(aResult),
        mIsNew(aIsNew) {
    LOG(
        ("NotifyCacheFileListenerEvent::NotifyCacheFileListenerEvent() "
         "[this=%p]",
         this));
  }

 protected:
  ~NotifyCacheFileListenerEvent() {
    LOG(
        ("NotifyCacheFileListenerEvent::~NotifyCacheFileListenerEvent() "
         "[this=%p]",
         this));
  }

 public:
  NS_IMETHOD Run() override {
    LOG(("NotifyCacheFileListenerEvent::Run() [this=%p]", this));

    mCallback->OnFileReady(mRV, mIsNew);
    return NS_OK;
  }

 protected:
  // Listener that receives OnFileReady() when this event runs.
  nsCOMPtr<CacheFileListener> mCallback;
  // Result code forwarded to the listener.
  nsresult mRV;
  // Whether the entry was created fresh (true) or opened existing (false).
  bool mIsNew;
};
70
// Runnable used to notify a CacheFileChunkListener asynchronously
// (via OnChunkAvailable) about the availability of a chunk at a given index.
class NotifyChunkListenerEvent : public Runnable {
 public:
  NotifyChunkListenerEvent(CacheFileChunkListener* aCallback, nsresult aResult,
                           uint32_t aChunkIdx, CacheFileChunk* aChunk)
      : Runnable("net::NotifyChunkListenerEvent"),
        mCallback(aCallback),
        mRV(aResult),
        mChunkIdx(aChunkIdx),
        mChunk(aChunk) {
    LOG(("NotifyChunkListenerEvent::NotifyChunkListenerEvent() [this=%p]",
         this));
  }

 protected:
  ~NotifyChunkListenerEvent() {
    LOG(("NotifyChunkListenerEvent::~NotifyChunkListenerEvent() [this=%p]",
         this));
  }

 public:
  NS_IMETHOD Run() override {
    LOG(("NotifyChunkListenerEvent::Run() [this=%p]", this));

    mCallback->OnChunkAvailable(mRV, mChunkIdx, mChunk);
    return NS_OK;
  }

 protected:
  // Listener that receives OnChunkAvailable() when this event runs.
  nsCOMPtr<CacheFileChunkListener> mCallback;
  // Result code forwarded to the listener.
  nsresult mRV;
  // Index of the chunk within the cache file.
  uint32_t mChunkIdx;
  // Strong reference keeps the chunk alive until the listener is notified.
  RefPtr<CacheFileChunk> mChunk;
};
104
// CacheFileIOListener adapter used by CacheFile::DoomLocked(): it forwards
// only OnFileDoomed() to the wrapped CacheFileListener and crashes on any
// other IO callback, since dooming is the only operation it is registered for.
class DoomFileHelper : public CacheFileIOListener {
 public:
  NS_DECL_THREADSAFE_ISUPPORTS

  explicit DoomFileHelper(CacheFileListener* aListener)
      : mListener(aListener) {}

  NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnFileOpened should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

  NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
                           nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnDataWritten should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

  NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
                        nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnDataRead should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

  NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override {
    // mListener may be null when the caller only wanted the doom operation
    // itself, without a completion callback.
    if (mListener) mListener->OnFileDoomed(aResult);
    return NS_OK;
  }

  NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnEOFSet should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

  NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle,
                           nsresult aResult) override {
    MOZ_CRASH("DoomFileHelper::OnFileRenamed should not be called!");
    return NS_ERROR_UNEXPECTED;
  }

 private:
  virtual ~DoomFileHelper() = default;

  nsCOMPtr<CacheFileListener> mListener;
};

NS_IMPL_ISUPPORTS(DoomFileHelper, CacheFileIOListener)
152
// XPCOM boilerplate: thread-safe AddRef/Release plus a QueryInterface map for
// the three listener interfaces CacheFile implements. nsISupports is resolved
// ambiguously through CacheFileChunkListener.
NS_IMPL_ADDREF(CacheFile)
NS_IMPL_RELEASE(CacheFile)
NS_INTERFACE_MAP_BEGIN(CacheFile)
  NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileChunkListener)
  NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileIOListener)
  NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileMetadataListener)
  NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports,
                                   mozilla::net::CacheFileChunkListener)
NS_INTERFACE_MAP_END
162
163 CacheFile::CacheFile() { LOG(("CacheFile::CacheFile() [this=%p]", this)); }
164
// Destructor: flushes dirty metadata to disk for disk-backed entries that are
// in a valid state and not being killed.
CacheFile::~CacheFile() {
  LOG(("CacheFile::~CacheFile() [this=%p]", this));

  // Taking the lock in the destructor guards against concurrent access from
  // IO/metadata callbacks that may still hold a raw pointer.
  MutexAutoLock lock(mLock);
  if (!mMemoryOnly && mReady && !mKill) {
    // mReady flag indicates we have metadata plus in a valid state.
    WriteMetadataIfNeededLocked(true);
  }
}
174
// Initializes the CacheFile for the given key.
//
// aKey           - cache entry key; stored in metadata.
// aCreateNew     - create a brand-new file (CREATE_NEW) instead of opening an
//                  existing one (CREATE).
// aMemoryOnly    - keep the entry purely in memory; no file is opened and no
//                  callback may be supplied.
// aSkipSizeCheck - skip entry-size limit checks.
// aPriority      - open the file with priority IO flags.
// aPinned        - pin the entry; incompatible with aMemoryOnly.
// aCallback      - notified via OnFileReady() once the (asynchronous) open
//                  completes; must be null for memory-only or create-new.
//
// On synchronous OpenFile() failure the entry may silently fall back to
// memory-only mode (when aCreateNew, or when the IO manager is not yet
// initialized), unless the entry is pinned, in which case the failure is
// propagated so the pin request is not silently dropped.
nsresult CacheFile::Init(const nsACString& aKey, bool aCreateNew,
                         bool aMemoryOnly, bool aSkipSizeCheck, bool aPriority,
                         bool aPinned, CacheFileListener* aCallback) {
  MOZ_ASSERT(!mListener);
  MOZ_ASSERT(!mHandle);

  MOZ_ASSERT(!(aMemoryOnly && aPinned));

  nsresult rv;

  mKey = aKey;
  mOpenAsMemoryOnly = mMemoryOnly = aMemoryOnly;
  mSkipSizeCheck = aSkipSizeCheck;
  mPriority = aPriority;
  mPinned = aPinned;

  // Some consumers (at least nsHTTPCompressConv) assume that Read() can read
  // such amount of data that was announced by Available().
  // CacheFileInputStream::Available() uses also preloaded chunks to compute
  // number of available bytes in the input stream, so we have to make sure
  // the preloadChunkCount won't change during CacheFile's lifetime since
  // otherwise we could potentially release some cached chunks that were used
  // to calculate available bytes but would not be available later during the
  // call to CacheFileInputStream::Read().
  mPreloadChunkCount = CacheObserver::PreloadChunkCount();

  LOG(
      ("CacheFile::Init() [this=%p, key=%s, createNew=%d, memoryOnly=%d, "
       "priority=%d, listener=%p]",
       this, mKey.get(), aCreateNew, aMemoryOnly, aPriority, aCallback));

  if (mMemoryOnly) {
    MOZ_ASSERT(!aCallback);

    // Memory-only entries are ready immediately; no file IO is involved.
    mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, false, mKey);
    mReady = true;
    mDataSize = mMetadata->Offset();
    return NS_OK;
  }
  uint32_t flags;
  if (aCreateNew) {
    MOZ_ASSERT(!aCallback);
    flags = CacheFileIOManager::CREATE_NEW;

    // make sure we can use this entry immediately
    mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
    mReady = true;
    mDataSize = mMetadata->Offset();
  } else {
    flags = CacheFileIOManager::CREATE;
  }

  if (mPriority) {
    flags |= CacheFileIOManager::PRIORITY;
  }

  if (mPinned) {
    flags |= CacheFileIOManager::PINNED;
  }

  // The open completes asynchronously in OnFileOpened().
  mOpeningFile = true;
  mListener = aCallback;
  rv = CacheFileIOManager::OpenFile(mKey, flags, this);
  if (NS_FAILED(rv)) {
    mListener = nullptr;
    mOpeningFile = false;

    if (mPinned) {
      LOG(
          ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
           "but we want to pin, fail the file opening. [this=%p]",
           this));
      return NS_ERROR_NOT_AVAILABLE;
    }

    if (aCreateNew) {
      NS_WARNING("Forcing memory-only entry since OpenFile failed");
      LOG(
          ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
           "synchronously. We can continue in memory-only mode since "
           "aCreateNew == true. [this=%p]",
           this));

      mMemoryOnly = true;
    } else if (rv == NS_ERROR_NOT_INITIALIZED) {
      NS_WARNING(
          "Forcing memory-only entry since CacheIOManager isn't "
          "initialized.");
      LOG(
          ("CacheFile::Init() - CacheFileIOManager isn't initialized, "
           "initializing entry as memory-only. [this=%p]",
           this));

      mMemoryOnly = true;
      mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
      mReady = true;
      mDataSize = mMetadata->Offset();

      // The caller expects an asynchronous OnFileReady() notification, so
      // dispatch it as an event even though we succeeded synchronously.
      RefPtr<NotifyCacheFileListenerEvent> ev;
      ev = new NotifyCacheFileListenerEvent(aCallback, NS_OK, true);
      rv = NS_DispatchToCurrentThread(ev);
      NS_ENSURE_SUCCESS(rv, rv);
    } else {
      NS_ENSURE_SUCCESS(rv, rv);
    }
  }

  return NS_OK;
}
284
// Called by CacheFileChunk when reading the chunk's data from disk finishes.
// Handles the discarded-chunk case, records read errors on the file, and
// notifies any listeners waiting for this chunk index.
nsresult CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) {
  CacheFileAutoLock lock(this);

  nsresult rv;

  uint32_t index = aChunk->Index();

  LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08" PRIx32
       ", chunk=%p, idx=%u]",
       this, static_cast<uint32_t>(aResult), aChunk, index));

  if (aChunk->mDiscardedChunk) {
    // We discard only unused chunks, so it must be still unused when reading
    // data finishes.
    MOZ_ASSERT(aChunk->mRefCnt == 2);
    aChunk->mActiveChunk = false;
    // The chunk's back-reference to this file must be dropped outside the
    // lock, since releasing it may re-enter CacheFile.
    ReleaseOutsideLock(
        RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

    DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
    MOZ_ASSERT(removed);
    return NS_OK;
  }

  if (NS_FAILED(aResult)) {
    SetError(aResult);
  }

  if (HaveChunkListeners(index)) {
    rv = NotifyChunkListeners(index, aResult, aChunk);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}
320
// Called by CacheFileChunk when writing the chunk's data to disk finishes.
// Updates the metadata hash, notifies waiting listeners, and either keeps,
// releases, or re-deactivates the chunk depending on its state.
nsresult CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk* aChunk) {
  // In case the chunk was reused, made dirty and released between calls to
  // CacheFileChunk::Write() and CacheFile::OnChunkWritten(), we must write
  // the chunk to the disk again. When the chunk is unused and is dirty simply
  // addref and release (outside the lock) the chunk which ensures that
  // CacheFile::DeactivateChunk() will be called again.
  RefPtr<CacheFileChunk> deactivateChunkAgain;

  CacheFileAutoLock lock(this);

  nsresult rv;

  LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08" PRIx32
       ", chunk=%p, idx=%u]",
       this, static_cast<uint32_t>(aResult), aChunk, aChunk->Index()));

  // Chunk writes only happen for disk-backed entries with an open handle.
  MOZ_ASSERT(!mMemoryOnly);
  MOZ_ASSERT(!mOpeningFile);
  MOZ_ASSERT(mHandle);

  if (aChunk->mDiscardedChunk) {
    // We discard only unused chunks, so it must be still unused when writing
    // data finishes.
    MOZ_ASSERT(aChunk->mRefCnt == 2);
    aChunk->mActiveChunk = false;
    // Drop the chunk's back-reference outside the lock to avoid re-entrancy.
    ReleaseOutsideLock(
        RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

    DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
    MOZ_ASSERT(removed);
    return NS_OK;
  }

  if (NS_FAILED(aResult)) {
    SetError(aResult);
  }

  if (NS_SUCCEEDED(aResult) && !aChunk->IsDirty()) {
    // update hash value in metadata
    mMetadata->SetHash(aChunk->Index(), aChunk->Hash());
  }

  // notify listeners if there is any
  if (HaveChunkListeners(aChunk->Index())) {
    // don't release the chunk since there are some listeners queued
    rv = NotifyChunkListeners(aChunk->Index(), aResult, aChunk);
    if (NS_SUCCEEDED(rv)) {
      // The notified listeners now hold references, so refcount 2 (our map
      // entry plus the caller) is impossible here.
      MOZ_ASSERT(aChunk->mRefCnt != 2);
      return NS_OK;
    }
  }

  if (aChunk->mRefCnt != 2) {
    LOG(
        ("CacheFile::OnChunkWritten() - Chunk is still used [this=%p, chunk=%p,"
         " refcnt=%" PRIuPTR "]",
         this, aChunk, aChunk->mRefCnt.get()));

    return NS_OK;
  }

  if (aChunk->IsDirty()) {
    LOG(
        ("CacheFile::OnChunkWritten() - Unused chunk is dirty. We must go "
         "through deactivation again. [this=%p, chunk=%p]",
         this, aChunk));

    // Released after the lock is dropped, triggering DeactivateChunk() again.
    deactivateChunkAgain = aChunk;
    return NS_OK;
  }

  bool keepChunk = false;
  if (NS_SUCCEEDED(aResult)) {
    keepChunk = ShouldCacheChunk(aChunk->Index());
    LOG(("CacheFile::OnChunkWritten() - %s unused chunk [this=%p, chunk=%p]",
         keepChunk ? "Caching" : "Releasing", this, aChunk));
  } else {
    LOG(
        ("CacheFile::OnChunkWritten() - Releasing failed chunk [this=%p, "
         "chunk=%p]",
         this, aChunk));
  }

  RemoveChunkInternal(aChunk, keepChunk);

  WriteMetadataIfNeededLocked();

  return NS_OK;
}
410
// CacheFile never registers itself as a chunk-availability listener, so this
// CacheFileChunkListener callback must never fire on it.
nsresult CacheFile::OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
                                     CacheFileChunk* aChunk) {
  MOZ_CRASH("CacheFile::OnChunkAvailable should not be called!");
  return NS_ERROR_UNEXPECTED;
}
416
// CacheFile never listens for chunk updates directly, so this callback must
// never fire on it.
nsresult CacheFile::OnChunkUpdated(CacheFileChunk* aChunk) {
  MOZ_CRASH("CacheFile::OnChunkUpdated should not be called!");
  return NS_ERROR_UNEXPECTED;
}
421
// Asynchronous completion of CacheFileIOManager::OpenFile() requested in
// Init(). Depending on the outcome and the entry's state this either stores
// the handle, falls back to memory-only mode, notifies the Init() listener of
// failure, or kicks off the metadata read. A doom requested while the open
// was in flight (mDoomAfterOpenListener) is performed outside the lock by the
// AutoFailDoomListener helper.
nsresult CacheFile::OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) {
  // Using an 'auto' class to perform doom or fail the listener
  // outside the CacheFile's lock.
  class AutoFailDoomListener {
   public:
    explicit AutoFailDoomListener(CacheFileHandle* aHandle)
        : mHandle(aHandle), mAlreadyDoomed(false) {}
    ~AutoFailDoomListener() {
      if (!mListener) return;

      if (mHandle) {
        if (mAlreadyDoomed) {
          mListener->OnFileDoomed(mHandle, NS_OK);
        } else {
          CacheFileIOManager::DoomFile(mHandle, mListener);
        }
      } else {
        mListener->OnFileDoomed(nullptr, NS_ERROR_NOT_AVAILABLE);
      }
    }

    CacheFileHandle* mHandle;
    nsCOMPtr<CacheFileIOListener> mListener;
    bool mAlreadyDoomed;
  } autoDoom(aHandle);

  // Filled in under the lock; the listener is notified after the lock is
  // released.
  nsCOMPtr<CacheFileListener> listener;
  bool isNew = false;
  nsresult retval = NS_OK;

  {
    CacheFileAutoLock lock(this);

    MOZ_ASSERT(mOpeningFile);
    MOZ_ASSERT((NS_SUCCEEDED(aResult) && aHandle) ||
               (NS_FAILED(aResult) && !aHandle));
    MOZ_ASSERT((mListener && !mMetadata) ||  // !createNew
               (!mListener && mMetadata));   // createNew
    MOZ_ASSERT(!mMemoryOnly || mMetadata);  // memory-only was set on new entry

    LOG(("CacheFile::OnFileOpened() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
         this, static_cast<uint32_t>(aResult), aHandle));

    mOpeningFile = false;

    // Take over any doom request queued while the file was being opened.
    autoDoom.mListener.swap(mDoomAfterOpenListener);

    if (mMemoryOnly) {
      // We can be here only in case the entry was initialized as createNew
      // and SetMemoryOnly() was called.

      // Just don't store the handle into mHandle and exit
      autoDoom.mAlreadyDoomed = true;
      return NS_OK;
    }

    if (NS_FAILED(aResult)) {
      if (mMetadata) {
        // This entry was initialized as createNew, just switch to memory-only
        // mode.
        NS_WARNING("Forcing memory-only entry since OpenFile failed");
        LOG(
            ("CacheFile::OnFileOpened() - CacheFileIOManager::OpenFile() "
             "failed asynchronously. We can continue in memory-only mode since "
             "aCreateNew == true. [this=%p]",
             this));

        mMemoryOnly = true;
        return NS_OK;
      }

      if (aResult == NS_ERROR_FILE_INVALID_PATH) {
        // CacheFileIOManager doesn't have mCacheDirectory, switch to
        // memory-only mode.
        NS_WARNING(
            "Forcing memory-only entry since CacheFileIOManager doesn't "
            "have mCacheDirectory.");
        LOG(
            ("CacheFile::OnFileOpened() - CacheFileIOManager doesn't have "
             "mCacheDirectory, initializing entry as memory-only. [this=%p]",
             this));

        mMemoryOnly = true;
        mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
        mReady = true;
        mDataSize = mMetadata->Offset();

        isNew = true;
        retval = NS_OK;
      } else {
        // CacheFileIOManager::OpenFile() failed for another reason.
        isNew = false;
        retval = aResult;
      }

      mListener.swap(listener);
    } else {
      mHandle = aHandle;
      if (NS_FAILED(mStatus)) {
        // The entry failed while the open was pending; doom the fresh handle.
        CacheFileIOManager::DoomFile(mHandle, nullptr);
      }

      if (mMetadata) {
        InitIndexEntry();

        // The entry was initialized as createNew, don't try to read metadata.
        mMetadata->SetHandle(mHandle);

        // Write all cached chunks, otherwise they may stay unwritten.
        for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
          uint32_t idx = iter.Key();
          RefPtr<CacheFileChunk>& chunk = iter.Data();

          LOG(("CacheFile::OnFileOpened() - write [this=%p, idx=%u, chunk=%p]",
               this, idx, chunk.get()));

          mChunks.InsertOrUpdate(idx, RefPtr{chunk});
          chunk->mFile = this;
          chunk->mActiveChunk = true;

          MOZ_ASSERT(chunk->IsReady());

          // This would be cleaner if we had an nsRefPtr constructor that took
          // a RefPtr<Derived>.
          ReleaseOutsideLock(std::move(chunk));

          iter.Remove();
        }

        return NS_OK;
      }
    }
  }

  if (listener) {
    listener->OnFileReady(retval, isNew);
    return NS_OK;
  }

  // Existing entry opened successfully: read its metadata; the Init()
  // listener is notified from OnMetadataRead().
  MOZ_ASSERT(NS_SUCCEEDED(aResult));
  MOZ_ASSERT(!mMetadata);
  MOZ_ASSERT(mListener);

  mMetadata = new CacheFileMetadata(mHandle, mKey);
  mMetadata->ReadMetadata(this);
  return NS_OK;
}
569
// CacheFile never issues raw data writes itself (chunks do), so this IO
// callback must never fire on it.
nsresult CacheFile::OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
                                  nsresult aResult) {
  MOZ_CRASH("CacheFile::OnDataWritten should not be called!");
  return NS_ERROR_UNEXPECTED;
}
575
// CacheFile never issues raw data reads itself (chunks do), so this IO
// callback must never fire on it.
nsresult CacheFile::OnDataRead(CacheFileHandle* aHandle, char* aBuf,
                               nsresult aResult) {
  MOZ_CRASH("CacheFile::OnDataRead should not be called!");
  return NS_ERROR_UNEXPECTED;
}
581
// Completion of CacheFileMetadata::ReadMetadata() started in OnFileOpened().
// Validates alt-data info, marks the entry ready, preloads chunks, and
// finally notifies the listener from Init() via OnFileReady().
nsresult CacheFile::OnMetadataRead(nsresult aResult) {
  MOZ_ASSERT(mListener);

  LOG(("CacheFile::OnMetadataRead() [this=%p, rv=0x%08" PRIx32 "]", this,
       static_cast<uint32_t>(aResult)));

  bool isNew = false;
  if (NS_SUCCEEDED(aResult)) {
    mPinned = mMetadata->Pinned();
    mReady = true;
    mDataSize = mMetadata->Offset();
    if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
      // No data and no metadata elements: treat as a brand-new entry.
      isNew = true;
      mMetadata->MarkDirty();
    } else {
      const char* altData = mMetadata->GetElement(CacheFileUtils::kAltDataKey);
      if (altData && (NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo(
                         altData, &mAltDataOffset, &mAltDataType)) ||
                      (mAltDataOffset > mDataSize))) {
        // alt-metadata cannot be parsed or alt-data offset is invalid;
        // throw the entry's content away and start over as a new entry.
        mMetadata->InitEmptyMetadata();
        isNew = true;
        mAltDataOffset = -1;
        mAltDataType.Truncate();
        mDataSize = 0;
      } else {
        CacheFileAutoLock lock(this);
        PreloadChunks(0);
      }
    }

    InitIndexEntry();
  }

  // Swap the listener out so OnFileReady() runs without us holding a
  // reference slot that could be re-entered.
  nsCOMPtr<CacheFileListener> listener;
  mListener.swap(listener);
  listener->OnFileReady(aResult, isNew);
  return NS_OK;
}
621
// Completion of an asynchronous metadata write. If the entry is otherwise
// idle (no streams, no active chunks) and nothing became dirty again, the
// NSPR file handle is released.
nsresult CacheFile::OnMetadataWritten(nsresult aResult) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::OnMetadataWritten() [this=%p, rv=0x%08" PRIx32 "]", this,
       static_cast<uint32_t>(aResult)));

  MOZ_ASSERT(mWritingMetadata);
  mWritingMetadata = false;

  MOZ_ASSERT(!mMemoryOnly);
  MOZ_ASSERT(!mOpeningFile);

  if (NS_WARN_IF(NS_FAILED(aResult))) {
    // TODO close streams with an error ???
    SetError(aResult);
  }

  // Still in use: keep the handle open.
  if (mOutput || mInputs.Length() || mChunks.Count()) return NS_OK;

  // Metadata may have been dirtied again while this write was in flight.
  if (IsDirty()) WriteMetadataIfNeededLocked();

  if (!mWritingMetadata) {
    LOG(("CacheFile::OnMetadataWritten() - Releasing file handle [this=%p]",
         this));
    CacheFileIOManager::ReleaseNSPRHandle(mHandle);
  }

  return NS_OK;
}
651
// Completion of a doom operation on the file handle. The stored listener is
// swapped out under the lock and notified after the lock is released to
// avoid re-entrancy while holding it.
nsresult CacheFile::OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) {
  nsCOMPtr<CacheFileListener> listener;

  {
    CacheFileAutoLock lock(this);

    MOZ_ASSERT(mListener);

    LOG(("CacheFile::OnFileDoomed() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
         this, static_cast<uint32_t>(aResult), aHandle));

    mListener.swap(listener);
  }

  listener->OnFileDoomed(aResult);
  return NS_OK;
}
669
// CacheFile never requests EOF truncation through this listener interface,
// so this callback must never fire on it.
nsresult CacheFile::OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) {
  MOZ_CRASH("CacheFile::OnEOFSet should not be called!");
  return NS_ERROR_UNEXPECTED;
}
674
// CacheFile never requests file renames, so this callback must never fire.
nsresult CacheFile::OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) {
  MOZ_CRASH("CacheFile::OnFileRenamed should not be called!");
  return NS_ERROR_UNEXPECTED;
}
679
IsKilled()680 bool CacheFile::IsKilled() {
681 bool killed = mKill;
682 if (killed) {
683 LOG(("CacheFile is killed, this=%p", this));
684 }
685
686 return killed;
687 }
688
// Creates a new input stream for reading the entry's main data.
//
// aEntryHandle - cache entry handle kept alive by the stream.
// _retval      - receives an addref'd CacheFileInputStream.
//
// Fails with NS_ERROR_NOT_AVAILABLE when the entry is not ready yet, and
// with the stored error when the entry is in a failed state.
nsresult CacheFile::OpenInputStream(nsICacheEntry* aEntryHandle,
                                    nsIInputStream** _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);

  if (!mReady) {
    LOG(("CacheFile::OpenInputStream() - CacheFile is not ready [this=%p]",
         this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  if (NS_FAILED(mStatus)) {
    LOG(
        ("CacheFile::OpenInputStream() - CacheFile is in a failure state "
         "[this=%p, status=0x%08" PRIx32 "]",
         this, static_cast<uint32_t>(mStatus)));

    // Don't allow opening the input stream when this CacheFile is in
    // a failed state. This is the only way to protect consumers correctly
    // from reading a broken entry. When the file is in the failed state,
    // it's also doomed, so reopening the entry won't make any difference -
    // the data will still be inaccessible. Note that for just doomed
    // files, we must allow reading the data.
    return mStatus;
  }

  // Once we open input stream we no longer allow preloading of chunks without
  // input stream, i.e. we will no longer keep first few chunks preloaded when
  // the last input stream is closed.
  mPreloadWithoutInputStreams = false;

  CacheFileInputStream* input =
      new CacheFileInputStream(this, aEntryHandle, false);
  LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]",
       input, this));

  // mInputs stores a raw pointer, so keep an explicit reference for it.
  mInputs.AppendElement(input);
  NS_ADDREF(input);

  mDataAccessed = true;
  *_retval = do_AddRef(input).take();
  return NS_OK;
}
734
// Creates a new input stream for reading the entry's alternative data.
//
// aEntryHandle - cache entry handle kept alive by the stream.
// aAltDataType - required alt-data type; must match the stored type exactly.
// _retval      - receives an addref'd CacheFileInputStream positioned on the
//                alt-data.
//
// Fails with NS_ERROR_NOT_AVAILABLE when the entry is not ready, when no
// alt-data exists, or when the stored alt-data type differs; fails with the
// stored error when the entry is in a failed state.
nsresult CacheFile::OpenAlternativeInputStream(nsICacheEntry* aEntryHandle,
                                               const char* aAltDataType,
                                               nsIInputStream** _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);

  if (NS_WARN_IF(!mReady)) {
    LOG(
        ("CacheFile::OpenAlternativeInputStream() - CacheFile is not ready "
         "[this=%p]",
         this));
    return NS_ERROR_NOT_AVAILABLE;
  }

  // mAltDataOffset == -1 means the entry carries no alternative data.
  if (mAltDataOffset == -1) {
    LOG(
        ("CacheFile::OpenAlternativeInputStream() - Alternative data is not "
         "available [this=%p]",
         this));
    return NS_ERROR_NOT_AVAILABLE;
  }

  if (NS_FAILED(mStatus)) {
    LOG(
        ("CacheFile::OpenAlternativeInputStream() - CacheFile is in a failure "
         "state [this=%p, status=0x%08" PRIx32 "]",
         this, static_cast<uint32_t>(mStatus)));

    // Don't allow opening the input stream when this CacheFile is in
    // a failed state. This is the only way to protect consumers correctly
    // from reading a broken entry. When the file is in the failed state,
    // it's also doomed, so reopening the entry won't make any difference -
    // the data will still be inaccessible. Note that for just doomed
    // files, we must allow reading the data.
    return mStatus;
  }

  if (mAltDataType != aAltDataType) {
    LOG(
        ("CacheFile::OpenAlternativeInputStream() - Alternative data is of a "
         "different type than requested [this=%p, availableType=%s, "
         "requestedType=%s]",
         this, mAltDataType.get(), aAltDataType));
    return NS_ERROR_NOT_AVAILABLE;
  }

  // Once we open input stream we no longer allow preloading of chunks without
  // input stream, i.e. we will no longer keep first few chunks preloaded when
  // the last input stream is closed.
  mPreloadWithoutInputStreams = false;

  CacheFileInputStream* input =
      new CacheFileInputStream(this, aEntryHandle, true);

  LOG(
      ("CacheFile::OpenAlternativeInputStream() - Creating new input stream %p "
       "[this=%p]",
       input, this));

  // mInputs stores a raw pointer, so keep an explicit reference for it.
  mInputs.AppendElement(input);
  NS_ADDREF(input);

  mDataAccessed = true;
  *_retval = do_AddRef(input).take();

  return NS_OK;
}
803
// Creates the single output stream for writing the entry's main data.
// Any existing alternative data is truncated away first, since writing new
// main data invalidates it.
//
// aCloseListener - notified when the output stream is closed.
// _retval        - receives an addref'd CacheFileOutputStream.
//
// Fails with NS_ERROR_NOT_AVAILABLE when the entry is not ready, when an
// output stream already exists, or when an alt-data input stream is open;
// fails with the stored error when the entry is in a failed state.
nsresult CacheFile::OpenOutputStream(CacheOutputCloseListener* aCloseListener,
                                     nsIOutputStream** _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);

  nsresult rv;

  if (!mReady) {
    LOG(("CacheFile::OpenOutputStream() - CacheFile is not ready [this=%p]",
         this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  // Only one output stream may exist at a time.
  if (mOutput) {
    LOG(
        ("CacheFile::OpenOutputStream() - We already have output stream %p "
         "[this=%p]",
         mOutput, this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  if (NS_FAILED(mStatus)) {
    LOG(
        ("CacheFile::OpenOutputStream() - CacheFile is in a failure state "
         "[this=%p, status=0x%08" PRIx32 "]",
         this, static_cast<uint32_t>(mStatus)));

    // The CacheFile is already doomed. It makes no sense to allow writing any
    // data to such an entry.
    return mStatus;
  }

  // Fail if there is any input stream opened for alternative data
  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    if (mInputs[i]->IsAlternativeData()) {
      return NS_ERROR_NOT_AVAILABLE;
    }
  }

  if (mAltDataOffset != -1) {
    // Remove alt-data
    rv = Truncate(mAltDataOffset);
    if (NS_FAILED(rv)) {
      LOG(
          ("CacheFile::OpenOutputStream() - Truncating alt-data failed "
           "[rv=0x%08" PRIx32 "]",
           static_cast<uint32_t>(rv)));
      return rv;
    }
    SetAltMetadata(nullptr);
    mAltDataOffset = -1;
    mAltDataType.Truncate();
  }

  // Once we open output stream we no longer allow preloading of chunks without
  // input stream. There is no reason to believe that some input stream will be
  // opened soon. Otherwise we would cache unused chunks of all newly created
  // entries until the CacheFile is destroyed.
  mPreloadWithoutInputStreams = false;

  mOutput = new CacheFileOutputStream(this, aCloseListener, false);

  LOG(
      ("CacheFile::OpenOutputStream() - Creating new output stream %p "
       "[this=%p]",
       mOutput, this));

  mDataAccessed = true;
  *_retval = do_AddRef(mOutput).take();
  return NS_OK;
}
878
// Creates the single output stream for writing alternative data of the given
// type. Existing alt-data (if any) is truncated first; otherwise alt-data is
// appended starting at the current end of the main data.
//
// aCloseListener - notified when the output stream is closed.
// aAltDataType   - type tag stored in the entry's alt-data metadata.
// _retval        - receives an addref'd CacheFileOutputStream.
//
// Fails with NS_ERROR_NOT_AVAILABLE when the entry is not ready, when an
// output stream already exists, or when an alt-data input stream is open;
// fails with the stored error when the entry is in a failed state.
nsresult CacheFile::OpenAlternativeOutputStream(
    CacheOutputCloseListener* aCloseListener, const char* aAltDataType,
    nsIAsyncOutputStream** _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);

  if (!mReady) {
    LOG(
        ("CacheFile::OpenAlternativeOutputStream() - CacheFile is not ready "
         "[this=%p]",
         this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  // Only one output stream may exist at a time.
  if (mOutput) {
    LOG(
        ("CacheFile::OpenAlternativeOutputStream() - We already have output "
         "stream %p [this=%p]",
         mOutput, this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  if (NS_FAILED(mStatus)) {
    LOG(
        ("CacheFile::OpenAlternativeOutputStream() - CacheFile is in a failure "
         "state [this=%p, status=0x%08" PRIx32 "]",
         this, static_cast<uint32_t>(mStatus)));

    // The CacheFile is already doomed. It makes no sense to allow writing any
    // data to such an entry.
    return mStatus;
  }

  // Fail if there is any input stream opened for alternative data
  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    if (mInputs[i]->IsAlternativeData()) {
      return NS_ERROR_NOT_AVAILABLE;
    }
  }

  nsresult rv;

  if (mAltDataOffset != -1) {
    // Truncate old alt-data
    rv = Truncate(mAltDataOffset);
    if (NS_FAILED(rv)) {
      LOG(
          ("CacheFile::OpenAlternativeOutputStream() - Truncating old alt-data "
           "failed [rv=0x%08" PRIx32 "]",
           static_cast<uint32_t>(rv)));
      return rv;
    }
  } else {
    // No previous alt-data: it starts right after the main data.
    mAltDataOffset = mDataSize;
  }

  nsAutoCString altMetadata;
  CacheFileUtils::BuildAlternativeDataInfo(aAltDataType, mAltDataOffset,
                                           altMetadata);
  rv = SetAltMetadata(altMetadata.get());
  if (NS_FAILED(rv)) {
    LOG(
        ("CacheFile::OpenAlternativeOutputStream() - Set Metadata for alt-data"
         "failed [rv=0x%08" PRIx32 "]",
         static_cast<uint32_t>(rv)));
    return rv;
  }

  // Once we open output stream we no longer allow preloading of chunks without
  // input stream. There is no reason to believe that some input stream will be
  // opened soon. Otherwise we would cache unused chunks of all newly created
  // entries until the CacheFile is destroyed.
  mPreloadWithoutInputStreams = false;

  mOutput = new CacheFileOutputStream(this, aCloseListener, true);

  LOG(
      ("CacheFile::OpenAlternativeOutputStream() - Creating new output stream "
       "%p [this=%p]",
       mOutput, this));

  mDataAccessed = true;
  mAltDataType = aAltDataType;
  *_retval = do_AddRef(mOutput).take();
  return NS_OK;
}
968
// Switches the entry to memory-only mode. Only allowed before any data was
// accessed (no streams opened); a no-op when already memory-only.
nsresult CacheFile::SetMemoryOnly() {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetMemoryOnly() mMemoryOnly=%d [this=%p]", mMemoryOnly,
       this));

  if (mMemoryOnly) return NS_OK;

  MOZ_ASSERT(mReady);

  if (!mReady) {
    LOG(("CacheFile::SetMemoryOnly() - CacheFile is not ready [this=%p]",
         this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  if (mDataAccessed) {
    LOG(("CacheFile::SetMemoryOnly() - Data was already accessed [this=%p]",
         this));
    return NS_ERROR_NOT_AVAILABLE;
  }

  // TODO what to do when this isn't a new entry and has an existing metadata???
  mMemoryOnly = true;
  return NS_OK;
}
996
// Public entry point for dooming this entry: takes the lock and delegates to
// DoomLocked(). aCallback (may be null) is notified via OnFileDoomed().
nsresult CacheFile::Doom(CacheFileListener* aCallback) {
  LOG(("CacheFile::Doom() [this=%p, listener=%p]", this, aCallback));

  CacheFileAutoLock lock(this);

  return DoomLocked(aCallback);
}
1004
// Dooms the entry while the caller already holds the CacheFile lock.
// Memory-only and already-doomed entries report NS_ERROR_FILE_NOT_FOUND.
// When the file is still being opened, the doom request is stored in
// mDoomAfterOpenListener and performed from OnFileOpened().
nsresult CacheFile::DoomLocked(CacheFileListener* aCallback) {
  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);

  LOG(("CacheFile::DoomLocked() [this=%p, listener=%p]", this, aCallback));

  nsresult rv = NS_OK;

  if (mMemoryOnly) {
    return NS_ERROR_FILE_NOT_FOUND;
  }

  if (mHandle && mHandle->IsDoomed()) {
    return NS_ERROR_FILE_NOT_FOUND;
  }

  // A helper listener is needed when the caller wants a callback, or when the
  // doom must be deferred until the file open finishes.
  nsCOMPtr<CacheFileIOListener> listener;
  if (aCallback || !mHandle) {
    listener = new DoomFileHelper(aCallback);
  }
  if (mHandle) {
    rv = CacheFileIOManager::DoomFile(mHandle, listener);
  } else if (mOpeningFile) {
    mDoomAfterOpenListener = listener;
  }

  return rv;
}
1032
// Releases cached (unused) chunks to reduce memory footprint. Refuses for
// memory-only entries (the chunks ARE the data) and while the file is still
// being opened; otherwise drops every cached chunk that isn't needed for
// preloading (see CleanUpCachedChunks()).
nsresult CacheFile::ThrowMemoryCachedData() {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::ThrowMemoryCachedData() [this=%p]", this));

  if (mMemoryOnly) {
    // This method should not be called when the CacheFile was initialized as
    // memory-only, but it can be called when CacheFile end up as memory-only
    // due to e.g. IO failure since CacheEntry doesn't know it.
    LOG(
        ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
         "entry is memory-only. [this=%p]",
         this));

    return NS_ERROR_NOT_AVAILABLE;
  }

  if (mOpeningFile) {
    // mayhemer, note: we shouldn't get here, since CacheEntry prevents loading
    // entries from being purged.

    LOG(
        ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
         "entry is still opening the file [this=%p]",
         this));

    return NS_ERROR_ABORT;
  }

  // We cannot release all cached chunks since we need to keep preloaded chunks
  // in memory. See initialization of mPreloadChunkCount for explanation.
  CleanUpCachedChunks();

  return NS_OK;
}
1068
GetElement(const char * aKey,char ** _retval)1069 nsresult CacheFile::GetElement(const char* aKey, char** _retval) {
1070 CacheFileAutoLock lock(this);
1071 MOZ_ASSERT(mMetadata);
1072 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1073
1074 const char* value;
1075 value = mMetadata->GetElement(aKey);
1076 if (!value) return NS_ERROR_NOT_AVAILABLE;
1077
1078 *_retval = NS_xstrdup(value);
1079 return NS_OK;
1080 }
1081
// Stores (or with a null aValue, removes) a metadata key/value pair and
// schedules a metadata write. The alt-data key is reserved for internal use
// (see SetAltMetadata()) and is rejected here.
nsresult CacheFile::SetElement(const char* aKey, const char* aValue) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetElement() this=%p", this));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  if (!strcmp(aKey, CacheFileUtils::kAltDataKey)) {
    NS_ERROR(
        "alt-data element is reserved for internal use and must not be "
        "changed via CacheFile::SetElement()");
    return NS_ERROR_FAILURE;
  }

  // Schedule writing the updated metadata to disk.
  PostWriteTimer();
  return mMetadata->SetElement(aKey, aValue);
}
1100
// Invokes aVisitor for the metadata elements (enumeration implemented by
// CacheFileMetadata::Visit). Entry must be ready.
nsresult CacheFile::VisitMetaData(nsICacheEntryMetaDataVisitor* aVisitor) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  MOZ_ASSERT(mReady);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  mMetadata->Visit(aVisitor);
  return NS_OK;
}
1110
ElementsSize(uint32_t * _retval)1111 nsresult CacheFile::ElementsSize(uint32_t* _retval) {
1112 CacheFileAutoLock lock(this);
1113
1114 if (!mMetadata) return NS_ERROR_NOT_AVAILABLE;
1115
1116 *_retval = mMetadata->ElementsSize();
1117 return NS_OK;
1118 }
1119
// Stores the expiration time in the metadata and schedules a metadata write.
nsresult CacheFile::SetExpirationTime(uint32_t aExpirationTime) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetExpirationTime() this=%p, expiration=%u", this,
       aExpirationTime));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();
  mMetadata->SetExpirationTime(aExpirationTime);
  return NS_OK;
}
1133
// Returns the expiration time stored in the metadata.
nsresult CacheFile::GetExpirationTime(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  *_retval = mMetadata->GetExpirationTime();
  return NS_OK;
}
1142
// Stores the frecency both in the metadata and, for live disk-backed
// entries, in the cache index entry; schedules a metadata write.
nsresult CacheFile::SetFrecency(uint32_t aFrecency) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetFrecency() this=%p, frecency=%u", this, aFrecency));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();

  // Keep the cache index in sync (skip doomed handles — their index entry is
  // going away).
  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(mHandle, &aFrecency, nullptr, nullptr,
                                         nullptr, nullptr);
  }

  mMetadata->SetFrecency(aFrecency);
  return NS_OK;
}
1161
// Returns the frecency value stored in the metadata.
nsresult CacheFile::GetFrecency(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
  *_retval = mMetadata->GetFrecency();
  return NS_OK;
}
1169
SetNetworkTimes(uint64_t aOnStartTime,uint64_t aOnStopTime)1170 nsresult CacheFile::SetNetworkTimes(uint64_t aOnStartTime,
1171 uint64_t aOnStopTime) {
1172 CacheFileAutoLock lock(this);
1173
1174 LOG(("CacheFile::SetNetworkTimes() this=%p, aOnStartTime=%" PRIu64
1175 ", aOnStopTime=%" PRIu64 "",
1176 this, aOnStartTime, aOnStopTime));
1177
1178 MOZ_ASSERT(mMetadata);
1179 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1180
1181 PostWriteTimer();
1182
1183 nsAutoCString onStartTime;
1184 onStartTime.AppendInt(aOnStartTime);
1185 nsresult rv =
1186 mMetadata->SetElement("net-response-time-onstart", onStartTime.get());
1187 if (NS_WARN_IF(NS_FAILED(rv))) {
1188 return rv;
1189 }
1190
1191 nsAutoCString onStopTime;
1192 onStopTime.AppendInt(aOnStopTime);
1193 rv = mMetadata->SetElement("net-response-time-onstop", onStopTime.get());
1194 if (NS_WARN_IF(NS_FAILED(rv))) {
1195 return rv;
1196 }
1197
1198 uint16_t onStartTime16 = aOnStartTime <= kIndexTimeOutOfBound
1199 ? aOnStartTime
1200 : kIndexTimeOutOfBound;
1201 uint16_t onStopTime16 =
1202 aOnStopTime <= kIndexTimeOutOfBound ? aOnStopTime : kIndexTimeOutOfBound;
1203
1204 if (mHandle && !mHandle->IsDoomed()) {
1205 CacheFileIOManager::UpdateIndexEntry(
1206 mHandle, nullptr, nullptr, &onStartTime16, &onStopTime16, nullptr);
1207 }
1208 return NS_OK;
1209 }
1210
// Parses the "net-response-time-onstart" metadata element written by
// SetNetworkTimes(). Returns NS_ERROR_NOT_AVAILABLE when the element is
// missing.
nsresult CacheFile::GetOnStartTime(uint64_t* _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mMetadata);
  const char* onStartTimeStr =
      mMetadata->GetElement("net-response-time-onstart");
  if (!onStartTimeStr) {
    return NS_ERROR_NOT_AVAILABLE;
  }
  nsresult rv;
  // NOTE(review): a parse failure is only asserted in debug builds; release
  // builds return NS_OK regardless — presumably the element is always written
  // by SetNetworkTimes() and thus well-formed. Confirm before relying on it.
  *_retval = nsDependentCString(onStartTimeStr).ToInteger64(&rv);
  MOZ_ASSERT(NS_SUCCEEDED(rv));
  return NS_OK;
}
1225
// Parses the "net-response-time-onstop" metadata element written by
// SetNetworkTimes(). Returns NS_ERROR_NOT_AVAILABLE when the element is
// missing.
nsresult CacheFile::GetOnStopTime(uint64_t* _retval) {
  CacheFileAutoLock lock(this);

  MOZ_ASSERT(mMetadata);
  const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
  if (!onStopTimeStr) {
    return NS_ERROR_NOT_AVAILABLE;
  }
  nsresult rv;
  // NOTE(review): parse failure only asserted in debug builds; release builds
  // return NS_OK regardless (see GetOnStartTime()).
  *_retval = nsDependentCString(onStopTimeStr).ToInteger64(&rv);
  MOZ_ASSERT(NS_SUCCEEDED(rv));
  return NS_OK;
}
1239
SetContentType(uint8_t aContentType)1240 nsresult CacheFile::SetContentType(uint8_t aContentType) {
1241 CacheFileAutoLock lock(this);
1242
1243 LOG(("CacheFile::SetContentType() this=%p, contentType=%u", this,
1244 aContentType));
1245
1246 MOZ_ASSERT(mMetadata);
1247 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1248
1249 PostWriteTimer();
1250
1251 // Save the content type to metadata for case we need to rebuild the index.
1252 nsAutoCString contentType;
1253 contentType.AppendInt(aContentType);
1254 nsresult rv = mMetadata->SetElement("ctid", contentType.get());
1255 if (NS_WARN_IF(NS_FAILED(rv))) {
1256 return rv;
1257 }
1258
1259 if (mHandle && !mHandle->IsDoomed()) {
1260 CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, nullptr,
1261 nullptr, &aContentType);
1262 }
1263 return NS_OK;
1264 }
1265
// Lock must be held (asserted). Stores aAltMetadata under the reserved
// alt-data metadata key; a null aAltMetadata removes it. When storing fails,
// the element is removed and the alt-data state (offset, type) is reset so
// the entry stays consistent; the index entry's hasAltData flag is updated
// accordingly either way.
nsresult CacheFile::SetAltMetadata(const char* aAltMetadata) {
  AssertOwnsLock();
  LOG(("CacheFile::SetAltMetadata() this=%p, aAltMetadata=%s", this,
       aAltMetadata ? aAltMetadata : ""));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();

  nsresult rv =
      mMetadata->SetElement(CacheFileUtils::kAltDataKey, aAltMetadata);

  bool hasAltData = !!aAltMetadata;

  if (NS_FAILED(rv)) {
    // Removing element shouldn't fail because it doesn't allocate memory.
    mMetadata->SetElement(CacheFileUtils::kAltDataKey, nullptr);

    mAltDataOffset = -1;
    mAltDataType.Truncate();
    hasAltData = false;
  }

  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, &hasAltData, nullptr,
                                         nullptr, nullptr);
  }
  return rv;
}
1296
// Returns the last-modified time stored in the metadata.
nsresult CacheFile::GetLastModified(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  *_retval = mMetadata->GetLastModified();
  return NS_OK;
}
1305
// Returns the last-fetched time stored in the metadata.
nsresult CacheFile::GetLastFetched(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  *_retval = mMetadata->GetLastFetched();
  return NS_OK;
}
1314
// Returns the fetch count stored in the metadata.
nsresult CacheFile::GetFetchCount(uint32_t* _retval) {
  CacheFileAutoLock lock(this);
  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
  *_retval = mMetadata->GetFetchCount();
  return NS_OK;
}
1322
// Reports the on-disk size (in kB) of the entry, or NS_ERROR_NOT_AVAILABLE
// when there is no file handle (memory-only or still opening).
// NOTE(review): unlike the sibling getters this reads mHandle without taking
// the CacheFileAutoLock — confirm callers guarantee this is safe.
nsresult CacheFile::GetDiskStorageSizeInKB(uint32_t* aDiskStorageSize) {
  if (!mHandle) {
    return NS_ERROR_NOT_AVAILABLE;
  }

  *aDiskStorageSize = mHandle->FileSizeInK();
  return NS_OK;
}
1331
// Notifies the metadata that the entry was fetched (updates fetch
// bookkeeping) and schedules a metadata write.
nsresult CacheFile::OnFetched() {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::OnFetched() this=%p", this));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();

  mMetadata->OnFetched();
  return NS_OK;
}
1345
// Acquires the CacheFile lock; pair with Unlock() (normally via
// CacheFileAutoLock).
void CacheFile::Lock() { mLock.Lock(); }
1347
// Releases the CacheFile lock. Objects queued via ReleaseOutsideLock() are
// moved into a local array first, so their final release happens when the
// local goes out of scope — i.e. strictly after mLock is dropped — which
// avoids lock reentrancy during destruction.
void CacheFile::Unlock() {
  // move the elements out of mObjsToRelease
  // so that they can be released after we unlock
  nsTArray<RefPtr<nsISupports>> objs = std::move(mObjsToRelease);

  mLock.Unlock();
}
1355
// Debug aid: asserts the calling thread holds mLock.
void CacheFile::AssertOwnsLock() const { mLock.AssertCurrentThreadOwns(); }
1357
// Lock must be held. Defers the final release of aObject until the lock is
// dropped (see Unlock()), preventing reentrancy from destructors.
void CacheFile::ReleaseOutsideLock(RefPtr<nsISupports> aObject) {
  AssertOwnsLock();

  mObjsToRelease.AppendElement(std::move(aObject));
}
1363
// Lock must be held. Returns (or schedules the load of) the chunk aIndex.
// Caller roles:
//  - READER: aCallback required; gets the chunk via _retval when it's ready,
//    otherwise the callback is queued. Also triggers preloading of following
//    chunks for disk-backed entries.
//  - WRITER: no callback; always gets a chunk synchronously, creating it and
//    padding any gap between current EOF and the requested index.
//  - PRELOADER: no callback; only starts an async read, never returns a
//    chunk.
nsresult CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
                                   CacheFileChunkListener* aCallback,
                                   CacheFileChunk** _retval) {
  AssertOwnsLock();

  LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%u, caller=%d, listener=%p]",
       this, aIndex, aCaller, aCallback));

  MOZ_ASSERT(mReady);
  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
  MOZ_ASSERT((aCaller == READER && aCallback) ||
             (aCaller == WRITER && !aCallback) ||
             (aCaller == PRELOADER && !aCallback));

  // Preload chunks from disk when this is disk backed entry and the listener
  // is reader.
  bool preload = !mMemoryOnly && (aCaller == READER);

  nsresult rv;

  // Case 1: the chunk is active (in mChunks) — possibly still loading.
  RefPtr<CacheFileChunk> chunk;
  if (mChunks.Get(aIndex, getter_AddRefs(chunk))) {
    LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
         chunk.get(), this));

    // Preloader calls this method to preload only non-loaded chunks.
    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");

    // We might get failed chunk between releasing the lock in
    // CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read
    rv = chunk->GetStatus();
    if (NS_FAILED(rv)) {
      SetError(rv);
      LOG(
          ("CacheFile::GetChunkLocked() - Found failed chunk in mChunks "
           "[this=%p]",
           this));
      return rv;
    }

    if (chunk->IsReady() || aCaller == WRITER) {
      chunk.swap(*_retval);
    } else {
      // Reader must wait until the chunk finishes loading.
      QueueChunkListener(aIndex, aCallback);
    }

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }

  // Case 2: the chunk is cached (unused but kept in memory) — reactivate it.
  if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) {
    LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]",
         chunk.get(), this));

    // Preloader calls this method to preload only non-loaded chunks.
    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");

    mChunks.InsertOrUpdate(aIndex, RefPtr{chunk});
    mCachedChunks.Remove(aIndex);
    chunk->mFile = this;
    chunk->mActiveChunk = true;

    MOZ_ASSERT(chunk->IsReady());

    chunk.swap(*_retval);

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }

  int64_t off = aIndex * static_cast<int64_t>(kChunkSize);

  // Case 3: the chunk lies within existing data — read it from disk.
  if (off < mDataSize) {
    // We cannot be here if this is memory only entry since the chunk must exist
    MOZ_ASSERT(!mMemoryOnly);
    if (mMemoryOnly) {
      // If this ever really happen it is better to fail rather than crashing on
      // a null handle.
      LOG(
          ("CacheFile::GetChunkLocked() - Unexpected state! Offset < mDataSize "
           "for memory-only entry. [this=%p, off=%" PRId64
           ", mDataSize=%" PRId64 "]",
           this, off, mDataSize));

      return NS_ERROR_UNEXPECTED;
    }

    chunk = new CacheFileChunk(this, aIndex, aCaller == WRITER);
    mChunks.InsertOrUpdate(aIndex, RefPtr{chunk});
    chunk->mActiveChunk = true;

    LOG(
        ("CacheFile::GetChunkLocked() - Reading newly created chunk %p from "
         "the disk [this=%p]",
         chunk.get(), this));

    // Read the chunk from the disk
    rv = chunk->Read(mHandle,
                     std::min(static_cast<uint32_t>(mDataSize - off),
                              static_cast<uint32_t>(kChunkSize)),
                     mMetadata->GetHash(aIndex), this);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      RemoveChunkInternal(chunk, false);
      return rv;
    }

    if (aCaller == WRITER) {
      chunk.swap(*_retval);
    } else if (aCaller != PRELOADER) {
      QueueChunkListener(aIndex, aCallback);
    }

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }
  // Case 4: the chunk starts exactly at EOF — a writer gets a fresh chunk.
  if (off == mDataSize) {
    if (aCaller == WRITER) {
      // this listener is going to write to the chunk
      chunk = new CacheFileChunk(this, aIndex, true);
      mChunks.InsertOrUpdate(aIndex, RefPtr{chunk});
      chunk->mActiveChunk = true;

      LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p [this=%p]",
           chunk.get(), this));

      chunk->InitNew();
      mMetadata->SetHash(aIndex, chunk->Hash());

      if (HaveChunkListeners(aIndex)) {
        rv = NotifyChunkListeners(aIndex, NS_OK, chunk);
        NS_ENSURE_SUCCESS(rv, rv);
      }

      chunk.swap(*_retval);
      return NS_OK;
    }
  } else {
    // Case 5: the chunk is beyond EOF — a writer must fill the gap first.
    if (aCaller == WRITER) {
      // this chunk was requested by writer, but we need to fill the gap first

      // Fill with zero the last chunk if it is incomplete
      if (mDataSize % kChunkSize) {
        rv = PadChunkWithZeroes(mDataSize / kChunkSize);
        NS_ENSURE_SUCCESS(rv, rv);

        MOZ_ASSERT(!(mDataSize % kChunkSize));
      }

      uint32_t startChunk = mDataSize / kChunkSize;

      if (mMemoryOnly) {
        // We need to create all missing CacheFileChunks if this is memory-only
        // entry
        for (uint32_t i = startChunk; i < aIndex; i++) {
          rv = PadChunkWithZeroes(i);
          NS_ENSURE_SUCCESS(rv, rv);
        }
      } else {
        // We don't need to create CacheFileChunk for other empty chunks unless
        // there is some input stream waiting for this chunk.

        if (startChunk != aIndex) {
          // Make sure the file contains zeroes at the end of the file
          rv = CacheFileIOManager::TruncateSeekSetEOF(
              mHandle, startChunk * kChunkSize, aIndex * kChunkSize, nullptr);
          NS_ENSURE_SUCCESS(rv, rv);
        }

        for (uint32_t i = startChunk; i < aIndex; i++) {
          if (HaveChunkListeners(i)) {
            rv = PadChunkWithZeroes(i);
            NS_ENSURE_SUCCESS(rv, rv);
          } else {
            mMetadata->SetHash(i, kEmptyChunkHash);
            mDataSize = (i + 1) * kChunkSize;
          }
        }
      }

      MOZ_ASSERT(mDataSize == off);
      // The gap is filled; retry as the "off == mDataSize" writer case.
      rv = GetChunkLocked(aIndex, WRITER, nullptr, getter_AddRefs(chunk));
      NS_ENSURE_SUCCESS(rv, rv);

      chunk.swap(*_retval);
      return NS_OK;
    }
  }

  // We can be here only if the caller is reader since writer always create a
  // new chunk above and preloader calls this method to preload only chunks that
  // are not loaded but that do exist.
  MOZ_ASSERT(aCaller == READER, "Unexpected!");

  if (mOutput) {
    // the chunk doesn't exist but mOutput may create it
    QueueChunkListener(aIndex, aCallback);
  } else {
    return NS_ERROR_NOT_AVAILABLE;
  }

  return NS_OK;
}
1575
PreloadChunks(uint32_t aIndex)1576 void CacheFile::PreloadChunks(uint32_t aIndex) {
1577 AssertOwnsLock();
1578
1579 uint32_t limit = aIndex + mPreloadChunkCount;
1580
1581 for (uint32_t i = aIndex; i < limit; ++i) {
1582 int64_t off = i * static_cast<int64_t>(kChunkSize);
1583
1584 if (off >= mDataSize) {
1585 // This chunk is beyond EOF.
1586 return;
1587 }
1588
1589 if (mChunks.GetWeak(i) || mCachedChunks.GetWeak(i)) {
1590 // This chunk is already in memory or is being read right now.
1591 continue;
1592 }
1593
1594 LOG(("CacheFile::PreloadChunks() - Preloading chunk [this=%p, idx=%u]",
1595 this, i));
1596
1597 RefPtr<CacheFileChunk> chunk;
1598 GetChunkLocked(i, PRELOADER, nullptr, getter_AddRefs(chunk));
1599 // We've checked that we don't have this chunk, so no chunk must be
1600 // returned.
1601 MOZ_ASSERT(!chunk);
1602 }
1603 }
1604
// Lock must be held. Decides whether an unused chunk should be kept in
// mCachedChunks rather than released (see the CACHE_CHUNKS comment at the top
// of this file for the overall policy).
bool CacheFile::ShouldCacheChunk(uint32_t aIndex) {
  AssertOwnsLock();

#ifdef CACHE_CHUNKS
  // We cache all chunks.
  return true;
#else

  if (mPreloadChunkCount != 0 && mInputs.Length() == 0 &&
      mPreloadWithoutInputStreams && aIndex < mPreloadChunkCount) {
    // We don't have any input stream yet, but it is likely that some will be
    // opened soon. Keep first mPreloadChunkCount chunks in memory. The
    // condition is here instead of in MustKeepCachedChunk() since these
    // chunks should be preloaded and can be kept in memory as an optimization,
    // but they can be released at any time until they are considered as
    // preloaded chunks for any input stream.
    return true;
  }

  // Cache only chunks that we really need to keep.
  return MustKeepCachedChunk(aIndex);
#endif
}
1628
MustKeepCachedChunk(uint32_t aIndex)1629 bool CacheFile::MustKeepCachedChunk(uint32_t aIndex) {
1630 AssertOwnsLock();
1631
1632 // We must keep the chunk when this is memory only entry or we don't have
1633 // a handle yet.
1634 if (mMemoryOnly || mOpeningFile) {
1635 return true;
1636 }
1637
1638 if (mPreloadChunkCount == 0) {
1639 // Preloading of chunks is disabled
1640 return false;
1641 }
1642
1643 // Check whether this chunk should be considered as preloaded chunk for any
1644 // existing input stream.
1645
1646 // maxPos is the position of the last byte in the given chunk
1647 int64_t maxPos = static_cast<int64_t>(aIndex + 1) * kChunkSize - 1;
1648
1649 // minPos is the position of the first byte in a chunk that precedes the given
1650 // chunk by mPreloadChunkCount chunks
1651 int64_t minPos;
1652 if (mPreloadChunkCount >= aIndex) {
1653 minPos = 0;
1654 } else {
1655 minPos = static_cast<int64_t>(aIndex - mPreloadChunkCount) * kChunkSize;
1656 }
1657
1658 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
1659 int64_t inputPos = mInputs[i]->GetPosition();
1660 if (inputPos >= minPos && inputPos <= maxPos) {
1661 return true;
1662 }
1663 }
1664
1665 return false;
1666 }
1667
// Called when a chunk becomes unused (refcount about to drop to the hash
// table's reference only). Depending on state: drops discarded chunks,
// propagates chunk failures into mStatus, writes dirty chunks to disk, and
// otherwise caches or releases the chunk per ShouldCacheChunk().
nsresult CacheFile::DeactivateChunk(CacheFileChunk* aChunk) {
  nsresult rv;

  // Avoid lock reentrancy by increasing the RefCnt
  RefPtr<CacheFileChunk> chunk = aChunk;

  {
    CacheFileAutoLock lock(this);

    LOG(("CacheFile::DeactivateChunk() [this=%p, chunk=%p, idx=%u]", this,
         aChunk, aChunk->Index()));

    MOZ_ASSERT(mReady);
    MOZ_ASSERT((mHandle && !mMemoryOnly && !mOpeningFile) ||
               (!mHandle && mMemoryOnly && !mOpeningFile) ||
               (!mHandle && !mMemoryOnly && mOpeningFile));

    // Expected refcount 2: the hash table entry plus our local RefPtr above.
    if (aChunk->mRefCnt != 2) {
      LOG(
          ("CacheFile::DeactivateChunk() - Chunk is still used [this=%p, "
           "chunk=%p, refcnt=%" PRIuPTR "]",
           this, aChunk, aChunk->mRefCnt.get()));

      // somebody got the reference before the lock was acquired
      return NS_OK;
    }

    if (aChunk->mDiscardedChunk) {
      // Chunk was discarded (e.g. by Truncate()); just drop it.
      aChunk->mActiveChunk = false;
      ReleaseOutsideLock(
          RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

      DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
      MOZ_ASSERT(removed);
      return NS_OK;
    }

#ifdef DEBUG
    {
      // We can be here iff the chunk is in the hash table
      RefPtr<CacheFileChunk> chunkCheck;
      mChunks.Get(chunk->Index(), getter_AddRefs(chunkCheck));
      MOZ_ASSERT(chunkCheck == chunk);

      // We also shouldn't have any queued listener for this chunk
      ChunkListeners* listeners;
      mChunkListeners.Get(chunk->Index(), &listeners);
      MOZ_ASSERT(!listeners);
    }
#endif

    if (NS_FAILED(chunk->GetStatus())) {
      SetError(chunk->GetStatus());
    }

    if (NS_FAILED(mStatus)) {
      // Don't write any chunk to disk since this entry will be doomed
      LOG(
          ("CacheFile::DeactivateChunk() - Releasing chunk because of status "
           "[this=%p, chunk=%p, mStatus=0x%08" PRIx32 "]",
           this, chunk.get(), static_cast<uint32_t>(mStatus)));

      RemoveChunkInternal(chunk, false);
      return mStatus;
    }

    if (chunk->IsDirty() && !mMemoryOnly && !mOpeningFile) {
      LOG(
          ("CacheFile::DeactivateChunk() - Writing dirty chunk to the disk "
           "[this=%p]",
           this));

      mDataIsDirty = true;

      rv = chunk->Write(mHandle, this);
      if (NS_FAILED(rv)) {
        LOG(
            ("CacheFile::DeactivateChunk() - CacheFileChunk::Write() failed "
             "synchronously. Removing it. [this=%p, chunk=%p, rv=0x%08" PRIx32
             "]",
             this, chunk.get(), static_cast<uint32_t>(rv)));

        RemoveChunkInternal(chunk, false);

        SetError(rv);
        return rv;
      }

      // Chunk will be removed in OnChunkWritten if it is still unused

      // chunk needs to be released under the lock to be able to rely on
      // CacheFileChunk::mRefCnt in CacheFile::OnChunkWritten()
      chunk = nullptr;
      return NS_OK;
    }

    bool keepChunk = ShouldCacheChunk(aChunk->Index());
    LOG(("CacheFile::DeactivateChunk() - %s unused chunk [this=%p, chunk=%p]",
         keepChunk ? "Caching" : "Releasing", this, chunk.get()));

    RemoveChunkInternal(chunk, keepChunk);

    if (!mMemoryOnly) WriteMetadataIfNeededLocked();
  }

  return NS_OK;
}
1775
// Lock must be held. Detaches aChunk from mChunks; when aCacheChunk is true
// the chunk is kept in mCachedChunks for later reuse. The chunk's back
// reference to this file is released outside the lock (see
// ReleaseOutsideLock()) to avoid lock reentrancy from its destructor.
void CacheFile::RemoveChunkInternal(CacheFileChunk* aChunk, bool aCacheChunk) {
  AssertOwnsLock();

  aChunk->mActiveChunk = false;
  ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

  if (aCacheChunk) {
    mCachedChunks.InsertOrUpdate(aChunk->Index(), RefPtr{aChunk});
  }

  mChunks.Remove(aChunk->Index());
}
1788
OutputStreamExists(bool aAlternativeData)1789 bool CacheFile::OutputStreamExists(bool aAlternativeData) {
1790 AssertOwnsLock();
1791
1792 if (!mOutput) {
1793 return false;
1794 }
1795
1796 return mOutput->IsAlternativeData() == aAlternativeData;
1797 }
1798
// Lock must be held. Returns how many bytes, starting at chunk aIndex, are
// available without further disk reads: the length of the run of ready
// (active or cached) chunks beginning at aIndex, limited by the end of the
// selected data section (alt-data vs. normal data) and — for disk-backed
// entries — by the preload window, since only preloaded chunks are
// guaranteed not to be released.
int64_t CacheFile::BytesFromChunk(uint32_t aIndex, bool aAlternativeData) {
  AssertOwnsLock();

  // Pick the end of the section the caller reads: normal data ends at
  // mAltDataOffset when alt-data exists, otherwise at mDataSize.
  int64_t dataSize;

  if (mAltDataOffset != -1) {
    if (aAlternativeData) {
      dataSize = mDataSize;
    } else {
      dataSize = mAltDataOffset;
    }
  } else {
    MOZ_ASSERT(!aAlternativeData);
    dataSize = mDataSize;
  }

  if (!dataSize) {
    return 0;
  }

  // Index of the last existing chunk.
  uint32_t lastChunk = (dataSize - 1) / kChunkSize;
  if (aIndex > lastChunk) {
    return 0;
  }

  // We can use only preloaded chunks for the given stream to calculate
  // available bytes if this is an entry stored on disk, since only those
  // chunks are guaranteed not to be released.
  uint32_t maxPreloadedChunk;
  if (mMemoryOnly) {
    maxPreloadedChunk = lastChunk;
  } else {
    maxPreloadedChunk = std::min(aIndex + mPreloadChunkCount, lastChunk);
  }

  // Walk forward while consecutive chunks are ready; i ends up one past the
  // last usable chunk.
  uint32_t i;
  for (i = aIndex; i <= maxPreloadedChunk; ++i) {
    CacheFileChunk* chunk;

    chunk = mChunks.GetWeak(i);
    if (chunk) {
      MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
      if (chunk->IsReady()) {
        continue;
      }

      // don't search this chunk in cached
      break;
    }

    chunk = mCachedChunks.GetWeak(i);
    if (chunk) {
      MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
      continue;
    }

    break;
  }

  // theoretic bytes in advance
  int64_t advance = int64_t(i - aIndex) * kChunkSize;
  // real bytes till the end of the file
  int64_t tail = dataSize - (aIndex * kChunkSize);

  return std::min(advance, tail);
}
1866
Truncate(int64_t aOffset)1867 nsresult CacheFile::Truncate(int64_t aOffset) {
1868 AssertOwnsLock();
1869
1870 LOG(("CacheFile::Truncate() [this=%p, offset=%" PRId64 "]", this, aOffset));
1871
1872 nsresult rv;
1873
1874 // If we ever need to truncate on non alt-data boundary, we need to handle
1875 // existing input streams.
1876 MOZ_ASSERT(aOffset == mAltDataOffset,
1877 "Truncating normal data not implemented");
1878 MOZ_ASSERT(mReady);
1879 MOZ_ASSERT(!mOutput);
1880
1881 uint32_t lastChunk = 0;
1882 if (mDataSize > 0) {
1883 lastChunk = (mDataSize - 1) / kChunkSize;
1884 }
1885
1886 uint32_t newLastChunk = 0;
1887 if (aOffset > 0) {
1888 newLastChunk = (aOffset - 1) / kChunkSize;
1889 }
1890
1891 uint32_t bytesInNewLastChunk = aOffset - newLastChunk * kChunkSize;
1892
1893 LOG(
1894 ("CacheFileTruncate() - lastChunk=%u, newLastChunk=%u, "
1895 "bytesInNewLastChunk=%u",
1896 lastChunk, newLastChunk, bytesInNewLastChunk));
1897
1898 // Remove all truncated chunks from mCachedChunks
1899 for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
1900 uint32_t idx = iter.Key();
1901
1902 if (idx > newLastChunk) {
1903 // This is unused chunk, simply remove it.
1904 LOG(("CacheFile::Truncate() - removing cached chunk [idx=%u]", idx));
1905 iter.Remove();
1906 }
1907 }
1908
1909 // We need to make sure no input stream holds a reference to a chunk we're
1910 // going to discard. In theory, if alt-data begins at chunk boundary, input
1911 // stream for normal data can get the chunk containing only alt-data via
1912 // EnsureCorrectChunk() call. The input stream won't read the data from such
1913 // chunk, but it will keep the reference until the stream is closed and we
1914 // cannot simply discard this chunk.
1915 int64_t maxInputChunk = -1;
1916 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
1917 int64_t inputChunk = mInputs[i]->GetChunkIdx();
1918
1919 if (maxInputChunk < inputChunk) {
1920 maxInputChunk = inputChunk;
1921 }
1922
1923 MOZ_RELEASE_ASSERT(mInputs[i]->GetPosition() <= aOffset);
1924 }
1925
1926 MOZ_RELEASE_ASSERT(maxInputChunk <= newLastChunk + 1);
1927 if (maxInputChunk == newLastChunk + 1) {
1928 // Truncating must be done at chunk boundary
1929 MOZ_RELEASE_ASSERT(bytesInNewLastChunk == kChunkSize);
1930 newLastChunk++;
1931 bytesInNewLastChunk = 0;
1932 LOG(
1933 ("CacheFile::Truncate() - chunk %p is still in use, using "
1934 "newLastChunk=%u and bytesInNewLastChunk=%u",
1935 mChunks.GetWeak(newLastChunk), newLastChunk, bytesInNewLastChunk));
1936 }
1937
1938 // Discard all truncated chunks in mChunks
1939 for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) {
1940 uint32_t idx = iter.Key();
1941
1942 if (idx > newLastChunk) {
1943 RefPtr<CacheFileChunk>& chunk = iter.Data();
1944 LOG(("CacheFile::Truncate() - discarding chunk [idx=%u, chunk=%p]", idx,
1945 chunk.get()));
1946
1947 if (HaveChunkListeners(idx)) {
1948 NotifyChunkListeners(idx, NS_ERROR_NOT_AVAILABLE, chunk);
1949 }
1950
1951 chunk->mDiscardedChunk = true;
1952 mDiscardedChunks.AppendElement(chunk);
1953 iter.Remove();
1954 }
1955 }
1956
1957 // Remove hashes of all removed chunks from the metadata
1958 for (uint32_t i = lastChunk; i > newLastChunk; --i) {
1959 mMetadata->RemoveHash(i);
1960 }
1961
1962 // Truncate new last chunk
1963 if (bytesInNewLastChunk == kChunkSize) {
1964 LOG(("CacheFile::Truncate() - not truncating last chunk."));
1965 } else {
1966 RefPtr<CacheFileChunk> chunk;
1967 if (mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
1968 LOG(("CacheFile::Truncate() - New last chunk %p got from mChunks.",
1969 chunk.get()));
1970 } else if (mCachedChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
1971 LOG(("CacheFile::Truncate() - New last chunk %p got from mCachedChunks.",
1972 chunk.get()));
1973 } else {
1974 // New last chunk isn't loaded but we need to update the hash.
1975 MOZ_ASSERT(!mMemoryOnly);
1976 MOZ_ASSERT(mHandle);
1977
1978 rv = GetChunkLocked(newLastChunk, PRELOADER, nullptr,
1979 getter_AddRefs(chunk));
1980 if (NS_FAILED(rv)) {
1981 return rv;
1982 }
1983 // We've checked that we don't have this chunk, so no chunk must be
1984 // returned.
1985 MOZ_ASSERT(!chunk);
1986
1987 if (!mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
1988 return NS_ERROR_UNEXPECTED;
1989 }
1990
1991 LOG(("CacheFile::Truncate() - New last chunk %p got from preloader.",
1992 chunk.get()));
1993 }
1994
1995 rv = chunk->GetStatus();
1996 if (NS_FAILED(rv)) {
1997 LOG(
1998 ("CacheFile::Truncate() - New last chunk is failed "
1999 "[status=0x%08" PRIx32 "]",
2000 static_cast<uint32_t>(rv)));
2001 return rv;
2002 }
2003
2004 chunk->Truncate(bytesInNewLastChunk);
2005
2006 // If the chunk is ready set the new hash now. If it's still being loaded
2007 // CacheChunk::Truncate() made the chunk dirty and the hash will be updated
2008 // in OnChunkWritten().
2009 if (chunk->IsReady()) {
2010 mMetadata->SetHash(newLastChunk, chunk->Hash());
2011 }
2012 }
2013
2014 if (mHandle) {
2015 rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle, aOffset, aOffset,
2016 nullptr);
2017 if (NS_FAILED(rv)) {
2018 return rv;
2019 }
2020 }
2021
2022 mDataSize = aOffset;
2023
2024 return NS_OK;
2025 }
2026
StatusToTelemetryEnum(nsresult aStatus)2027 static uint32_t StatusToTelemetryEnum(nsresult aStatus) {
2028 if (NS_SUCCEEDED(aStatus)) {
2029 return 0;
2030 }
2031
2032 switch (aStatus) {
2033 case NS_BASE_STREAM_CLOSED:
2034 return 0; // Log this as a success
2035 case NS_ERROR_OUT_OF_MEMORY:
2036 return 2;
2037 case NS_ERROR_FILE_NO_DEVICE_SPACE:
2038 return 3;
2039 case NS_ERROR_FILE_CORRUPTED:
2040 return 4;
2041 case NS_ERROR_FILE_NOT_FOUND:
2042 return 5;
2043 case NS_BINDING_ABORTED:
2044 return 6;
2045 default:
2046 return 1; // other error
2047 }
2048
2049 MOZ_ASSERT_UNREACHABLE("We should never get here");
2050 }
2051
RemoveInput(CacheFileInputStream * aInput,nsresult aStatus)2052 void CacheFile::RemoveInput(CacheFileInputStream* aInput, nsresult aStatus) {
2053 AssertOwnsLock();
2054
2055 LOG(("CacheFile::RemoveInput() [this=%p, input=%p, status=0x%08" PRIx32 "]",
2056 this, aInput, static_cast<uint32_t>(aStatus)));
2057
2058 DebugOnly<bool> found{};
2059 found = mInputs.RemoveElement(aInput);
2060 MOZ_ASSERT(found);
2061
2062 ReleaseOutsideLock(
2063 already_AddRefed<nsIInputStream>(static_cast<nsIInputStream*>(aInput)));
2064
2065 if (!mMemoryOnly) WriteMetadataIfNeededLocked();
2066
2067 // If the input didn't read all data, there might be left some preloaded
2068 // chunks that won't be used anymore.
2069 CleanUpCachedChunks();
2070
2071 Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_INPUT_STREAM_STATUS,
2072 StatusToTelemetryEnum(aStatus));
2073 }
2074
// Detaches the active output stream from this CacheFile after it was closed.
// aStatus is the status the stream was closed with. On a fatal failure the
// error is propagated to the whole entry, except for alt-data writers where
// we can recover by truncating the (partial) alt-data away when nobody is
// reading it. Must be called with the CacheFile lock held.
void CacheFile::RemoveOutput(CacheFileOutputStream* aOutput, nsresult aStatus) {
  AssertOwnsLock();

  nsresult rv;

  LOG(("CacheFile::RemoveOutput() [this=%p, output=%p, status=0x%08" PRIx32 "]",
       this, aOutput, static_cast<uint32_t>(aStatus)));

  if (mOutput != aOutput) {
    LOG(
        ("CacheFile::RemoveOutput() - This output was already removed, ignoring"
         " call [this=%p]",
         this));
    return;
  }

  mOutput = nullptr;

  // Cancel all queued chunk and update listeners that cannot be satisfied
  NotifyListenersAboutOutputRemoval();

  if (!mMemoryOnly) WriteMetadataIfNeededLocked();

  // Make sure the CacheFile status is set to a failure when the output stream
  // is closed with a fatal error. This way we propagate correctly and w/o any
  // windows the failure state of this entry to end consumers.
  if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) &&
      aStatus != NS_BASE_STREAM_CLOSED) {
    if (aOutput->IsAlternativeData()) {
      MOZ_ASSERT(mAltDataOffset != -1);
      // If there is no alt-data input stream truncate only alt-data, otherwise
      // doom the entry.
      bool altDataInputExists = false;
      for (uint32_t i = 0; i < mInputs.Length(); ++i) {
        if (mInputs[i]->IsAlternativeData()) {
          altDataInputExists = true;
          break;
        }
      }
      if (altDataInputExists) {
        // A reader is consuming the now-incomplete alt-data; we cannot
        // silently drop it, so fail the whole entry instead.
        SetError(aStatus);
      } else {
        rv = Truncate(mAltDataOffset);
        if (NS_FAILED(rv)) {
          LOG(
              ("CacheFile::RemoveOutput() - Truncating alt-data failed "
               "[rv=0x%08" PRIx32 "]",
               static_cast<uint32_t>(rv)));
          SetError(aStatus);
        } else {
          // Alt-data was successfully removed; clear all alt-data state so
          // the entry looks like it never had any.
          SetAltMetadata(nullptr);
          mAltDataOffset = -1;
          mAltDataType.Truncate();
        }
      }
    } else {
      SetError(aStatus);
    }
  }

  // Notify close listener as the last action
  aOutput->NotifyCloseListener();

  Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS,
                        StatusToTelemetryEnum(aStatus));
}
2141
NotifyChunkListener(CacheFileChunkListener * aCallback,nsIEventTarget * aTarget,nsresult aResult,uint32_t aChunkIdx,CacheFileChunk * aChunk)2142 nsresult CacheFile::NotifyChunkListener(CacheFileChunkListener* aCallback,
2143 nsIEventTarget* aTarget,
2144 nsresult aResult, uint32_t aChunkIdx,
2145 CacheFileChunk* aChunk) {
2146 LOG(
2147 ("CacheFile::NotifyChunkListener() [this=%p, listener=%p, target=%p, "
2148 "rv=0x%08" PRIx32 ", idx=%u, chunk=%p]",
2149 this, aCallback, aTarget, static_cast<uint32_t>(aResult), aChunkIdx,
2150 aChunk));
2151
2152 RefPtr<NotifyChunkListenerEvent> ev;
2153 ev = new NotifyChunkListenerEvent(aCallback, aResult, aChunkIdx, aChunk);
2154 if (aTarget) {
2155 return aTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
2156 }
2157 return NS_DispatchToCurrentThread(ev);
2158 }
2159
QueueChunkListener(uint32_t aIndex,CacheFileChunkListener * aCallback)2160 void CacheFile::QueueChunkListener(uint32_t aIndex,
2161 CacheFileChunkListener* aCallback) {
2162 LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%u, listener=%p]", this,
2163 aIndex, aCallback));
2164
2165 AssertOwnsLock();
2166
2167 MOZ_ASSERT(aCallback);
2168
2169 ChunkListenerItem* item = new ChunkListenerItem();
2170 item->mTarget = CacheFileIOManager::IOTarget();
2171 if (!item->mTarget) {
2172 LOG(
2173 ("CacheFile::QueueChunkListener() - Cannot get Cache I/O thread! Using "
2174 "main thread for callback."));
2175 item->mTarget = GetMainThreadEventTarget();
2176 }
2177 item->mCallback = aCallback;
2178
2179 mChunkListeners.GetOrInsertNew(aIndex)->mItems.AppendElement(item);
2180 }
2181
NotifyChunkListeners(uint32_t aIndex,nsresult aResult,CacheFileChunk * aChunk)2182 nsresult CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
2183 CacheFileChunk* aChunk) {
2184 LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%u, rv=0x%08" PRIx32
2185 ", "
2186 "chunk=%p]",
2187 this, aIndex, static_cast<uint32_t>(aResult), aChunk));
2188
2189 AssertOwnsLock();
2190
2191 nsresult rv, rv2;
2192
2193 ChunkListeners* listeners;
2194 mChunkListeners.Get(aIndex, &listeners);
2195 MOZ_ASSERT(listeners);
2196
2197 rv = NS_OK;
2198 for (uint32_t i = 0; i < listeners->mItems.Length(); i++) {
2199 ChunkListenerItem* item = listeners->mItems[i];
2200 rv2 = NotifyChunkListener(item->mCallback, item->mTarget, aResult, aIndex,
2201 aChunk);
2202 if (NS_FAILED(rv2) && NS_SUCCEEDED(rv)) rv = rv2;
2203 delete item;
2204 }
2205
2206 mChunkListeners.Remove(aIndex);
2207
2208 return rv;
2209 }
2210
HaveChunkListeners(uint32_t aIndex)2211 bool CacheFile::HaveChunkListeners(uint32_t aIndex) {
2212 ChunkListeners* listeners;
2213 mChunkListeners.Get(aIndex, &listeners);
2214 return !!listeners;
2215 }
2216
// Called after the output stream is removed: fails every queued chunk
// listener whose chunk will never be produced, and wakes update listeners on
// chunks that are ready. Must be called with the CacheFile lock held.
void CacheFile::NotifyListenersAboutOutputRemoval() {
  LOG(("CacheFile::NotifyListenersAboutOutputRemoval() [this=%p]", this));

  AssertOwnsLock();

  // First fail all chunk listeners that wait for non-existent chunk
  for (auto iter = mChunkListeners.Iter(); !iter.Done(); iter.Next()) {
    uint32_t idx = iter.Key();
    auto* listeners = iter.UserData();

    LOG(
        ("CacheFile::NotifyListenersAboutOutputRemoval() - fail "
         "[this=%p, idx=%u]",
         this, idx));

    RefPtr<CacheFileChunk> chunk;
    mChunks.Get(idx, getter_AddRefs(chunk));
    if (chunk) {
      // Skip these listeners because the chunk is being read. We don't have
      // assertion here to check its state because it might be already in READY
      // state while CacheFile::OnChunkRead() is waiting on Cache I/O thread for
      // a lock so the listeners hasn't been notified yet. In any case, the
      // listeners will be notified from CacheFile::OnChunkRead().
      continue;
    }

    // Notify and free each queued item, then drop the whole entry from the
    // hashtable while still on this iterator position.
    for (uint32_t i = 0; i < listeners->mItems.Length(); i++) {
      ChunkListenerItem* item = listeners->mItems[i];
      NotifyChunkListener(item->mCallback, item->mTarget,
                          NS_ERROR_NOT_AVAILABLE, idx, nullptr);
      delete item;
    }

    iter.Remove();
  }

  // Fail all update listeners
  for (const auto& entry : mChunks) {
    const RefPtr<CacheFileChunk>& chunk = entry.GetData();
    LOG(
        ("CacheFile::NotifyListenersAboutOutputRemoval() - fail2 "
         "[this=%p, idx=%u]",
         this, entry.GetKey()));

    if (chunk->IsReady()) {
      chunk->NotifyUpdateListeners();
    }
  }
}
2266
DataSize(int64_t * aSize)2267 bool CacheFile::DataSize(int64_t* aSize) {
2268 CacheFileAutoLock lock(this);
2269
2270 if (OutputStreamExists(false)) {
2271 return false;
2272 }
2273
2274 if (mAltDataOffset == -1) {
2275 *aSize = mDataSize;
2276 } else {
2277 *aSize = mAltDataOffset;
2278 }
2279
2280 return true;
2281 }
2282
GetAltDataSize(int64_t * aSize)2283 nsresult CacheFile::GetAltDataSize(int64_t* aSize) {
2284 CacheFileAutoLock lock(this);
2285 if (mOutput) {
2286 return NS_ERROR_IN_PROGRESS;
2287 }
2288
2289 if (mAltDataOffset == -1) {
2290 return NS_ERROR_NOT_AVAILABLE;
2291 }
2292
2293 *aSize = mDataSize - mAltDataOffset;
2294 return NS_OK;
2295 }
2296
GetAltDataType(nsACString & aType)2297 nsresult CacheFile::GetAltDataType(nsACString& aType) {
2298 CacheFileAutoLock lock(this);
2299
2300 if (mAltDataOffset == -1) {
2301 return NS_ERROR_NOT_AVAILABLE;
2302 }
2303
2304 aType = mAltDataType;
2305 return NS_OK;
2306 }
2307
IsDoomed()2308 bool CacheFile::IsDoomed() {
2309 CacheFileAutoLock lock(this);
2310
2311 if (!mHandle) return false;
2312
2313 return mHandle->IsDoomed();
2314 }
2315
IsWriteInProgress()2316 bool CacheFile::IsWriteInProgress() {
2317 CacheFileAutoLock lock(this);
2318
2319 bool result = false;
2320
2321 if (!mMemoryOnly) {
2322 result =
2323 mDataIsDirty || (mMetadata && mMetadata->IsDirty()) || mWritingMetadata;
2324 }
2325
2326 result = result || mOpeningFile || mOutput || mChunks.Count();
2327
2328 return result;
2329 }
2330
EntryWouldExceedLimit(int64_t aOffset,int64_t aSize,bool aIsAltData)2331 bool CacheFile::EntryWouldExceedLimit(int64_t aOffset, int64_t aSize,
2332 bool aIsAltData) {
2333 CacheFileAutoLock lock(this);
2334
2335 if (mSkipSizeCheck || aSize < 0) {
2336 return false;
2337 }
2338
2339 int64_t totalSize = aOffset + aSize;
2340 if (aIsAltData) {
2341 totalSize += (mAltDataOffset == -1) ? mDataSize : mAltDataOffset;
2342 }
2343
2344 return CacheObserver::EntryIsTooBig(totalSize, !mMemoryOnly);
2345 }
2346
IsDirty()2347 bool CacheFile::IsDirty() { return mDataIsDirty || mMetadata->IsDirty(); }
2348
WriteMetadataIfNeeded()2349 void CacheFile::WriteMetadataIfNeeded() {
2350 LOG(("CacheFile::WriteMetadataIfNeeded() [this=%p]", this));
2351
2352 CacheFileAutoLock lock(this);
2353
2354 if (!mMemoryOnly) WriteMetadataIfNeededLocked();
2355 }
2356
// Kicks off an asynchronous metadata write when the entry is dirty and fully
// idle (no streams, no chunks, no write already in flight). aFireAndForget
// is true only when called from the destructor; in that mode no listener is
// passed to WriteMetadata and the scheduled write is not unscheduled.
void CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget) {
  // When aFireAndForget is set to true, we are called from dtor.
  // |this| must not be referenced after this method returns!

  LOG(("CacheFile::WriteMetadataIfNeededLocked() [this=%p]", this));

  nsresult rv;

  AssertOwnsLock();
  MOZ_ASSERT(!mMemoryOnly);

  if (!mMetadata) {
    MOZ_CRASH("Must have metadata here");
    return;
  }

  // Never write metadata for an entry already in a failed state.
  if (NS_FAILED(mStatus)) return;

  // Bail out unless the entry is dirty and completely quiescent.
  if (!IsDirty() || mOutput || mInputs.Length() || mChunks.Count() ||
      mWritingMetadata || mOpeningFile || mKill) {
    return;
  }

  if (!aFireAndForget) {
    // if aFireAndForget is set, we are called from dtor. Write
    // scheduler hard-refers CacheFile otherwise, so we cannot be here.
    CacheFileIOManager::UnscheduleMetadataWrite(this);
  }

  LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing metadata [this=%p]",
       this));

  // From the dtor we pass no listener (nullptr) since |this| is going away.
  rv = mMetadata->WriteMetadata(mDataSize, aFireAndForget ? nullptr : this);
  if (NS_SUCCEEDED(rv)) {
    mWritingMetadata = true;
    mDataIsDirty = false;
  } else {
    LOG(
        ("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously "
         "failed [this=%p]",
         this));
    // TODO: close streams with error
    SetError(rv);
  }
}
2402
PostWriteTimer()2403 void CacheFile::PostWriteTimer() {
2404 if (mMemoryOnly) return;
2405
2406 LOG(("CacheFile::PostWriteTimer() [this=%p]", this));
2407
2408 CacheFileIOManager::ScheduleMetadataWrite(this);
2409 }
2410
CleanUpCachedChunks()2411 void CacheFile::CleanUpCachedChunks() {
2412 for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
2413 uint32_t idx = iter.Key();
2414 const RefPtr<CacheFileChunk>& chunk = iter.Data();
2415
2416 LOG(("CacheFile::CleanUpCachedChunks() [this=%p, idx=%u, chunk=%p]", this,
2417 idx, chunk.get()));
2418
2419 if (MustKeepCachedChunk(idx)) {
2420 LOG(("CacheFile::CleanUpCachedChunks() - Keeping chunk"));
2421 continue;
2422 }
2423
2424 LOG(("CacheFile::CleanUpCachedChunks() - Removing chunk"));
2425 iter.Remove();
2426 }
2427 }
2428
// Fills the remainder of chunk aChunkIdx with zero bytes (creating the chunk
// if it does not exist yet). Used when data is written past the current end
// of the file, leaving a hole that must read back as zeroes. Must be called
// with the CacheFile lock held; aChunkIdx must be the last (possibly
// incomplete) chunk.
nsresult CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx) {
  AssertOwnsLock();

  // This method is used to pad last incomplete chunk with zeroes or create
  // a new chunk full of zeroes
  MOZ_ASSERT(mDataSize / kChunkSize == aChunkIdx);

  nsresult rv;
  RefPtr<CacheFileChunk> chunk;
  rv = GetChunkLocked(aChunkIdx, WRITER, nullptr, getter_AddRefs(chunk));
  NS_ENSURE_SUCCESS(rv, rv);

  LOG(
      ("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d"
       " [this=%p]",
       aChunkIdx, chunk->DataSize(), kChunkSize - 1, this));

  CacheFileChunkWriteHandle hnd = chunk->GetWriteHandle(kChunkSize);
  if (!hnd.Buf()) {
    // Buffer allocation failed; release the chunk reference outside the lock
    // and mark the whole entry as failed.
    ReleaseOutsideLock(std::move(chunk));
    SetError(NS_ERROR_OUT_OF_MEMORY);
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Zero everything from the current end of the chunk's data up to the full
  // chunk size, then record the new data size on the handle.
  uint32_t offset = hnd.DataSize();
  memset(hnd.Buf() + offset, 0, kChunkSize - offset);
  hnd.UpdateDataSize(offset, kChunkSize - offset);

  ReleaseOutsideLock(std::move(chunk));

  return NS_OK;
}
2461
SetError(nsresult aStatus)2462 void CacheFile::SetError(nsresult aStatus) {
2463 AssertOwnsLock();
2464
2465 if (NS_SUCCEEDED(mStatus)) {
2466 mStatus = aStatus;
2467 if (mHandle) {
2468 CacheFileIOManager::DoomFile(mHandle, nullptr);
2469 }
2470 }
2471 }
2472
InitIndexEntry()2473 nsresult CacheFile::InitIndexEntry() {
2474 MOZ_ASSERT(mHandle);
2475
2476 if (mHandle->IsDoomed()) return NS_OK;
2477
2478 nsresult rv;
2479
2480 rv = CacheFileIOManager::InitIndexEntry(
2481 mHandle, GetOriginAttrsHash(mMetadata->OriginAttributes()),
2482 mMetadata->IsAnonymous(), mPinned);
2483 NS_ENSURE_SUCCESS(rv, rv);
2484
2485 uint32_t frecency = mMetadata->GetFrecency();
2486
2487 bool hasAltData =
2488 mMetadata->GetElement(CacheFileUtils::kAltDataKey) != nullptr;
2489
2490 static auto toUint16 = [](const char* s) -> uint16_t {
2491 if (s) {
2492 nsresult rv;
2493 uint64_t n64 = nsDependentCString(s).ToInteger64(&rv);
2494 MOZ_ASSERT(NS_SUCCEEDED(rv));
2495 return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound;
2496 }
2497 return kIndexTimeNotAvailable;
2498 };
2499
2500 const char* onStartTimeStr =
2501 mMetadata->GetElement("net-response-time-onstart");
2502 uint16_t onStartTime = toUint16(onStartTimeStr);
2503
2504 const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
2505 uint16_t onStopTime = toUint16(onStopTimeStr);
2506
2507 const char* contentTypeStr = mMetadata->GetElement("ctid");
2508 uint8_t contentType = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
2509 if (contentTypeStr) {
2510 int64_t n64 = nsDependentCString(contentTypeStr).ToInteger64(&rv);
2511 if (NS_FAILED(rv) || n64 < nsICacheEntry::CONTENT_TYPE_UNKNOWN ||
2512 n64 >= nsICacheEntry::CONTENT_TYPE_LAST) {
2513 n64 = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
2514 }
2515 contentType = n64;
2516 }
2517
2518 rv = CacheFileIOManager::UpdateIndexEntry(
2519 mHandle, &frecency, &hasAltData, &onStartTime, &onStopTime, &contentType);
2520 NS_ENSURE_SUCCESS(rv, rv);
2521
2522 return NS_OK;
2523 }
2524
SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const2525 size_t CacheFile::SizeOfExcludingThis(
2526 mozilla::MallocSizeOf mallocSizeOf) const {
2527 CacheFileAutoLock lock(const_cast<CacheFile*>(this));
2528
2529 size_t n = 0;
2530 n += mKey.SizeOfExcludingThisIfUnshared(mallocSizeOf);
2531 n += mChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
2532 for (const auto& chunk : mChunks.Values()) {
2533 n += chunk->SizeOfIncludingThis(mallocSizeOf);
2534 }
2535 n += mCachedChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
2536 for (const auto& chunk : mCachedChunks.Values()) {
2537 n += chunk->SizeOfIncludingThis(mallocSizeOf);
2538 }
2539 // Ignore metadata if it's still being read. It's not safe to access buffers
2540 // in CacheFileMetadata because they might be reallocated on another thread
2541 // outside CacheFile's lock.
2542 if (mMetadata && mReady) {
2543 n += mMetadata->SizeOfIncludingThis(mallocSizeOf);
2544 }
2545
2546 // Input streams are not elsewhere reported.
2547 n += mInputs.ShallowSizeOfExcludingThis(mallocSizeOf);
2548 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
2549 n += mInputs[i]->SizeOfIncludingThis(mallocSizeOf);
2550 }
2551
2552 // Output streams are not elsewhere reported.
2553 if (mOutput) {
2554 n += mOutput->SizeOfIncludingThis(mallocSizeOf);
2555 }
2556
2557 // The listeners are usually classes reported just above.
2558 n += mChunkListeners.ShallowSizeOfExcludingThis(mallocSizeOf);
2559 n += mObjsToRelease.ShallowSizeOfExcludingThis(mallocSizeOf);
2560
2561 // mHandle reported directly from CacheFileIOManager.
2562
2563 return n;
2564 }
2565
SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const2566 size_t CacheFile::SizeOfIncludingThis(
2567 mozilla::MallocSizeOf mallocSizeOf) const {
2568 return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
2569 }
2570
2571 } // namespace mozilla::net
2572