/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5 #include "CacheFile.h"
6
7 #include <algorithm>
8 #include <utility>
9
10 #include "CacheFileChunk.h"
11 #include "CacheFileInputStream.h"
12 #include "CacheFileOutputStream.h"
13 #include "CacheLog.h"
14 #include "mozilla/DebugOnly.h"
15 #include "mozilla/Telemetry.h"
16 #include "nsComponentManagerUtils.h"
17 #include "nsProxyRelease.h"
18 #include "nsThreadUtils.h"
19
// When CACHE_CHUNKS is defined we always cache unused chunks in mCachedChunks.
// When it is not defined, we always release the chunks ASAP, i.e. we cache
// unused chunks only when:
// - CacheFile is memory-only
// - CacheFile is still waiting for the handle
// - the chunk is preloaded

//#define CACHE_CHUNKS
28
29 namespace mozilla {
30 namespace net {
31
32 class NotifyCacheFileListenerEvent : public Runnable {
33 public:
NotifyCacheFileListenerEvent(CacheFileListener * aCallback,nsresult aResult,bool aIsNew)34 NotifyCacheFileListenerEvent(CacheFileListener* aCallback, nsresult aResult,
35 bool aIsNew)
36 : Runnable("net::NotifyCacheFileListenerEvent"),
37 mCallback(aCallback),
38 mRV(aResult),
39 mIsNew(aIsNew) {
40 LOG(
41 ("NotifyCacheFileListenerEvent::NotifyCacheFileListenerEvent() "
42 "[this=%p]",
43 this));
44 }
45
46 protected:
~NotifyCacheFileListenerEvent()47 ~NotifyCacheFileListenerEvent() {
48 LOG(
49 ("NotifyCacheFileListenerEvent::~NotifyCacheFileListenerEvent() "
50 "[this=%p]",
51 this));
52 }
53
54 public:
Run()55 NS_IMETHOD Run() override {
56 LOG(("NotifyCacheFileListenerEvent::Run() [this=%p]", this));
57
58 mCallback->OnFileReady(mRV, mIsNew);
59 return NS_OK;
60 }
61
62 protected:
63 nsCOMPtr<CacheFileListener> mCallback;
64 nsresult mRV;
65 bool mIsNew;
66 };
67
68 class NotifyChunkListenerEvent : public Runnable {
69 public:
NotifyChunkListenerEvent(CacheFileChunkListener * aCallback,nsresult aResult,uint32_t aChunkIdx,CacheFileChunk * aChunk)70 NotifyChunkListenerEvent(CacheFileChunkListener* aCallback, nsresult aResult,
71 uint32_t aChunkIdx, CacheFileChunk* aChunk)
72 : Runnable("net::NotifyChunkListenerEvent"),
73 mCallback(aCallback),
74 mRV(aResult),
75 mChunkIdx(aChunkIdx),
76 mChunk(aChunk) {
77 LOG(("NotifyChunkListenerEvent::NotifyChunkListenerEvent() [this=%p]",
78 this));
79 }
80
81 protected:
~NotifyChunkListenerEvent()82 ~NotifyChunkListenerEvent() {
83 LOG(("NotifyChunkListenerEvent::~NotifyChunkListenerEvent() [this=%p]",
84 this));
85 }
86
87 public:
Run()88 NS_IMETHOD Run() override {
89 LOG(("NotifyChunkListenerEvent::Run() [this=%p]", this));
90
91 mCallback->OnChunkAvailable(mRV, mChunkIdx, mChunk);
92 return NS_OK;
93 }
94
95 protected:
96 nsCOMPtr<CacheFileChunkListener> mCallback;
97 nsresult mRV;
98 uint32_t mChunkIdx;
99 RefPtr<CacheFileChunk> mChunk;
100 };
101
102 class DoomFileHelper : public CacheFileIOListener {
103 public:
104 NS_DECL_THREADSAFE_ISUPPORTS
105
DoomFileHelper(CacheFileListener * aListener)106 explicit DoomFileHelper(CacheFileListener* aListener)
107 : mListener(aListener) {}
108
OnFileOpened(CacheFileHandle * aHandle,nsresult aResult)109 NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override {
110 MOZ_CRASH("DoomFileHelper::OnFileOpened should not be called!");
111 return NS_ERROR_UNEXPECTED;
112 }
113
OnDataWritten(CacheFileHandle * aHandle,const char * aBuf,nsresult aResult)114 NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
115 nsresult aResult) override {
116 MOZ_CRASH("DoomFileHelper::OnDataWritten should not be called!");
117 return NS_ERROR_UNEXPECTED;
118 }
119
OnDataRead(CacheFileHandle * aHandle,char * aBuf,nsresult aResult)120 NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
121 nsresult aResult) override {
122 MOZ_CRASH("DoomFileHelper::OnDataRead should not be called!");
123 return NS_ERROR_UNEXPECTED;
124 }
125
OnFileDoomed(CacheFileHandle * aHandle,nsresult aResult)126 NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override {
127 if (mListener) mListener->OnFileDoomed(aResult);
128 return NS_OK;
129 }
130
OnEOFSet(CacheFileHandle * aHandle,nsresult aResult)131 NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override {
132 MOZ_CRASH("DoomFileHelper::OnEOFSet should not be called!");
133 return NS_ERROR_UNEXPECTED;
134 }
135
OnFileRenamed(CacheFileHandle * aHandle,nsresult aResult)136 NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle,
137 nsresult aResult) override {
138 MOZ_CRASH("DoomFileHelper::OnFileRenamed should not be called!");
139 return NS_ERROR_UNEXPECTED;
140 }
141
142 private:
143 virtual ~DoomFileHelper() = default;
144
145 nsCOMPtr<CacheFileListener> mListener;
146 };
147
NS_IMPL_ISUPPORTS(DoomFileHelper,CacheFileIOListener)148 NS_IMPL_ISUPPORTS(DoomFileHelper, CacheFileIOListener)
149
150 NS_IMPL_ADDREF(CacheFile)
151 NS_IMPL_RELEASE(CacheFile)
152 NS_INTERFACE_MAP_BEGIN(CacheFile)
153 NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileChunkListener)
154 NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileIOListener)
155 NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileMetadataListener)
156 NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports,
157 mozilla::net::CacheFileChunkListener)
158 NS_INTERFACE_MAP_END
159
160 CacheFile::CacheFile()
161 : mLock("CacheFile.mLock"),
162 mOpeningFile(false),
163 mReady(false),
164 mMemoryOnly(false),
165 mSkipSizeCheck(false),
166 mOpenAsMemoryOnly(false),
167 mPinned(false),
168 mPriority(false),
169 mDataAccessed(false),
170 mDataIsDirty(false),
171 mWritingMetadata(false),
172 mPreloadWithoutInputStreams(true),
173 mPreloadChunkCount(0),
174 mStatus(NS_OK),
175 mDataSize(-1),
176 mAltDataOffset(-1),
177 mKill(false),
178 mOutput(nullptr) {
179 LOG(("CacheFile::CacheFile() [this=%p]", this));
180 }
181
~CacheFile()182 CacheFile::~CacheFile() {
183 LOG(("CacheFile::~CacheFile() [this=%p]", this));
184
185 MutexAutoLock lock(mLock);
186 if (!mMemoryOnly && mReady && !mKill) {
187 // mReady flag indicates we have metadata plus in a valid state.
188 WriteMetadataIfNeededLocked(true);
189 }
190 }
191
Init(const nsACString & aKey,bool aCreateNew,bool aMemoryOnly,bool aSkipSizeCheck,bool aPriority,bool aPinned,CacheFileListener * aCallback)192 nsresult CacheFile::Init(const nsACString& aKey, bool aCreateNew,
193 bool aMemoryOnly, bool aSkipSizeCheck, bool aPriority,
194 bool aPinned, CacheFileListener* aCallback) {
195 MOZ_ASSERT(!mListener);
196 MOZ_ASSERT(!mHandle);
197
198 MOZ_ASSERT(!(aMemoryOnly && aPinned));
199
200 nsresult rv;
201
202 mKey = aKey;
203 mOpenAsMemoryOnly = mMemoryOnly = aMemoryOnly;
204 mSkipSizeCheck = aSkipSizeCheck;
205 mPriority = aPriority;
206 mPinned = aPinned;
207
208 // Some consumers (at least nsHTTPCompressConv) assume that Read() can read
209 // such amount of data that was announced by Available().
210 // CacheFileInputStream::Available() uses also preloaded chunks to compute
211 // number of available bytes in the input stream, so we have to make sure the
212 // preloadChunkCount won't change during CacheFile's lifetime since otherwise
213 // we could potentially release some cached chunks that was used to calculate
214 // available bytes but would not be available later during call to
215 // CacheFileInputStream::Read().
216 mPreloadChunkCount = CacheObserver::PreloadChunkCount();
217
218 LOG(
219 ("CacheFile::Init() [this=%p, key=%s, createNew=%d, memoryOnly=%d, "
220 "priority=%d, listener=%p]",
221 this, mKey.get(), aCreateNew, aMemoryOnly, aPriority, aCallback));
222
223 if (mMemoryOnly) {
224 MOZ_ASSERT(!aCallback);
225
226 mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, false, mKey);
227 mReady = true;
228 mDataSize = mMetadata->Offset();
229 return NS_OK;
230 } else {
231 uint32_t flags;
232 if (aCreateNew) {
233 MOZ_ASSERT(!aCallback);
234 flags = CacheFileIOManager::CREATE_NEW;
235
236 // make sure we can use this entry immediately
237 mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
238 mReady = true;
239 mDataSize = mMetadata->Offset();
240 } else {
241 flags = CacheFileIOManager::CREATE;
242 }
243
244 if (mPriority) {
245 flags |= CacheFileIOManager::PRIORITY;
246 }
247
248 if (mPinned) {
249 flags |= CacheFileIOManager::PINNED;
250 }
251
252 mOpeningFile = true;
253 mListener = aCallback;
254 rv = CacheFileIOManager::OpenFile(mKey, flags, this);
255 if (NS_FAILED(rv)) {
256 mListener = nullptr;
257 mOpeningFile = false;
258
259 if (mPinned) {
260 LOG(
261 ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
262 "but we want to pin, fail the file opening. [this=%p]",
263 this));
264 return NS_ERROR_NOT_AVAILABLE;
265 }
266
267 if (aCreateNew) {
268 NS_WARNING("Forcing memory-only entry since OpenFile failed");
269 LOG(
270 ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
271 "synchronously. We can continue in memory-only mode since "
272 "aCreateNew == true. [this=%p]",
273 this));
274
275 mMemoryOnly = true;
276 } else if (rv == NS_ERROR_NOT_INITIALIZED) {
277 NS_WARNING(
278 "Forcing memory-only entry since CacheIOManager isn't "
279 "initialized.");
280 LOG(
281 ("CacheFile::Init() - CacheFileIOManager isn't initialized, "
282 "initializing entry as memory-only. [this=%p]",
283 this));
284
285 mMemoryOnly = true;
286 mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
287 mReady = true;
288 mDataSize = mMetadata->Offset();
289
290 RefPtr<NotifyCacheFileListenerEvent> ev;
291 ev = new NotifyCacheFileListenerEvent(aCallback, NS_OK, true);
292 rv = NS_DispatchToCurrentThread(ev);
293 NS_ENSURE_SUCCESS(rv, rv);
294 } else {
295 NS_ENSURE_SUCCESS(rv, rv);
296 }
297 }
298 }
299
300 return NS_OK;
301 }
302
OnChunkRead(nsresult aResult,CacheFileChunk * aChunk)303 nsresult CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) {
304 CacheFileAutoLock lock(this);
305
306 nsresult rv;
307
308 uint32_t index = aChunk->Index();
309
310 LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08" PRIx32
311 ", chunk=%p, idx=%u]",
312 this, static_cast<uint32_t>(aResult), aChunk, index));
313
314 if (aChunk->mDiscardedChunk) {
315 // We discard only unused chunks, so it must be still unused when reading
316 // data finishes.
317 MOZ_ASSERT(aChunk->mRefCnt == 2);
318 aChunk->mActiveChunk = false;
319 ReleaseOutsideLock(
320 RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));
321
322 DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
323 MOZ_ASSERT(removed);
324 return NS_OK;
325 }
326
327 if (NS_FAILED(aResult)) {
328 SetError(aResult);
329 }
330
331 if (HaveChunkListeners(index)) {
332 rv = NotifyChunkListeners(index, aResult, aChunk);
333 NS_ENSURE_SUCCESS(rv, rv);
334 }
335
336 return NS_OK;
337 }
338
OnChunkWritten(nsresult aResult,CacheFileChunk * aChunk)339 nsresult CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk* aChunk) {
340 // In case the chunk was reused, made dirty and released between calls to
341 // CacheFileChunk::Write() and CacheFile::OnChunkWritten(), we must write
342 // the chunk to the disk again. When the chunk is unused and is dirty simply
343 // addref and release (outside the lock) the chunk which ensures that
344 // CacheFile::DeactivateChunk() will be called again.
345 RefPtr<CacheFileChunk> deactivateChunkAgain;
346
347 CacheFileAutoLock lock(this);
348
349 nsresult rv;
350
351 LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08" PRIx32
352 ", chunk=%p, idx=%u]",
353 this, static_cast<uint32_t>(aResult), aChunk, aChunk->Index()));
354
355 MOZ_ASSERT(!mMemoryOnly);
356 MOZ_ASSERT(!mOpeningFile);
357 MOZ_ASSERT(mHandle);
358
359 if (aChunk->mDiscardedChunk) {
360 // We discard only unused chunks, so it must be still unused when writing
361 // data finishes.
362 MOZ_ASSERT(aChunk->mRefCnt == 2);
363 aChunk->mActiveChunk = false;
364 ReleaseOutsideLock(
365 RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));
366
367 DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
368 MOZ_ASSERT(removed);
369 return NS_OK;
370 }
371
372 if (NS_FAILED(aResult)) {
373 SetError(aResult);
374 }
375
376 if (NS_SUCCEEDED(aResult) && !aChunk->IsDirty()) {
377 // update hash value in metadata
378 mMetadata->SetHash(aChunk->Index(), aChunk->Hash());
379 }
380
381 // notify listeners if there is any
382 if (HaveChunkListeners(aChunk->Index())) {
383 // don't release the chunk since there are some listeners queued
384 rv = NotifyChunkListeners(aChunk->Index(), aResult, aChunk);
385 if (NS_SUCCEEDED(rv)) {
386 MOZ_ASSERT(aChunk->mRefCnt != 2);
387 return NS_OK;
388 }
389 }
390
391 if (aChunk->mRefCnt != 2) {
392 LOG(
393 ("CacheFile::OnChunkWritten() - Chunk is still used [this=%p, chunk=%p,"
394 " refcnt=%" PRIuPTR "]",
395 this, aChunk, aChunk->mRefCnt.get()));
396
397 return NS_OK;
398 }
399
400 if (aChunk->IsDirty()) {
401 LOG(
402 ("CacheFile::OnChunkWritten() - Unused chunk is dirty. We must go "
403 "through deactivation again. [this=%p, chunk=%p]",
404 this, aChunk));
405
406 deactivateChunkAgain = aChunk;
407 return NS_OK;
408 }
409
410 bool keepChunk = false;
411 if (NS_SUCCEEDED(aResult)) {
412 keepChunk = ShouldCacheChunk(aChunk->Index());
413 LOG(("CacheFile::OnChunkWritten() - %s unused chunk [this=%p, chunk=%p]",
414 keepChunk ? "Caching" : "Releasing", this, aChunk));
415 } else {
416 LOG(
417 ("CacheFile::OnChunkWritten() - Releasing failed chunk [this=%p, "
418 "chunk=%p]",
419 this, aChunk));
420 }
421
422 RemoveChunkInternal(aChunk, keepChunk);
423
424 WriteMetadataIfNeededLocked();
425
426 return NS_OK;
427 }
428
OnChunkAvailable(nsresult aResult,uint32_t aChunkIdx,CacheFileChunk * aChunk)429 nsresult CacheFile::OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
430 CacheFileChunk* aChunk) {
431 MOZ_CRASH("CacheFile::OnChunkAvailable should not be called!");
432 return NS_ERROR_UNEXPECTED;
433 }
434
OnChunkUpdated(CacheFileChunk * aChunk)435 nsresult CacheFile::OnChunkUpdated(CacheFileChunk* aChunk) {
436 MOZ_CRASH("CacheFile::OnChunkUpdated should not be called!");
437 return NS_ERROR_UNEXPECTED;
438 }
439
OnFileOpened(CacheFileHandle * aHandle,nsresult aResult)440 nsresult CacheFile::OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) {
441 // Using an 'auto' class to perform doom or fail the listener
442 // outside the CacheFile's lock.
443 class AutoFailDoomListener {
444 public:
445 explicit AutoFailDoomListener(CacheFileHandle* aHandle)
446 : mHandle(aHandle), mAlreadyDoomed(false) {}
447 ~AutoFailDoomListener() {
448 if (!mListener) return;
449
450 if (mHandle) {
451 if (mAlreadyDoomed) {
452 mListener->OnFileDoomed(mHandle, NS_OK);
453 } else {
454 CacheFileIOManager::DoomFile(mHandle, mListener);
455 }
456 } else {
457 mListener->OnFileDoomed(nullptr, NS_ERROR_NOT_AVAILABLE);
458 }
459 }
460
461 CacheFileHandle* mHandle;
462 nsCOMPtr<CacheFileIOListener> mListener;
463 bool mAlreadyDoomed;
464 } autoDoom(aHandle);
465
466 nsCOMPtr<CacheFileListener> listener;
467 bool isNew = false;
468 nsresult retval = NS_OK;
469
470 {
471 CacheFileAutoLock lock(this);
472
473 MOZ_ASSERT(mOpeningFile);
474 MOZ_ASSERT((NS_SUCCEEDED(aResult) && aHandle) ||
475 (NS_FAILED(aResult) && !aHandle));
476 MOZ_ASSERT((mListener && !mMetadata) || // !createNew
477 (!mListener && mMetadata)); // createNew
478 MOZ_ASSERT(!mMemoryOnly || mMetadata); // memory-only was set on new entry
479
480 LOG(("CacheFile::OnFileOpened() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
481 this, static_cast<uint32_t>(aResult), aHandle));
482
483 mOpeningFile = false;
484
485 autoDoom.mListener.swap(mDoomAfterOpenListener);
486
487 if (mMemoryOnly) {
488 // We can be here only in case the entry was initilized as createNew and
489 // SetMemoryOnly() was called.
490
491 // Just don't store the handle into mHandle and exit
492 autoDoom.mAlreadyDoomed = true;
493 return NS_OK;
494 }
495
496 if (NS_FAILED(aResult)) {
497 if (mMetadata) {
498 // This entry was initialized as createNew, just switch to memory-only
499 // mode.
500 NS_WARNING("Forcing memory-only entry since OpenFile failed");
501 LOG(
502 ("CacheFile::OnFileOpened() - CacheFileIOManager::OpenFile() "
503 "failed asynchronously. We can continue in memory-only mode since "
504 "aCreateNew == true. [this=%p]",
505 this));
506
507 mMemoryOnly = true;
508 return NS_OK;
509 }
510
511 if (aResult == NS_ERROR_FILE_INVALID_PATH) {
512 // CacheFileIOManager doesn't have mCacheDirectory, switch to
513 // memory-only mode.
514 NS_WARNING(
515 "Forcing memory-only entry since CacheFileIOManager doesn't "
516 "have mCacheDirectory.");
517 LOG(
518 ("CacheFile::OnFileOpened() - CacheFileIOManager doesn't have "
519 "mCacheDirectory, initializing entry as memory-only. [this=%p]",
520 this));
521
522 mMemoryOnly = true;
523 mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey);
524 mReady = true;
525 mDataSize = mMetadata->Offset();
526
527 isNew = true;
528 retval = NS_OK;
529 } else {
530 // CacheFileIOManager::OpenFile() failed for another reason.
531 isNew = false;
532 retval = aResult;
533 }
534
535 mListener.swap(listener);
536 } else {
537 mHandle = aHandle;
538 if (NS_FAILED(mStatus)) {
539 CacheFileIOManager::DoomFile(mHandle, nullptr);
540 }
541
542 if (mMetadata) {
543 InitIndexEntry();
544
545 // The entry was initialized as createNew, don't try to read metadata.
546 mMetadata->SetHandle(mHandle);
547
548 // Write all cached chunks, otherwise they may stay unwritten.
549 for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
550 uint32_t idx = iter.Key();
551 RefPtr<CacheFileChunk>& chunk = iter.Data();
552
553 LOG(("CacheFile::OnFileOpened() - write [this=%p, idx=%u, chunk=%p]",
554 this, idx, chunk.get()));
555
556 mChunks.Put(idx, RefPtr{chunk});
557 chunk->mFile = this;
558 chunk->mActiveChunk = true;
559
560 MOZ_ASSERT(chunk->IsReady());
561
562 // This would be cleaner if we had an nsRefPtr constructor that took
563 // a RefPtr<Derived>.
564 ReleaseOutsideLock(std::move(chunk));
565
566 iter.Remove();
567 }
568
569 return NS_OK;
570 }
571 }
572 }
573
574 if (listener) {
575 listener->OnFileReady(retval, isNew);
576 return NS_OK;
577 }
578
579 MOZ_ASSERT(NS_SUCCEEDED(aResult));
580 MOZ_ASSERT(!mMetadata);
581 MOZ_ASSERT(mListener);
582
583 mMetadata = new CacheFileMetadata(mHandle, mKey);
584 mMetadata->ReadMetadata(this);
585 return NS_OK;
586 }
587
OnDataWritten(CacheFileHandle * aHandle,const char * aBuf,nsresult aResult)588 nsresult CacheFile::OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
589 nsresult aResult) {
590 MOZ_CRASH("CacheFile::OnDataWritten should not be called!");
591 return NS_ERROR_UNEXPECTED;
592 }
593
OnDataRead(CacheFileHandle * aHandle,char * aBuf,nsresult aResult)594 nsresult CacheFile::OnDataRead(CacheFileHandle* aHandle, char* aBuf,
595 nsresult aResult) {
596 MOZ_CRASH("CacheFile::OnDataRead should not be called!");
597 return NS_ERROR_UNEXPECTED;
598 }
599
OnMetadataRead(nsresult aResult)600 nsresult CacheFile::OnMetadataRead(nsresult aResult) {
601 MOZ_ASSERT(mListener);
602
603 LOG(("CacheFile::OnMetadataRead() [this=%p, rv=0x%08" PRIx32 "]", this,
604 static_cast<uint32_t>(aResult)));
605
606 bool isNew = false;
607 if (NS_SUCCEEDED(aResult)) {
608 mPinned = mMetadata->Pinned();
609 mReady = true;
610 mDataSize = mMetadata->Offset();
611 if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
612 isNew = true;
613 mMetadata->MarkDirty();
614 } else {
615 const char* altData = mMetadata->GetElement(CacheFileUtils::kAltDataKey);
616 if (altData && (NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo(
617 altData, &mAltDataOffset, &mAltDataType)) ||
618 (mAltDataOffset > mDataSize))) {
619 // alt-metadata cannot be parsed or alt-data offset is invalid
620 mMetadata->InitEmptyMetadata();
621 isNew = true;
622 mAltDataOffset = -1;
623 mAltDataType.Truncate();
624 mDataSize = 0;
625 } else {
626 CacheFileAutoLock lock(this);
627 PreloadChunks(0);
628 }
629 }
630
631 InitIndexEntry();
632 }
633
634 nsCOMPtr<CacheFileListener> listener;
635 mListener.swap(listener);
636 listener->OnFileReady(aResult, isNew);
637 return NS_OK;
638 }
639
OnMetadataWritten(nsresult aResult)640 nsresult CacheFile::OnMetadataWritten(nsresult aResult) {
641 CacheFileAutoLock lock(this);
642
643 LOG(("CacheFile::OnMetadataWritten() [this=%p, rv=0x%08" PRIx32 "]", this,
644 static_cast<uint32_t>(aResult)));
645
646 MOZ_ASSERT(mWritingMetadata);
647 mWritingMetadata = false;
648
649 MOZ_ASSERT(!mMemoryOnly);
650 MOZ_ASSERT(!mOpeningFile);
651
652 if (NS_WARN_IF(NS_FAILED(aResult))) {
653 // TODO close streams with an error ???
654 SetError(aResult);
655 }
656
657 if (mOutput || mInputs.Length() || mChunks.Count()) return NS_OK;
658
659 if (IsDirty()) WriteMetadataIfNeededLocked();
660
661 if (!mWritingMetadata) {
662 LOG(("CacheFile::OnMetadataWritten() - Releasing file handle [this=%p]",
663 this));
664 CacheFileIOManager::ReleaseNSPRHandle(mHandle);
665 }
666
667 return NS_OK;
668 }
669
OnFileDoomed(CacheFileHandle * aHandle,nsresult aResult)670 nsresult CacheFile::OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) {
671 nsCOMPtr<CacheFileListener> listener;
672
673 {
674 CacheFileAutoLock lock(this);
675
676 MOZ_ASSERT(mListener);
677
678 LOG(("CacheFile::OnFileDoomed() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
679 this, static_cast<uint32_t>(aResult), aHandle));
680
681 mListener.swap(listener);
682 }
683
684 listener->OnFileDoomed(aResult);
685 return NS_OK;
686 }
687
OnEOFSet(CacheFileHandle * aHandle,nsresult aResult)688 nsresult CacheFile::OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) {
689 MOZ_CRASH("CacheFile::OnEOFSet should not be called!");
690 return NS_ERROR_UNEXPECTED;
691 }
692
OnFileRenamed(CacheFileHandle * aHandle,nsresult aResult)693 nsresult CacheFile::OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) {
694 MOZ_CRASH("CacheFile::OnFileRenamed should not be called!");
695 return NS_ERROR_UNEXPECTED;
696 }
697
IsKilled()698 bool CacheFile::IsKilled() {
699 bool killed = mKill;
700 if (killed) {
701 LOG(("CacheFile is killed, this=%p", this));
702 }
703
704 return killed;
705 }
706
OpenInputStream(nsICacheEntry * aEntryHandle,nsIInputStream ** _retval)707 nsresult CacheFile::OpenInputStream(nsICacheEntry* aEntryHandle,
708 nsIInputStream** _retval) {
709 CacheFileAutoLock lock(this);
710
711 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
712
713 if (!mReady) {
714 LOG(("CacheFile::OpenInputStream() - CacheFile is not ready [this=%p]",
715 this));
716
717 return NS_ERROR_NOT_AVAILABLE;
718 }
719
720 if (NS_FAILED(mStatus)) {
721 LOG(
722 ("CacheFile::OpenInputStream() - CacheFile is in a failure state "
723 "[this=%p, status=0x%08" PRIx32 "]",
724 this, static_cast<uint32_t>(mStatus)));
725
726 // Don't allow opening the input stream when this CacheFile is in
727 // a failed state. This is the only way to protect consumers correctly
728 // from reading a broken entry. When the file is in the failed state,
729 // it's also doomed, so reopening the entry won't make any difference -
730 // data will still be inaccessible anymore. Note that for just doomed
731 // files, we must allow reading the data.
732 return mStatus;
733 }
734
735 // Once we open input stream we no longer allow preloading of chunks without
736 // input stream, i.e. we will no longer keep first few chunks preloaded when
737 // the last input stream is closed.
738 mPreloadWithoutInputStreams = false;
739
740 CacheFileInputStream* input =
741 new CacheFileInputStream(this, aEntryHandle, false);
742 LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]",
743 input, this));
744
745 mInputs.AppendElement(input);
746 NS_ADDREF(input);
747
748 mDataAccessed = true;
749 *_retval = do_AddRef(input).take();
750 return NS_OK;
751 }
752
OpenAlternativeInputStream(nsICacheEntry * aEntryHandle,const char * aAltDataType,nsIInputStream ** _retval)753 nsresult CacheFile::OpenAlternativeInputStream(nsICacheEntry* aEntryHandle,
754 const char* aAltDataType,
755 nsIInputStream** _retval) {
756 CacheFileAutoLock lock(this);
757
758 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
759
760 if (NS_WARN_IF(!mReady)) {
761 LOG(
762 ("CacheFile::OpenAlternativeInputStream() - CacheFile is not ready "
763 "[this=%p]",
764 this));
765 return NS_ERROR_NOT_AVAILABLE;
766 }
767
768 if (mAltDataOffset == -1) {
769 LOG(
770 ("CacheFile::OpenAlternativeInputStream() - Alternative data is not "
771 "available [this=%p]",
772 this));
773 return NS_ERROR_NOT_AVAILABLE;
774 }
775
776 if (NS_FAILED(mStatus)) {
777 LOG(
778 ("CacheFile::OpenAlternativeInputStream() - CacheFile is in a failure "
779 "state [this=%p, status=0x%08" PRIx32 "]",
780 this, static_cast<uint32_t>(mStatus)));
781
782 // Don't allow opening the input stream when this CacheFile is in
783 // a failed state. This is the only way to protect consumers correctly
784 // from reading a broken entry. When the file is in the failed state,
785 // it's also doomed, so reopening the entry won't make any difference -
786 // data will still be inaccessible anymore. Note that for just doomed
787 // files, we must allow reading the data.
788 return mStatus;
789 }
790
791 if (mAltDataType != aAltDataType) {
792 LOG(
793 ("CacheFile::OpenAlternativeInputStream() - Alternative data is of a "
794 "different type than requested [this=%p, availableType=%s, "
795 "requestedType=%s]",
796 this, mAltDataType.get(), aAltDataType));
797 return NS_ERROR_NOT_AVAILABLE;
798 }
799
800 // Once we open input stream we no longer allow preloading of chunks without
801 // input stream, i.e. we will no longer keep first few chunks preloaded when
802 // the last input stream is closed.
803 mPreloadWithoutInputStreams = false;
804
805 CacheFileInputStream* input =
806 new CacheFileInputStream(this, aEntryHandle, true);
807
808 LOG(
809 ("CacheFile::OpenAlternativeInputStream() - Creating new input stream %p "
810 "[this=%p]",
811 input, this));
812
813 mInputs.AppendElement(input);
814 NS_ADDREF(input);
815
816 mDataAccessed = true;
817 *_retval = do_AddRef(input).take();
818
819 return NS_OK;
820 }
821
OpenOutputStream(CacheOutputCloseListener * aCloseListener,nsIOutputStream ** _retval)822 nsresult CacheFile::OpenOutputStream(CacheOutputCloseListener* aCloseListener,
823 nsIOutputStream** _retval) {
824 CacheFileAutoLock lock(this);
825
826 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
827
828 nsresult rv;
829
830 if (!mReady) {
831 LOG(("CacheFile::OpenOutputStream() - CacheFile is not ready [this=%p]",
832 this));
833
834 return NS_ERROR_NOT_AVAILABLE;
835 }
836
837 if (mOutput) {
838 LOG(
839 ("CacheFile::OpenOutputStream() - We already have output stream %p "
840 "[this=%p]",
841 mOutput, this));
842
843 return NS_ERROR_NOT_AVAILABLE;
844 }
845
846 if (NS_FAILED(mStatus)) {
847 LOG(
848 ("CacheFile::OpenOutputStream() - CacheFile is in a failure state "
849 "[this=%p, status=0x%08" PRIx32 "]",
850 this, static_cast<uint32_t>(mStatus)));
851
852 // The CacheFile is already doomed. It make no sense to allow to write any
853 // data to such entry.
854 return mStatus;
855 }
856
857 // Fail if there is any input stream opened for alternative data
858 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
859 if (mInputs[i]->IsAlternativeData()) {
860 return NS_ERROR_NOT_AVAILABLE;
861 }
862 }
863
864 if (mAltDataOffset != -1) {
865 // Remove alt-data
866 rv = Truncate(mAltDataOffset);
867 if (NS_FAILED(rv)) {
868 LOG(
869 ("CacheFile::OpenOutputStream() - Truncating alt-data failed "
870 "[rv=0x%08" PRIx32 "]",
871 static_cast<uint32_t>(rv)));
872 return rv;
873 }
874 SetAltMetadata(nullptr);
875 mAltDataOffset = -1;
876 mAltDataType.Truncate();
877 }
878
879 // Once we open output stream we no longer allow preloading of chunks without
880 // input stream. There is no reason to believe that some input stream will be
881 // opened soon. Otherwise we would cache unused chunks of all newly created
882 // entries until the CacheFile is destroyed.
883 mPreloadWithoutInputStreams = false;
884
885 mOutput = new CacheFileOutputStream(this, aCloseListener, false);
886
887 LOG(
888 ("CacheFile::OpenOutputStream() - Creating new output stream %p "
889 "[this=%p]",
890 mOutput, this));
891
892 mDataAccessed = true;
893 *_retval = do_AddRef(mOutput).take();
894 return NS_OK;
895 }
896
OpenAlternativeOutputStream(CacheOutputCloseListener * aCloseListener,const char * aAltDataType,nsIAsyncOutputStream ** _retval)897 nsresult CacheFile::OpenAlternativeOutputStream(
898 CacheOutputCloseListener* aCloseListener, const char* aAltDataType,
899 nsIAsyncOutputStream** _retval) {
900 CacheFileAutoLock lock(this);
901
902 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
903
904 if (!mReady) {
905 LOG(
906 ("CacheFile::OpenAlternativeOutputStream() - CacheFile is not ready "
907 "[this=%p]",
908 this));
909
910 return NS_ERROR_NOT_AVAILABLE;
911 }
912
913 if (mOutput) {
914 LOG(
915 ("CacheFile::OpenAlternativeOutputStream() - We already have output "
916 "stream %p [this=%p]",
917 mOutput, this));
918
919 return NS_ERROR_NOT_AVAILABLE;
920 }
921
922 if (NS_FAILED(mStatus)) {
923 LOG(
924 ("CacheFile::OpenAlternativeOutputStream() - CacheFile is in a failure "
925 "state [this=%p, status=0x%08" PRIx32 "]",
926 this, static_cast<uint32_t>(mStatus)));
927
928 // The CacheFile is already doomed. It make no sense to allow to write any
929 // data to such entry.
930 return mStatus;
931 }
932
933 // Fail if there is any input stream opened for alternative data
934 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
935 if (mInputs[i]->IsAlternativeData()) {
936 return NS_ERROR_NOT_AVAILABLE;
937 }
938 }
939
940 nsresult rv;
941
942 if (mAltDataOffset != -1) {
943 // Truncate old alt-data
944 rv = Truncate(mAltDataOffset);
945 if (NS_FAILED(rv)) {
946 LOG(
947 ("CacheFile::OpenAlternativeOutputStream() - Truncating old alt-data "
948 "failed [rv=0x%08" PRIx32 "]",
949 static_cast<uint32_t>(rv)));
950 return rv;
951 }
952 } else {
953 mAltDataOffset = mDataSize;
954 }
955
956 nsAutoCString altMetadata;
957 CacheFileUtils::BuildAlternativeDataInfo(aAltDataType, mAltDataOffset,
958 altMetadata);
959 rv = SetAltMetadata(altMetadata.get());
960 if (NS_FAILED(rv)) {
961 LOG(
962 ("CacheFile::OpenAlternativeOutputStream() - Set Metadata for alt-data"
963 "failed [rv=0x%08" PRIx32 "]",
964 static_cast<uint32_t>(rv)));
965 return rv;
966 }
967
968 // Once we open output stream we no longer allow preloading of chunks without
969 // input stream. There is no reason to believe that some input stream will be
970 // opened soon. Otherwise we would cache unused chunks of all newly created
971 // entries until the CacheFile is destroyed.
972 mPreloadWithoutInputStreams = false;
973
974 mOutput = new CacheFileOutputStream(this, aCloseListener, true);
975
976 LOG(
977 ("CacheFile::OpenAlternativeOutputStream() - Creating new output stream "
978 "%p [this=%p]",
979 mOutput, this));
980
981 mDataAccessed = true;
982 mAltDataType = aAltDataType;
983 *_retval = do_AddRef(mOutput).take();
984 return NS_OK;
985 }
986
SetMemoryOnly()987 nsresult CacheFile::SetMemoryOnly() {
988 CacheFileAutoLock lock(this);
989
990 LOG(("CacheFile::SetMemoryOnly() mMemoryOnly=%d [this=%p]", mMemoryOnly,
991 this));
992
993 if (mMemoryOnly) return NS_OK;
994
995 MOZ_ASSERT(mReady);
996
997 if (!mReady) {
998 LOG(("CacheFile::SetMemoryOnly() - CacheFile is not ready [this=%p]",
999 this));
1000
1001 return NS_ERROR_NOT_AVAILABLE;
1002 }
1003
1004 if (mDataAccessed) {
1005 LOG(("CacheFile::SetMemoryOnly() - Data was already accessed [this=%p]",
1006 this));
1007 return NS_ERROR_NOT_AVAILABLE;
1008 }
1009
1010 // TODO what to do when this isn't a new entry and has an existing metadata???
1011 mMemoryOnly = true;
1012 return NS_OK;
1013 }
1014
Doom(CacheFileListener * aCallback)1015 nsresult CacheFile::Doom(CacheFileListener* aCallback) {
1016 LOG(("CacheFile::Doom() [this=%p, listener=%p]", this, aCallback));
1017
1018 CacheFileAutoLock lock(this);
1019
1020 return DoomLocked(aCallback);
1021 }
1022
DoomLocked(CacheFileListener * aCallback)1023 nsresult CacheFile::DoomLocked(CacheFileListener* aCallback) {
1024 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
1025
1026 LOG(("CacheFile::DoomLocked() [this=%p, listener=%p]", this, aCallback));
1027
1028 nsresult rv = NS_OK;
1029
1030 if (mMemoryOnly) {
1031 return NS_ERROR_FILE_NOT_FOUND;
1032 }
1033
1034 if (mHandle && mHandle->IsDoomed()) {
1035 return NS_ERROR_FILE_NOT_FOUND;
1036 }
1037
1038 nsCOMPtr<CacheFileIOListener> listener;
1039 if (aCallback || !mHandle) {
1040 listener = new DoomFileHelper(aCallback);
1041 }
1042 if (mHandle) {
1043 rv = CacheFileIOManager::DoomFile(mHandle, listener);
1044 } else if (mOpeningFile) {
1045 mDoomAfterOpenListener = listener;
1046 }
1047
1048 return rv;
1049 }
1050
ThrowMemoryCachedData()1051 nsresult CacheFile::ThrowMemoryCachedData() {
1052 CacheFileAutoLock lock(this);
1053
1054 LOG(("CacheFile::ThrowMemoryCachedData() [this=%p]", this));
1055
1056 if (mMemoryOnly) {
1057 // This method should not be called when the CacheFile was initialized as
1058 // memory-only, but it can be called when CacheFile end up as memory-only
1059 // due to e.g. IO failure since CacheEntry doesn't know it.
1060 LOG(
1061 ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
1062 "entry is memory-only. [this=%p]",
1063 this));
1064
1065 return NS_ERROR_NOT_AVAILABLE;
1066 }
1067
1068 if (mOpeningFile) {
1069 // mayhemer, note: we shouldn't get here, since CacheEntry prevents loading
1070 // entries from being purged.
1071
1072 LOG(
1073 ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
1074 "entry is still opening the file [this=%p]",
1075 this));
1076
1077 return NS_ERROR_ABORT;
1078 }
1079
1080 // We cannot release all cached chunks since we need to keep preloaded chunks
1081 // in memory. See initialization of mPreloadChunkCount for explanation.
1082 CleanUpCachedChunks();
1083
1084 return NS_OK;
1085 }
1086
GetElement(const char * aKey,char ** _retval)1087 nsresult CacheFile::GetElement(const char* aKey, char** _retval) {
1088 CacheFileAutoLock lock(this);
1089 MOZ_ASSERT(mMetadata);
1090 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1091
1092 const char* value;
1093 value = mMetadata->GetElement(aKey);
1094 if (!value) return NS_ERROR_NOT_AVAILABLE;
1095
1096 *_retval = NS_xstrdup(value);
1097 return NS_OK;
1098 }
1099
SetElement(const char * aKey,const char * aValue)1100 nsresult CacheFile::SetElement(const char* aKey, const char* aValue) {
1101 CacheFileAutoLock lock(this);
1102
1103 LOG(("CacheFile::SetElement() this=%p", this));
1104
1105 MOZ_ASSERT(mMetadata);
1106 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1107
1108 if (!strcmp(aKey, CacheFileUtils::kAltDataKey)) {
1109 NS_ERROR(
1110 "alt-data element is reserved for internal use and must not be "
1111 "changed via CacheFile::SetElement()");
1112 return NS_ERROR_FAILURE;
1113 }
1114
1115 PostWriteTimer();
1116 return mMetadata->SetElement(aKey, aValue);
1117 }
1118
VisitMetaData(nsICacheEntryMetaDataVisitor * aVisitor)1119 nsresult CacheFile::VisitMetaData(nsICacheEntryMetaDataVisitor* aVisitor) {
1120 CacheFileAutoLock lock(this);
1121 MOZ_ASSERT(mMetadata);
1122 MOZ_ASSERT(mReady);
1123 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1124
1125 mMetadata->Visit(aVisitor);
1126 return NS_OK;
1127 }
1128
ElementsSize(uint32_t * _retval)1129 nsresult CacheFile::ElementsSize(uint32_t* _retval) {
1130 CacheFileAutoLock lock(this);
1131
1132 if (!mMetadata) return NS_ERROR_NOT_AVAILABLE;
1133
1134 *_retval = mMetadata->ElementsSize();
1135 return NS_OK;
1136 }
1137
SetExpirationTime(uint32_t aExpirationTime)1138 nsresult CacheFile::SetExpirationTime(uint32_t aExpirationTime) {
1139 CacheFileAutoLock lock(this);
1140
1141 LOG(("CacheFile::SetExpirationTime() this=%p, expiration=%u", this,
1142 aExpirationTime));
1143
1144 MOZ_ASSERT(mMetadata);
1145 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1146
1147 PostWriteTimer();
1148 mMetadata->SetExpirationTime(aExpirationTime);
1149 return NS_OK;
1150 }
1151
GetExpirationTime(uint32_t * _retval)1152 nsresult CacheFile::GetExpirationTime(uint32_t* _retval) {
1153 CacheFileAutoLock lock(this);
1154 MOZ_ASSERT(mMetadata);
1155 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1156
1157 *_retval = mMetadata->GetExpirationTime();
1158 return NS_OK;
1159 }
1160
SetFrecency(uint32_t aFrecency)1161 nsresult CacheFile::SetFrecency(uint32_t aFrecency) {
1162 CacheFileAutoLock lock(this);
1163
1164 LOG(("CacheFile::SetFrecency() this=%p, frecency=%u", this, aFrecency));
1165
1166 MOZ_ASSERT(mMetadata);
1167 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1168
1169 PostWriteTimer();
1170
1171 if (mHandle && !mHandle->IsDoomed())
1172 CacheFileIOManager::UpdateIndexEntry(mHandle, &aFrecency, nullptr, nullptr,
1173 nullptr, nullptr);
1174
1175 mMetadata->SetFrecency(aFrecency);
1176 return NS_OK;
1177 }
1178
GetFrecency(uint32_t * _retval)1179 nsresult CacheFile::GetFrecency(uint32_t* _retval) {
1180 CacheFileAutoLock lock(this);
1181 MOZ_ASSERT(mMetadata);
1182 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1183 *_retval = mMetadata->GetFrecency();
1184 return NS_OK;
1185 }
1186
SetNetworkTimes(uint64_t aOnStartTime,uint64_t aOnStopTime)1187 nsresult CacheFile::SetNetworkTimes(uint64_t aOnStartTime,
1188 uint64_t aOnStopTime) {
1189 CacheFileAutoLock lock(this);
1190
1191 LOG(("CacheFile::SetNetworkTimes() this=%p, aOnStartTime=%" PRIu64
1192 ", aOnStopTime=%" PRIu64 "",
1193 this, aOnStartTime, aOnStopTime));
1194
1195 MOZ_ASSERT(mMetadata);
1196 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1197
1198 PostWriteTimer();
1199
1200 nsAutoCString onStartTime;
1201 onStartTime.AppendInt(aOnStartTime);
1202 nsresult rv =
1203 mMetadata->SetElement("net-response-time-onstart", onStartTime.get());
1204 if (NS_WARN_IF(NS_FAILED(rv))) {
1205 return rv;
1206 }
1207
1208 nsAutoCString onStopTime;
1209 onStopTime.AppendInt(aOnStopTime);
1210 rv = mMetadata->SetElement("net-response-time-onstop", onStopTime.get());
1211 if (NS_WARN_IF(NS_FAILED(rv))) {
1212 return rv;
1213 }
1214
1215 uint16_t onStartTime16 = aOnStartTime <= kIndexTimeOutOfBound
1216 ? aOnStartTime
1217 : kIndexTimeOutOfBound;
1218 uint16_t onStopTime16 =
1219 aOnStopTime <= kIndexTimeOutOfBound ? aOnStopTime : kIndexTimeOutOfBound;
1220
1221 if (mHandle && !mHandle->IsDoomed()) {
1222 CacheFileIOManager::UpdateIndexEntry(
1223 mHandle, nullptr, nullptr, &onStartTime16, &onStopTime16, nullptr);
1224 }
1225 return NS_OK;
1226 }
1227
GetOnStartTime(uint64_t * _retval)1228 nsresult CacheFile::GetOnStartTime(uint64_t* _retval) {
1229 CacheFileAutoLock lock(this);
1230
1231 MOZ_ASSERT(mMetadata);
1232 const char* onStartTimeStr =
1233 mMetadata->GetElement("net-response-time-onstart");
1234 if (!onStartTimeStr) {
1235 return NS_ERROR_NOT_AVAILABLE;
1236 }
1237 nsresult rv;
1238 *_retval = nsDependentCString(onStartTimeStr).ToInteger64(&rv);
1239 MOZ_ASSERT(NS_SUCCEEDED(rv));
1240 return NS_OK;
1241 }
1242
GetOnStopTime(uint64_t * _retval)1243 nsresult CacheFile::GetOnStopTime(uint64_t* _retval) {
1244 CacheFileAutoLock lock(this);
1245
1246 MOZ_ASSERT(mMetadata);
1247 const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
1248 if (!onStopTimeStr) {
1249 return NS_ERROR_NOT_AVAILABLE;
1250 }
1251 nsresult rv;
1252 *_retval = nsDependentCString(onStopTimeStr).ToInteger64(&rv);
1253 MOZ_ASSERT(NS_SUCCEEDED(rv));
1254 return NS_OK;
1255 }
1256
SetContentType(uint8_t aContentType)1257 nsresult CacheFile::SetContentType(uint8_t aContentType) {
1258 CacheFileAutoLock lock(this);
1259
1260 LOG(("CacheFile::SetContentType() this=%p, contentType=%u", this,
1261 aContentType));
1262
1263 MOZ_ASSERT(mMetadata);
1264 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1265
1266 PostWriteTimer();
1267
1268 // Save the content type to metadata for case we need to rebuild the index.
1269 nsAutoCString contentType;
1270 contentType.AppendInt(aContentType);
1271 nsresult rv = mMetadata->SetElement("ctid", contentType.get());
1272 if (NS_WARN_IF(NS_FAILED(rv))) {
1273 return rv;
1274 }
1275
1276 if (mHandle && !mHandle->IsDoomed()) {
1277 CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, nullptr,
1278 nullptr, &aContentType);
1279 }
1280 return NS_OK;
1281 }
1282
// Writes the alt-data descriptor into the metadata under the reserved
// kAltDataKey (nullptr removes it). On failure the alt-data state is rolled
// back so the entry consistently reports "no alt-data"; the index entry's
// hasAltData flag is updated in both cases. Returns the SetElement() status.
// Caller must hold the lock.
nsresult CacheFile::SetAltMetadata(const char* aAltMetadata) {
  AssertOwnsLock();
  LOG(("CacheFile::SetAltMetadata() this=%p, aAltMetadata=%s", this,
       aAltMetadata ? aAltMetadata : ""));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  PostWriteTimer();

  nsresult rv =
      mMetadata->SetElement(CacheFileUtils::kAltDataKey, aAltMetadata);

  bool hasAltData = !!aAltMetadata;

  if (NS_FAILED(rv)) {
    // Removing element shouldn't fail because it doesn't allocate memory.
    mMetadata->SetElement(CacheFileUtils::kAltDataKey, nullptr);

    // Roll back the in-memory alt-data state so it matches the metadata.
    mAltDataOffset = -1;
    mAltDataType.Truncate();
    hasAltData = false;
  }

  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, &hasAltData, nullptr,
                                         nullptr, nullptr);
  }
  return rv;
}
1313
GetLastModified(uint32_t * _retval)1314 nsresult CacheFile::GetLastModified(uint32_t* _retval) {
1315 CacheFileAutoLock lock(this);
1316 MOZ_ASSERT(mMetadata);
1317 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1318
1319 *_retval = mMetadata->GetLastModified();
1320 return NS_OK;
1321 }
1322
GetLastFetched(uint32_t * _retval)1323 nsresult CacheFile::GetLastFetched(uint32_t* _retval) {
1324 CacheFileAutoLock lock(this);
1325 MOZ_ASSERT(mMetadata);
1326 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1327
1328 *_retval = mMetadata->GetLastFetched();
1329 return NS_OK;
1330 }
1331
GetFetchCount(uint32_t * _retval)1332 nsresult CacheFile::GetFetchCount(uint32_t* _retval) {
1333 CacheFileAutoLock lock(this);
1334 MOZ_ASSERT(mMetadata);
1335 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1336 *_retval = mMetadata->GetFetchCount();
1337 return NS_OK;
1338 }
1339
// Returns the size the entry occupies on disk, in kibibytes, as reported by
// the file handle. Fails when there is no handle (memory-only entry or the
// file is still opening).
// NOTE(review): unlike the other accessors here this does not take
// CacheFileAutoLock; mHandle is read without synchronization — presumably
// callers tolerate a racy snapshot, but confirm this is intentional.
nsresult CacheFile::GetDiskStorageSizeInKB(uint32_t* aDiskStorageSize) {
  if (!mHandle) {
    return NS_ERROR_NOT_AVAILABLE;
  }

  *aDiskStorageSize = mHandle->FileSizeInK();
  return NS_OK;
}
1348
OnFetched()1349 nsresult CacheFile::OnFetched() {
1350 CacheFileAutoLock lock(this);
1351
1352 LOG(("CacheFile::OnFetched() this=%p", this));
1353
1354 MOZ_ASSERT(mMetadata);
1355 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1356
1357 PostWriteTimer();
1358
1359 mMetadata->OnFetched();
1360 return NS_OK;
1361 }
1362
// Acquires the file's mutex. Prefer the RAII helper CacheFileAutoLock over
// calling Lock()/Unlock() directly.
void CacheFile::Lock() { mLock.Lock(); }
1364
Unlock()1365 void CacheFile::Unlock() {
1366 // move the elements out of mObjsToRelease
1367 // so that they can be released after we unlock
1368 nsTArray<RefPtr<nsISupports>> objs;
1369 objs.SwapElements(mObjsToRelease);
1370
1371 mLock.Unlock();
1372 }
1373
// Debug aid: asserts that the calling thread currently holds mLock.
void CacheFile::AssertOwnsLock() const { mLock.AssertCurrentThreadOwns(); }
1375
// Defers the release of aObject until the lock is dropped (see Unlock()),
// so that the final Release() cannot re-enter CacheFile code under our lock.
// Caller must hold the lock.
void CacheFile::ReleaseOutsideLock(RefPtr<nsISupports> aObject) {
  AssertOwnsLock();

  mObjsToRelease.AppendElement(std::move(aObject));
}
1381
// Returns (via _retval) the chunk at aIndex, with behavior depending on the
// caller type:
//  - WRITER always gets the chunk synchronously, creating it and padding any
//    gap with zeroes as needed.
//  - READER gets the chunk synchronously only when it is ready; otherwise
//    aCallback is queued and _retval stays null.
//  - PRELOADER only starts an asynchronous load of a not-yet-loaded chunk;
//    _retval is always left null.
// Caller must hold the lock.
nsresult CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
                                   CacheFileChunkListener* aCallback,
                                   CacheFileChunk** _retval) {
  AssertOwnsLock();

  LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%u, caller=%d, listener=%p]",
       this, aIndex, aCaller, aCallback));

  MOZ_ASSERT(mReady);
  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
  MOZ_ASSERT((aCaller == READER && aCallback) ||
             (aCaller == WRITER && !aCallback) ||
             (aCaller == PRELOADER && !aCallback));

  // Preload chunks from disk when this is disk backed entry and the listener
  // is reader.
  bool preload = !mMemoryOnly && (aCaller == READER);

  nsresult rv;

  // Case 1: the chunk is already active in mChunks.
  RefPtr<CacheFileChunk> chunk;
  if (mChunks.Get(aIndex, getter_AddRefs(chunk))) {
    LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
         chunk.get(), this));

    // Preloader calls this method to preload only non-loaded chunks.
    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");

    // We might get failed chunk between releasing the lock in
    // CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read
    rv = chunk->GetStatus();
    if (NS_FAILED(rv)) {
      SetError(rv);
      LOG(
          ("CacheFile::GetChunkLocked() - Found failed chunk in mChunks "
           "[this=%p]",
           this));
      return rv;
    }

    if (chunk->IsReady() || aCaller == WRITER) {
      chunk.swap(*_retval);
    } else {
      // Reader must wait until the chunk finishes loading.
      QueueChunkListener(aIndex, aCallback);
    }

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }

  // Case 2: the chunk is parked in mCachedChunks; reactivate it.
  if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) {
    LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]",
         chunk.get(), this));

    // Preloader calls this method to preload only non-loaded chunks.
    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");

    mChunks.Put(aIndex, RefPtr{chunk});
    mCachedChunks.Remove(aIndex);
    chunk->mFile = this;
    chunk->mActiveChunk = true;

    MOZ_ASSERT(chunk->IsReady());

    chunk.swap(*_retval);

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }

  // Case 3: the chunk is not in memory; decide by its position vs. mDataSize.
  int64_t off = aIndex * static_cast<int64_t>(kChunkSize);

  if (off < mDataSize) {
    // The chunk's data exists on disk; start reading it.
    // We cannot be here if this is memory only entry since the chunk must exist
    MOZ_ASSERT(!mMemoryOnly);
    if (mMemoryOnly) {
      // If this ever really happen it is better to fail rather than crashing on
      // a null handle.
      LOG(
          ("CacheFile::GetChunkLocked() - Unexpected state! Offset < mDataSize "
           "for memory-only entry. [this=%p, off=%" PRId64
           ", mDataSize=%" PRId64 "]",
           this, off, mDataSize));

      return NS_ERROR_UNEXPECTED;
    }

    chunk = new CacheFileChunk(this, aIndex, aCaller == WRITER);
    mChunks.Put(aIndex, RefPtr{chunk});
    chunk->mActiveChunk = true;

    LOG(
        ("CacheFile::GetChunkLocked() - Reading newly created chunk %p from "
         "the disk [this=%p]",
         chunk.get(), this));

    // Read the chunk from the disk
    rv = chunk->Read(mHandle,
                     std::min(static_cast<uint32_t>(mDataSize - off),
                              static_cast<uint32_t>(kChunkSize)),
                     mMetadata->GetHash(aIndex), this);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      RemoveChunkInternal(chunk, false);
      return rv;
    }

    if (aCaller == WRITER) {
      chunk.swap(*_retval);
    } else if (aCaller != PRELOADER) {
      QueueChunkListener(aIndex, aCallback);
    }

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  } else if (off == mDataSize) {
    if (aCaller == WRITER) {
      // this listener is going to write to the chunk
      chunk = new CacheFileChunk(this, aIndex, true);
      mChunks.Put(aIndex, RefPtr{chunk});
      chunk->mActiveChunk = true;

      LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p [this=%p]",
           chunk.get(), this));

      chunk->InitNew();
      mMetadata->SetHash(aIndex, chunk->Hash());

      if (HaveChunkListeners(aIndex)) {
        rv = NotifyChunkListeners(aIndex, NS_OK, chunk);
        NS_ENSURE_SUCCESS(rv, rv);
      }

      chunk.swap(*_retval);
      return NS_OK;
    }
  } else {
    if (aCaller == WRITER) {
      // this chunk was requested by writer, but we need to fill the gap first

      // Fill with zero the last chunk if it is incomplete
      if (mDataSize % kChunkSize) {
        rv = PadChunkWithZeroes(mDataSize / kChunkSize);
        NS_ENSURE_SUCCESS(rv, rv);

        MOZ_ASSERT(!(mDataSize % kChunkSize));
      }

      uint32_t startChunk = mDataSize / kChunkSize;

      if (mMemoryOnly) {
        // We need to create all missing CacheFileChunks if this is memory-only
        // entry
        for (uint32_t i = startChunk; i < aIndex; i++) {
          rv = PadChunkWithZeroes(i);
          NS_ENSURE_SUCCESS(rv, rv);
        }
      } else {
        // We don't need to create CacheFileChunk for other empty chunks unless
        // there is some input stream waiting for this chunk.

        if (startChunk != aIndex) {
          // Make sure the file contains zeroes at the end of the file
          rv = CacheFileIOManager::TruncateSeekSetEOF(
              mHandle, startChunk * kChunkSize, aIndex * kChunkSize, nullptr);
          NS_ENSURE_SUCCESS(rv, rv);
        }

        for (uint32_t i = startChunk; i < aIndex; i++) {
          if (HaveChunkListeners(i)) {
            rv = PadChunkWithZeroes(i);
            NS_ENSURE_SUCCESS(rv, rv);
          } else {
            mMetadata->SetHash(i, kEmptyChunkHash);
            mDataSize = (i + 1) * kChunkSize;
          }
        }
      }

      // The gap is filled now (mDataSize == off), so the recursive call takes
      // the "off == mDataSize" branch above and creates the chunk.
      MOZ_ASSERT(mDataSize == off);
      rv = GetChunkLocked(aIndex, WRITER, nullptr, getter_AddRefs(chunk));
      NS_ENSURE_SUCCESS(rv, rv);

      chunk.swap(*_retval);
      return NS_OK;
    }
  }

  // We can be here only if the caller is reader since writer always create a
  // new chunk above and preloader calls this method to preload only chunks that
  // are not loaded but that do exist.
  MOZ_ASSERT(aCaller == READER, "Unexpected!");

  if (mOutput) {
    // the chunk doesn't exist but mOutput may create it
    QueueChunkListener(aIndex, aCallback);
  } else {
    return NS_ERROR_NOT_AVAILABLE;
  }

  return NS_OK;
}
1592
PreloadChunks(uint32_t aIndex)1593 void CacheFile::PreloadChunks(uint32_t aIndex) {
1594 AssertOwnsLock();
1595
1596 uint32_t limit = aIndex + mPreloadChunkCount;
1597
1598 for (uint32_t i = aIndex; i < limit; ++i) {
1599 int64_t off = i * static_cast<int64_t>(kChunkSize);
1600
1601 if (off >= mDataSize) {
1602 // This chunk is beyond EOF.
1603 return;
1604 }
1605
1606 if (mChunks.GetWeak(i) || mCachedChunks.GetWeak(i)) {
1607 // This chunk is already in memory or is being read right now.
1608 continue;
1609 }
1610
1611 LOG(("CacheFile::PreloadChunks() - Preloading chunk [this=%p, idx=%u]",
1612 this, i));
1613
1614 RefPtr<CacheFileChunk> chunk;
1615 GetChunkLocked(i, PRELOADER, nullptr, getter_AddRefs(chunk));
1616 // We've checked that we don't have this chunk, so no chunk must be
1617 // returned.
1618 MOZ_ASSERT(!chunk);
1619 }
1620 }
1621
// Decides whether a chunk being deactivated should be parked in
// mCachedChunks instead of being released. With CACHE_CHUNKS defined (see
// the comment at the top of this file) every chunk is kept; otherwise only
// the chunks we must keep (MustKeepCachedChunk()) plus an optimistic set of
// leading chunks kept while no input stream has been opened yet.
// Caller must hold the lock.
bool CacheFile::ShouldCacheChunk(uint32_t aIndex) {
  AssertOwnsLock();

#ifdef CACHE_CHUNKS
  // We cache all chunks.
  return true;
#else

  if (mPreloadChunkCount != 0 && mInputs.Length() == 0 &&
      mPreloadWithoutInputStreams && aIndex < mPreloadChunkCount) {
    // We don't have any input stream yet, but it is likely that some will be
    // opened soon. Keep first mPreloadChunkCount chunks in memory. The
    // condition is here instead of in MustKeepCachedChunk() since these
    // chunks should be preloaded and can be kept in memory as an optimization,
    // but they can be released at any time until they are considered as
    // preloaded chunks for any input stream.
    return true;
  }

  // Cache only chunks that we really need to keep.
  return MustKeepCachedChunk(aIndex);
#endif
}
1645
MustKeepCachedChunk(uint32_t aIndex)1646 bool CacheFile::MustKeepCachedChunk(uint32_t aIndex) {
1647 AssertOwnsLock();
1648
1649 // We must keep the chunk when this is memory only entry or we don't have
1650 // a handle yet.
1651 if (mMemoryOnly || mOpeningFile) {
1652 return true;
1653 }
1654
1655 if (mPreloadChunkCount == 0) {
1656 // Preloading of chunks is disabled
1657 return false;
1658 }
1659
1660 // Check whether this chunk should be considered as preloaded chunk for any
1661 // existing input stream.
1662
1663 // maxPos is the position of the last byte in the given chunk
1664 int64_t maxPos = static_cast<int64_t>(aIndex + 1) * kChunkSize - 1;
1665
1666 // minPos is the position of the first byte in a chunk that precedes the given
1667 // chunk by mPreloadChunkCount chunks
1668 int64_t minPos;
1669 if (mPreloadChunkCount >= aIndex) {
1670 minPos = 0;
1671 } else {
1672 minPos = static_cast<int64_t>(aIndex - mPreloadChunkCount) * kChunkSize;
1673 }
1674
1675 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
1676 int64_t inputPos = mInputs[i]->GetPosition();
1677 if (inputPos >= minPos && inputPos <= maxPos) {
1678 return true;
1679 }
1680 }
1681
1682 return false;
1683 }
1684
// Called by CacheFileChunk when its external refcount drops and the chunk is
// no longer used (expected refcount here is exactly 2: the caller's strong
// ref plus our mChunks entry). Depending on state the chunk is: kept (still
// referenced), dropped (discarded or entry in error), written to disk
// (dirty), or moved to mCachedChunks / released (see ShouldCacheChunk()).
// Takes the lock itself; must NOT be called with the lock held.
nsresult CacheFile::DeactivateChunk(CacheFileChunk* aChunk) {
  nsresult rv;

  // Avoid lock reentrancy by increasing the RefCnt
  RefPtr<CacheFileChunk> chunk = aChunk;

  {
    CacheFileAutoLock lock(this);

    LOG(("CacheFile::DeactivateChunk() [this=%p, chunk=%p, idx=%u]", this,
         aChunk, aChunk->Index()));

    MOZ_ASSERT(mReady);
    MOZ_ASSERT((mHandle && !mMemoryOnly && !mOpeningFile) ||
               (!mHandle && mMemoryOnly && !mOpeningFile) ||
               (!mHandle && !mMemoryOnly && mOpeningFile));

    if (aChunk->mRefCnt != 2) {
      LOG(
          ("CacheFile::DeactivateChunk() - Chunk is still used [this=%p, "
           "chunk=%p, refcnt=%" PRIuPTR "]",
           this, aChunk, aChunk->mRefCnt.get()));

      // somebody got the reference before the lock was acquired
      return NS_OK;
    }

    if (aChunk->mDiscardedChunk) {
      // Chunk was discarded (e.g. by Truncate()); just drop our bookkeeping.
      aChunk->mActiveChunk = false;
      ReleaseOutsideLock(
          RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

      DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
      MOZ_ASSERT(removed);
      return NS_OK;
    }

#ifdef DEBUG
    {
      // We can be here iff the chunk is in the hash table
      RefPtr<CacheFileChunk> chunkCheck;
      mChunks.Get(chunk->Index(), getter_AddRefs(chunkCheck));
      MOZ_ASSERT(chunkCheck == chunk);

      // We also shouldn't have any queued listener for this chunk
      ChunkListeners* listeners;
      mChunkListeners.Get(chunk->Index(), &listeners);
      MOZ_ASSERT(!listeners);
    }
#endif

    if (NS_FAILED(chunk->GetStatus())) {
      SetError(chunk->GetStatus());
    }

    if (NS_FAILED(mStatus)) {
      // Don't write any chunk to disk since this entry will be doomed
      LOG(
          ("CacheFile::DeactivateChunk() - Releasing chunk because of status "
           "[this=%p, chunk=%p, mStatus=0x%08" PRIx32 "]",
           this, chunk.get(), static_cast<uint32_t>(mStatus)));

      RemoveChunkInternal(chunk, false);
      return mStatus;
    }

    if (chunk->IsDirty() && !mMemoryOnly && !mOpeningFile) {
      LOG(
          ("CacheFile::DeactivateChunk() - Writing dirty chunk to the disk "
           "[this=%p]",
           this));

      mDataIsDirty = true;

      rv = chunk->Write(mHandle, this);
      if (NS_FAILED(rv)) {
        LOG(
            ("CacheFile::DeactivateChunk() - CacheFileChunk::Write() failed "
             "synchronously. Removing it. [this=%p, chunk=%p, rv=0x%08" PRIx32
             "]",
             this, chunk.get(), static_cast<uint32_t>(rv)));

        RemoveChunkInternal(chunk, false);

        SetError(rv);
        return rv;
      }

      // Chunk will be removed in OnChunkWritten if it is still unused

      // chunk needs to be released under the lock to be able to rely on
      // CacheFileChunk::mRefCnt in CacheFile::OnChunkWritten()
      chunk = nullptr;
      return NS_OK;
    }

    bool keepChunk = ShouldCacheChunk(aChunk->Index());
    LOG(("CacheFile::DeactivateChunk() - %s unused chunk [this=%p, chunk=%p]",
         keepChunk ? "Caching" : "Releasing", this, chunk.get()));

    RemoveChunkInternal(chunk, keepChunk);

    if (!mMemoryOnly) WriteMetadataIfNeededLocked();
  }

  return NS_OK;
}
1792
RemoveChunkInternal(CacheFileChunk * aChunk,bool aCacheChunk)1793 void CacheFile::RemoveChunkInternal(CacheFileChunk* aChunk, bool aCacheChunk) {
1794 AssertOwnsLock();
1795
1796 aChunk->mActiveChunk = false;
1797 ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));
1798
1799 if (aCacheChunk) {
1800 mCachedChunks.Put(aChunk->Index(), RefPtr{aChunk});
1801 }
1802
1803 mChunks.Remove(aChunk->Index());
1804 }
1805
OutputStreamExists(bool aAlternativeData)1806 bool CacheFile::OutputStreamExists(bool aAlternativeData) {
1807 AssertOwnsLock();
1808
1809 if (!mOutput) {
1810 return false;
1811 }
1812
1813 return mOutput->IsAlternativeData() == aAlternativeData;
1814 }
1815
// Returns how many bytes, starting at chunk aIndex, are guaranteed to be
// readable without waiting: it walks consecutive chunks that are loaded (in
// mChunks and ready, or parked in mCachedChunks) and stops at the first gap.
// For disk-backed entries the scan is limited to the preload window, since
// only those chunks are guaranteed not to be released. The result is capped
// by the relevant data size (normal data ends at mAltDataOffset when
// alt-data is present). Caller must hold the lock.
int64_t CacheFile::BytesFromChunk(uint32_t aIndex, bool aAlternativeData) {
  AssertOwnsLock();

  // Pick the end of the region the caller may read: whole data for alt-data
  // readers, only up to the alt-data start for normal-data readers.
  int64_t dataSize;

  if (mAltDataOffset != -1) {
    if (aAlternativeData) {
      dataSize = mDataSize;
    } else {
      dataSize = mAltDataOffset;
    }
  } else {
    MOZ_ASSERT(!aAlternativeData);
    dataSize = mDataSize;
  }

  if (!dataSize) {
    return 0;
  }

  // Index of the last existing chunk.
  uint32_t lastChunk = (dataSize - 1) / kChunkSize;
  if (aIndex > lastChunk) {
    return 0;
  }

  // We can use only preloaded chunks for the given stream to calculate
  // available bytes if this is an entry stored on disk, since only those
  // chunks are guaranteed not to be released.
  uint32_t maxPreloadedChunk;
  if (mMemoryOnly) {
    maxPreloadedChunk = lastChunk;
  } else {
    maxPreloadedChunk = std::min(aIndex + mPreloadChunkCount, lastChunk);
  }

  // Walk forward until the first chunk that is missing or not ready.
  uint32_t i;
  for (i = aIndex; i <= maxPreloadedChunk; ++i) {
    CacheFileChunk* chunk;

    chunk = mChunks.GetWeak(i);
    if (chunk) {
      // Only the last chunk may be partially filled.
      MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
      if (chunk->IsReady()) {
        continue;
      }

      // don't search this chunk in cached
      break;
    }

    chunk = mCachedChunks.GetWeak(i);
    if (chunk) {
      MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
      continue;
    }

    break;
  }

  // theoretic bytes in advance
  int64_t advance = int64_t(i - aIndex) * kChunkSize;
  // real bytes till the end of the file
  int64_t tail = dataSize - (aIndex * kChunkSize);

  return std::min(advance, tail);
}
1883
Truncate(int64_t aOffset)1884 nsresult CacheFile::Truncate(int64_t aOffset) {
1885 AssertOwnsLock();
1886
1887 LOG(("CacheFile::Truncate() [this=%p, offset=%" PRId64 "]", this, aOffset));
1888
1889 nsresult rv;
1890
1891 // If we ever need to truncate on non alt-data boundary, we need to handle
1892 // existing input streams.
1893 MOZ_ASSERT(aOffset == mAltDataOffset,
1894 "Truncating normal data not implemented");
1895 MOZ_ASSERT(mReady);
1896 MOZ_ASSERT(!mOutput);
1897
1898 uint32_t lastChunk = 0;
1899 if (mDataSize > 0) {
1900 lastChunk = (mDataSize - 1) / kChunkSize;
1901 }
1902
1903 uint32_t newLastChunk = 0;
1904 if (aOffset > 0) {
1905 newLastChunk = (aOffset - 1) / kChunkSize;
1906 }
1907
1908 uint32_t bytesInNewLastChunk = aOffset - newLastChunk * kChunkSize;
1909
1910 LOG(
1911 ("CacheFileTruncate() - lastChunk=%u, newLastChunk=%u, "
1912 "bytesInNewLastChunk=%u",
1913 lastChunk, newLastChunk, bytesInNewLastChunk));
1914
1915 // Remove all truncated chunks from mCachedChunks
1916 for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
1917 uint32_t idx = iter.Key();
1918
1919 if (idx > newLastChunk) {
1920 // This is unused chunk, simply remove it.
1921 LOG(("CacheFile::Truncate() - removing cached chunk [idx=%u]", idx));
1922 iter.Remove();
1923 }
1924 }
1925
1926 // We need to make sure no input stream holds a reference to a chunk we're
1927 // going to discard. In theory, if alt-data begins at chunk boundary, input
1928 // stream for normal data can get the chunk containing only alt-data via
1929 // EnsureCorrectChunk() call. The input stream won't read the data from such
1930 // chunk, but it will keep the reference until the stream is closed and we
1931 // cannot simply discard this chunk.
1932 int64_t maxInputChunk = -1;
1933 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
1934 int64_t inputChunk = mInputs[i]->GetChunkIdx();
1935
1936 if (maxInputChunk < inputChunk) {
1937 maxInputChunk = inputChunk;
1938 }
1939
1940 MOZ_RELEASE_ASSERT(mInputs[i]->GetPosition() <= aOffset);
1941 }
1942
1943 MOZ_RELEASE_ASSERT(maxInputChunk <= newLastChunk + 1);
1944 if (maxInputChunk == newLastChunk + 1) {
1945 // Truncating must be done at chunk boundary
1946 MOZ_RELEASE_ASSERT(bytesInNewLastChunk == kChunkSize);
1947 newLastChunk++;
1948 bytesInNewLastChunk = 0;
1949 LOG(
1950 ("CacheFile::Truncate() - chunk %p is still in use, using "
1951 "newLastChunk=%u and bytesInNewLastChunk=%u",
1952 mChunks.GetWeak(newLastChunk), newLastChunk, bytesInNewLastChunk));
1953 }
1954
1955 // Discard all truncated chunks in mChunks
1956 for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) {
1957 uint32_t idx = iter.Key();
1958
1959 if (idx > newLastChunk) {
1960 RefPtr<CacheFileChunk>& chunk = iter.Data();
1961 LOG(("CacheFile::Truncate() - discarding chunk [idx=%u, chunk=%p]", idx,
1962 chunk.get()));
1963
1964 if (HaveChunkListeners(idx)) {
1965 NotifyChunkListeners(idx, NS_ERROR_NOT_AVAILABLE, chunk);
1966 }
1967
1968 chunk->mDiscardedChunk = true;
1969 mDiscardedChunks.AppendElement(chunk);
1970 iter.Remove();
1971 }
1972 }
1973
1974 // Remove hashes of all removed chunks from the metadata
1975 for (uint32_t i = lastChunk; i > newLastChunk; --i) {
1976 mMetadata->RemoveHash(i);
1977 }
1978
1979 // Truncate new last chunk
1980 if (bytesInNewLastChunk == kChunkSize) {
1981 LOG(("CacheFile::Truncate() - not truncating last chunk."));
1982 } else {
1983 RefPtr<CacheFileChunk> chunk;
1984 if (mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
1985 LOG(("CacheFile::Truncate() - New last chunk %p got from mChunks.",
1986 chunk.get()));
1987 } else if (mCachedChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
1988 LOG(("CacheFile::Truncate() - New last chunk %p got from mCachedChunks.",
1989 chunk.get()));
1990 } else {
1991 // New last chunk isn't loaded but we need to update the hash.
1992 MOZ_ASSERT(!mMemoryOnly);
1993 MOZ_ASSERT(mHandle);
1994
1995 rv = GetChunkLocked(newLastChunk, PRELOADER, nullptr,
1996 getter_AddRefs(chunk));
1997 if (NS_FAILED(rv)) {
1998 return rv;
1999 }
2000 // We've checked that we don't have this chunk, so no chunk must be
2001 // returned.
2002 MOZ_ASSERT(!chunk);
2003
2004 if (!mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
2005 return NS_ERROR_UNEXPECTED;
2006 }
2007
2008 LOG(("CacheFile::Truncate() - New last chunk %p got from preloader.",
2009 chunk.get()));
2010 }
2011
2012 rv = chunk->GetStatus();
2013 if (NS_FAILED(rv)) {
2014 LOG(
2015 ("CacheFile::Truncate() - New last chunk is failed "
2016 "[status=0x%08" PRIx32 "]",
2017 static_cast<uint32_t>(rv)));
2018 return rv;
2019 }
2020
2021 chunk->Truncate(bytesInNewLastChunk);
2022
2023 // If the chunk is ready set the new hash now. If it's still being loaded
2024 // CacheChunk::Truncate() made the chunk dirty and the hash will be updated
2025 // in OnChunkWritten().
2026 if (chunk->IsReady()) {
2027 mMetadata->SetHash(newLastChunk, chunk->Hash());
2028 }
2029 }
2030
2031 if (mHandle) {
2032 rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle, aOffset, aOffset,
2033 nullptr);
2034 if (NS_FAILED(rv)) {
2035 return rv;
2036 }
2037 }
2038
2039 mDataSize = aOffset;
2040
2041 return NS_OK;
2042 }
2043
StatusToTelemetryEnum(nsresult aStatus)2044 static uint32_t StatusToTelemetryEnum(nsresult aStatus) {
2045 if (NS_SUCCEEDED(aStatus)) {
2046 return 0;
2047 }
2048
2049 switch (aStatus) {
2050 case NS_BASE_STREAM_CLOSED:
2051 return 0; // Log this as a success
2052 case NS_ERROR_OUT_OF_MEMORY:
2053 return 2;
2054 case NS_ERROR_FILE_DISK_FULL:
2055 return 3;
2056 case NS_ERROR_FILE_CORRUPTED:
2057 return 4;
2058 case NS_ERROR_FILE_NOT_FOUND:
2059 return 5;
2060 case NS_BINDING_ABORTED:
2061 return 6;
2062 default:
2063 return 1; // other error
2064 }
2065
2066 MOZ_ASSERT_UNREACHABLE("We should never get here");
2067 }
2068
RemoveInput(CacheFileInputStream * aInput,nsresult aStatus)2069 void CacheFile::RemoveInput(CacheFileInputStream* aInput, nsresult aStatus) {
2070 AssertOwnsLock();
2071
2072 LOG(("CacheFile::RemoveInput() [this=%p, input=%p, status=0x%08" PRIx32 "]",
2073 this, aInput, static_cast<uint32_t>(aStatus)));
2074
2075 DebugOnly<bool> found;
2076 found = mInputs.RemoveElement(aInput);
2077 MOZ_ASSERT(found);
2078
2079 ReleaseOutsideLock(
2080 already_AddRefed<nsIInputStream>(static_cast<nsIInputStream*>(aInput)));
2081
2082 if (!mMemoryOnly) WriteMetadataIfNeededLocked();
2083
2084 // If the input didn't read all data, there might be left some preloaded
2085 // chunks that won't be used anymore.
2086 CleanUpCachedChunks();
2087
2088 Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_INPUT_STREAM_STATUS,
2089 StatusToTelemetryEnum(aStatus));
2090 }
2091
// Detaches the (single) output stream from this CacheFile. When the stream
// was closed with a fatal error the failure is propagated to the entry:
// for an alt-data output the alt-data is truncated away when no reader
// still uses it, otherwise the entry is put into an error state.
// Must be called with the CacheFile lock held.
void CacheFile::RemoveOutput(CacheFileOutputStream* aOutput, nsresult aStatus) {
  AssertOwnsLock();

  nsresult rv;

  LOG(("CacheFile::RemoveOutput() [this=%p, output=%p, status=0x%08" PRIx32 "]",
       this, aOutput, static_cast<uint32_t>(aStatus)));

  if (mOutput != aOutput) {
    LOG(
        ("CacheFile::RemoveOutput() - This output was already removed, ignoring"
         " call [this=%p]",
         this));
    return;
  }

  mOutput = nullptr;

  // Cancel all queued chunk and update listeners that cannot be satisfied
  NotifyListenersAboutOutputRemoval();

  if (!mMemoryOnly) WriteMetadataIfNeededLocked();

  // Make sure the CacheFile status is set to a failure when the output stream
  // is closed with a fatal error. This way we propagate correctly and w/o any
  // windows the failure state of this entry to end consumers.
  if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) &&
      aStatus != NS_BASE_STREAM_CLOSED) {
    if (aOutput->IsAlternativeData()) {
      MOZ_ASSERT(mAltDataOffset != -1);
      // If there is no alt-data input stream truncate only alt-data, otherwise
      // doom the entry.
      bool altDataInputExists = false;
      for (uint32_t i = 0; i < mInputs.Length(); ++i) {
        if (mInputs[i]->IsAlternativeData()) {
          altDataInputExists = true;
          break;
        }
      }
      if (altDataInputExists) {
        // A reader is still consuming the alt-data, so we cannot cut it off;
        // fail the whole entry instead.
        SetError(aStatus);
      } else {
        rv = Truncate(mAltDataOffset);
        if (NS_FAILED(rv)) {
          LOG(
              ("CacheFile::RemoveOutput() - Truncating alt-data failed "
               "[rv=0x%08" PRIx32 "]",
               static_cast<uint32_t>(rv)));
          SetError(aStatus);
        } else {
          // Alt-data was successfully removed; clear all of its bookkeeping.
          SetAltMetadata(nullptr);
          mAltDataOffset = -1;
          mAltDataType.Truncate();
        }
      }
    } else {
      SetError(aStatus);
    }
  }

  // Notify close listener as the last action
  aOutput->NotifyCloseListener();

  Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS,
                        StatusToTelemetryEnum(aStatus));
}
2158
NotifyChunkListener(CacheFileChunkListener * aCallback,nsIEventTarget * aTarget,nsresult aResult,uint32_t aChunkIdx,CacheFileChunk * aChunk)2159 nsresult CacheFile::NotifyChunkListener(CacheFileChunkListener* aCallback,
2160 nsIEventTarget* aTarget,
2161 nsresult aResult, uint32_t aChunkIdx,
2162 CacheFileChunk* aChunk) {
2163 LOG(
2164 ("CacheFile::NotifyChunkListener() [this=%p, listener=%p, target=%p, "
2165 "rv=0x%08" PRIx32 ", idx=%u, chunk=%p]",
2166 this, aCallback, aTarget, static_cast<uint32_t>(aResult), aChunkIdx,
2167 aChunk));
2168
2169 RefPtr<NotifyChunkListenerEvent> ev;
2170 ev = new NotifyChunkListenerEvent(aCallback, aResult, aChunkIdx, aChunk);
2171 if (aTarget) {
2172 return aTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
2173 }
2174 return NS_DispatchToCurrentThread(ev);
2175 }
2176
QueueChunkListener(uint32_t aIndex,CacheFileChunkListener * aCallback)2177 void CacheFile::QueueChunkListener(uint32_t aIndex,
2178 CacheFileChunkListener* aCallback) {
2179 LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%u, listener=%p]", this,
2180 aIndex, aCallback));
2181
2182 AssertOwnsLock();
2183
2184 MOZ_ASSERT(aCallback);
2185
2186 ChunkListenerItem* item = new ChunkListenerItem();
2187 item->mTarget = CacheFileIOManager::IOTarget();
2188 if (!item->mTarget) {
2189 LOG(
2190 ("CacheFile::QueueChunkListener() - Cannot get Cache I/O thread! Using "
2191 "main thread for callback."));
2192 item->mTarget = GetMainThreadEventTarget();
2193 }
2194 item->mCallback = aCallback;
2195
2196 ChunkListeners* listeners;
2197 if (!mChunkListeners.Get(aIndex, &listeners)) {
2198 listeners = new ChunkListeners();
2199 mChunkListeners.Put(aIndex, listeners);
2200 }
2201
2202 listeners->mItems.AppendElement(item);
2203 }
2204
NotifyChunkListeners(uint32_t aIndex,nsresult aResult,CacheFileChunk * aChunk)2205 nsresult CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
2206 CacheFileChunk* aChunk) {
2207 LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%u, rv=0x%08" PRIx32
2208 ", "
2209 "chunk=%p]",
2210 this, aIndex, static_cast<uint32_t>(aResult), aChunk));
2211
2212 AssertOwnsLock();
2213
2214 nsresult rv, rv2;
2215
2216 ChunkListeners* listeners;
2217 mChunkListeners.Get(aIndex, &listeners);
2218 MOZ_ASSERT(listeners);
2219
2220 rv = NS_OK;
2221 for (uint32_t i = 0; i < listeners->mItems.Length(); i++) {
2222 ChunkListenerItem* item = listeners->mItems[i];
2223 rv2 = NotifyChunkListener(item->mCallback, item->mTarget, aResult, aIndex,
2224 aChunk);
2225 if (NS_FAILED(rv2) && NS_SUCCEEDED(rv)) rv = rv2;
2226 delete item;
2227 }
2228
2229 mChunkListeners.Remove(aIndex);
2230
2231 return rv;
2232 }
2233
HaveChunkListeners(uint32_t aIndex)2234 bool CacheFile::HaveChunkListeners(uint32_t aIndex) {
2235 ChunkListeners* listeners;
2236 mChunkListeners.Get(aIndex, &listeners);
2237 return !!listeners;
2238 }
2239
// Called when the output stream goes away. Chunk listeners waiting for a
// chunk the writer will never produce are failed with
// NS_ERROR_NOT_AVAILABLE, and update listeners on ready chunks are
// notified. Must be called with the CacheFile lock held.
void CacheFile::NotifyListenersAboutOutputRemoval() {
  LOG(("CacheFile::NotifyListenersAboutOutputRemoval() [this=%p]", this));

  AssertOwnsLock();

  // First fail all chunk listeners that wait for non-existent chunk
  for (auto iter = mChunkListeners.Iter(); !iter.Done(); iter.Next()) {
    uint32_t idx = iter.Key();
    auto listeners = iter.UserData();

    LOG(
        ("CacheFile::NotifyListenersAboutOutputRemoval() - fail "
         "[this=%p, idx=%u]",
         this, idx));

    RefPtr<CacheFileChunk> chunk;
    mChunks.Get(idx, getter_AddRefs(chunk));
    if (chunk) {
      // Skip these listeners because the chunk is being read. We don't have
      // assertion here to check its state because it might be already in READY
      // state while CacheFile::OnChunkRead() is waiting on Cache I/O thread for
      // a lock so the listeners hasn't been notified yet. In any case, the
      // listeners will be notified from CacheFile::OnChunkRead().
      continue;
    }

    // No such chunk exists and no writer remains to create it: fail every
    // queued listener and free the items.
    for (uint32_t i = 0; i < listeners->mItems.Length(); i++) {
      ChunkListenerItem* item = listeners->mItems[i];
      NotifyChunkListener(item->mCallback, item->mTarget,
                          NS_ERROR_NOT_AVAILABLE, idx, nullptr);
      delete item;
    }

    // Drop the (now empty) listener list for this index.
    iter.Remove();
  }

  // Fail all update listeners
  for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) {
    const RefPtr<CacheFileChunk>& chunk = iter.Data();
    LOG(
        ("CacheFile::NotifyListenersAboutOutputRemoval() - fail2 "
         "[this=%p, idx=%u]",
         this, iter.Key()));

    if (chunk->IsReady()) {
      chunk->NotifyUpdateListeners();
    }
  }
}
2289
DataSize(int64_t * aSize)2290 bool CacheFile::DataSize(int64_t* aSize) {
2291 CacheFileAutoLock lock(this);
2292
2293 if (OutputStreamExists(false)) {
2294 return false;
2295 }
2296
2297 if (mAltDataOffset == -1) {
2298 *aSize = mDataSize;
2299 } else {
2300 *aSize = mAltDataOffset;
2301 }
2302
2303 return true;
2304 }
2305
GetAltDataSize(int64_t * aSize)2306 nsresult CacheFile::GetAltDataSize(int64_t* aSize) {
2307 CacheFileAutoLock lock(this);
2308 if (mOutput) {
2309 return NS_ERROR_IN_PROGRESS;
2310 }
2311
2312 if (mAltDataOffset == -1) {
2313 return NS_ERROR_NOT_AVAILABLE;
2314 }
2315
2316 *aSize = mDataSize - mAltDataOffset;
2317 return NS_OK;
2318 }
2319
GetAltDataType(nsACString & aType)2320 nsresult CacheFile::GetAltDataType(nsACString& aType) {
2321 CacheFileAutoLock lock(this);
2322
2323 if (mAltDataOffset == -1) {
2324 return NS_ERROR_NOT_AVAILABLE;
2325 }
2326
2327 aType = mAltDataType;
2328 return NS_OK;
2329 }
2330
IsDoomed()2331 bool CacheFile::IsDoomed() {
2332 CacheFileAutoLock lock(this);
2333
2334 if (!mHandle) return false;
2335
2336 return mHandle->IsDoomed();
2337 }
2338
IsWriteInProgress()2339 bool CacheFile::IsWriteInProgress() {
2340 CacheFileAutoLock lock(this);
2341
2342 bool result = false;
2343
2344 if (!mMemoryOnly) {
2345 result =
2346 mDataIsDirty || (mMetadata && mMetadata->IsDirty()) || mWritingMetadata;
2347 }
2348
2349 result = result || mOpeningFile || mOutput || mChunks.Count();
2350
2351 return result;
2352 }
2353
EntryWouldExceedLimit(int64_t aOffset,int64_t aSize,bool aIsAltData)2354 bool CacheFile::EntryWouldExceedLimit(int64_t aOffset, int64_t aSize,
2355 bool aIsAltData) {
2356 CacheFileAutoLock lock(this);
2357
2358 if (mSkipSizeCheck || aSize < 0) {
2359 return false;
2360 }
2361
2362 int64_t totalSize = aOffset + aSize;
2363 if (aIsAltData) {
2364 totalSize += (mAltDataOffset == -1) ? mDataSize : mAltDataOffset;
2365 }
2366
2367 if (CacheObserver::EntryIsTooBig(totalSize, !mMemoryOnly)) {
2368 return true;
2369 }
2370
2371 return false;
2372 }
2373
IsDirty()2374 bool CacheFile::IsDirty() { return mDataIsDirty || mMetadata->IsDirty(); }
2375
WriteMetadataIfNeeded()2376 void CacheFile::WriteMetadataIfNeeded() {
2377 LOG(("CacheFile::WriteMetadataIfNeeded() [this=%p]", this));
2378
2379 CacheFileAutoLock lock(this);
2380
2381 if (!mMemoryOnly) WriteMetadataIfNeededLocked();
2382 }
2383
// Writes the metadata to disk when the entry is dirty and otherwise idle
// (no streams, no loaded chunks, no metadata write already in flight).
// Must be called with the CacheFile lock held and only for disk-backed
// entries.
void CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget) {
  // When aFireAndForget is set to true, we are called from dtor.
  // |this| must not be referenced after this method returns!

  LOG(("CacheFile::WriteMetadataIfNeededLocked() [this=%p]", this));

  nsresult rv;

  AssertOwnsLock();
  MOZ_ASSERT(!mMemoryOnly);

  if (!mMetadata) {
    MOZ_CRASH("Must have metadata here");
    return;
  }

  // Never touch the disk once the entry is in a failed state.
  if (NS_FAILED(mStatus)) return;

  // Bail out when there is nothing to write or the entry is still in use;
  // a later call will pick the work up.
  if (!IsDirty() || mOutput || mInputs.Length() || mChunks.Count() ||
      mWritingMetadata || mOpeningFile || mKill)
    return;

  if (!aFireAndForget) {
    // if aFireAndForget is set, we are called from dtor. Write
    // scheduler hard-refers CacheFile otherwise, so we cannot be here.
    CacheFileIOManager::UnscheduleMetadataWrite(this);
  }

  LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing metadata [this=%p]",
       this));

  // From the dtor a null listener is passed so that nothing keeps |this|
  // alive after the write is issued.
  rv = mMetadata->WriteMetadata(mDataSize, aFireAndForget ? nullptr : this);
  if (NS_SUCCEEDED(rv)) {
    mWritingMetadata = true;
    mDataIsDirty = false;
  } else {
    LOG(
        ("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously "
         "failed [this=%p]",
         this));
    // TODO: close streams with error
    SetError(rv);
  }
}
2428
PostWriteTimer()2429 void CacheFile::PostWriteTimer() {
2430 if (mMemoryOnly) return;
2431
2432 LOG(("CacheFile::PostWriteTimer() [this=%p]", this));
2433
2434 CacheFileIOManager::ScheduleMetadataWrite(this);
2435 }
2436
CleanUpCachedChunks()2437 void CacheFile::CleanUpCachedChunks() {
2438 for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
2439 uint32_t idx = iter.Key();
2440 const RefPtr<CacheFileChunk>& chunk = iter.Data();
2441
2442 LOG(("CacheFile::CleanUpCachedChunks() [this=%p, idx=%u, chunk=%p]", this,
2443 idx, chunk.get()));
2444
2445 if (MustKeepCachedChunk(idx)) {
2446 LOG(("CacheFile::CleanUpCachedChunks() - Keeping chunk"));
2447 continue;
2448 }
2449
2450 LOG(("CacheFile::CleanUpCachedChunks() - Removing chunk"));
2451 iter.Remove();
2452 }
2453 }
2454
// Fills the tail of chunk aChunkIdx with zeroes up to kChunkSize, creating
// the chunk when it doesn't exist yet. aChunkIdx must be the last (possibly
// incomplete) chunk of the file. Must be called with the lock held.
nsresult CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx) {
  AssertOwnsLock();

  // This method is used to pad last incomplete chunk with zeroes or create
  // a new chunk full of zeroes
  MOZ_ASSERT(mDataSize / kChunkSize == aChunkIdx);

  nsresult rv;
  RefPtr<CacheFileChunk> chunk;
  rv = GetChunkLocked(aChunkIdx, WRITER, nullptr, getter_AddRefs(chunk));
  NS_ENSURE_SUCCESS(rv, rv);

  LOG(
      ("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d"
       " [this=%p]",
       aChunkIdx, chunk->DataSize(), kChunkSize - 1, this));

  // A null buffer from the write handle means the chunk's buffer could not
  // be allocated.
  CacheFileChunkWriteHandle hnd = chunk->GetWriteHandle(kChunkSize);
  if (!hnd.Buf()) {
    ReleaseOutsideLock(std::move(chunk));
    SetError(NS_ERROR_OUT_OF_MEMORY);
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Zero everything between the current end of data and the chunk end, then
  // record the new data size on the handle.
  uint32_t offset = hnd.DataSize();
  memset(hnd.Buf() + offset, 0, kChunkSize - offset);
  hnd.UpdateDataSize(offset, kChunkSize - offset);

  // Drop the chunk reference outside the lock (see ReleaseOutsideLock).
  ReleaseOutsideLock(std::move(chunk));

  return NS_OK;
}
2487
SetError(nsresult aStatus)2488 void CacheFile::SetError(nsresult aStatus) {
2489 AssertOwnsLock();
2490
2491 if (NS_SUCCEEDED(mStatus)) {
2492 mStatus = aStatus;
2493 if (mHandle) {
2494 CacheFileIOManager::DoomFile(mHandle, nullptr);
2495 }
2496 }
2497 }
2498
// Pushes this entry's properties (origin attributes hash, anonymity, pin
// state, frecency, alt-data presence, onstart/onstop response times and
// content type) into the cache index entry belonging to mHandle.
nsresult CacheFile::InitIndexEntry() {
  MOZ_ASSERT(mHandle);

  // Doomed entries are not tracked by the index.
  if (mHandle->IsDoomed()) return NS_OK;

  nsresult rv;

  rv = CacheFileIOManager::InitIndexEntry(
      mHandle, GetOriginAttrsHash(mMetadata->OriginAttributes()),
      mMetadata->IsAnonymous(), mPinned);
  NS_ENSURE_SUCCESS(rv, rv);

  uint32_t frecency = mMetadata->GetFrecency();

  bool hasAltData =
      mMetadata->GetElement(CacheFileUtils::kAltDataKey) ? true : false;

  // Parses a metadata time element into a uint16_t, clamping to
  // kIndexTimeOutOfBound; a missing element maps to kIndexTimeNotAvailable.
  static auto toUint16 = [](const char* s) -> uint16_t {
    if (s) {
      nsresult rv;
      uint64_t n64 = nsDependentCString(s).ToInteger64(&rv);
      MOZ_ASSERT(NS_SUCCEEDED(rv));
      return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound;
    }
    return kIndexTimeNotAvailable;
  };

  const char* onStartTimeStr =
      mMetadata->GetElement("net-response-time-onstart");
  uint16_t onStartTime = toUint16(onStartTimeStr);

  const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
  uint16_t onStopTime = toUint16(onStopTimeStr);

  // Content type id stored in metadata; anything unparsable or out of range
  // falls back to CONTENT_TYPE_UNKNOWN.
  const char* contentTypeStr = mMetadata->GetElement("ctid");
  uint8_t contentType = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
  if (contentTypeStr) {
    int64_t n64 = nsDependentCString(contentTypeStr).ToInteger64(&rv);
    if (NS_FAILED(rv) || n64 < nsICacheEntry::CONTENT_TYPE_UNKNOWN ||
        n64 >= nsICacheEntry::CONTENT_TYPE_LAST) {
      n64 = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
    }
    contentType = n64;
  }

  rv = CacheFileIOManager::UpdateIndexEntry(
      mHandle, &frecency, &hasAltData, &onStartTime, &onStopTime, &contentType);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}
2550
SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const2551 size_t CacheFile::SizeOfExcludingThis(
2552 mozilla::MallocSizeOf mallocSizeOf) const {
2553 CacheFileAutoLock lock(const_cast<CacheFile*>(this));
2554
2555 size_t n = 0;
2556 n += mKey.SizeOfExcludingThisIfUnshared(mallocSizeOf);
2557 n += mChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
2558 for (auto iter = mChunks.ConstIter(); !iter.Done(); iter.Next()) {
2559 n += iter.Data()->SizeOfIncludingThis(mallocSizeOf);
2560 }
2561 n += mCachedChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
2562 for (auto iter = mCachedChunks.ConstIter(); !iter.Done(); iter.Next()) {
2563 n += iter.Data()->SizeOfIncludingThis(mallocSizeOf);
2564 }
2565 // Ignore metadata if it's still being read. It's not safe to access buffers
2566 // in CacheFileMetadata because they might be reallocated on another thread
2567 // outside CacheFile's lock.
2568 if (mMetadata && mReady) {
2569 n += mMetadata->SizeOfIncludingThis(mallocSizeOf);
2570 }
2571
2572 // Input streams are not elsewhere reported.
2573 n += mInputs.ShallowSizeOfExcludingThis(mallocSizeOf);
2574 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
2575 n += mInputs[i]->SizeOfIncludingThis(mallocSizeOf);
2576 }
2577
2578 // Output streams are not elsewhere reported.
2579 if (mOutput) {
2580 n += mOutput->SizeOfIncludingThis(mallocSizeOf);
2581 }
2582
2583 // The listeners are usually classes reported just above.
2584 n += mChunkListeners.ShallowSizeOfExcludingThis(mallocSizeOf);
2585 n += mObjsToRelease.ShallowSizeOfExcludingThis(mallocSizeOf);
2586
2587 // mHandle reported directly from CacheFileIOManager.
2588
2589 return n;
2590 }
2591
SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const2592 size_t CacheFile::SizeOfIncludingThis(
2593 mozilla::MallocSizeOf mallocSizeOf) const {
2594 return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
2595 }
2596
2597 } // namespace net
2598 } // namespace mozilla
2599