1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6 /**
7 * SurfaceCache is a service for caching temporary surfaces in imagelib.
8 */
9
10 #include "SurfaceCache.h"
11
12 #include <algorithm>
13 #include "mozilla/Assertions.h"
14 #include "mozilla/Attributes.h"
15 #include "mozilla/DebugOnly.h"
16 #include "mozilla/Likely.h"
17 #include "mozilla/Move.h"
18 #include "mozilla/Pair.h"
19 #include "mozilla/RefPtr.h"
20 #include "mozilla/StaticMutex.h"
21 #include "mozilla/StaticPtr.h"
22 #include "mozilla/Tuple.h"
23 #include "nsIMemoryReporter.h"
24 #include "gfx2DGlue.h"
25 #include "gfxPlatform.h"
26 #include "gfxPrefs.h"
27 #include "imgFrame.h"
28 #include "Image.h"
29 #include "ISurfaceProvider.h"
30 #include "LookupResult.h"
31 #include "nsExpirationTracker.h"
32 #include "nsHashKeys.h"
33 #include "nsRefPtrHashtable.h"
34 #include "nsSize.h"
35 #include "nsTArray.h"
36 #include "prsystem.h"
37 #include "ShutdownTracker.h"
38
39 using std::max;
40 using std::min;
41
42 namespace mozilla {
43
44 using namespace gfx;
45
46 namespace image {
47
48 class CachedSurface;
49 class SurfaceCacheImpl;
50
51 ///////////////////////////////////////////////////////////////////////////////
52 // Static Data
53 ///////////////////////////////////////////////////////////////////////////////
54
55 // The single surface cache instance.
56 static StaticRefPtr<SurfaceCacheImpl> sInstance;
57
58 // The mutex protecting the surface cache.
59 static StaticMutex sInstanceMutex;
60
61 ///////////////////////////////////////////////////////////////////////////////
62 // SurfaceCache Implementation
63 ///////////////////////////////////////////////////////////////////////////////
64
65 /**
66 * Cost models the cost of storing a surface in the cache. Right now, this is
67 * simply an estimate of the size of the surface in bytes, but in the future it
68 * may be worth taking into account the cost of rematerializing the surface as
69 * well.
70 */
71 typedef size_t Cost;
72
ComputeCost(const IntSize & aSize,uint32_t aBytesPerPixel)73 static Cost ComputeCost(const IntSize& aSize, uint32_t aBytesPerPixel) {
74 MOZ_ASSERT(aBytesPerPixel == 1 || aBytesPerPixel == 4);
75 return aSize.width * aSize.height * aBytesPerPixel;
76 }
77
78 /**
79 * Since we want to be able to make eviction decisions based on cost, we need to
80 * be able to look up the CachedSurface which has a certain cost as well as the
81 * cost associated with a certain CachedSurface. To make this possible, in data
82 * structures we actually store a CostEntry, which contains a weak pointer to
83 * its associated surface.
84 *
85 * To make usage of the weak pointer safe, SurfaceCacheImpl always calls
86 * StartTracking after a surface is stored in the cache and StopTracking before
87 * it is removed.
88 */
89 class CostEntry {
90 public:
CostEntry(NotNull<CachedSurface * > aSurface,Cost aCost)91 CostEntry(NotNull<CachedSurface*> aSurface, Cost aCost)
92 : mSurface(aSurface), mCost(aCost) {}
93
Surface() const94 NotNull<CachedSurface*> Surface() const { return mSurface; }
GetCost() const95 Cost GetCost() const { return mCost; }
96
operator ==(const CostEntry & aOther) const97 bool operator==(const CostEntry& aOther) const {
98 return mSurface == aOther.mSurface && mCost == aOther.mCost;
99 }
100
operator <(const CostEntry & aOther) const101 bool operator<(const CostEntry& aOther) const {
102 return mCost < aOther.mCost ||
103 (mCost == aOther.mCost && mSurface < aOther.mSurface);
104 }
105
106 private:
107 NotNull<CachedSurface*> mSurface;
108 Cost mCost;
109 };
110
111 /**
112 * A CachedSurface associates a surface with a key that uniquely identifies that
113 * surface.
114 */
115 class CachedSurface {
~CachedSurface()116 ~CachedSurface() {}
117
118 public:
119 MOZ_DECLARE_REFCOUNTED_TYPENAME(CachedSurface)
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CachedSurface)120 NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CachedSurface)
121
122 explicit CachedSurface(NotNull<ISurfaceProvider*> aProvider)
123 : mProvider(aProvider), mIsLocked(false) {}
124
GetDrawableSurface() const125 DrawableSurface GetDrawableSurface() const {
126 if (MOZ_UNLIKELY(IsPlaceholder())) {
127 MOZ_ASSERT_UNREACHABLE("Called GetDrawableSurface() on a placeholder");
128 return DrawableSurface();
129 }
130
131 return mProvider->Surface();
132 }
133
SetLocked(bool aLocked)134 void SetLocked(bool aLocked) {
135 if (IsPlaceholder()) {
136 return; // Can't lock a placeholder.
137 }
138
139 // Update both our state and our provider's state. Some surface providers
140 // are permanently locked; maintaining our own locking state enables us to
141 // respect SetLocked() even when it's meaningless from the provider's
142 // perspective.
143 mIsLocked = aLocked;
144 mProvider->SetLocked(aLocked);
145 }
146
IsLocked() const147 bool IsLocked() const {
148 return !IsPlaceholder() && mIsLocked && mProvider->IsLocked();
149 }
150
SetCannotSubstitute()151 void SetCannotSubstitute() {
152 mProvider->Availability().SetCannotSubstitute();
153 }
CannotSubstitute() const154 bool CannotSubstitute() const {
155 return mProvider->Availability().CannotSubstitute();
156 }
157
IsPlaceholder() const158 bool IsPlaceholder() const {
159 return mProvider->Availability().IsPlaceholder();
160 }
IsDecoded() const161 bool IsDecoded() const { return !IsPlaceholder() && mProvider->IsFinished(); }
162
GetImageKey() const163 ImageKey GetImageKey() const { return mProvider->GetImageKey(); }
GetSurfaceKey() const164 const SurfaceKey& GetSurfaceKey() const { return mProvider->GetSurfaceKey(); }
GetExpirationState()165 nsExpirationState* GetExpirationState() { return &mExpirationState; }
166
GetCostEntry()167 CostEntry GetCostEntry() {
168 return image::CostEntry(WrapNotNull(this), mProvider->LogicalSizeInBytes());
169 }
170
171 // A helper type used by SurfaceCacheImpl::CollectSizeOfSurfaces.
172 struct MOZ_STACK_CLASS SurfaceMemoryReport {
SurfaceMemoryReportmozilla::image::CachedSurface::SurfaceMemoryReport173 SurfaceMemoryReport(nsTArray<SurfaceMemoryCounter>& aCounters,
174 MallocSizeOf aMallocSizeOf)
175 : mCounters(aCounters), mMallocSizeOf(aMallocSizeOf) {}
176
Addmozilla::image::CachedSurface::SurfaceMemoryReport177 void Add(NotNull<CachedSurface*> aCachedSurface, bool aIsFactor2) {
178 SurfaceMemoryCounter counter(
179 aCachedSurface->GetSurfaceKey(), aCachedSurface->IsLocked(),
180 aCachedSurface->CannotSubstitute(), aIsFactor2);
181
182 if (aCachedSurface->IsPlaceholder()) {
183 return;
184 }
185
186 // Record the memory used by the ISurfaceProvider. This may not have a
187 // straightforward relationship to the size of the surface that
188 // DrawableRef() returns if the surface is generated dynamically. (i.e.,
189 // for surfaces with PlaybackType::eAnimated.)
190 size_t heap = 0;
191 size_t nonHeap = 0;
192 size_t handles = 0;
193 aCachedSurface->mProvider->AddSizeOfExcludingThis(mMallocSizeOf, heap,
194 nonHeap, handles);
195 counter.Values().SetDecodedHeap(heap);
196 counter.Values().SetDecodedNonHeap(nonHeap);
197 counter.Values().SetExternalHandles(handles);
198
199 mCounters.AppendElement(counter);
200 }
201
202 private:
203 nsTArray<SurfaceMemoryCounter>& mCounters;
204 MallocSizeOf mMallocSizeOf;
205 };
206
207 private:
208 nsExpirationState mExpirationState;
209 NotNull<RefPtr<ISurfaceProvider>> mProvider;
210 bool mIsLocked;
211 };
212
AreaOfIntSize(const IntSize & aSize)213 static int64_t AreaOfIntSize(const IntSize& aSize) {
214 return static_cast<int64_t>(aSize.width) * static_cast<int64_t>(aSize.height);
215 }
216
/**
 * An ImageSurfaceCache is a per-image surface cache. For correctness we must be
 * able to remove all surfaces associated with an image when the image is
 * destroyed or invalidated. Since this will happen frequently, it makes sense
 * to make it cheap by storing the surfaces for each image separately.
 *
 * ImageSurfaceCache also keeps track of whether its associated image is locked
 * or unlocked.
 *
 * The cache may also enter "factor of 2" mode which occurs when the number of
 * surfaces in the cache exceeds the "image.cache.factor2.threshold-surfaces"
 * pref plus the number of native sizes of the image. When in "factor of 2"
 * mode, the cache will strongly favour sizes which are a factor of 2 of the
 * largest native size. It accomplishes this by suggesting a factor of 2 size
 * when lookups fail and substituting the nearest factor of 2 surface to the
 * ideal size as the "best" available (as opposed to substitution but not
 * found). This allows us to minimize memory consumption and CPU time spent
 * decoding when a website requires many variants of the same surface.
 */
class ImageSurfaceCache {
  // Private: instances are destroyed through Release() via the thread-safe
  // refcounting declared below.
  ~ImageSurfaceCache() {}

 public:
  ImageSurfaceCache()
      : mLocked(false), mFactor2Mode(false), mFactor2Pruned(false) {}

  MOZ_DECLARE_REFCOUNTED_TYPENAME(ImageSurfaceCache)
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageSurfaceCache)

  typedef nsRefPtrHashtable<nsGenericHashKey<SurfaceKey>, CachedSurface>
      SurfaceTable;

  bool IsEmpty() const { return mSurfaces.Count() == 0; }

  // Inserts a surface keyed on its SurfaceKey. Returns false only if the
  // fallible hashtable insertion fails (OOM).
  MOZ_MUST_USE bool Insert(NotNull<CachedSurface*> aSurface) {
    MOZ_ASSERT(!mLocked || aSurface->IsPlaceholder() || aSurface->IsLocked(),
               "Inserting an unlocked surface for a locked image");
    return mSurfaces.Put(aSurface->GetSurfaceKey(), aSurface, fallible);
  }

  // Removes the given surface and returns a strong reference so the caller
  // can release it outside any lock. May reset factor-of-2 state if this was
  // the last surface (see AfterMaybeRemove).
  already_AddRefed<CachedSurface> Remove(NotNull<CachedSurface*> aSurface) {
    MOZ_ASSERT(mSurfaces.GetWeak(aSurface->GetSurfaceKey()),
               "Should not be removing a surface we don't have");

    RefPtr<CachedSurface> surface;
    mSurfaces.Remove(aSurface->GetSurfaceKey(), getter_AddRefs(surface));
    AfterMaybeRemove();
    return surface.forget();
  }

  // Looks up the surface with exactly the given key, or nullptr on a miss.
  // When aForAccess is true the lookup is on behalf of a real consumer
  // rather than internal bookkeeping: a hit pins the surface against
  // factor-of-2 pruning, and a miss may switch us into factor-of-2 mode.
  already_AddRefed<CachedSurface> Lookup(const SurfaceKey& aSurfaceKey,
                                         bool aForAccess) {
    RefPtr<CachedSurface> surface;
    mSurfaces.Get(aSurfaceKey, getter_AddRefs(surface));

    if (aForAccess) {
      if (surface) {
        // We don't want to allow factor of 2 mode pruning to release surfaces
        // for which the callers will accept no substitute.
        surface->SetCannotSubstitute();
      } else if (!mFactor2Mode) {
        // If no exact match is found, and this is for use rather than internal
        // accounting (i.e. insert and removal), we know this will trigger a
        // decode. Make sure we switch now to factor of 2 mode if necessary.
        MaybeSetFactor2Mode();
      }
    }

    return surface.forget();
  }

  /**
   * @returns A tuple containing the best matching CachedSurface if available,
   *          a MatchType describing how the CachedSurface was selected, and
   *          an IntSize which is the size the caller should choose to decode
   *          at should it attempt to do so.
   */
  Tuple<already_AddRefed<CachedSurface>, MatchType, IntSize> LookupBestMatch(
      const SurfaceKey& aIdealKey) {
    // Try for an exact match first.
    RefPtr<CachedSurface> exactMatch;
    mSurfaces.Get(aIdealKey, getter_AddRefs(exactMatch));
    if (exactMatch) {
      if (exactMatch->IsDecoded()) {
        return MakeTuple(exactMatch.forget(), MatchType::EXACT, IntSize());
      }
    } else if (!mFactor2Mode) {
      // If no exact match is found, and we are not in factor of 2 mode, then
      // we know that we will trigger a decode because at best we will provide
      // a substitute. Make sure we switch now to factor of 2 mode if necessary.
      MaybeSetFactor2Mode();
    }

    // Try for a best match second, if using compact.
    IntSize suggestedSize = SuggestedSize(aIdealKey.Size());
    if (mFactor2Mode) {
      if (!exactMatch) {
        SurfaceKey compactKey = aIdealKey.CloneWithSize(suggestedSize);
        mSurfaces.Get(compactKey, getter_AddRefs(exactMatch));
        if (exactMatch && exactMatch->IsDecoded()) {
          MOZ_ASSERT(suggestedSize != aIdealKey.Size());
          return MakeTuple(exactMatch.forget(),
                           MatchType::SUBSTITUTE_BECAUSE_BEST, suggestedSize);
        }
      }
    }

    // There's no perfect match, so find the best match we can.
    RefPtr<CachedSurface> bestMatch;
    for (auto iter = ConstIter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
      const SurfaceKey& currentKey = current->GetSurfaceKey();

      // We never match a placeholder.
      if (current->IsPlaceholder()) {
        continue;
      }
      // Matching the playback type and SVG context is required.
      if (currentKey.Playback() != aIdealKey.Playback() ||
          currentKey.SVGContext() != aIdealKey.SVGContext()) {
        continue;
      }
      // Matching the flags is required.
      if (currentKey.Flags() != aIdealKey.Flags()) {
        continue;
      }
      // Anything is better than nothing! (Within the constraints we just
      // checked, of course.)
      if (!bestMatch) {
        bestMatch = current;
        continue;
      }

      MOZ_ASSERT(bestMatch, "Should have a current best match");

      // Always prefer completely decoded surfaces.
      bool bestMatchIsDecoded = bestMatch->IsDecoded();
      if (bestMatchIsDecoded && !current->IsDecoded()) {
        continue;
      }
      if (!bestMatchIsDecoded && current->IsDecoded()) {
        bestMatch = current;
        continue;
      }

      // Both candidates have the same decode state; fall back to the
      // area-based size heuristic.
      SurfaceKey bestMatchKey = bestMatch->GetSurfaceKey();
      if (CompareArea(aIdealKey.Size(), bestMatchKey.Size(),
                      currentKey.Size())) {
        bestMatch = current;
      }
    }

    // Derive the MatchType from which candidates we found above.
    MatchType matchType;
    if (bestMatch) {
      if (!exactMatch) {
        // No exact match, neither ideal nor factor of 2.
        MOZ_ASSERT(suggestedSize != bestMatch->GetSurfaceKey().Size(),
                   "No exact match despite the fact the sizes match!");
        matchType = MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND;
      } else if (exactMatch != bestMatch) {
        // The exact match is still decoding, but we found a substitute.
        matchType = MatchType::SUBSTITUTE_BECAUSE_PENDING;
      } else if (aIdealKey.Size() != bestMatch->GetSurfaceKey().Size()) {
        // The best factor of 2 match is still decoding, but the best we've got.
        MOZ_ASSERT(suggestedSize != aIdealKey.Size());
        MOZ_ASSERT(mFactor2Mode);
        matchType = MatchType::SUBSTITUTE_BECAUSE_BEST;
      } else {
        // The exact match is still decoding, but it's the best we've got.
        matchType = MatchType::EXACT;
      }
    } else {
      if (exactMatch) {
        // We found an "exact match"; it must have been a placeholder.
        MOZ_ASSERT(exactMatch->IsPlaceholder());
        matchType = MatchType::PENDING;
      } else {
        // We couldn't find an exact match *or* a substitute.
        matchType = MatchType::NOT_FOUND;
      }
    }

    return MakeTuple(bestMatch.forget(), matchType, suggestedSize);
  }

  // Switches the cache into factor-of-2 mode if the number of surfaces
  // exceeds the pref-controlled threshold plus the image's native size count.
  // Requires at least one surface in the cache (the first surface is used to
  // reach the owning Image).
  void MaybeSetFactor2Mode() {
    MOZ_ASSERT(!mFactor2Mode);

    // Typically an image cache will not have too many size-varying surfaces, so
    // if we exceed the given threshold, we should consider using a subset.
    int32_t thresholdSurfaces = gfxPrefs::ImageCacheFactor2ThresholdSurfaces();
    if (thresholdSurfaces < 0 ||
        mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
      return;
    }

    // Determine how many native surfaces this image has. Zero means we either
    // don't know yet (in which case do nothing), or we don't want to limit the
    // number of surfaces for this image.
    //
    // XXX(aosmond): Vector images have zero native sizes. This is because they
    // are regenerated at the given size. There isn't an equivalent concept to
    // the native size (and w/h ratio) to provide a frame of reference to what
    // are "good" sizes. While it is desirable to have a similar mechanism as
    // that for raster images, it will need a different approach.
    auto first = ConstIter();
    NotNull<CachedSurface*> current = WrapNotNull(first.UserData());
    Image* image = static_cast<Image*>(current->GetImageKey());
    size_t nativeSizes = image->GetNativeSizesLength();
    if (nativeSizes == 0) {
      return;
    }

    // Increase the threshold by the number of native sizes. This ensures that
    // we do not prevent decoding of the image at all its native sizes. It does
    // not guarantee we will provide a surface at that size however (i.e. many
    // other sized surfaces are requested, in addition to the native sizes).
    thresholdSurfaces += nativeSizes;
    if (mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
      return;
    }

    // Get our native size. While we know the image should be fully decoded,
    // if it is an SVG, it is valid to have a zero size. We can't do compacting
    // in that case because we need to know the width/height ratio to define a
    // candidate set.
    IntSize nativeSize;
    if (NS_FAILED(image->GetWidth(&nativeSize.width)) ||
        NS_FAILED(image->GetHeight(&nativeSize.height)) ||
        nativeSize.IsEmpty()) {
      return;
    }

    // We have a valid size, we can change modes.
    mFactor2Mode = true;
  }

  // Discards surfaces that are not factor-of-2 sized once a decoded
  // factor-of-2 replacement exists, invoking aRemoveCallback for each removed
  // surface so the caller can update its own bookkeeping. No-op unless we are
  // in factor-of-2 mode and have not already finished pruning.
  template <typename Function>
  void Prune(Function&& aRemoveCallback) {
    if (!mFactor2Mode || mFactor2Pruned) {
      return;
    }

    // Attempt to discard any surfaces which are not factor of 2 and the best
    // factor of 2 match exists.
    bool hasNotFactorSize = false;
    for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
      const SurfaceKey& currentKey = current->GetSurfaceKey();
      const IntSize& currentSize = currentKey.Size();

      // First we check if someone requested this size and would not accept
      // an alternatively sized surface.
      if (current->CannotSubstitute()) {
        continue;
      }

      // Next we find the best factor of 2 size for this surface. If this
      // surface is a factor of 2 size, then we want to keep it.
      IntSize bestSize = SuggestedSize(currentSize);
      if (bestSize == currentSize) {
        continue;
      }

      // Check the cache for a surface with the same parameters except for the
      // size which uses the closest factor of 2 size.
      SurfaceKey compactKey = currentKey.CloneWithSize(bestSize);
      RefPtr<CachedSurface> compactMatch;
      mSurfaces.Get(compactKey, getter_AddRefs(compactMatch));
      if (compactMatch && compactMatch->IsDecoded()) {
        aRemoveCallback(current);
        iter.Remove();
      } else {
        hasNotFactorSize = true;
      }
    }

    // We have no surfaces that are not factor of 2 sized, so we can stop
    // pruning henceforth, because we avoid the insertion of new surfaces that
    // don't match our sizing set (unless the caller won't accept a
    // substitution.)
    if (!hasNotFactorSize) {
      mFactor2Pruned = true;
    }

    // We should never leave factor of 2 mode due to pruning in of itself, but
    // if we discarded surfaces due to the volatile buffers getting released,
    // it is possible.
    AfterMaybeRemove();
  }

  // Returns the size a caller should decode at for the given ideal size. In
  // factor-of-2 mode this walks down from the image's native size, halving
  // each step, and returns the best candidate per CompareArea; otherwise it
  // returns aSize unchanged.
  IntSize SuggestedSize(const IntSize& aSize) const {
    // When not in factor of 2 mode, we can always decode at the given size.
    if (!mFactor2Mode) {
      return aSize;
    }

    // We cannot enter factor of 2 mode unless we have a minimum number of
    // surfaces, and we should have left it if the cache was emptied.
    if (MOZ_UNLIKELY(IsEmpty())) {
      MOZ_ASSERT_UNREACHABLE("Should not be empty and in factor of 2 mode!");
      return aSize;
    }

    // This bit of awkwardness gets the largest native size of the image.
    auto iter = ConstIter();
    NotNull<CachedSurface*> firstSurface = WrapNotNull(iter.UserData());
    Image* image = static_cast<Image*>(firstSurface->GetImageKey());
    IntSize factorSize;
    if (NS_FAILED(image->GetWidth(&factorSize.width)) ||
        NS_FAILED(image->GetHeight(&factorSize.height)) ||
        factorSize.IsEmpty()) {
      // We should not have entered factor of 2 mode without a valid size, and
      // several successfully decoded surfaces.
      MOZ_ASSERT_UNREACHABLE("Expected valid native size!");
      return aSize;
    }

    // Start with the native size as the best first guess.
    IntSize bestSize = factorSize;
    factorSize.width /= 2;
    factorSize.height /= 2;

    while (!factorSize.IsEmpty()) {
      if (!CompareArea(aSize, bestSize, factorSize)) {
        // This size is not better than the last. Since we proceed from largest
        // to smallest, we know that the next size will not be better if the
        // previous size was rejected. Break early.
        break;
      }

      // The current factor of 2 size is better than the last selected size.
      bestSize = factorSize;
      factorSize.width /= 2;
      factorSize.height /= 2;
    }

    return bestSize;
  }

  // Returns true if aSize is a better match for aIdealSize than aBestSize.
  bool CompareArea(const IntSize& aIdealSize, const IntSize& aBestSize,
                   const IntSize& aSize) const {
    // Compare sizes. We use an area-based heuristic here instead of computing a
    // truly optimal answer, since it seems very unlikely to make a difference
    // for realistic sizes.
    int64_t idealArea = AreaOfIntSize(aIdealSize);
    int64_t currentArea = AreaOfIntSize(aSize);
    int64_t bestMatchArea = AreaOfIntSize(aBestSize);

    // If the best match is smaller than the ideal size, prefer bigger sizes.
    if (bestMatchArea < idealArea) {
      if (currentArea > bestMatchArea) {
        return true;
      }
      return false;
    }

    // Otherwise, prefer sizes closer to the ideal size, but still not smaller.
    if (idealArea <= currentArea && currentArea < bestMatchArea) {
      return true;
    }

    // This surface isn't an improvement over the current best match.
    return false;
  }

  // Records memory usage for every surface into aCounters. Surfaces whose
  // backing memory was purged by the OS (volatile buffers) are removed from
  // the cache instead of being reported; aRemoveCallback is invoked for each
  // such removal.
  template <typename Function>
  void CollectSizeOfSurfaces(nsTArray<SurfaceMemoryCounter>& aCounters,
                             MallocSizeOf aMallocSizeOf,
                             Function&& aRemoveCallback) {
    CachedSurface::SurfaceMemoryReport report(aCounters, aMallocSizeOf);
    for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());

      // We don't need the drawable surface for ourselves, but adding a surface
      // to the report will trigger this indirectly. If the surface was
      // discarded by the OS because it was in volatile memory, we should remove
      // it from the cache immediately rather than include it in the report.
      DrawableSurface drawableSurface;
      if (!surface->IsPlaceholder()) {
        drawableSurface = surface->GetDrawableSurface();
        if (!drawableSurface) {
          aRemoveCallback(surface);
          iter.Remove();
          continue;
        }
      }

      const IntSize& size = surface->GetSurfaceKey().Size();
      bool factor2Size = false;
      if (mFactor2Mode) {
        factor2Size = (size == SuggestedSize(size));
      }
      report.Add(surface, factor2Size);
    }

    AfterMaybeRemove();
  }

  SurfaceTable::Iterator ConstIter() const { return mSurfaces.ConstIter(); }

  // Tracks whether the owning image is locked.
  void SetLocked(bool aLocked) { mLocked = aLocked; }
  bool IsLocked() const { return mLocked; }

 private:
  // Called after any operation that may have removed surfaces.
  void AfterMaybeRemove() {
    if (IsEmpty() && mFactor2Mode) {
      // The last surface for this cache was removed. This can happen if the
      // surface was stored in a volatile buffer and got purged, or the surface
      // expired from the cache. If the cache itself lingers for some reason
      // (e.g. in the process of performing a lookup, the cache itself is
      // locked), then we need to reset the factor of 2 state because it
      // requires at least one surface present to get the native size
      // information from the image.
      mFactor2Mode = mFactor2Pruned = false;
    }
  }

  SurfaceTable mSurfaces;

  // True if the owning image is locked.
  bool mLocked;

  // True in "factor of 2" mode.
  bool mFactor2Mode;

  // True if all non-factor of 2 surfaces have been removed from the cache. Note
  // that this excludes unsubstitutable sizes.
  bool mFactor2Pruned;
};
646
647 /**
648 * SurfaceCacheImpl is responsible for determining which surfaces will be cached
649 * and managing the surface cache data structures. Rather than interact with
650 * SurfaceCacheImpl directly, client code interacts with SurfaceCache, which
651 * maintains high-level invariants and encapsulates the details of the surface
652 * cache's implementation.
653 */
654 class SurfaceCacheImpl final : public nsIMemoryReporter {
655 public:
656 NS_DECL_ISUPPORTS
657
  // Constructs the cache with the given expiration time (for the surface
  // expiration tracker), discard factor (consumed elsewhere in this class —
  // not visible in this chunk), and maximum total cost in bytes. Registers a
  // "memory-pressure" observer so the cache can react to low-memory events.
  SurfaceCacheImpl(uint32_t aSurfaceCacheExpirationTimeMS,
                   uint32_t aSurfaceCacheDiscardFactor,
                   uint32_t aSurfaceCacheSize)
      : mExpirationTracker(aSurfaceCacheExpirationTimeMS),
        mMemoryPressureObserver(new MemoryPressureObserver),
        mDiscardFactor(aSurfaceCacheDiscardFactor),
        mMaxCost(aSurfaceCacheSize),
        // Initially the entire budget is available.
        mAvailableCost(aSurfaceCacheSize),
        mLockedCost(0),
        mOverflowCount(0) {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (os) {
      os->AddObserver(mMemoryPressureObserver, "memory-pressure", false);
    }
  }
673
674 private:
  // Unregisters the memory-pressure observer and the weak memory reporter
  // registered in InitMemoryReporter().
  virtual ~SurfaceCacheImpl() {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (os) {
      os->RemoveObserver(mMemoryPressureObserver, "memory-pressure");
    }

    UnregisterWeakMemoryReporter(this);
  }
683
684 public:
  // Registers this cache as a weak memory reporter. NOTE(review): kept
  // separate from the constructor, presumably so registration of |this|
  // happens only after construction is complete — confirm.
  void InitMemoryReporter() { RegisterWeakMemoryReporter(this); }
686
  // Attempts to insert aProvider's surface into the cache, evicting unlocked
  // surfaces in cost order if needed to make room. Returns
  // FAILURE_ALREADY_PRESENT if a non-pending entry with the same keys already
  // exists, FAILURE if the surface cannot fit (or locking/insertion/tracking
  // fails), and SUCCESS otherwise. Must be called with the cache mutex held
  // (enforced by the aAutoLock parameter).
  InsertOutcome Insert(NotNull<ISurfaceProvider*> aProvider, bool aSetAvailable,
                       const StaticMutexAutoLock& aAutoLock) {
    // If this is a duplicate surface, refuse to replace the original.
    // XXX(seth): Calling Lookup() and then RemoveEntry() does the lookup
    // twice. We'll make this more efficient in bug 1185137.
    LookupResult result =
        Lookup(aProvider->GetImageKey(), aProvider->GetSurfaceKey(), aAutoLock,
               /* aMarkUsed = */ false);
    if (MOZ_UNLIKELY(result)) {
      return InsertOutcome::FAILURE_ALREADY_PRESENT;
    }

    // A PENDING result means a placeholder occupies this slot; replace it.
    if (result.Type() == MatchType::PENDING) {
      RemoveEntry(aProvider->GetImageKey(), aProvider->GetSurfaceKey(),
                  aAutoLock);
    }

    MOZ_ASSERT(result.Type() == MatchType::NOT_FOUND ||
                   result.Type() == MatchType::PENDING,
               "A LookupResult with no surface should be NOT_FOUND or PENDING");

    // If this is bigger than we can hold after discarding everything we can,
    // refuse to cache it.
    Cost cost = aProvider->LogicalSizeInBytes();
    if (MOZ_UNLIKELY(!CanHoldAfterDiscarding(cost))) {
      mOverflowCount++;
      return InsertOutcome::FAILURE;
    }

    // Remove elements in order of cost until we can fit this in the cache. Note
    // that locked surfaces aren't in mCosts, so we never remove them here.
    while (cost > mAvailableCost) {
      MOZ_ASSERT(!mCosts.IsEmpty(),
                 "Removed everything and it still won't fit");
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
             aAutoLock);
    }

    // Locate the appropriate per-image cache. If there's not an existing cache
    // for this image, create it.
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aProvider->GetImageKey());
    if (!cache) {
      cache = new ImageSurfaceCache;
      mImageCaches.Put(aProvider->GetImageKey(), cache);
    }

    // If we were asked to mark the cache entry available, do so.
    if (aSetAvailable) {
      aProvider->Availability().SetAvailable();
    }

    auto surface = MakeNotNull<RefPtr<CachedSurface>>(aProvider);

    // We require that locking succeed if the image is locked and we're not
    // inserting a placeholder; the caller may need to know this to handle
    // errors correctly.
    bool mustLock = cache->IsLocked() && !surface->IsPlaceholder();
    if (mustLock) {
      surface->SetLocked(true);
      if (!surface->IsLocked()) {
        return InsertOutcome::FAILURE;
      }
    }

    // Insert.
    MOZ_ASSERT(cost <= mAvailableCost, "Inserting despite too large a cost");
    if (!cache->Insert(surface)) {
      // Undo the lock we took above before reporting failure.
      if (mustLock) {
        surface->SetLocked(false);
      }
      return InsertOutcome::FAILURE;
    }

    // Tracking can fail (e.g. during XPCOM shutdown); roll back the insert.
    if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
      MOZ_ASSERT(!mustLock);
      Remove(surface, /* aStopTracking */ false, aAutoLock);
      return InsertOutcome::FAILURE;
    }

    return InsertOutcome::SUCCESS;
  }
768
  // Removes aSurface from its per-image cache, notifying the owning image
  // (unless the surface is a placeholder) and optionally stopping cost /
  // expiration tracking. aStopTracking is false only when the caller knows
  // tracking was never successfully started (see Insert). The removed
  // surface is queued in mCachedSurfacesDiscard so its actual destruction
  // happens outside the cache mutex.
  void Remove(NotNull<CachedSurface*> aSurface, bool aStopTracking,
              const StaticMutexAutoLock& aAutoLock) {
    ImageKey imageKey = aSurface->GetImageKey();

    RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
    MOZ_ASSERT(cache, "Shouldn't try to remove a surface with no image cache");

    // If the surface was not a placeholder, tell its image that we discarded
    // it.
    if (!aSurface->IsPlaceholder()) {
      static_cast<Image*>(imageKey)->OnSurfaceDiscarded(
          aSurface->GetSurfaceKey());
    }

    // If we failed during StartTracking, we can skip this step.
    if (aStopTracking) {
      StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
    }

    // Individual surfaces must be freed outside the lock.
    mCachedSurfacesDiscard.AppendElement(cache->Remove(aSurface));

    MaybeRemoveEmptyCache(imageKey, cache);
  }
793
  // Begins cost accounting (and, for unlocked surfaces, expiration tracking)
  // for aSurface. Locked surfaces only contribute to mLockedCost; unlocked
  // ones are entered into mCosts (sorted, for eviction) and the expiration
  // tracker. Returns false — leaving the surface untracked — if either
  // fallible step fails. On success, the surface's cost is deducted from
  // mAvailableCost.
  bool StartTracking(NotNull<CachedSurface*> aSurface,
                     const StaticMutexAutoLock& aAutoLock) {
    CostEntry costEntry = aSurface->GetCostEntry();
    MOZ_ASSERT(costEntry.GetCost() <= mAvailableCost,
               "Cost too large and the caller didn't catch it");

    if (aSurface->IsLocked()) {
      mLockedCost += costEntry.GetCost();
      MOZ_ASSERT(mLockedCost <= mMaxCost, "Locked more than we can hold?");
    } else {
      if (NS_WARN_IF(!mCosts.InsertElementSorted(costEntry, fallible))) {
        return false;
      }

      // This may fail during XPCOM shutdown, so we need to ensure the object is
      // tracked before calling RemoveObject in StopTracking.
      nsresult rv = mExpirationTracker.AddObjectLocked(aSurface, aAutoLock);
      if (NS_WARN_IF(NS_FAILED(rv))) {
        // Roll back the mCosts insertion so the two stay in sync.
        DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
        MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
        return false;
      }
    }

    mAvailableCost -= costEntry.GetCost();
    return true;
  }
821
  // Reverses StartTracking: returns aSurface's cost to mAvailableCost and
  // removes it from the locked-cost tally or from mCosts plus the expiration
  // tracker, as appropriate. aIsTracked indicates whether the caller believes
  // expiration tracking was successfully started; it is asserted against the
  // surface's actual expiration state.
  void StopTracking(NotNull<CachedSurface*> aSurface, bool aIsTracked,
                    const StaticMutexAutoLock& aAutoLock) {
    CostEntry costEntry = aSurface->GetCostEntry();

    if (aSurface->IsLocked()) {
      MOZ_ASSERT(mLockedCost >= costEntry.GetCost(), "Costs don't balance");
      mLockedCost -= costEntry.GetCost();
      // XXX(seth): It'd be nice to use an O(log n) lookup here. This is O(n).
      MOZ_ASSERT(!mCosts.Contains(costEntry),
                 "Shouldn't have a cost entry for a locked surface");
    } else {
      if (MOZ_LIKELY(aSurface->GetExpirationState()->IsTracked())) {
        MOZ_ASSERT(aIsTracked, "Expiration-tracking a surface unexpectedly!");
        mExpirationTracker.RemoveObjectLocked(aSurface, aAutoLock);
      } else {
        // Our call to AddObject must have failed in StartTracking; most likely
        // we're in XPCOM shutdown right now.
        MOZ_ASSERT(!aIsTracked, "Not expiration-tracking an unlocked surface!");
      }

      DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
      MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
    }

    mAvailableCost += costEntry.GetCost();
    MOZ_ASSERT(mAvailableCost <= mMaxCost,
               "More available cost than we started with");
  }
850
  // Look up the surface matching aSurfaceKey exactly in aImageKey's per-image
  // cache. Returns NOT_FOUND, PENDING (only a placeholder exists), or EXACT.
  // Entries whose underlying surface the OS has released are removed here as
  // a side effect.
  LookupResult Lookup(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
                      const StaticMutexAutoLock& aAutoLock,
                      bool aMarkUsed = true) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      // No cached surfaces for this image.
      return LookupResult(MatchType::NOT_FOUND);
    }

    RefPtr<CachedSurface> surface = cache->Lookup(aSurfaceKey, aMarkUsed);
    if (!surface) {
      // Lookup in the per-image cache missed.
      return LookupResult(MatchType::NOT_FOUND);
    }

    if (surface->IsPlaceholder()) {
      return LookupResult(MatchType::PENDING);
    }

    DrawableSurface drawableSurface = surface->GetDrawableSurface();
    if (!drawableSurface) {
      // The surface was released by the operating system. Remove the cache
      // entry as well.
      Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
      return LookupResult(MatchType::NOT_FOUND);
    }

    if (aMarkUsed &&
        !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
      // MarkUsed already untracked the surface on failure, so only the cache
      // entry itself still needs removing.
      Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
      return LookupResult(MatchType::NOT_FOUND);
    }

    MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
               "Lookup() not returning an exact match?");
    return LookupResult(Move(drawableSurface), MatchType::EXACT);
  }
888
  // Look up the best available surface for aSurfaceKey, accepting size
  // substitutes. Retries the per-image lookup until a candidate surface can
  // actually be used for drawing, removing entries whose surfaces the OS has
  // released along the way.
  LookupResult LookupBestMatch(const ImageKey aImageKey,
                               const SurfaceKey& aSurfaceKey,
                               const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      // No cached surfaces for this image.
      return LookupResult(MatchType::NOT_FOUND);
    }

    // Repeatedly look up the best match, trying again if the resulting surface
    // has been freed by the operating system, until we can either lock a
    // surface for drawing or there are no matching surfaces left.
    // XXX(seth): This is O(N^2), but N is expected to be very small. If we
    // encounter a performance problem here we can revisit this.

    RefPtr<CachedSurface> surface;
    DrawableSurface drawableSurface;
    MatchType matchType = MatchType::NOT_FOUND;
    IntSize suggestedSize;
    while (true) {
      Tie(surface, matchType, suggestedSize) =
          cache->LookupBestMatch(aSurfaceKey);

      if (!surface) {
        return LookupResult(
            matchType);  // Lookup in the per-image cache missed.
      }

      drawableSurface = surface->GetDrawableSurface();
      if (drawableSurface) {
        break;
      }

      // The surface was released by the operating system. Remove the cache
      // entry as well.
      Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
    }

    // A substitute must agree with the request in everything except size.
    MOZ_ASSERT_IF(matchType == MatchType::EXACT,
                  surface->GetSurfaceKey() == aSurfaceKey);
    MOZ_ASSERT_IF(
        matchType == MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND ||
            matchType == MatchType::SUBSTITUTE_BECAUSE_PENDING,
        surface->GetSurfaceKey().SVGContext() == aSurfaceKey.SVGContext() &&
            surface->GetSurfaceKey().Playback() == aSurfaceKey.Playback() &&
            surface->GetSurfaceKey().Flags() == aSurfaceKey.Flags());

    if (matchType == MatchType::EXACT ||
        matchType == MatchType::SUBSTITUTE_BECAUSE_BEST) {
      // If re-tracking fails, drop the cache entry; the drawable surface we
      // already obtained is still returned to the caller.
      if (!MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
        Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
      }
    }

    return LookupResult(Move(drawableSurface), matchType, suggestedSize);
  }
945
CanHold(const Cost aCost) const946 bool CanHold(const Cost aCost) const { return aCost <= mMaxCost; }
947
MaximumCapacity() const948 size_t MaximumCapacity() const { return size_t(mMaxCost); }
949
  // Called when a provider previously inserted as a placeholder now has a
  // real surface available. The provider is reinserted with the "mark
  // available" flag so Insert() updates its state.
  void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
                        const StaticMutexAutoLock& aAutoLock) {
    if (!aProvider->Availability().IsPlaceholder()) {
      MOZ_ASSERT_UNREACHABLE("Calling SurfaceAvailable on non-placeholder");
      return;
    }

    // Reinsert the provider, requesting that Insert() mark it available. This
    // may or may not succeed, depending on whether some other decoder has
    // beaten us to the punch and inserted a non-placeholder version of this
    // surface first, but it's fine either way.
    // XXX(seth): This could be implemented more efficiently; we should be able
    // to just update our data structures without reinserting.
    Insert(aProvider, /* aSetAvailable = */ true, aAutoLock);
  }
965
LockImage(const ImageKey aImageKey)966 void LockImage(const ImageKey aImageKey) {
967 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
968 if (!cache) {
969 cache = new ImageSurfaceCache;
970 mImageCaches.Put(aImageKey, cache);
971 }
972
973 cache->SetLocked(true);
974
975 // We don't relock this image's existing surfaces right away; instead, the
976 // image should arrange for Lookup() to touch them if they are still useful.
977 }
978
UnlockImage(const ImageKey aImageKey,const StaticMutexAutoLock & aAutoLock)979 void UnlockImage(const ImageKey aImageKey,
980 const StaticMutexAutoLock& aAutoLock) {
981 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
982 if (!cache || !cache->IsLocked()) {
983 return; // Already unlocked.
984 }
985
986 cache->SetLocked(false);
987 DoUnlockSurfaces(WrapNotNull(cache), /* aStaticOnly = */ false, aAutoLock);
988 }
989
UnlockEntries(const ImageKey aImageKey,const StaticMutexAutoLock & aAutoLock)990 void UnlockEntries(const ImageKey aImageKey,
991 const StaticMutexAutoLock& aAutoLock) {
992 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
993 if (!cache || !cache->IsLocked()) {
994 return; // Already unlocked.
995 }
996
997 // (Note that we *don't* unlock the per-image cache here; that's the
998 // difference between this and UnlockImage.)
999 DoUnlockSurfaces(
1000 WrapNotNull(cache),
1001 /* aStaticOnly = */ !gfxPrefs::ImageMemAnimatedDiscardable(),
1002 aAutoLock);
1003 }
1004
  // Stop tracking every surface belonging to aImageKey and drop the per-image
  // cache. Returns the per-image cache (or nullptr if there was none) so the
  // caller can release its surfaces outside the lock.
  already_AddRefed<ImageSurfaceCache> RemoveImage(
      const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return nullptr;  // No cached surfaces for this image, so nothing to do.
    }

    // Discard all of the cached surfaces for this image.
    // XXX(seth): This is O(n^2) since for each item in the cache we are
    // removing an element from the costs array. Since n is expected to be
    // small, performance should be good, but if usage patterns change we
    // should change the data structure used for mCosts.
    for (auto iter = cache->ConstIter(); !iter.Done(); iter.Next()) {
      StopTracking(WrapNotNull(iter.UserData()),
                   /* aIsTracked */ true, aAutoLock);
    }

    // The per-image cache isn't needed anymore, so remove it as well.
    // This implicitly unlocks the image if it was locked.
    mImageCaches.Remove(aImageKey);

    // Since we did not actually remove any of the surfaces from the cache
    // itself, only stopped tracking them, we should free it outside the lock.
    return cache.forget();
  }
1030
  // Discard the surfaces that ImageSurfaceCache::Prune selects for aImageKey
  // (the pruning criteria live in the per-image cache), then drop the
  // per-image cache itself if it ended up empty and unlocked.
  void PruneImage(const ImageKey aImageKey,
                  const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return;  // No cached surfaces for this image, so nothing to do.
    }

    cache->Prune([this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
      StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
      // Individual surfaces must be freed outside the lock.
      mCachedSurfacesDiscard.AppendElement(aSurface);
    });

    MaybeRemoveEmptyCache(aImageKey, cache);
  }
1046
DiscardAll(const StaticMutexAutoLock & aAutoLock)1047 void DiscardAll(const StaticMutexAutoLock& aAutoLock) {
1048 // Remove in order of cost because mCosts is an array and the other data
1049 // structures are all hash tables. Note that locked surfaces are not
1050 // removed, since they aren't present in mCosts.
1051 while (!mCosts.IsEmpty()) {
1052 Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
1053 aAutoLock);
1054 }
1055 }
1056
  // Respond to a memory-pressure notification by discarding roughly
  // (1 / mDiscardFactor) of the discardable (i.e. unlocked) cost currently
  // held by the cache.
  void DiscardForMemoryPressure(const StaticMutexAutoLock& aAutoLock) {
    // Compute our discardable cost. Since locked surfaces aren't discardable,
    // we exclude them.
    const Cost discardableCost = (mMaxCost - mAvailableCost) - mLockedCost;
    MOZ_ASSERT(discardableCost <= mMaxCost, "Discardable cost doesn't add up");

    // Our target is to raise our available cost by (1 / mDiscardFactor) of our
    // discardable cost - in other words, we want to end up with about
    // (discardableCost / mDiscardFactor) fewer bytes stored in the surface
    // cache after we're done.
    const Cost targetCost = mAvailableCost + (discardableCost / mDiscardFactor);

    if (targetCost > mMaxCost - mLockedCost) {
      // Should be impossible given the arithmetic above; fall back to
      // discarding everything unlocked.
      MOZ_ASSERT_UNREACHABLE("Target cost is more than we can discard");
      DiscardAll(aAutoLock);
      return;
    }

    // Discard surfaces until we've reduced our cost to our target cost.
    while (mAvailableCost < targetCost) {
      MOZ_ASSERT(!mCosts.IsEmpty(), "Removed everything and still not done");
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true,
             aAutoLock);
    }
  }
1082
TakeDiscard(nsTArray<RefPtr<CachedSurface>> & aDiscard,const StaticMutexAutoLock & aAutoLock)1083 void TakeDiscard(nsTArray<RefPtr<CachedSurface>>& aDiscard,
1084 const StaticMutexAutoLock& aAutoLock) {
1085 MOZ_ASSERT(aDiscard.IsEmpty());
1086 aDiscard = Move(mCachedSurfacesDiscard);
1087 }
1088
  // Lock an individual surface so it cannot expire. Placeholders and
  // already-locked surfaces are left alone.
  void LockSurface(NotNull<CachedSurface*> aSurface,
                   const StaticMutexAutoLock& aAutoLock) {
    if (aSurface->IsPlaceholder() || aSurface->IsLocked()) {
      return;
    }

    // Stop tracking the surface in its unlocked form before flipping the
    // locked bit, so the cost accounting stays balanced.
    StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);

    // Lock the surface. This can fail.
    aSurface->SetLocked(true);
    // For a locked surface StartTracking only adjusts mLockedCost (it doesn't
    // touch mCosts or the expiration tracker), so failure is not expected;
    // the result is only consumed by the debug assertion.
    DebugOnly<bool> tracking = StartTracking(aSurface, aAutoLock);
    MOZ_ASSERT(tracking);
  }
1102
  // nsIMemoryReporter implementation: reports coarse, cost-based metrics for
  // the whole surface cache. Per-surface memory is reported separately (see
  // CollectSizeOfSurfaces); these numbers matter because they drive the
  // cache's eviction behavior.
  NS_IMETHOD
  CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData,
                 bool aAnonymize) override {
    StaticMutexAutoLock lock(sInstanceMutex);

    // clang-format off
    // We have explicit memory reporting for the surface cache which is more
    // accurate than the cost metrics we report here, but these metrics are
    // still useful to report, since they control the cache's behavior.
    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-estimated-total",
      KIND_OTHER, UNITS_BYTES, (mMaxCost - mAvailableCost),
"Estimated total memory used by the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-estimated-locked",
      KIND_OTHER, UNITS_BYTES, mLockedCost,
"Estimated memory used by locked surfaces in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-overflow-count",
      KIND_OTHER, UNITS_COUNT, mOverflowCount,
"Count of how many times the surface cache has hit its capacity and been "
"unable to insert a new surface.");
    // clang-format on

    return NS_OK;
  }
1131
  // Gather per-surface memory counters for aImageKey into aCounters. The
  // callback passed to the per-image cache is invoked for entries the cache
  // decides to drop during collection; those are untracked here and queued
  // for release outside the lock. If that empties the per-image cache, it is
  // removed as well.
  void CollectSizeOfSurfaces(const ImageKey aImageKey,
                             nsTArray<SurfaceMemoryCounter>& aCounters,
                             MallocSizeOf aMallocSizeOf,
                             const StaticMutexAutoLock& aAutoLock) {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return;  // No surfaces for this image.
    }

    // Report all surfaces in the per-image cache.
    cache->CollectSizeOfSurfaces(
        aCounters, aMallocSizeOf,
        [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
          StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
          // Individual surfaces must be freed outside the lock.
          mCachedSurfacesDiscard.AppendElement(aSurface);
        });

    MaybeRemoveEmptyCache(aImageKey, cache);
  }
1152
1153 private:
GetImageCache(const ImageKey aImageKey)1154 already_AddRefed<ImageSurfaceCache> GetImageCache(const ImageKey aImageKey) {
1155 RefPtr<ImageSurfaceCache> imageCache;
1156 mImageCaches.Get(aImageKey, getter_AddRefs(imageCache));
1157 return imageCache.forget();
1158 }
1159
MaybeRemoveEmptyCache(const ImageKey aImageKey,ImageSurfaceCache * aCache)1160 void MaybeRemoveEmptyCache(const ImageKey aImageKey,
1161 ImageSurfaceCache* aCache) {
1162 // Remove the per-image cache if it's unneeded now. Keep it if the image is
1163 // locked, since the per-image cache is where we store that state. Note that
1164 // we don't push it into mImageCachesDiscard because all of its surfaces
1165 // have been removed, so it is safe to free while holding the lock.
1166 if (aCache->IsEmpty() && !aCache->IsLocked()) {
1167 mImageCaches.Remove(aImageKey);
1168 }
1169 }
1170
1171 // This is similar to CanHold() except that it takes into account the costs of
1172 // locked surfaces. It's used internally in Insert(), but it's not exposed
1173 // publicly because we permit multithreaded access to the surface cache, which
1174 // means that the result would be meaningless: another thread could insert a
1175 // surface or lock an image at any time.
CanHoldAfterDiscarding(const Cost aCost) const1176 bool CanHoldAfterDiscarding(const Cost aCost) const {
1177 return aCost <= mMaxCost - mLockedCost;
1178 }
1179
  // Record that aSurface was just used. If the owning image is locked, the
  // surface is locked instead of being bumped in the expiration tracker.
  // Returns false if the surface could not be re-tracked; in that case the
  // surface is already untracked and the caller is expected to remove it.
  bool MarkUsed(NotNull<CachedSurface*> aSurface,
                NotNull<ImageSurfaceCache*> aCache,
                const StaticMutexAutoLock& aAutoLock) {
    if (aCache->IsLocked()) {
      LockSurface(aSurface, aAutoLock);
      return true;
    }

    nsresult rv = mExpirationTracker.MarkUsedLocked(aSurface, aAutoLock);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // If mark used fails, it is because it failed to reinsert the surface
      // after removing it from the tracker. Thus we need to update our
      // own accounting but otherwise expect it to be untracked.
      StopTracking(aSurface, /* aIsTracked */ false, aAutoLock);
      return false;
    }
    return true;
  }
1198
  // Unlock every locked surface in aCache (or, when aStaticOnly is true, only
  // surfaces with static playback), moving them back under cost/expiration
  // tracking. Surfaces that cannot be re-tracked are removed from the cache.
  void DoUnlockSurfaces(NotNull<ImageSurfaceCache*> aCache, bool aStaticOnly,
                        const StaticMutexAutoLock& aAutoLock) {
    AutoTArray<NotNull<CachedSurface*>, 8> discard;

    // Unlock all the surfaces the per-image cache is holding.
    for (auto iter = aCache->ConstIter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());
      if (surface->IsPlaceholder() || !surface->IsLocked()) {
        continue;
      }
      if (aStaticOnly &&
          surface->GetSurfaceKey().Playback() != PlaybackType::eStatic) {
        continue;
      }
      // Re-track as unlocked: stop tracking while still locked, clear the
      // locked bit, then restart tracking (which may fail during shutdown).
      StopTracking(surface, /* aIsTracked */ true, aAutoLock);
      surface->SetLocked(false);
      if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
        discard.AppendElement(surface);
      }
    }

    // Discard any that we failed to track.
    for (auto iter = discard.begin(); iter != discard.end(); ++iter) {
      Remove(*iter, /* aStopTracking */ false, aAutoLock);
    }
  }
1225
RemoveEntry(const ImageKey aImageKey,const SurfaceKey & aSurfaceKey,const StaticMutexAutoLock & aAutoLock)1226 void RemoveEntry(const ImageKey aImageKey, const SurfaceKey& aSurfaceKey,
1227 const StaticMutexAutoLock& aAutoLock) {
1228 RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
1229 if (!cache) {
1230 return; // No cached surfaces for this image.
1231 }
1232
1233 RefPtr<CachedSurface> surface =
1234 cache->Lookup(aSurfaceKey, /* aForAccess = */ false);
1235 if (!surface) {
1236 return; // Lookup in the per-image cache missed.
1237 }
1238
1239 Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
1240 }
1241
  // Expiration tracker that evicts surfaces which have gone unused for long
  // enough. All of its locking goes through the surface cache's static mutex.
  class SurfaceTracker final
      : public ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
                                     StaticMutexAutoLock> {
   public:
    explicit SurfaceTracker(uint32_t aSurfaceCacheExpirationTimeMS)
        : ExpirationTrackerImpl<CachedSurface, 2, StaticMutex,
                                StaticMutexAutoLock>(
              aSurfaceCacheExpirationTimeMS, "SurfaceTracker",
              SystemGroup::EventTargetFor(TaskCategory::Other)) {}

   protected:
    // Called with the lock held when a surface expires; the surface is
    // removed from the cache and lands on the discard list rather than being
    // freed here.
    void NotifyExpiredLocked(CachedSurface* aSurface,
                             const StaticMutexAutoLock& aAutoLock) override {
      sInstance->Remove(WrapNotNull(aSurface), /* aStopTracking */ true,
                        aAutoLock);
    }

    // Still under the lock after a batch of expirations: take ownership of
    // the cache's discard list so it can be freed after unlocking.
    void NotifyHandlerEndLocked(const StaticMutexAutoLock& aAutoLock) override {
      sInstance->TakeDiscard(mDiscard, aAutoLock);
    }

    // Runs after the lock is released; the local array's destructor actually
    // releases the expired surfaces.
    void NotifyHandlerEnd() override {
      nsTArray<RefPtr<CachedSurface>> discard(Move(mDiscard));
    }

    StaticMutex& GetMutex() override { return sInstanceMutex; }

    // Surfaces that expired under the lock, awaiting release outside it.
    nsTArray<RefPtr<CachedSurface>> mDiscard;
  };
1271
  // Observer that reacts to "memory-pressure" notifications by discarding a
  // portion of the surface cache (see DiscardForMemoryPressure).
  class MemoryPressureObserver final : public nsIObserver {
   public:
    NS_DECL_ISUPPORTS

    NS_IMETHOD Observe(nsISupports*, const char* aTopic,
                       const char16_t*) override {
      // The discarded surfaces are destroyed by this local array's destructor
      // after the mutex is released.
      nsTArray<RefPtr<CachedSurface>> discard;
      {
        StaticMutexAutoLock lock(sInstanceMutex);
        if (sInstance && strcmp(aTopic, "memory-pressure") == 0) {
          sInstance->DiscardForMemoryPressure(lock);
          sInstance->TakeDiscard(discard, lock);
        }
      }
      return NS_OK;
    }

   private:
    virtual ~MemoryPressureObserver() {}
  };
1292
  // Cost entries for all unlocked surfaces, kept sorted so the most expensive
  // surface can be evicted first. Locked surfaces never appear here.
  nsTArray<CostEntry> mCosts;
  // Per-image caches, keyed by (non-owning) Image pointer.
  nsRefPtrHashtable<nsPtrHashKey<Image>, ImageSurfaceCache> mImageCaches;
  // Surfaces removed while holding the lock, waiting to be freed outside it
  // via TakeDiscard().
  nsTArray<RefPtr<CachedSurface>> mCachedSurfacesDiscard;
  // Evicts surfaces that have gone unused for too long.
  SurfaceTracker mExpirationTracker;
  RefPtr<MemoryPressureObserver> mMemoryPressureObserver;
  // On memory pressure, discard about 1/mDiscardFactor of discardable cost.
  const uint32_t mDiscardFactor;
  // Total capacity of the cache, in cost units (bytes).
  const Cost mMaxCost;
  // Capacity not currently in use; invariant: mAvailableCost <= mMaxCost.
  Cost mAvailableCost;
  // Portion of the used cost held by locked (non-discardable) surfaces.
  Cost mLockedCost;
  // Number of times an insertion failed because the cache was at capacity.
  size_t mOverflowCount;
1303 };
1304
// XPCOM refcounting/QueryInterface boilerplate for the cache singleton (a
// memory reporter) and its inner memory-pressure observer.
NS_IMPL_ISUPPORTS(SurfaceCacheImpl, nsIMemoryReporter)
NS_IMPL_ISUPPORTS(SurfaceCacheImpl::MemoryPressureObserver, nsIObserver)
1307
1308 ///////////////////////////////////////////////////////////////////////////////
1309 // Public API
1310 ///////////////////////////////////////////////////////////////////////////////
1311
1312 /* static */ void SurfaceCache::Initialize() {
1313 // Initialize preferences.
1314 MOZ_ASSERT(NS_IsMainThread());
1315 MOZ_ASSERT(!sInstance, "Shouldn't initialize more than once");
1316
1317 // See gfxPrefs for the default values of these preferences.
1318
1319 // Length of time before an unused surface is removed from the cache, in
1320 // milliseconds.
1321 uint32_t surfaceCacheExpirationTimeMS =
1322 gfxPrefs::ImageMemSurfaceCacheMinExpirationMS();
1323
1324 // What fraction of the memory used by the surface cache we should discard
1325 // when we get a memory pressure notification. This value is interpreted as
1326 // 1/N, so 1 means to discard everything, 2 means to discard about half of the
1327 // memory we're using, and so forth. We clamp it to avoid division by zero.
1328 uint32_t surfaceCacheDiscardFactor =
1329 max(gfxPrefs::ImageMemSurfaceCacheDiscardFactor(), 1u);
1330
1331 // Maximum size of the surface cache, in kilobytes.
1332 uint64_t surfaceCacheMaxSizeKB = gfxPrefs::ImageMemSurfaceCacheMaxSizeKB();
1333
1334 // A knob determining the actual size of the surface cache. Currently the
1335 // cache is (size of main memory) / (surface cache size factor) KB
1336 // or (surface cache max size) KB, whichever is smaller. The formula
1337 // may change in the future, though.
1338 // For example, a value of 4 would yield a 256MB cache on a 1GB machine.
1339 // The smallest machines we are likely to run this code on have 256MB
1340 // of memory, which would yield a 64MB cache on this setting.
1341 // We clamp this value to avoid division by zero.
1342 uint32_t surfaceCacheSizeFactor =
1343 max(gfxPrefs::ImageMemSurfaceCacheSizeFactor(), 1u);
1344
1345 // Compute the size of the surface cache.
1346 uint64_t memorySize = PR_GetPhysicalMemorySize();
1347 if (memorySize == 0) {
1348 MOZ_ASSERT_UNREACHABLE("PR_GetPhysicalMemorySize not implemented here");
1349 memorySize = 256 * 1024 * 1024; // Fall back to 256MB.
1350 }
1351 uint64_t proposedSize = memorySize / surfaceCacheSizeFactor;
1352 uint64_t surfaceCacheSizeBytes =
1353 min(proposedSize, surfaceCacheMaxSizeKB * 1024);
1354 uint32_t finalSurfaceCacheSizeBytes =
1355 min(surfaceCacheSizeBytes, uint64_t(UINT32_MAX));
1356
1357 // Create the surface cache singleton with the requested settings. Note that
1358 // the size is a limit that the cache may not grow beyond, but we do not
1359 // actually allocate any storage for surfaces at this time.
1360 sInstance = new SurfaceCacheImpl(surfaceCacheExpirationTimeMS,
1361 surfaceCacheDiscardFactor,
1362 finalSurfaceCacheSizeBytes);
1363 sInstance->InitMemoryReporter();
1364 }
1365
Shutdown()1366 /* static */ void SurfaceCache::Shutdown() {
1367 RefPtr<SurfaceCacheImpl> cache;
1368 {
1369 StaticMutexAutoLock lock(sInstanceMutex);
1370 MOZ_ASSERT(NS_IsMainThread());
1371 MOZ_ASSERT(sInstance, "No singleton - was Shutdown() called twice?");
1372 cache = sInstance.forget();
1373 }
1374 }
1375
Lookup(const ImageKey aImageKey,const SurfaceKey & aSurfaceKey)1376 /* static */ LookupResult SurfaceCache::Lookup(const ImageKey aImageKey,
1377 const SurfaceKey& aSurfaceKey) {
1378 nsTArray<RefPtr<CachedSurface>> discard;
1379 LookupResult rv(MatchType::NOT_FOUND);
1380
1381 {
1382 StaticMutexAutoLock lock(sInstanceMutex);
1383 if (!sInstance) {
1384 return rv;
1385 }
1386
1387 rv = sInstance->Lookup(aImageKey, aSurfaceKey, lock);
1388 sInstance->TakeDiscard(discard, lock);
1389 }
1390
1391 return rv;
1392 }
1393
LookupBestMatch(const ImageKey aImageKey,const SurfaceKey & aSurfaceKey)1394 /* static */ LookupResult SurfaceCache::LookupBestMatch(
1395 const ImageKey aImageKey, const SurfaceKey& aSurfaceKey) {
1396 nsTArray<RefPtr<CachedSurface>> discard;
1397 LookupResult rv(MatchType::NOT_FOUND);
1398
1399 {
1400 StaticMutexAutoLock lock(sInstanceMutex);
1401 if (!sInstance) {
1402 return rv;
1403 }
1404
1405 rv = sInstance->LookupBestMatch(aImageKey, aSurfaceKey, lock);
1406 sInstance->TakeDiscard(discard, lock);
1407 }
1408
1409 return rv;
1410 }
1411
Insert(NotNull<ISurfaceProvider * > aProvider)1412 /* static */ InsertOutcome SurfaceCache::Insert(
1413 NotNull<ISurfaceProvider*> aProvider) {
1414 nsTArray<RefPtr<CachedSurface>> discard;
1415 InsertOutcome rv(InsertOutcome::FAILURE);
1416
1417 {
1418 StaticMutexAutoLock lock(sInstanceMutex);
1419 if (!sInstance) {
1420 return rv;
1421 }
1422
1423 rv = sInstance->Insert(aProvider, /* aSetAvailable = */ false, lock);
1424 sInstance->TakeDiscard(discard, lock);
1425 }
1426
1427 return rv;
1428 }
1429
CanHold(const IntSize & aSize,uint32_t aBytesPerPixel)1430 /* static */ bool SurfaceCache::CanHold(const IntSize& aSize,
1431 uint32_t aBytesPerPixel /* = 4 */) {
1432 StaticMutexAutoLock lock(sInstanceMutex);
1433 if (!sInstance) {
1434 return false;
1435 }
1436
1437 Cost cost = ComputeCost(aSize, aBytesPerPixel);
1438 return sInstance->CanHold(cost);
1439 }
1440
CanHold(size_t aSize)1441 /* static */ bool SurfaceCache::CanHold(size_t aSize) {
1442 StaticMutexAutoLock lock(sInstanceMutex);
1443 if (!sInstance) {
1444 return false;
1445 }
1446
1447 return sInstance->CanHold(aSize);
1448 }
1449
SurfaceAvailable(NotNull<ISurfaceProvider * > aProvider)1450 /* static */ void SurfaceCache::SurfaceAvailable(
1451 NotNull<ISurfaceProvider*> aProvider) {
1452 StaticMutexAutoLock lock(sInstanceMutex);
1453 if (!sInstance) {
1454 return;
1455 }
1456
1457 sInstance->SurfaceAvailable(aProvider, lock);
1458 }
1459
LockImage(const ImageKey aImageKey)1460 /* static */ void SurfaceCache::LockImage(const ImageKey aImageKey) {
1461 StaticMutexAutoLock lock(sInstanceMutex);
1462 if (sInstance) {
1463 return sInstance->LockImage(aImageKey);
1464 }
1465 }
1466
UnlockImage(const ImageKey aImageKey)1467 /* static */ void SurfaceCache::UnlockImage(const ImageKey aImageKey) {
1468 StaticMutexAutoLock lock(sInstanceMutex);
1469 if (sInstance) {
1470 return sInstance->UnlockImage(aImageKey, lock);
1471 }
1472 }
1473
UnlockEntries(const ImageKey aImageKey)1474 /* static */ void SurfaceCache::UnlockEntries(const ImageKey aImageKey) {
1475 StaticMutexAutoLock lock(sInstanceMutex);
1476 if (sInstance) {
1477 return sInstance->UnlockEntries(aImageKey, lock);
1478 }
1479 }
1480
RemoveImage(const ImageKey aImageKey)1481 /* static */ void SurfaceCache::RemoveImage(const ImageKey aImageKey) {
1482 RefPtr<ImageSurfaceCache> discard;
1483 {
1484 StaticMutexAutoLock lock(sInstanceMutex);
1485 if (sInstance) {
1486 discard = sInstance->RemoveImage(aImageKey, lock);
1487 }
1488 }
1489 }
1490
PruneImage(const ImageKey aImageKey)1491 /* static */ void SurfaceCache::PruneImage(const ImageKey aImageKey) {
1492 nsTArray<RefPtr<CachedSurface>> discard;
1493 {
1494 StaticMutexAutoLock lock(sInstanceMutex);
1495 if (sInstance) {
1496 sInstance->PruneImage(aImageKey, lock);
1497 sInstance->TakeDiscard(discard, lock);
1498 }
1499 }
1500 }
1501
DiscardAll()1502 /* static */ void SurfaceCache::DiscardAll() {
1503 nsTArray<RefPtr<CachedSurface>> discard;
1504 {
1505 StaticMutexAutoLock lock(sInstanceMutex);
1506 if (sInstance) {
1507 sInstance->DiscardAll(lock);
1508 sInstance->TakeDiscard(discard, lock);
1509 }
1510 }
1511 }
1512
CollectSizeOfSurfaces(const ImageKey aImageKey,nsTArray<SurfaceMemoryCounter> & aCounters,MallocSizeOf aMallocSizeOf)1513 /* static */ void SurfaceCache::CollectSizeOfSurfaces(
1514 const ImageKey aImageKey, nsTArray<SurfaceMemoryCounter>& aCounters,
1515 MallocSizeOf aMallocSizeOf) {
1516 nsTArray<RefPtr<CachedSurface>> discard;
1517 {
1518 StaticMutexAutoLock lock(sInstanceMutex);
1519 if (!sInstance) {
1520 return;
1521 }
1522
1523 sInstance->CollectSizeOfSurfaces(aImageKey, aCounters, aMallocSizeOf, lock);
1524 sInstance->TakeDiscard(discard, lock);
1525 }
1526 }
1527
MaximumCapacity()1528 /* static */ size_t SurfaceCache::MaximumCapacity() {
1529 StaticMutexAutoLock lock(sInstanceMutex);
1530 if (!sInstance) {
1531 return 0;
1532 }
1533
1534 return sInstance->MaximumCapacity();
1535 }
1536
1537 } // namespace image
1538 } // namespace mozilla
1539