/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "imgFrame.h"
#include "ImageRegion.h"
#include "ShutdownTracker.h"
#include "SurfaceCache.h"

#include "prenv.h"

#include "gfx2DGlue.h"
#include "gfxContext.h"
#include "gfxPlatform.h"

#include "gfxUtils.h"

#include "MainThreadUtils.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/gfx/gfxVars.h"
#include "mozilla/gfx/Tools.h"
#include "mozilla/gfx/SourceSurfaceRawData.h"
#include "mozilla/layers/SourceSurfaceSharedData.h"
#include "mozilla/layers/SourceSurfaceVolatileData.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/ProfilerLabels.h"
#include "mozilla/StaticPrefs_browser.h"
#include "mozilla/StaticPrefs_image.h"
#include "nsMargin.h"
#include "nsRefreshDriver.h"
#include "nsThreadUtils.h"

#include <algorithm>  // for min, max

namespace mozilla {

using namespace gfx;

namespace image {

/**
 * This class is identical to SourceSurfaceSharedData but returns a different
 * type so that SharedSurfacesChild is aware imagelib wants to recycle this
 * surface for future animation frames.
 */
class RecyclingSourceSurfaceSharedData final : public SourceSurfaceSharedData {
 public:
  MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(RecyclingSourceSurfaceSharedData,
                                          override)

  SurfaceType GetType() const override {
    return SurfaceType::DATA_RECYCLING_SHARED;
  }
};

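// Computes the row stride for buffers we allocate ourselves, rounded up to a
// multiple of four bytes. For example, a 5-pixel-wide A8 row (5 bytes) is
// padded to an 8-byte stride, while a 5-pixel-wide BGRA row (20 bytes) is
// already aligned and stays at 20.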
static int32_t VolatileSurfaceStride(const IntSize& size,
                                     SurfaceFormat format) {
  // Stride must be a multiple of four or cairo will complain.
  return (size.width * BytesPerPixel(format) + 0x3) & ~0x3;
}

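// Wraps aSurface in a form whose data pointer stays valid for as long as the
// returned surface is alive. Shared-memory and heap/aligned surfaces already
// satisfy this, so they are returned as-is; volatile surfaces must be mapped
// first, which can fail, in which case nullptr is returned.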
static already_AddRefed<DataSourceSurface> CreateLockedSurface(
    DataSourceSurface* aSurface, const IntSize& size, SurfaceFormat format) {
  switch (aSurface->GetType()) {
    case SurfaceType::DATA_SHARED:
    case SurfaceType::DATA_RECYCLING_SHARED:
    case SurfaceType::DATA_ALIGNED: {
      // Shared memory is never released until the surface itself is released.
      // Similar for aligned/heap surfaces.
      RefPtr<DataSourceSurface> surf(aSurface);
      return surf.forget();
    }
    default: {
      // Volatile memory requires us to map it first, and it is fallible.
      DataSourceSurface::ScopedMap smap(aSurface,
                                        DataSourceSurface::READ_WRITE);
      if (smap.IsMapped()) {
        return MakeAndAddRef<SourceSurfaceMappedData>(std::move(smap), size,
                                                      format);
      }
      break;
    }
  }

  return nullptr;
}

static bool ShouldUseHeap(const IntSize& aSize, int32_t aStride,
                          bool aIsAnimated) {
  // On some platforms (i.e. Android), a volatile buffer actually keeps a file
  // handle active. We would like to avoid having too many of these since we
  // could easily exhaust the pool. However, on other platforms we do not have
  // the file handle problem, and additionally we may avoid a superfluous
  // memset since the volatile memory starts out as zero-filled. Hence the
  // knobs below.

  // For as long as an animated image is retained, its frames will never be
  // released to let the OS purge volatile buffers.
  if (aIsAnimated && StaticPrefs::image_mem_animated_use_heap()) {
    return true;
  }

  // Lets us avoid too many small images consuming all of the handles. The
  // actual allocation checks for overflow.
  int32_t bufferSize = (aStride * aSize.height) / 1024;
  return bufferSize < StaticPrefs::image_mem_volatile_min_threshold_kb();
}

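// Picks the backing store for a decoded frame: shared memory when
// gfxVars::GetUseWebRenderOrDefault() and StaticPrefs::image_mem_shared()
// allow it (so the pixels can be handed to the compositor process), heap
// memory for animated or sufficiently small images, and volatile memory
// otherwise so the OS may purge it under memory pressure.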
static already_AddRefed<DataSourceSurface> AllocateBufferForImage(
    const IntSize& size, SurfaceFormat format, bool aShouldRecycle = false,
    bool aIsAnimated = false) {
  int32_t stride = VolatileSurfaceStride(size, format);

  if (gfxVars::GetUseWebRenderOrDefault() && StaticPrefs::image_mem_shared()) {
    RefPtr<SourceSurfaceSharedData> newSurf;
    if (aShouldRecycle) {
      newSurf = new RecyclingSourceSurfaceSharedData();
    } else {
      newSurf = new SourceSurfaceSharedData();
    }
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  } else if (ShouldUseHeap(size, stride, aIsAnimated)) {
    RefPtr<SourceSurfaceAlignedRawData> newSurf =
        new SourceSurfaceAlignedRawData();
    if (newSurf->Init(size, format, false, 0, stride)) {
      return newSurf.forget();
    }
  } else {
    RefPtr<SourceSurfaceVolatileData> newSurf = new SourceSurfaceVolatileData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  }
  return nullptr;
}

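// Debug helper: fills the buffer with opaque green so that the "blank" frames
// handed out when StaticPrefs::browser_measurement_render_anims_and_video_solid()
// is set are visually obvious. The color constant is endian-swapped and chosen
// per pixel layout so the green always lands in the G channel.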
static bool GreenSurface(DataSourceSurface* aSurface, const IntSize& aSize,
                         SurfaceFormat aFormat) {
  int32_t stride = aSurface->Stride();
  uint32_t* surfaceData = reinterpret_cast<uint32_t*>(aSurface->GetData());
  uint32_t surfaceDataLength = (stride * aSize.height) / sizeof(uint32_t);

  // Start by assuming that GG is in the second byte and
  // AA is in the final byte -- the most common case.
  uint32_t color = mozilla::NativeEndian::swapFromBigEndian(0x00FF00FF);

  // We are only going to handle this type of test under
  // certain circumstances.
  MOZ_ASSERT(surfaceData);
  MOZ_ASSERT(aFormat == SurfaceFormat::B8G8R8A8 ||
             aFormat == SurfaceFormat::B8G8R8X8 ||
             aFormat == SurfaceFormat::R8G8B8A8 ||
             aFormat == SurfaceFormat::R8G8B8X8 ||
             aFormat == SurfaceFormat::A8R8G8B8 ||
             aFormat == SurfaceFormat::X8R8G8B8);
  MOZ_ASSERT((stride * aSize.height) % sizeof(uint32_t) == 0);

  if (aFormat == SurfaceFormat::A8R8G8B8 ||
      aFormat == SurfaceFormat::X8R8G8B8) {
    color = mozilla::NativeEndian::swapFromBigEndian(0xFF00FF00);
  }

  for (uint32_t i = 0; i < surfaceDataLength; i++) {
    surfaceData[i] = color;
  }

  return true;
}

static bool ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize,
                         SurfaceFormat aFormat) {
  int32_t stride = aSurface->Stride();
  uint8_t* data = aSurface->GetData();
  MOZ_ASSERT(data);

  if (aFormat == SurfaceFormat::OS_RGBX) {
    // Skia doesn't support RGBX surfaces, so ensure the alpha value is set
    // to opaque white. While it would be nice to only do this for Skia,
    // imgFrame can run off main thread and past shutdown where
    // we might not have gfxPlatform, so just memset every time instead.
    memset(data, 0xFF, stride * aSize.height);
  } else if (aSurface->OnHeap()) {
    // We only need to memset it if the buffer was allocated on the heap.
    // Otherwise, it's allocated via mmap and refers to a zeroed page and will
    // be COW once it's written to.
    memset(data, 0, stride * aSize.height);
  }

  return true;
}

imgFrame::imgFrame()
    : mMonitor("imgFrame"),
      mDecoded(0, 0, 0, 0),
      mLockCount(0),
      mAborted(false),
      mFinished(false),
      mOptimizable(false),
      mShouldRecycle(false),
      mTimeout(FrameTimeout::FromRawMilliseconds(100)),
      mDisposalMethod(DisposalMethod::NOT_SPECIFIED),
      mBlendMethod(BlendMethod::OVER),
      mFormat(SurfaceFormat::UNKNOWN),
      mNonPremult(false) {}

imgFrame::~imgFrame() {
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mAborted || AreAllPixelsWritten());
  MOZ_ASSERT(mAborted || mFinished);
#endif
}

nsresult imgFrame::InitForDecoder(const nsIntSize& aImageSize,
                                  SurfaceFormat aFormat, bool aNonPremult,
                                  const Maybe<AnimationParams>& aAnimParams,
                                  bool aShouldRecycle) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aImageSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aImageSize;

  // May be updated shortly after InitForDecoder by BlendAnimationFilter
  // because it needs to take the previous frames into consideration to
  // properly calculate the dirty rect. We start with the whole frame as dirty.
  mDirtyRect = GetRect();

  if (aAnimParams) {
    mBlendRect = aAnimParams->mBlendRect;
    mTimeout = aAnimParams->mTimeout;
    mBlendMethod = aAnimParams->mBlendMethod;
    mDisposalMethod = aAnimParams->mDisposalMethod;
  } else {
    mBlendRect = GetRect();
  }

  if (aShouldRecycle) {
    // If we are recycling then we should always use BGRA for the underlying
    // surface because if we use BGRX, the next frame composited into the
    // surface could be BGRA and cause rendering problems.
    MOZ_ASSERT(aAnimParams);
    mFormat = SurfaceFormat::OS_RGBA;
  } else {
    mFormat = aFormat;
  }

  mNonPremult = aNonPremult;
  mShouldRecycle = aShouldRecycle;

  MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");

  bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
  mRawSurface = AllocateBufferForImage(mImageSize, mFormat, mShouldRecycle,
                                       postFirstFrame);
  if (!mRawSurface) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
      aAnimParams) {
    mBlankRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mBlankRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
  if (!mLockedSurface) {
    NS_WARNING("Failed to create LockedSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (mBlankRawSurface) {
    mBlankLockedSurface =
        CreateLockedSurface(mBlankRawSurface, mImageSize, mFormat);
    if (!mBlankLockedSurface) {
      NS_WARNING("Failed to create BlankLockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
    NS_WARNING("Could not clear allocated buffer");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (mBlankRawSurface) {
    if (!GreenSurface(mBlankRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated blank buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  return NS_OK;
}

nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
  // We want to recycle this frame, but there is no guarantee that consumers
  // are done with it in a timely manner. Let's ensure they are done with it
  // first.
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0);
  MOZ_ASSERT(mLockedSurface);

  if (!mShouldRecycle) {
    // This frame either was never marked as recyclable, or the flag was
    // cleared for a caller which does not support recycling.
    return NS_ERROR_NOT_AVAILABLE;
  }

  // Ensure we account for all internal references to the surface.
  MozRefCountType internalRefs = 1;
  if (mRawSurface == mLockedSurface) {
    ++internalRefs;
  }
  if (mOptSurface == mLockedSurface) {
    ++internalRefs;
  }

  if (mLockedSurface->refCount() > internalRefs) {
    if (NS_IsMainThread()) {
      // We should never be both decoding and recycling on the main thread.
      // Sync decoding can only be used to produce the first set of frames.
      // Those either never use recycling because advancing was blocked (main
      // thread is busy) or we were auto-advancing (to seek to a frame) and
      // the frames were never accessed (and thus cannot have recycle locks).
      MOZ_ASSERT_UNREACHABLE("Recycling/decoding on the main thread?");
      return NS_ERROR_NOT_AVAILABLE;
    }

    // We don't want to wait forever to reclaim the frame because we have no
    // idea why it is still held. It is possibly due to OMTP. Since we are off
    // the main thread, and we generally have frames already buffered for the
    // animation, we can afford to wait a short period of time to hopefully
    // complete the transaction and reclaim the buffer.
    //
    // We choose to wait for, at most, the refresh driver interval, so that we
    // won't skip more than one frame. If the frame is still in use due to
    // outstanding transactions, we are already skipping frames. If the frame
    // is still in use for some other purpose, it won't be returned to the pool
    // and its owner can hold onto it forever without additional impact here.
    int32_t refreshInterval =
        std::max(std::min(nsRefreshDriver::DefaultInterval(), 20), 4);
    TimeDuration waitInterval =
        TimeDuration::FromMilliseconds(refreshInterval >> 2);
    TimeStamp timeout =
        TimeStamp::Now() + TimeDuration::FromMilliseconds(refreshInterval);
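    // As a rough worked example (assuming the default refresh driver interval
    // of roughly 16 ms at 60 Hz): we poll the monitor every 4 ms and give up
    // after ~16 ms, so in the worst case we skip about one animation frame.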
    while (true) {
      mMonitor.Wait(waitInterval);
      if (mLockedSurface->refCount() <= internalRefs) {
        break;
      }

      if (timeout <= TimeStamp::Now()) {
        // We couldn't secure the frame for recycling. It will allocate a new
        // frame instead.
        return NS_ERROR_NOT_AVAILABLE;
      }
    }
  }

  mBlendRect = aAnimParams.mBlendRect;
  mTimeout = aAnimParams.mTimeout;
  mBlendMethod = aAnimParams.mBlendMethod;
  mDisposalMethod = aAnimParams.mDisposalMethod;
  mDirtyRect = GetRect();

  return NS_OK;
}

nsresult imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
                                    const nsIntSize& aSize,
                                    const SurfaceFormat aFormat,
                                    SamplingFilter aSamplingFilter,
                                    uint32_t aImageFlags,
                                    gfx::BackendType aBackend) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aSize;
  mFormat = aFormat;

  RefPtr<DrawTarget> target;

  bool canUseDataSurface = Factory::DoesBackendSupportDataDrawtarget(aBackend);
  if (canUseDataSurface) {
    // It's safe to use data surfaces for content on this platform, so we can
    // get away with using volatile buffers.
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");

    mRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    target = gfxPlatform::CreateDrawTargetForData(
        mLockedSurface->GetData(), mImageSize, mLockedSurface->Stride(),
        mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen
    // surface instead.  This means if someone later calls RawAccessRef(), we
    // may have to do an expensive readback, but we warned callers about that in
    // the documentation for this method.
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");

    if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend)) {
      target = gfxPlatform::GetPlatform()->CreateDrawTargetForBackend(
          aBackend, mImageSize, mFormat);
    } else {
      target = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
          mImageSize, mFormat);
    }
  }

  if (!target || !target->IsValid()) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Draw using the drawable the caller provided.
  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
  MOZ_ASSERT(ctx);  // Already checked the draw target above.
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mImageSize),
                             ImageRegion::Create(ThebesRect(GetRect())),
                             mFormat, aSamplingFilter, aImageFlags);

  if (canUseDataSurface && !mLockedSurface) {
    NS_WARNING("Failed to create VolatileDataSourceSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!canUseDataSurface) {
    // We used an offscreen surface, which is an "optimized" surface from
    // imgFrame's perspective.
    mOptSurface = target->Snapshot();
  } else {
    FinalizeSurface();
  }

  // If we reach this point, we should regard ourselves as complete.
  mDecoded = GetRect();
  mFinished = true;

#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(AreAllPixelsWritten());
#endif

  return NS_OK;
}

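// Converts the cheap-to-write raw/locked surface into whatever surface type is
// fastest for the draw target to consume (e.g. a GPU texture), then drops the
// raw copies so volatile memory can be purged. Only runs on the main thread,
// only once the frame is unlocked, and only if SetOptimizable() was called.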
nsresult imgFrame::Optimize(DrawTarget* aTarget) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (mLockCount > 0 || !mOptimizable) {
    // Don't optimize right now.
    return NS_OK;
  }

  // Check whether image optimization is disabled -- not thread safe!
  static bool gDisableOptimize = false;
  static bool hasCheckedOptimize = false;
  if (!hasCheckedOptimize) {
    if (PR_GetEnv("MOZ_DISABLE_IMAGE_OPTIMIZE")) {
      gDisableOptimize = true;
    }
    hasCheckedOptimize = true;
  }

  // Don't optimize during shutdown because gfxPlatform may not be available.
  if (ShutdownTracker::ShutdownHasStarted()) {
    return NS_OK;
  }

  if (gDisableOptimize) {
    return NS_OK;
  }

  if (mOptSurface) {
    return NS_OK;
  }

  // XXX(seth): It's currently unclear if there's any reason why we can't
  // optimize non-premult surfaces. We should look into removing this.
  if (mNonPremult) {
    return NS_OK;
  }
  if (!gfxVars::UseWebRender()) {
    mOptSurface = aTarget->OptimizeSourceSurface(mLockedSurface);
  } else {
    mOptSurface = gfxPlatform::GetPlatform()
                      ->ScreenReferenceDrawTarget()
                      ->OptimizeSourceSurface(mLockedSurface);
  }
  if (mOptSurface == mLockedSurface) {
    mOptSurface = nullptr;
  }

  if (mOptSurface) {
    // There's no reason to keep our original surface around if we have an
    // optimized surface. Release our reference to it. This will leave
    // |mLockedSurface| as the only thing keeping it alive, so it'll get freed
    // below.
    mRawSurface = nullptr;
  }

  // Release all strong references to the surface's memory. If the underlying
  // surface is volatile, this will allow the operating system to free the
  // memory if it needs to.
  mLockedSurface = nullptr;
  mOptimizable = false;

  return NS_OK;
}

DrawableFrameRef imgFrame::DrawableRef() { return DrawableFrameRef(this); }

RawAccessFrameRef imgFrame::RawAccessRef(bool aOnlyFinished /*= false*/) {
  return RawAccessFrameRef(this, aOnlyFinished);
}

void imgFrame::SetRawAccessOnly() {
  AssertImageDataLocked();

  // Lock our data and throw away the key.
  LockImageData(false);
}

imgFrame::SurfaceWithFormat imgFrame::SurfaceForDrawing(
    bool aDoPartialDecode, bool aDoTile, ImageRegion& aRegion,
    SourceSurface* aSurface) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (!aDoPartialDecode) {
    return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, mImageSize),
                             mFormat);
  }

  gfxRect available =
      gfxRect(mDecoded.X(), mDecoded.Y(), mDecoded.Width(), mDecoded.Height());

  if (aDoTile) {
    // Create a temporary surface.
    // Give this surface an alpha channel because there are
    // transparent pixels in the padding or undecoded area.
    RefPtr<DrawTarget> target =
        gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
            mImageSize, SurfaceFormat::OS_RGBA);
    if (!target) {
      return SurfaceWithFormat();
    }

    SurfacePattern pattern(aSurface, aRegion.GetExtendMode(),
                           Matrix::Translation(mDecoded.X(), mDecoded.Y()));
    target->FillRect(ToRect(aRegion.Intersect(available).Rect()), pattern);

    RefPtr<SourceSurface> newsurf = target->Snapshot();
    return SurfaceWithFormat(new gfxSurfaceDrawable(newsurf, mImageSize),
                             target->GetFormat());
  }

  // Not tiling, and we have a surface, so we can account for
  // a partial decode just by twiddling parameters.
  aRegion = aRegion.Intersect(available);
  IntSize availableSize(mDecoded.Width(), mDecoded.Height());

  return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, availableSize),
                           mFormat);
}

bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
                    SamplingFilter aSamplingFilter, uint32_t aImageFlags,
                    float aOpacity) {
  AUTO_PROFILER_LABEL("imgFrame::Draw", GRAPHICS);

  MOZ_ASSERT(NS_IsMainThread());
  NS_ASSERTION(!aRegion.Rect().IsEmpty(), "Drawing empty region!");
  NS_ASSERTION(!aRegion.IsRestricted() ||
                   !aRegion.Rect().Intersect(aRegion.Restriction()).IsEmpty(),
               "We must be allowed to sample *some* source pixels!");

  // Perform the draw and freeing of the surface outside the lock. We want to
  // avoid contention with the decoder if we can. The surface may also attempt
  // to relock the monitor if it is freed (e.g. RecyclingSourceSurface).
  RefPtr<SourceSurface> surf;
  SurfaceWithFormat surfaceResult;
  ImageRegion region(aRegion);
  gfxRect imageRect(0, 0, mImageSize.width, mImageSize.height);

  {
    MonitorAutoLock lock(mMonitor);

    // Possibly convert this image into a GPU texture, this may also cause our
    // mLockedSurface to be released and the OS to release the underlying
    // memory.
    Optimize(aContext->GetDrawTarget());

    bool doPartialDecode = !AreAllPixelsWritten();

    // Most draw targets will just use the surface only during DrawPixelSnapped
    // but captures/recordings will retain a reference outside this stack
    // context. While in theory a decoder thread could be trying to recycle this
    // frame at this very moment, in practice the only way we can get here is if
    // this frame is the current frame of the animation. Since we can only
    // advance on the main thread, we know nothing else will try to use it.
    DrawTarget* drawTarget = aContext->GetDrawTarget();
    bool recording = drawTarget->GetBackendType() == BackendType::RECORDING;
    surf = GetSourceSurfaceInternal();
    if (!surf) {
      return false;
    }

    bool doTile = !imageRect.Contains(aRegion.Rect()) &&
                  !(aImageFlags & imgIContainer::FLAG_CLAMP);

    surfaceResult = SurfaceForDrawing(doPartialDecode, doTile, region, surf);

    // If we are recording, then we cannot recycle the surface. The blob
    // rasterizer is not properly synchronized for recycling in the compositor
    // process. The easiest thing to do is just mark the frames it consumes as
    // non-recyclable.
    if (recording && surfaceResult.IsValid()) {
      mShouldRecycle = false;
    }
  }

  if (surfaceResult.IsValid()) {
    gfxUtils::DrawPixelSnapped(aContext, surfaceResult.mDrawable,
                               imageRect.Size(), region, surfaceResult.mFormat,
                               aSamplingFilter, aImageFlags, aOpacity);
  }

  return true;
}

nsresult imgFrame::ImageUpdated(const nsIntRect& aUpdateRect) {
  MonitorAutoLock lock(mMonitor);
  return ImageUpdatedInternal(aUpdateRect);
}

nsresult imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect) {
  mMonitor.AssertCurrentThreadOwns();

  // Clamp to the frame rect to ensure that decoder bugs don't result in a
  // decoded rect that extends outside the bounds of the frame rect.
  IntRect updateRect = aUpdateRect.Intersect(GetRect());
  if (updateRect.IsEmpty()) {
    return NS_OK;
  }

  mDecoded.UnionRect(mDecoded, updateRect);

  // Update our invalidation counters for any consumers watching for changes
  // in the surface.
  if (mRawSurface) {
    mRawSurface->Invalidate(updateRect);
  }
  if (mLockedSurface && mRawSurface != mLockedSurface) {
    mLockedSurface->Invalidate(updateRect);
  }
  return NS_OK;
}

void imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
                      bool aFinalize /* = true */) {
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  IntRect frameRect(GetRect());
  if (!mDecoded.IsEqualEdges(frameRect)) {
    // The decoder should have produced rows starting from either the bottom or
    // the top of the image. We need to calculate the region for which we have
    // not yet invalidated.
    IntRect delta(0, 0, frameRect.width, 0);
    if (mDecoded.y == 0) {
      delta.y = mDecoded.height;
      delta.height = frameRect.height - mDecoded.height;
    } else if (mDecoded.y + mDecoded.height == frameRect.height) {
      delta.height = frameRect.height - mDecoded.y;
    } else {
      MOZ_ASSERT_UNREACHABLE("Decoder only updated middle of image!");
      delta = frameRect;
    }

    ImageUpdatedInternal(delta);
  }

  MOZ_ASSERT(mDecoded.IsEqualEdges(frameRect));

  if (aFinalize) {
    FinalizeSurfaceInternal();
  }

  mFinished = true;

  // The image is now complete, wake up anyone who's waiting.
  mMonitor.NotifyAll();
}

uint32_t imgFrame::GetImageBytesPerRow() const {
  mMonitor.AssertCurrentThreadOwns();

  if (mRawSurface) {
    return mImageSize.width * BytesPerPixel(mFormat);
  }

  return 0;
}

uint32_t imgFrame::GetImageDataLength() const {
  return GetImageBytesPerRow() * mImageSize.height;
}

void imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const {
  MonitorAutoLock lock(mMonitor);
  GetImageDataInternal(aData, aLength);
}

void imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const {
  mMonitor.AssertCurrentThreadOwns();
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
  MOZ_ASSERT(mLockedSurface);

  if (mLockedSurface) {
    // TODO: This is okay for now because we only realloc shared surfaces on
    // the main thread after decoding has finished, but if animations want to
    // read frame data off the main thread, we will need to reconsider this.
    *aData = mLockedSurface->GetData();
    MOZ_ASSERT(
        *aData,
        "mLockedSurface is non-null, but GetData is null in GetImageData");
  } else {
    *aData = nullptr;
  }

  *aLength = GetImageDataLength();
}

uint8_t* imgFrame::GetImageData() const {
  uint8_t* data;
  uint32_t length;
  GetImageData(&data, &length);
  return data;
}

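// Locking notes: LockImageData()/UnlockImageData() maintain a simple count.
// Locking returns the raw pixel pointer (or nullptr once the frame has been
// optimized and the raw data discarded), and the final unlock is only expected
// once the frame has been finished or aborted.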
uint8_t* imgFrame::LockImageData(bool aOnlyFinished) {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount >= 0, "Unbalanced locks and unlocks");
  if (mLockCount < 0 || (aOnlyFinished && !mFinished)) {
    return nullptr;
  }

  uint8_t* data;
  if (mLockedSurface) {
    data = mLockedSurface->GetData();
  } else {
    data = nullptr;
  }

  // If the raw data is still available, we should get a valid pointer for it.
  if (!data) {
    MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
    return nullptr;
  }

  ++mLockCount;
  return data;
}

void imgFrame::AssertImageDataLocked() const {
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
#endif
}

nsresult imgFrame::UnlockImageData() {
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0, "Unlocking an unlocked image!");
  if (mLockCount <= 0) {
    return NS_ERROR_FAILURE;
  }

  MOZ_ASSERT(mLockCount > 1 || mFinished || mAborted,
             "Should have Finish()'d or aborted before unlocking");

  mLockCount--;

  return NS_OK;
}

void imgFrame::SetOptimizable() {
  AssertImageDataLocked();
  MonitorAutoLock lock(mMonitor);
  mOptimizable = true;
}

void imgFrame::FinalizeSurface() {
  MonitorAutoLock lock(mMonitor);
  FinalizeSurfaceInternal();
}

void imgFrame::FinalizeSurfaceInternal() {
  mMonitor.AssertCurrentThreadOwns();

  // Not all images will have mRawSurface to finalize (i.e. paletted images).
  if (mShouldRecycle || !mRawSurface ||
      mRawSurface->GetType() != SurfaceType::DATA_SHARED) {
    return;
  }

  auto* sharedSurf = static_cast<SourceSurfaceSharedData*>(mRawSurface.get());
  sharedSurf->Finalize();
}

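// Returns the best surface we have for drawing, in order of preference: the
// optimized surface (if still valid), the solid-color debug surface (if the
// render-as-solid pref created one), the locked raw surface, and finally a
// freshly mapped view of the raw surface.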
already_AddRefed<SourceSurface> imgFrame::GetSourceSurface() {
  MonitorAutoLock lock(mMonitor);
  return GetSourceSurfaceInternal();
}

already_AddRefed<SourceSurface> imgFrame::GetSourceSurfaceInternal() {
  mMonitor.AssertCurrentThreadOwns();

  if (mOptSurface) {
    if (mOptSurface->IsValid()) {
      RefPtr<SourceSurface> surf(mOptSurface);
      return surf.forget();
    }
    mOptSurface = nullptr;
  }

  if (mBlankLockedSurface) {
    // We are going to return the blank surface because of the flags.
    // We are including comments here that are copied from below
    // just so that we are on the same page!
    RefPtr<SourceSurface> surf(mBlankLockedSurface);
    return surf.forget();
  }

  if (mLockedSurface) {
    RefPtr<SourceSurface> surf(mLockedSurface);
    return surf.forget();
  }

  MOZ_ASSERT(!mShouldRecycle, "Should recycle but no locked surface!");

  if (!mRawSurface) {
    return nullptr;
  }

  return CreateLockedSurface(mRawSurface, mImageSize, mFormat);
}

void imgFrame::Abort() {
  MonitorAutoLock lock(mMonitor);

  mAborted = true;

  // Wake up anyone who's waiting.
  mMonitor.NotifyAll();
}

bool imgFrame::IsAborted() const {
  MonitorAutoLock lock(mMonitor);
  return mAborted;
}

bool imgFrame::IsFinished() const {
  MonitorAutoLock lock(mMonitor);
  return mFinished;
}

void imgFrame::WaitUntilFinished() const {
  MonitorAutoLock lock(mMonitor);

  while (true) {
    // Return if we're aborted or complete.
    if (mAborted || mFinished) {
      return;
    }

    // Not complete yet, so we'll have to wait.
    mMonitor.Wait();
  }
}

bool imgFrame::AreAllPixelsWritten() const {
  mMonitor.AssertCurrentThreadOwns();
  return mDecoded.IsEqualInterior(GetRect());
}

void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                                      const AddSizeOfCb& aCallback) const {
  MonitorAutoLock lock(mMonitor);

  AddSizeOfCbData metadata;
  metadata.mSurface = mOptSurface ? mOptSurface.get() : mRawSurface.get();
  metadata.mFinished = mFinished;

  if (mLockedSurface) {
    // The locked surface should only be present if we have mRawSurface. Hence
    // we only need to get its allocation size to avoid double counting.
    metadata.mHeapBytes += aMallocSizeOf(mLockedSurface);
    metadata.AddType(mLockedSurface->GetType());
  }
  if (mOptSurface) {
    metadata.mHeapBytes += aMallocSizeOf(mOptSurface);

    SourceSurface::SizeOfInfo info;
    mOptSurface->SizeOfExcludingThis(aMallocSizeOf, info);
    metadata.Accumulate(info);
  }
  if (mRawSurface) {
    metadata.mHeapBytes += aMallocSizeOf(mRawSurface);

    SourceSurface::SizeOfInfo info;
    mRawSurface->SizeOfExcludingThis(aMallocSizeOf, info);
    metadata.Accumulate(info);
  }

  aCallback(metadata);
}

}  // namespace image
}  // namespace mozilla