1 /*
2  * Copyright 2014 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef GrGpuResource_DEFINED
9 #define GrGpuResource_DEFINED
10 
11 #include "include/private/GrResourceKey.h"
12 #include "include/private/GrTypesPriv.h"
13 #include "include/private/SkNoncopyable.h"
14 
15 class GrGpu;
16 class GrResourceCache;
17 class SkTraceMemoryDump;
18 
19 /**
20  * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
21  * Separated out as a base class to isolate the ref-cnting behavior and provide friendship without
22  * exposing all of GrGpuResource.
23  *
 * PRIOR to the last ref being removed DERIVED::notifyRefCntWillBeZero() will be called
 * (static polymorphism using CRTP). It is legal for additional refs to be added
 * during this time. AFTER the ref count reaches zero DERIVED::notifyRefCntIsZero() will be
 * called.
28  */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    // True when exactly one ref is outstanding. Command buffer usages are not considered;
    // they are tracked separately in fCommandBufferUsageCnt.
    bool unique() const { return fRefCnt == 1; }

    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Drops one ref. When this removes the last ref AND there are no outstanding command
    // buffer usages, kicks off the two-phase zero notification (see notifyWillBeZero()).
    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        // acq_rel: the release makes this thread's writes visible to whichever thread
        // observes the count hit zero; the acquire pairs with other unref()s' releases.
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel) &&
            this->hasNoCommandBufferUsages()) {
            this->notifyWillBeZero();
        }
    }

    // Records that a command buffer holds this resource. Keeps the resource alive past
    // ref count zero until removeCommandBufferUsage() balances it.
    void addCommandBufferUsage() const {
        // No barrier required.
        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Balances addCommandBufferUsage(). If this was the last usage and the ref count is
    // already zero, the resource gets the same zero notification unref() would deliver.
    void removeCommandBufferUsage() const {
        SkASSERT(!this->hasNoCommandBufferUsages());
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel) &&
            0 == this->getRefCnt()) {
            this->notifyWillBeZero();
        }
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
#endif

protected:
    friend class GrResourceCache; // for internalHasRef

    // A resource is born with one ref (owned by its creator) and no command buffer usages.
    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}

    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
    bool internalHasNoCommandBufferUsages() const {
        return SkToBool(this->hasNoCommandBufferUsages());
    }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        SkASSERT(fRefCnt >= 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

private:
    // Two-phase "count reached zero" protocol shared by unref() and
    // removeCommandBufferUsage(). Phase 1 (notifyRefCntWillBeZero) may legally resurrect
    // the resource; phase 2 (notifyRefCntIsZero) fires only if it did not.
    void notifyWillBeZero() const {
        // At this point we better be the only thread accessing this resource.
        // Trick out the notifyRefCntWillBeZero() call by adding back one more ref.
        fRefCnt.fetch_add(+1, std::memory_order_relaxed);
        static_cast<const DERIVED*>(this)->notifyRefCntWillBeZero();
        // notifyRefCntWillBeZero() could have done anything, including re-refing this and
        // passing on to another thread. Take away the ref-count we re-added above and see
        // if we're back to zero.
        // TODO: Consider making it so that refs can't be added and merge
        //  notifyRefCntWillBeZero()/willRemoveLastRef() with notifyRefCntIsZero().
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            static_cast<const DERIVED*>(this)->notifyRefCntIsZero();
        }
    }

    // Relaxed load: callers that need ordering guarantees get them from the
    // acq_rel RMW operations above, not from this read.
    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }

    bool hasNoCommandBufferUsages() const {
        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true.  It
            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
            // until previous owners are all totally done calling removeCommandBufferUsage().
            return true;
        }
        return false;
    }

    // mutable: ref/unref are logically const operations on the resource.
    mutable std::atomic<int32_t> fRefCnt;
    mutable std::atomic<int32_t> fCommandBufferUsageCnt;

    using INHERITED = SkNoncopyable;
};
115 
116 /**
117  * Base class for objects that can be kept in the GrResourceCache.
118  */
class GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether a object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has
     * contextLost called. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrContext
     * automatically releases all its resources.
     */
    const GrDirectContext* getContext() const;
    GrDirectContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        // Lazily computed and cached; subclasses report the real size via onGpuMemorySize().
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }

    // Value-type wrapper around a uint32_t id; default-constructed instances are invalid.
    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return  fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;  // NOLINT(readability-const-return-type)

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;  // NOLINT(readability-const-return-type)

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil).  This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    static uint32_t CreateUniqueID();

protected:
    // This must be called by every non-wrapped GrGpuObject. It should be called once the object is
    // fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(SkBudgeted);

    // This must be called by every GrGpuObject that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of the
    // final class).
    void registerWithCacheWrapped(GrWrapCacheable);

    GrGpuResource(GrGpu*);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method using a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;


private:
    bool isPurgeable() const;
    bool hasRef() const;
    bool hasNoCommandBufferUsages() const;

    /**
     * Called by the registerWithCache if the resource is available to be used as scratch.
     * Resource subclasses should override this if the instances should be recycled as scratch
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(GrScratchKey*) const {}

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    // Reports the resource's actual GPU memory footprint; result is cached by gpuMemorySize().
    virtual size_t onGpuMemorySize() const = 0;

    /**
     * Called by GrIORef when a resource is about to lose its last ref
     */
    virtual void willRemoveLastRef() {}

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
    void removeUniqueKey();
    void notifyRefCntWillBeZero() const;
    void notifyRefCntIsZero() const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif

    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    int fCacheArrayIndex;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    // Sentinel: all bits set; gpuMemorySize() treats this as "not yet computed".
    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    GrScratchKey fScratchKey;
    GrUniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    // mutable so the const gpuMemorySize() accessor can fill in the lazily-computed value.
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;

    using INHERITED = GrIORef<GrGpuResource>;
    friend class GrIORef<GrGpuResource>; // to access notifyRefCntWillBeZero and notifyRefCntIsZero.
};
333 
// Pass-key style accessor: everything is private, so only the friends listed below
// (GrGpuResource itself and GrSurfaceProxy) can construct or use one.
class GrGpuResource::ProxyAccess {
private:
    ProxyAccess(GrGpuResource* resource) : fResource(resource) {}

    /** Proxies are allowed to take a resource from no refs to one ref. */
    void ref(GrResourceCache* cache);

    // No taking addresses of this type.
    // NOTE(review): these deleted operators name CacheAccess* as the return type — looks
    // like a copy/paste from CacheAccess. Harmless since they are deleted, but confirm.
    const CacheAccess* operator&() const = delete;
    CacheAccess* operator&() = delete;

    GrGpuResource* fResource;

    friend class GrGpuResource;
    friend class GrSurfaceProxy;
};
350 
proxyAccess()351 inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }
352 
353 #endif
354