/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDrawingManager_DEFINED
#define GrDrawingManager_DEFINED

#include "include/core/SkSurface.h"
#include "include/private/SkTArray.h"
#include "include/private/SkTHash.h"
#include "src/core/SkSpan.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrPathRenderer.h"
#include "src/gpu/GrPathRendererChain.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrSurfaceProxy.h"

// Enabling this will print out which path renderers are being chosen
#define GR_PATH_RENDERER_SPEW 0

class GrCoverageCountingPathRenderer;
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
class GrOpsTask;
class GrRecordingContext;
class GrRenderTargetContext;
class GrRenderTargetProxy;
class GrRenderTask;
class GrSemaphore;
class GrSoftwarePathRenderer;
class GrSurfaceContext;
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkDeferredDisplayList;

class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

    // A managed opsTask is controlled by the drawing manager (i.e., sorted & flushed with the
    // others). An unmanaged one is created and used by the onFlushCallback.
    sk_sp<GrOpsTask> newOpsTask(GrSurfaceProxyView, bool managedOpsTask);
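    // Illustrative usage (a sketch only; 'drawingManager' and 'view' are hypothetical):
    //   sk_sp<GrOpsTask> task = drawingManager->newOpsTask(std::move(view),
    //                                                      /*managedOpsTask=*/true);
    // Passing false yields an unmanaged task, as the onFlushCallback path does.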

    // Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies. This
    // method will only add the new render task to the list. It is up to the caller to call
    // addProxy() on the returned object.
    GrTextureResolveRenderTask* newTextureResolveRenderTask(const GrCaps&);
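    // Illustrative usage (a sketch only; addProxy()'s exact parameters are determined by
    // GrTextureResolveRenderTask's API):
    //   GrTextureResolveRenderTask* task = drawingManager->newTextureResolveRenderTask(caps);
    //   task->addProxy(/* each proxy needing an MSAA resolve and/or mipmap regeneration */);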

    // Create a new render task that will cause the gpu to wait on semaphores before executing any
    // more RenderTasks that target proxy. It is possible for this wait to also block additional
    // work (even to other proxies) that has already been recorded or will be recorded later. The
    // only guarantee is that future work to the passed in proxy will wait on the semaphores to be
    // signaled.
    void newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                           int numSemaphores);
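    // Illustrative usage (a sketch only; 'semaphores' is a caller-built array):
    //   drawingManager->newWaitRenderTask(proxy, std::move(semaphores), numSemaphores);
    // Any work recorded afterwards that targets 'proxy' will wait on those semaphores.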

    // Create a new render task which copies the pixels from the srcProxy into the dstBuffer. This
    // is used to support the asynchronous readback API. The srcRect is the region of the srcProxy
    // to be copied. The surfaceColorType says how we should interpret the data when reading back
    // from the source. The dstColorType describes how the data should be stored in the dstBuffer.
    // The dstOffset is the offset into the dstBuffer where we will start writing data.
    void newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);
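    // Illustrative usage for an async readback (a sketch only; names are hypothetical):
    //   drawingManager->newTransferFromRenderTask(srcProxy, SkIRect::MakeWH(w, h),
    //                                             GrColorType::kRGBA_8888,  // read as
    //                                             GrColorType::kRGBA_8888,  // store as
    //                                             dstBuffer, /*dstOffset=*/0);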

    // Creates a new render task which copies a pixel rectangle from srcView into dstView. The src
    // pixels copied are specified by srcRect. They are copied to a rect of the same size in
    // dstView with top left at dstPoint. If srcRect is clipped by the src bounds then pixel
    // values in the dst rect corresponding to the clipped-away area are not overwritten. This
    // method may fail depending on the type of surface, the formats, and backend-specific
    // limitations.
    bool newCopyRenderTask(GrSurfaceProxyView srcView, const SkIRect& srcRect,
                           GrSurfaceProxyView dstView, const SkIPoint& dstPoint);
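    // For example (a sketch): with srcRect = {-10, -10, 20, 20} on a 100x100 src, only the
    // in-bounds region {0, 0, 20, 20} is copied; the dst pixels corresponding to the
    // out-of-bounds portion keep their previous values.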

    GrRecordingContext* getContext() { return fContext; }

    GrPathRenderer* getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                    bool allowSW,
                                    GrPathRendererChain::DrawType drawType,
                                    GrPathRenderer::StencilSupport* stencilSupport = nullptr);
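    // Illustrative usage (a sketch only; 'args' is a caller-built CanDrawPathArgs and the
    // DrawType value shown is an assumption):
    //   GrPathRenderer::StencilSupport support;
    //   GrPathRenderer* pr = drawingManager->getPathRenderer(
    //           args, /*allowSW=*/true, GrPathRendererChain::DrawType::kColor, &support);
    //   if (!pr) { /* no renderer can draw this path; the caller must fall back */ }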

    GrPathRenderer* getSoftwarePathRenderer();

    // Returns a direct pointer to the coverage counting path renderer, or null if it is not
    // both supported and enabled.
    GrCoverageCountingPathRenderer* getCoverageCountingPathRenderer();

    void flushIfNecessary();

    static bool ProgramUnitTest(GrDirectContext*, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(SkSpan<GrSurfaceProxy*>,
                                        SkSurface::BackendSurfaceAccess,
                                        const GrFlushInfo&,
                                        const GrBackendSurfaceMutableState* newState);

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if GR_TEST_UTILS
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
    GrPathRendererChain::Options testingOnly_getOptionsForPathRendererChain() {
        return fOptionsForPathRendererChain;
    }
#endif

    GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
    GrOpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
    void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);

    void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
    void copyRenderTasksFromDDL(sk_sp<const SkDeferredDisplayList>, GrRenderTargetProxy* newDest);

private:
    // This class encapsulates maintenance and manipulation of the drawing manager's DAG of
    // renderTasks.
    class RenderTaskDAG {
    public:
        // This call will topologically sort the GrRenderTasks.
        void prepForFlush();

        void closeAll(const GrCaps* caps);

        void gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const;

        void reset();

        // This call forcibly removes GrRenderTasks from the DAG. It is problematic because it
        // just removes the GrRenderTasks but doesn't clean up any referring pointers (i.e.,
        // dependency pointers in the DAG). It works right now because it is only called after
        // the topological sort is complete (so the dangling pointers aren't used).
        void rawRemoveRenderTasks(int startIndex, int stopIndex);

        bool empty() const { return fRenderTasks.empty(); }
        int numRenderTasks() const { return fRenderTasks.count(); }

        bool isUsed(GrSurfaceProxy*) const;

        GrRenderTask* renderTask(int index) { return fRenderTasks[index].get(); }
        const GrRenderTask* renderTask(int index) const { return fRenderTasks[index].get(); }

        GrRenderTask* back() { return fRenderTasks.back().get(); }
        const GrRenderTask* back() const { return fRenderTasks.back().get(); }

        GrRenderTask* add(sk_sp<GrRenderTask>);
        GrRenderTask* addBeforeLast(sk_sp<GrRenderTask>);
        void add(const SkTArray<sk_sp<GrRenderTask>>&);

        void swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks);

    private:
        SkTArray<sk_sp<GrRenderTask>> fRenderTasks;
    };

    GrDrawingManager(GrRecordingContext*,
                     const GrPathRendererChain::Options&,
                     bool reduceOpsTaskSplitting);

    bool wasAbandoned() const;

    // Closes the target's dependent render tasks (or, if not in sorting/opsTask-splitting-reduction
    // mode, closes fActiveOpsTask) in preparation for us opening a new opsTask that will write to
    // 'target'.
    void closeRenderTasksForNewRenderTask(GrSurfaceProxy* target);

    // Returns true if any GrRenderTasks were actually executed; false otherwise.
    bool executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState*,
                            int* numRenderTasksExecuted);

    void removeRenderTasks(int startIndex, int stopIndex);

    bool flush(SkSpan<GrSurfaceProxy*> proxies,
               SkSurface::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const GrBackendSurfaceMutableState* newState);

    bool submitToGpu(bool syncToCpu);

    SkDEBUGCODE(void validate() const);

    friend class GrDirectContext; // access to: flush & cleanup
    friend class GrDirectContextPriv; // access to: flush
    friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
    friend class GrRecordingContext;  // access to: ctor
    friend class SkImage; // for access to: flush

    static const int kNumPixelGeometries = 5; // The different pixel geometries
    static const int kNumDFTOptions = 2;      // DFT or no DFT

    GrRecordingContext*               fContext;
    GrPathRendererChain::Options      fOptionsForPathRendererChain;

    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;

    RenderTaskDAG                     fDAG;
    GrOpsTask*                        fActiveOpsTask = nullptr;
    // These are the IDs of the render tasks currently being flushed (in internalFlush).
    SkSTArray<8, uint32_t, true>      fFlushingRenderTaskIDs;
    // These are the new renderTasks generated by the onFlush CBs.
    SkSTArray<4, sk_sp<GrRenderTask>> fOnFlushRenderTasks;

    std::unique_ptr<GrPathRendererChain> fPathRendererChain;
    sk_sp<GrSoftwarePathRenderer>     fSoftwarePathRenderer;

    GrTokenTracker                    fTokenTracker;
    bool                              fFlushing;
    const bool                        fReduceOpsTaskSplitting;

    SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;

    void addDDLTarget(GrSurfaceProxy* newTarget, GrRenderTargetProxy* ddlTarget) {
        fDDLTargets.set(newTarget->uniqueID().asUInt(), ddlTarget);
    }
    bool isDDLTarget(GrSurfaceProxy* newTarget) {
        return SkToBool(fDDLTargets.find(newTarget->uniqueID().asUInt()));
    }
    GrRenderTargetProxy* getDDLTarget(GrSurfaceProxy* newTarget) {
        auto entry = fDDLTargets.find(newTarget->uniqueID().asUInt());
        return entry ? *entry : nullptr;
    }
    void clearDDLTargets() { fDDLTargets.reset(); }

    // We play a trick with lazy proxies to retarget the base target of a DDL to the SkSurface
    // it is replayed on. 'fDDLTargets' stores this mapping from the SkSurface's unique proxy ID
    // to the DDL's lazy proxy.
    // Note: we do not expect a whole lot of these per flush.
    SkTHashMap<uint32_t, GrRenderTargetProxy*> fDDLTargets;
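    // Illustrative flow (a sketch only; names are hypothetical):
    //   this->addDDLTarget(surfaceProxy, ddlLazyTargetProxy);  // at replay time
    //   if (this->isDDLTarget(someProxy)) {
    //       GrRenderTargetProxy* t = this->getDDLTarget(someProxy);  // the DDL's lazy proxy
    //   }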

    struct SurfaceIDKeyTraits {
        static uint32_t GetInvalidKey() {
            return GrSurfaceProxy::UniqueID::InvalidID().asUInt();
        }
    };

    GrHashMapWithCache<uint32_t, GrRenderTask*, SurfaceIDKeyTraits, GrCheapHash> fLastRenderTasks;
};

#endif