/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawingManager.h"

#include "include/core/SkDeferredDisplayList.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/private/GrRecordingContext.h"
#include "src/core/SkTTopoSort.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrClientMappedBufferManager.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrCopyRenderTask.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrRenderTask.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSoftwarePathRenderer.h"
#include "src/gpu/GrSurfaceContext.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTextureResolveRenderTask.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrTransferFromRenderTask.h"
#include "src/gpu/GrWaitRenderTask.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/text/GrTextContext.h"
#include "src/image/SkSurface_Gpu.h"

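// RenderTaskDAG is a thin wrapper around the list of GrRenderTasks recorded since the last
// flush; it can optionally topologically sort the tasks by their dependencies at flush time.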
GrDrawingManager::RenderTaskDAG::RenderTaskDAG(bool sortRenderTasks)
        : fSortRenderTasks(sortRenderTasks) {}

GrDrawingManager::RenderTaskDAG::~RenderTaskDAG() {}

void GrDrawingManager::RenderTaskDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
    idArray->reset(fRenderTasks.count());
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i]) {
            (*idArray)[i] = fRenderTasks[i]->uniqueID();
        }
    }
}

void GrDrawingManager::RenderTaskDAG::reset() {
    fRenderTasks.reset();
}

void GrDrawingManager::RenderTaskDAG::removeRenderTask(int index) {
    if (!fRenderTasks[index]->unique()) {
        // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
        fRenderTasks[index]->endFlush();
    }

    fRenderTasks[index] = nullptr;
}

void GrDrawingManager::RenderTaskDAG::removeRenderTasks(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fRenderTasks[i]) {
            continue;
        }
        this->removeRenderTask(i);
    }
}

bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i] && fRenderTasks[i]->isUsed(proxy)) {
            return true;
        }
    }

    return false;
}

GrRenderTask* GrDrawingManager::RenderTaskDAG::add(sk_sp<GrRenderTask> renderTask) {
    if (renderTask) {
        return fRenderTasks.emplace_back(std::move(renderTask)).get();
    }
    return nullptr;
}

GrRenderTask* GrDrawingManager::RenderTaskDAG::addBeforeLast(sk_sp<GrRenderTask> renderTask) {
    SkASSERT(!fRenderTasks.empty());
    if (renderTask) {
        // Release 'fRenderTasks.back()' and grab the raw pointer, in case the SkTArray grows
        // and reallocates during emplace_back.
        fRenderTasks.emplace_back(fRenderTasks.back().release());
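        // Move the new task into the slot the old back task just vacated, making it
        // second-to-last.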
        return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get();
    }
    return nullptr;
}

void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) {
#ifdef SK_DEBUG
    for (auto& renderTask : renderTasks) {
        SkASSERT(renderTask->unique());
    }
#endif

    fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin());
}

void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks) {
    SkASSERT(renderTasks->empty());
    renderTasks->swap(fRenderTasks);
}

void GrDrawingManager::RenderTaskDAG::prepForFlush() {
    if (fSortRenderTasks) {
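        // Sort so that every task appears after all of the tasks it depends on; execution
        // order then respects the recorded dependencies.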
        SkDEBUGCODE(bool result =) SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(
                &fRenderTasks);
        SkASSERT(result);
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
    // share the same backing GrSurfaceProxy it means the opsTask was artificially split.
    if (fRenderTasks.count()) {
        GrOpsTask* prevOpsTask = fRenderTasks[0]->asOpsTask();
        for (int i = 1; i < fRenderTasks.count(); ++i) {
            GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();

            if (prevOpsTask && curOpsTask) {
                SkASSERT(prevOpsTask->fTargetView != curOpsTask->fTargetView);
            }

            prevOpsTask = curOpsTask;
        }
    }
#endif
}

void GrDrawingManager::RenderTaskDAG::closeAll(const GrCaps* caps) {
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i]) {
            fRenderTasks[i]->makeClosed(*caps);
        }
    }
}

void GrDrawingManager::RenderTaskDAG::cleanup(const GrCaps* caps) {
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (!fRenderTasks[i]) {
            continue;
        }

        // no renderTask should receive a dependency
        fRenderTasks[i]->makeClosed(*caps);

        // We shouldn't need to do this, but it turns out some clients still hold onto opsTasks
        // after a cleanup.
        // MDB TODO: is this still true?
        if (!fRenderTasks[i]->unique()) {
            // TODO: Eventually this should be guaranteed unique.
            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
            fRenderTasks[i]->endFlush();
        }
    }

    fRenderTasks.reset();
}

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
                                   const GrPathRendererChain::Options& optionsForPathRendererChain,
                                   const GrTextContext::Options& optionsForTextContext,
                                   bool sortRenderTasks,
                                   bool reduceOpsTaskSplitting)
        : fContext(context)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fOptionsForTextContext(optionsForTextContext)
        , fDAG(sortRenderTasks)
        , fTextContext(nullptr)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fFlushing(false)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}

void GrDrawingManager::cleanup() {
    fDAG.cleanup(fContext->priv().caps());

    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;

    fOnFlushCBObjects.reset();
}

GrDrawingManager::~GrDrawingManager() {
    this->cleanup();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->priv().abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}

// MDB TODO: make use of the 'proxy' parameter.
GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int numProxies,
                                              SkSurface::BackendSurfaceAccess access,
                                              const GrFlushInfo& info,
                                              const GrPrepareForExternalIORequests& externalRequests) {
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    SkDEBUGCODE(this->validate());

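    // If the caller asked for no flags, semaphores, finish procs, or external IO requests, and
    // none of the proxies of interest have pending work in the DAG, the flush can be skipped.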
    if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc &&
        !externalRequests.hasRequests()) {
        bool canSkip = numProxies > 0;
        for (int i = 0; i < numProxies && canSkip; ++i) {
            canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
        }
        if (canSkip) {
            return GrSemaphoresSubmitted::kNo;
        }
    }

    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }
    direct->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = direct->priv().getGpu();
    if (!gpu) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    fFlushing = true;

    auto resourceProvider = direct->priv().resourceProvider();
    auto resourceCache = direct->priv().getResourceCache();

    // Semi-usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
    // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace
    // them if the SkGpuDevice(s) write to them again.
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpsTask = nullptr;

    fDAG.prepForFlush();
    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);
    // TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the
    // stack here is to preserve the flush tokens.

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fDAG.gatherIDs(&fFlushingRenderTaskIDs);

        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
                                      fFlushingRenderTaskIDs.count());
        }
        for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
            onFlushRenderTask->makeClosed(*fContext->priv().caps());
#ifdef SK_DEBUG
            // OnFlush callbacks are invoked during flush, and are therefore expected to handle
            // resource allocation & usage on their own. (No deferred or lazy proxies!)
            onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
                    [](GrSurfaceProxy* p, GrMipMapped mipMapped) {
                SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                SkASSERT(!p->isLazy());
                if (p->requiresManualMSAAResolve()) {
                    // The onFlush callback is responsible for ensuring MSAA gets resolved.
                    SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty());
                }
                if (GrMipMapped::kYes == mipMapped) {
                    // The onFlush callback is responsible for regenerating mips if needed.
                    SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipMapsAreDirty());
                }
            });
#endif
            onFlushRenderTask->prepare(&flushState);
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    SkDEBUGCODE(SkDebugf("onFlush renderTasks:"));
    for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
        SkDEBUGCODE(onFlushRenderTask->dump();)
    }
    SkDEBUGCODE(SkDebugf("Normal renderTasks:"));
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        SkDEBUGCODE(fRenderTasks[i]->dump();)
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

    {
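        // Gather each proxy's lifetime interval across the render tasks so the allocator can
        // assign (and recycle) backing surfaces one allocation batch at a time.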
        GrResourceAllocator alloc(resourceProvider SkDEBUGCODE(, fDAG.numRenderTasks()));
        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fDAG.renderTask(i)) {
                fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
            }
            alloc.markEndOfOpsTask(i);
        }
        alloc.determineRecyclability();

        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        int numRenderTasksExecuted = 0;
        while (alloc.assign(&startIndex, &stopIndex, &error)) {
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                for (int i = startIndex; i < stopIndex; ++i) {
                    GrRenderTask* renderTask = fDAG.renderTask(i);
                    if (!renderTask) {
                        continue;
                    }
                    if (!renderTask->isInstantiated()) {
                        // No need to call the renderTask's handleInternalAllocationFailure
                        // since we will already skip executing the renderTask since it is not
                        // instantiated.
                        continue;
                    }
                    renderTask->handleInternalAllocationFailure();
                }
            }

            if (this->executeRenderTasks(
                    startIndex, stopIndex, &flushState, &numRenderTasksExecuted)) {
                flushed = true;
            }
        }
    }

#ifdef SK_DEBUG
    for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
        // If there are any remaining opsTasks at this point, make sure they will not survive
        // the flush. Otherwise we need to call endFlush() on them.
        // http://skbug.com/7111
        SkASSERT(!fDAG.renderTask(i) || fDAG.renderTask(i)->unique());
    }
#endif
    fDAG.reset();
    this->clearDDLTargets();

#ifdef SK_DEBUG
    // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
    // When we move to partial flushes this assert will no longer be valid.
    // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opsTasks
    // will be stored in the DDL's GrOpMemoryPools.
    GrOpMemoryPool* opMemoryPool = fContext->priv().opMemoryPool();
    // The bare isEmpty() call discarded its result; the comment above says this is meant to
    // be an assert.
    SkASSERT(opMemoryPool->isEmpty());
#endif

    GrSemaphoresSubmitted result = gpu->finishFlush(proxies, numProxies, access, info,
                                                    externalRequests);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        resourceCache->purgeAsNeeded();
        flushed = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs.begin(),
                                   fFlushingRenderTaskIDs.count());
        flushed = true;
    }
    if (flushed) {
        resourceCache->purgeAsNeeded();
    }
    fFlushingRenderTaskIDs.reset();
    fFlushing = false;

    return result;
}

bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState,
                                          int* numRenderTasksExecuted) {
    SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());

#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing opsTask: %d to %d out of [%d, %d]\n",
             startIndex, stopIndex, 0, fDAG.numRenderTasks());
    for (int i = startIndex; i < stopIndex; ++i) {
        if (fDAG.renderTask(i)) {
            fDAG.renderTask(i)->dump(true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

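    // Give each instantiated task a chance to prepare (e.g. record any uploads its ops need)
    // before anything executes.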
    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* renderTask = fDAG.renderTask(i);
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of
    // resources for each command buffer associated with the oplists. If this gets too large we
    // can cause the devices to go OOM. In practice we usually only hit this case in our tests,
    // but to be safe we put a cap on the number of oplists we will execute before flushing to
    // the GPU to relieve some memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;

    // Execute the onFlush renderTasks first, if any.
    for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        if (!onFlushRenderTask->execute(flushState)) {
            SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
        }
        SkASSERT(onFlushRenderTask->unique());
        onFlushRenderTask = nullptr;
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
                                           GrFlushInfo(), GrPrepareForExternalIORequests());
            *numRenderTasksExecuted = 0;
        }
    }
    fOnFlushRenderTasks.reset();

    // Execute the normal op lists.
    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* renderTask = fDAG.renderTask(i);
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
                                           GrFlushInfo(), GrPrepareForExternalIORequests());
            *numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    fDAG.removeRenderTasks(startIndex, stopIndex);

    return anyRenderTasksExecuted;
}

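// A minimal sketch of how a client might flush and resolve a surface's pending work via this
// method ('surfaceProxy' and 'drawingManager' are hypothetical names, not defined in this file):
//
//     GrSurfaceProxy* proxies[] = { surfaceProxy };
//     drawingManager->flushSurfaces(proxies, 1,
//                                   SkSurface::BackendSurfaceAccess::kNoAccess,
//                                   GrFlushInfo());
//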
GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[], int numProxies,
                                                      SkSurface::BackendSurfaceAccess access,
                                                      const GrFlushInfo& info) {
    if (this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);

    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    GrGpu* gpu = direct->priv().getGpu();
    if (!gpu) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    GrSemaphoresSubmitted result = this->flush(proxies, numProxies, access, info,
                                               GrPrepareForExternalIORequests());
    for (int i = 0; i < numProxies; ++i) {
        GrSurfaceProxy* proxy = proxies[i];
        if (!proxy->isInstantiated()) {
            return result;
        }
        // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
        // because the client will call through to this method when drawing into a target created
        // by wrapBackendTextureAsRenderTarget, and will expect the original texture to be fully
        // resolved upon return.
        if (proxy->requiresManualMSAAResolve()) {
            auto* rtProxy = proxy->asRenderTargetProxy();
            SkASSERT(rtProxy);
            if (rtProxy->isMSAADirty()) {
                SkASSERT(rtProxy->peekRenderTarget());
                gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect(),
                                         GrGpu::ForExternalIO::kYes);
                rtProxy->markMSAAResolved();
            }
        }
        // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
        // case their backend textures are being stolen.
        // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
        // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
        if (auto* textureProxy = proxy->asTextureProxy()) {
            if (textureProxy->mipMapsAreDirty()) {
                SkASSERT(textureProxy->peekTexture());
                gpu->regenerateMipMapLevels(textureProxy->peekTexture());
                textureProxy->markMipMapsClean();
            }
        }
    }

    SkDEBUGCODE(this->validate());
    return result;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.count());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

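// Transfers ownership of the recorded render tasks (plus the context's arenas, program data,
// and any pending CCPR paths) into 'ddl' for later replay.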
void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpsTask = nullptr;

    fDAG.swap(&ddl->fRenderTasks);
    SkASSERT(!fDAG.numRenderTasks());

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    if (fPathRendererChain) {
        if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
            ddl->fPendingPaths = ccpr->detachPendingPaths();
        }
    }

    SkDEBUGCODE(this->validate());
}

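// Adds a DDL's recorded render tasks to this drawing manager, redirecting their lazy replay
// target to draw into 'newDest'.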
void GrDrawingManager::copyRenderTasksFromDDL(const SkDeferredDisplayList* ddl,
                                              GrRenderTargetProxy* newDest) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }

    this->addDDLTarget(newDest);

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the copied opsTasks) will steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest;

    if (ddl->fPendingPaths.size()) {
        GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();

        ccpr->mergePendingPaths(ddl->fPendingPaths);
    }

    fDAG.add(ddl->fRenderTasks);

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
        SkASSERT(!fActiveOpsTask);
    } else {
        if (fActiveOpsTask) {
            SkASSERT(!fDAG.empty());
            SkASSERT(!fActiveOpsTask->isClosed());
            SkASSERT(fActiveOpsTask == fDAG.back());
        }

        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fActiveOpsTask != fDAG.renderTask(i)) {
                // The resolveTask associated with the activeTask remains open for as long as the
                // activeTask does.
                bool isActiveResolveTask =
                        fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG.renderTask(i);
                SkASSERT(isActiveResolveTask || fDAG.renderTask(i)->isClosed());
            }
        }

        if (!fDAG.empty() && !fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == fDAG.back());
        }
    }
}
#endif

void GrDrawingManager::closeRenderTasksForNewRenderTask(GrSurfaceProxy* target) {
    if (target && fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
        // In this case we need to close all the renderTasks that rely on the current contents of
        // 'target'. That is bc we're going to update the content of the proxy so they need to be
        // split in case they use both the old and new content. (This is a bit of an overkill:
        // they really only need to be split if they ever reference proxy's contents again but
        // that is hard to predict/handle).
        if (GrRenderTask* lastRenderTask = target->getLastRenderTask()) {
            lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
        }
    } else if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }
}

sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                              bool managedOpsTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    GrSurfaceProxy* proxy = surfaceView.proxy();
    this->closeRenderTasksForNewRenderTask(proxy);

    sk_sp<GrOpsTask> opsTask(new GrOpsTask(fContext->priv().arenas(),
                                           std::move(surfaceView),
                                           fContext->priv().auditTrail()));
    SkASSERT(proxy->getLastRenderTask() == opsTask.get());

    if (managedOpsTask) {
        fDAG.add(opsTask);

        if (!fDAG.sortingRenderTasks() || !fReduceOpsTaskSplitting) {
            fActiveOpsTask = opsTask.get();
        }
    }

    SkDEBUGCODE(this->validate());
    return opsTask;
}

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    return static_cast<GrTextureResolveRenderTask*>(fDAG.addBeforeLast(
            sk_make_sp<GrTextureResolveRenderTask>()));
}

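// Creates a closed task that waits on 'semaphores' before any later work on 'proxy' is allowed
// to execute.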
void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    const GrCaps& caps = *fContext->priv().caps();

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);
    if (fReduceOpsTaskSplitting) {
        GrRenderTask* lastTask = proxy->getLastRenderTask();
        if (lastTask && !lastTask->isClosed()) {
            // We directly make the currently open renderTask depend on waitTask instead of using
            // the proxy version of addDependency. The waitTask will never need to trigger any
            // resolves or mip map generation which is the main advantage of going through the
            // proxy version. Additionally we would've had to temporarily set the wait task as the
            // lastRenderTask on the proxy, add the dependency, and then reset the lastRenderTask
            // to lastTask. Additionally we add all dependencies of lastTask to waitTask so that
            // the waitTask doesn't get reordered before them and unnecessarily block those tasks.
            // Note: Any previous Ops already in lastTask will get blocked by the wait semaphore
            // even though they don't need to be for correctness.

            // Make sure we add the dependencies of lastTask to waitTask first or else we'll get a
            // circular self dependency of waitTask on waitTask.
            waitTask->addDependenciesFromOtherTask(lastTask);
            lastTask->addDependency(waitTask.get());
        } else {
            // If there is a last task we set the waitTask to depend on it so that it doesn't get
            // reordered in front of the lastTask causing the lastTask to be blocked by the
            // semaphore. Again we directly just go through adding the dependency to the task and
            // not the proxy since we don't need to worry about resolving anything.
            if (lastTask) {
                waitTask->addDependency(lastTask);
            }
            proxy->setLastRenderTask(waitTask.get());
        }
        fDAG.add(waitTask);
    } else {
        if (fActiveOpsTask && (fActiveOpsTask->fTargetView.proxy() == proxy.get())) {
            SkASSERT(proxy->getLastRenderTask() == fActiveOpsTask);
            fDAG.addBeforeLast(waitTask);
            // In this case we keep the current renderTask open but just insert the new waitTask
            // before it in the list. The waitTask will never need to trigger any resolves or mip
            // map generation which is the main advantage of going through the proxy version.
            // Additionally we would've had to temporarily set the wait task as the lastRenderTask
            // on the proxy, add the dependency, and then reset the lastRenderTask to
            // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
            // dependencies so that we don't unnecessarily reorder the waitTask before them.
            // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
            // semaphore even though they don't need to be for correctness.

            // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
            // get a circular self dependency of waitTask on waitTask.
            waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
            fActiveOpsTask->addDependency(waitTask.get());
        } else {
            // In this case we just close the previous RenderTask and start and append the waitTask
            // to the DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
            // there is a lastTask on the proxy we make waitTask depend on that task. This
            // dependency isn't strictly needed but it does keep the DAG from reordering the
            // waitTask earlier and blocking more tasks.
            if (GrRenderTask* lastTask = proxy->getLastRenderTask()) {
                waitTask->addDependency(lastTask);
            }
            proxy->setLastRenderTask(waitTask.get());
            this->closeRenderTasksForNewRenderTask(proxy.get());
            fDAG.add(waitTask);
        }
    }
    waitTask->makeClosed(caps);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    // This copies from srcProxy to dstBuffer so it doesn't have a real target.
    this->closeRenderTasksForNewRenderTask(nullptr);

    GrRenderTask* task = fDAG.add(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType, std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say GrMipMapped::kNo here since we are always just copying from the base layer. We
    // don't need to make sure the whole mip map chain is valid.
    task->addDependency(srcProxy.get(), GrMipMapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

bool GrDrawingManager::newCopyRenderTask(GrSurfaceProxyView srcView,
                                         const SkIRect& srcRect,
                                         GrSurfaceProxyView dstView,
                                         const SkIPoint& dstPoint) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeRenderTasksForNewRenderTask(dstView.proxy());
    const GrCaps& caps = *fContext->priv().caps();

    GrSurfaceProxy* srcProxy = srcView.proxy();

    GrRenderTask* task =
            fDAG.add(GrCopyRenderTask::Make(std::move(srcView), srcRect, std::move(dstView),
                                            dstPoint, &caps));
    if (!task) {
        return false;
    }

    // We always say GrMipMapped::kNo here since we are always just copying from the base layer to
    // another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(srcProxy, GrMipMapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

GrTextContext* GrDrawingManager::getTextContext() {
    if (!fTextContext) {
        fTextContext = GrTextContext::Make(fOptionsForTextContext);
    }

    return fTextContext.get();
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {
    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

    return pr;
}

GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(
                new GrSoftwarePathRenderer(fContext->priv().proxyProvider(),
                                           fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }
    return fPathRendererChain->getCoverageCountingPathRenderer();
}

void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
                    GrPrepareForExternalIORequests());
        resourceCache->purgeAsNeeded();
    }
}