/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceAllocator.h"

#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfacePriv.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTextureProxy.h"

#if GR_TRACK_INTERVAL_CREATION
#include <atomic>

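// Generate a unique, non-zero ID for a newly created Interval (only compiled in when
// GR_TRACK_INTERVAL_CREATION is enabled).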
uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID++;
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

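// Attach the chosen GrSurface to this interval and to its proxy.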
void GrResourceAllocator::Interval::assign(sk_sp<GrSurface> s) {
    SkASSERT(!fAssignedSurface);
    fAssignedSurface = s;
    fProxy->priv().assign(std::move(s));
}

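// Mark as recyclable every interval whose proxy's ref count is fully accounted for by the
// allocator's recorded uses; such a proxy's surface can be returned to the free pool when
// the interval expires.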
void GrResourceAllocator::determineRecyclability() {
    for (Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        if (cur->proxy()->canSkipResourceAllocator()) {
            // These types of proxies can slip in here if they require a stencil buffer
            continue;
        }

        if (cur->uses() >= cur->proxy()->refCnt()) {
            // All the refs on the proxy are known to the resource allocator, thus no one
            // should be holding onto it outside of Ganesh.
            SkASSERT(cur->uses() == cur->proxy()->refCnt());
            cur->markAsRecyclable();
        }
    }
}

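// Note that all the ops for opsTask 'opsTaskIndex' have been added; the current op index
// therefore marks where the next opsTask begins.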
void GrResourceAllocator::markEndOfOpsTask(int opsTaskIndex) {
    SkASSERT(!fAssigned);  // We shouldn't be adding any opsTasks after (or during) assignment

    SkASSERT(fEndOfOpsTaskOpIndices.count() == opsTaskIndex);
    if (!fEndOfOpsTaskOpIndices.empty()) {
        SkASSERT(fEndOfOpsTaskOpIndices.back() < this->curOp());
    }

    // This is the first op index of the next opsTask
    fEndOfOpsTaskOpIndices.push_back(this->curOp());
    SkASSERT(fEndOfOpsTaskOpIndices.count() <= fNumOpsTasks);
}

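// All intervals should have been assigned and expired before the allocator is destroyed.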
GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

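// Record that 'proxy' is used over the op range [start, end]. Proxies that can skip the
// allocator and read-only proxies are handled immediately and get no interval; a proxy seen
// before has its existing interval extended.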
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        // If the proxy is still not instantiated at this point but will need stencil, it will
        // attach its own stencil buffer upon onFlush instantiation.
        if (proxy->isInstantiated()) {
            auto rt = proxy->asRenderTargetProxy();
            int minStencilSampleCount = rt ? rt->numStencilSamples() : 0;
            if (minStencilSampleCount) {
                if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(
                        fResourceProvider, proxy->peekSurface(), minStencilSampleCount)) {
                    SkDebugf("WARNING: failed to attach stencil buffer. "
                             "Rendering may be incorrect.\n");
                }
            }
        }
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(fResourceProvider)) {
            fLazyInstantiationError = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or be a lazy proxy that we instantiated above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
        // Revise the interval for an existing use
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl;
    if (fFreeIntervalList) {
        newIntvl = fFreeIntervalList;
        fFreeIntervalList = newIntvl->next();
        newIntvl->setNext(nullptr);
        newIntvl->resetTo(proxy, start, end);
    } else {
        newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
    }

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.add(newIntvl);
}

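// Detach and return the head of the list (nullptr if the list is empty).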
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

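// Insert 'intvl' keeping the list sorted by increasing start index. The percentages note how
// often each branch is taken in practice.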
// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

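// Insert 'intvl' keeping the list sorted by increasing end index (the order in which active
// intervals expire).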
// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
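// Check the list invariants: head and tail are either both set or both null, and tail is the
// last node reachable from head.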
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

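// Return the entire chain of intervals and leave this list empty.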
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() {
    Interval* tmp = fHead;
    fHead = nullptr;
    fTail = nullptr;
    return tmp;
}

// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::recycleSurface(sk_sp<GrSurface> surface) {
    const GrScratchKey& key = surface->resourcePriv().getScratchKey();

    if (!key.isValid()) {
        return; // can't do it w/o a valid scratch key
    }

    if (surface->getUniqueKey().isValid()) {
        // If the surface has a unique key we throw it back into the resource cache.
        // If things get really tight 'findSurfaceFor' may pull it back out but there is
        // no need to have it in tight rotation.
        return;
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("putting surface %d back into pool\n", surface->uniqueID().asUInt());
#endif
    // TODO: fix this insertion so we get a more LRU-ish behavior
    fFreePool.insert(key, surface.release());
}

// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
// If we can't find a usable one, create a new one.
sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy,
                                                     int minStencilSampleCount) {

    if (proxy->asTextureProxy() && proxy->asTextureProxy()->getUniqueKey().isValid()) {
        // First try to reattach to a cached version if the proxy is uniquely keyed
        sk_sp<GrSurface> surface = fResourceProvider->findByUniqueKey<GrSurface>(
                proxy->asTextureProxy()->getUniqueKey());
        if (surface) {
            if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
                                                           minStencilSampleCount)) {
                return nullptr;
            }

            return surface;
        }
    }

    // Next, look in the free pool
    GrScratchKey key;

    proxy->priv().computeScratchKey(&key);

    auto filter = [] (const GrSurface* s) {
        return true;
    };
    sk_sp<GrSurface> surface(fFreePool.findAndRemove(key, filter));
    if (surface) {
        if (SkBudgeted::kYes == proxy->isBudgeted() &&
            GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
            // This gets the job done but isn't quite correct. It would be better to try to
            // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
            surface->resourcePriv().makeBudgeted();
        }

        if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
                                                       minStencilSampleCount)) {
            return nullptr;
        }
        SkASSERT(!surface->getUniqueKey().isValid());
        return surface;
    }

    // Failing that, try to grab a new one from the resource cache
    return proxy->priv().createSurface(fResourceProvider);
}

// Remove any intervals that end before the current index. Return their GrSurfaces
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* temp = fActiveIntvls.popHead();
        SkASSERT(!temp->next());

        if (temp->wasAssignedSurface()) {
            sk_sp<GrSurface> surface = temp->detachSurface();

            if (temp->isRecyclable()) {
                this->recycleSurface(std::move(surface));
            }
        }

        // Add temp to the free interval list so it can be reused
        SkASSERT(!temp->wasAssignedSurface()); // it had better not have a ref on a surface
        temp->setNext(fFreeIntervalList);
        fFreeIntervalList = temp;
    }
}

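// Return true if all the remaining intervals begin at or after the end of the current opsTask
// (i.e., an intermediate flush taken here would not split any interval).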
bool GrResourceAllocator::onOpsTaskBoundary() const {
    if (fIntvlList.empty()) {
        SkASSERT(fCurOpsTaskIndex+1 <= fNumOpsTasks);
        // Although we are technically on an opsTask boundary, there is no need to force an
        // intermediate flush here
        return false;
    }

    const Interval* tmp = fIntvlList.peekHead();
    return fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start();
}

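// Interrupt assignment after the current opsTask: record the reduced stop index, advance to the
// next opsTask, and expire active intervals so none outlive the partial flush.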
void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
    *stopIndex = fCurOpsTaskIndex+1;

    // This is interrupting the allocation of resources for this flush. We need to
    // proactively clear the active interval list of any intervals that aren't
    // guaranteed to survive the partial flush lest they become zombies (i.e.,
    // holding a deleted surface proxy).
    const Interval* tmp = fIntvlList.peekHead();
    SkASSERT(fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start());

    fCurOpsTaskIndex++;
    SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);

    this->expire(tmp->start());
}

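// Assign GrSurfaces to the proxies of all pending intervals, reporting the opsTask range covered
// via 'startIndex'/'stopIndex'. Returns false only when there is nothing left to render.
// Instantiation failures are reported through 'outError'; if the resource provider goes over
// budget, assignment may stop early on an opsTask boundary with a reduced '*stopIndex'.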
bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
    SkASSERT(outError);
    *outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
                                        : AssignError::kNoError;

    SkASSERT(fNumOpsTasks == fEndOfOpsTaskOpIndices.count());

    fIntvlHash.reset(); // we don't need the interval hash anymore

    if (fCurOpsTaskIndex >= fEndOfOpsTaskOpIndices.count()) {
        return false; // nothing to render
    }

    *startIndex = fCurOpsTaskIndex;
    *stopIndex = fEndOfOpsTaskOpIndices.count();

    if (fIntvlList.empty()) {
        fCurOpsTaskIndex = fEndOfOpsTaskOpIndices.count();
        return true; // no resources to assign
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning opsTasks %d through %d out of %d numOpsTasks\n",
             *startIndex, *stopIndex, fNumOpsTasks);
    SkDebugf("EndOfOpsTaskIndices: ");
    for (int i = 0; i < fEndOfOpsTaskOpIndices.count(); ++i) {
        SkDebugf("%d ", fEndOfOpsTaskOpIndices[i]);
    }
    SkDebugf("\n");
#endif

    SkDEBUGCODE(fAssigned = true;)

#if GR_ALLOCATION_SPEW
    this->dumpIntervals();
#endif
    while (Interval* cur = fIntvlList.popHead()) {
        while (fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= cur->start()) {
            fCurOpsTaskIndex++;
            SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
        }

        this->expire(cur->start());

        int minStencilSampleCount = (cur->proxy()->asRenderTargetProxy())
                ? cur->proxy()->asRenderTargetProxy()->numStencilSamples()
                : 0;

        if (cur->proxy()->isInstantiated()) {
            if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(
                    fResourceProvider, cur->proxy()->peekSurface(), minStencilSampleCount)) {
                *outError = AssignError::kFailedProxyInstantiation;
            }

            fActiveIntvls.insertByIncreasingEnd(cur);

            if (fResourceProvider->overBudget()) {
                // Only force intermediate flushes on opsTask boundaries
                if (this->onOpsTaskBoundary()) {
                    this->forceIntermediateFlush(stopIndex);
                    return true;
                }
            }

            continue;
        }

        if (cur->proxy()->isLazy()) {
            if (!cur->proxy()->priv().doLazyInstantiation(fResourceProvider)) {
                *outError = AssignError::kFailedProxyInstantiation;
            }
        } else if (sk_sp<GrSurface> surface =
                           this->findSurfaceFor(cur->proxy(), minStencilSampleCount)) {
            // TODO: make getUniqueKey virtual on GrSurfaceProxy
            GrTextureProxy* texProxy = cur->proxy()->asTextureProxy();

            if (texProxy && texProxy->getUniqueKey().isValid()) {
                if (!surface->getUniqueKey().isValid()) {
                    fResourceProvider->assignUniqueKeyToResource(texProxy->getUniqueKey(),
                                                                 surface.get());
                }
                SkASSERT(surface->getUniqueKey() == texProxy->getUniqueKey());
            }

#if GR_ALLOCATION_SPEW
            SkDebugf("Assigning %d to %d\n",
                     surface->uniqueID().asUInt(),
                     cur->proxy()->uniqueID().asUInt());
#endif

            cur->assign(std::move(surface));
        } else {
            SkASSERT(!cur->proxy()->isInstantiated());
            *outError = AssignError::kFailedProxyInstantiation;
        }

        fActiveIntvls.insertByIncreasingEnd(cur);

        if (fResourceProvider->overBudget()) {
            // Only force intermediate flushes on opsTask boundaries
            if (this->onOpsTaskBoundary()) {
                this->forceIntermediateFlush(stopIndex);
                return true;
            }
        }
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return true;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = SkTMin(min, cur->start());
        max = SkTMax(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif