1 /*
2 * Copyright 2010 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GrDrawTarget.h"
9
10 #include "GrAppliedClip.h"
11 #include "GrAuditTrail.h"
12 #include "GrCaps.h"
13 #include "GrDrawContext.h"
14 #include "GrGpu.h"
15 #include "GrGpuCommandBuffer.h"
16 #include "GrPath.h"
17 #include "GrPipeline.h"
18 #include "GrMemoryPool.h"
19 #include "GrPipelineBuilder.h"
20 #include "GrRenderTarget.h"
21 #include "GrResourceProvider.h"
22 #include "GrRenderTargetPriv.h"
23 #include "GrStencilAttachment.h"
24 #include "GrSurfacePriv.h"
25 #include "GrTexture.h"
26 #include "gl/GrGLRenderTarget.h"
27
28 #include "SkStrokeRec.h"
29
30 #include "batches/GrClearBatch.h"
31 #include "batches/GrClearStencilClipBatch.h"
32 #include "batches/GrCopySurfaceBatch.h"
33 #include "batches/GrDiscardBatch.h"
34 #include "batches/GrDrawBatch.h"
35 #include "batches/GrDrawPathBatch.h"
36 #include "batches/GrRectBatchFactory.h"
37 #include "batches/GrStencilPathBatch.h"
38
39 #include "instanced/InstancedRendering.h"
40
41 ////////////////////////////////////////////////////////////////////////////////
42
43 // Experimentally we have found that most batching occurs within the first 10 comparisons.
44 static const int kDefaultMaxBatchLookback = 10;
45 static const int kDefaultMaxBatchLookahead = 10;
46
GrDrawTarget::GrDrawTarget(GrRenderTarget* rt, GrGpu* gpu, GrResourceProvider* resourceProvider,
                           GrAuditTrail* auditTrail, const Options& options)
    : fLastFullClearBatch(nullptr)
    , fGpu(SkRef(gpu))                  // take a ref on the GrGpu; released in ~GrDrawTarget
    , fResourceProvider(resourceProvider)
    , fAuditTrail(auditTrail)
    , fFlags(0)
    , fRenderTarget(rt) {
    // TODO: Stop extracting the context (currently needed by GrClip)
    fContext = fGpu->getContext();

    // Copy debug/tuning options; a negative lookback/lookahead means "use the default".
    fClipBatchToBounds = options.fClipBatchToBounds;
    fDrawBatchBounds = options.fDrawBatchBounds;
    fMaxBatchLookback = (options.fMaxBatchLookback < 0) ? kDefaultMaxBatchLookback :
                                                          options.fMaxBatchLookback;
    fMaxBatchLookahead = (options.fMaxBatchLookahead < 0) ? kDefaultMaxBatchLookahead :
                                                            options.fMaxBatchLookahead;

    // Only create the instanced-rendering helper when the GPU reports any support for it.
    if (GrCaps::InstancedSupport::kNone != this->caps()->instancedSupport()) {
        fInstancedRendering.reset(fGpu->createInstancedRendering());
    }

    // Register this object as the render target's most recent draw target.
    rt->setLastDrawTarget(this);

#ifdef SK_DEBUG
    // Monotonically increasing ID, used only by the debug dump() output.
    static int debugID = 0;
    fDebugID = debugID++;
#endif
}
76
~GrDrawTarget()77 GrDrawTarget::~GrDrawTarget() {
78 if (fRenderTarget && this == fRenderTarget->getLastDrawTarget()) {
79 fRenderTarget->setLastDrawTarget(nullptr);
80 }
81
82 fGpu->unref();
83 }
84
85 ////////////////////////////////////////////////////////////////////////////////
86
87 // Add a GrDrawTarget-based dependency
addDependency(GrDrawTarget * dependedOn)88 void GrDrawTarget::addDependency(GrDrawTarget* dependedOn) {
89 SkASSERT(!dependedOn->dependsOn(this)); // loops are bad
90
91 if (this->dependsOn(dependedOn)) {
92 return; // don't add duplicate dependencies
93 }
94
95 *fDependencies.push() = dependedOn;
96 }
97
98 // Convert from a GrSurface-based dependency to a GrDrawTarget one
addDependency(GrSurface * dependedOn)99 void GrDrawTarget::addDependency(GrSurface* dependedOn) {
100 if (dependedOn->asRenderTarget() && dependedOn->asRenderTarget()->getLastDrawTarget()) {
101 // If it is still receiving dependencies, this DT shouldn't be closed
102 SkASSERT(!this->isClosed());
103
104 GrDrawTarget* dt = dependedOn->asRenderTarget()->getLastDrawTarget();
105 if (dt == this) {
106 // self-read - presumably for dst reads
107 } else {
108 this->addDependency(dt);
109
110 // Can't make it closed in the self-read case
111 dt->makeClosed();
112 }
113 }
114 }
115
116 #ifdef SK_DEBUG
// Debug-only: prints this draw target's ID, its dependency list, and every recorded batch
// (name, dumpInfo, and clipped bounds) via SkDebugf.
void GrDrawTarget::dump() const {
    SkDebugf("--------------------------------------------------------------\n");
    SkDebugf("node: %d -> RT: %d\n", fDebugID, fRenderTarget ? fRenderTarget->uniqueID() : -1);
    SkDebugf("relies On (%d): ", fDependencies.count());
    for (int i = 0; i < fDependencies.count(); ++i) {
        SkDebugf("%d, ", fDependencies[i]->fDebugID);
    }
    SkDebugf("\n");
    SkDebugf("batches (%d):\n", fRecordedBatches.count());
    for (int i = 0; i < fRecordedBatches.count(); ++i) {
        SkDebugf("*******************************\n");
        // A null fBatch marks a slot whose batch was merged forward into a later batch
        // (see forwardCombine()).
        if (!fRecordedBatches[i].fBatch) {
            SkDebugf("%d: <combined forward>\n", i);
        } else {
            SkDebugf("%d: %s\n", i, fRecordedBatches[i].fBatch->name());
            SkString str = fRecordedBatches[i].fBatch->dumpInfo();
            SkDebugf("%s\n", str.c_str());
            const SkRect& clippedBounds = fRecordedBatches[i].fClippedBounds;
            SkDebugf("ClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
                     clippedBounds.fLeft, clippedBounds.fTop, clippedBounds.fRight,
                     clippedBounds.fBottom);
        }
    }
}
141 #endif
142
setupDstReadIfNecessary(const GrPipelineBuilder & pipelineBuilder,GrRenderTarget * rt,const GrClip & clip,const GrPipelineOptimizations & optimizations,GrXferProcessor::DstTexture * dstTexture,const SkRect & batchBounds)143 bool GrDrawTarget::setupDstReadIfNecessary(const GrPipelineBuilder& pipelineBuilder,
144 GrRenderTarget* rt,
145 const GrClip& clip,
146 const GrPipelineOptimizations& optimizations,
147 GrXferProcessor::DstTexture* dstTexture,
148 const SkRect& batchBounds) {
149 SkRect bounds = batchBounds;
150 bounds.outset(0.5f, 0.5f);
151
152 if (!pipelineBuilder.willXPNeedDstTexture(*this->caps(), optimizations)) {
153 return true;
154 }
155
156 if (this->caps()->textureBarrierSupport()) {
157 if (GrTexture* rtTex = rt->asTexture()) {
158 // The render target is a texture, so we can read from it directly in the shader. The XP
159 // will be responsible to detect this situation and request a texture barrier.
160 dstTexture->setTexture(rtTex);
161 dstTexture->setOffset(0, 0);
162 return true;
163 }
164 }
165
166 SkIRect copyRect;
167 clip.getConservativeBounds(rt->width(), rt->height(), ©Rect);
168
169 SkIRect drawIBounds;
170 bounds.roundOut(&drawIBounds);
171 if (!copyRect.intersect(drawIBounds)) {
172 #ifdef SK_DEBUG
173 GrCapsDebugf(this->caps(), "Missed an early reject. "
174 "Bailing on draw from setupDstReadIfNecessary.\n");
175 #endif
176 return false;
177 }
178
179 // MSAA consideration: When there is support for reading MSAA samples in the shader we could
180 // have per-sample dst values by making the copy multisampled.
181 GrSurfaceDesc desc;
182 if (!fGpu->initDescForDstCopy(rt, &desc)) {
183 desc.fOrigin = kDefault_GrSurfaceOrigin;
184 desc.fFlags = kRenderTarget_GrSurfaceFlag;
185 desc.fConfig = rt->config();
186 }
187
188 desc.fWidth = copyRect.width();
189 desc.fHeight = copyRect.height();
190
191 static const uint32_t kFlags = 0;
192 SkAutoTUnref<GrTexture> copy(fResourceProvider->createApproxTexture(desc, kFlags));
193
194 if (!copy) {
195 SkDebugf("Failed to create temporary copy of destination texture.\n");
196 return false;
197 }
198 SkIPoint dstPoint = {0, 0};
199 this->copySurface(copy, rt, copyRect, dstPoint);
200 dstTexture->setTexture(copy);
201 dstTexture->setOffset(copyRect.fLeft, copyRect.fTop);
202 return true;
203 }
204
prepareBatches(GrBatchFlushState * flushState)205 void GrDrawTarget::prepareBatches(GrBatchFlushState* flushState) {
206 // Semi-usually the drawTargets are already closed at this point, but sometimes Ganesh
207 // needs to flush mid-draw. In that case, the SkGpuDevice's drawTargets won't be closed
208 // but need to be flushed anyway. Closing such drawTargets here will mean new
209 // drawTargets will be created to replace them if the SkGpuDevice(s) write to them again.
210 this->makeClosed();
211
212 // Loop over the batches that haven't yet generated their geometry
213 for (int i = 0; i < fRecordedBatches.count(); ++i) {
214 if (fRecordedBatches[i].fBatch) {
215 fRecordedBatches[i].fBatch->prepare(flushState);
216 }
217 }
218
219 if (fInstancedRendering) {
220 fInstancedRendering->beginFlush(flushState->resourceProvider());
221 }
222 }
223
drawBatches(GrBatchFlushState * flushState)224 bool GrDrawTarget::drawBatches(GrBatchFlushState* flushState) {
225 if (0 == fRecordedBatches.count()) {
226 return false;
227 }
228 // Draw all the generated geometry.
229 SkRandom random;
230 GrRenderTarget* currentRT = nullptr;
231 SkAutoTDelete<GrGpuCommandBuffer> commandBuffer;
232 SkRect bounds = SkRect::MakeEmpty();
233 for (int i = 0; i < fRecordedBatches.count(); ++i) {
234 if (!fRecordedBatches[i].fBatch) {
235 continue;
236 }
237 if (fRecordedBatches[i].fBatch->renderTarget() != currentRT) {
238 if (commandBuffer) {
239 commandBuffer->end();
240 if (bounds.intersect(0, 0,
241 SkIntToScalar(currentRT->width()),
242 SkIntToScalar(currentRT->height()))) {
243 SkIRect iBounds;
244 bounds.roundOut(&iBounds);
245 commandBuffer->submit(iBounds);
246 }
247 commandBuffer.reset();
248 }
249 bounds.setEmpty();
250 currentRT = fRecordedBatches[i].fBatch->renderTarget();
251 if (currentRT) {
252 static const GrGpuCommandBuffer::LoadAndStoreInfo kBasicLoadStoreInfo
253 { GrGpuCommandBuffer::LoadOp::kLoad,GrGpuCommandBuffer::StoreOp::kStore,
254 GrColor_ILLEGAL };
255 commandBuffer.reset(fGpu->createCommandBuffer(currentRT,
256 kBasicLoadStoreInfo, // Color
257 kBasicLoadStoreInfo)); // Stencil
258 }
259 flushState->setCommandBuffer(commandBuffer);
260 }
261 if (commandBuffer) {
262 bounds.join(fRecordedBatches[i].fClippedBounds);
263 }
264 if (fDrawBatchBounds) {
265 const SkRect& bounds = fRecordedBatches[i].fClippedBounds;
266 SkIRect ibounds;
267 bounds.roundOut(&ibounds);
268 // In multi-draw buffer all the batches use the same render target and we won't need to
269 // get the batchs bounds.
270 if (GrRenderTarget* rt = fRecordedBatches[i].fBatch->renderTarget()) {
271 fGpu->drawDebugWireRect(rt, ibounds, 0xFF000000 | random.nextU());
272 }
273 }
274 fRecordedBatches[i].fBatch->draw(flushState);
275 }
276 if (commandBuffer) {
277 commandBuffer->end();
278 if (bounds.intersect(0, 0,
279 SkIntToScalar(currentRT->width()),
280 SkIntToScalar(currentRT->height()))) {
281 SkIRect iBounds;
282 bounds.roundOut(&iBounds);
283 commandBuffer->submit(iBounds);
284 }
285 flushState->setCommandBuffer(nullptr);
286 }
287
288 fGpu->finishDrawTarget();
289 return true;
290 }
291
reset()292 void GrDrawTarget::reset() {
293 fLastFullClearBatch = nullptr;
294 fRecordedBatches.reset();
295 if (fInstancedRendering) {
296 fInstancedRendering->endFlush();
297 }
298 }
299
batch_bounds(SkRect * bounds,const GrBatch * batch)300 static void batch_bounds(SkRect* bounds, const GrBatch* batch) {
301 *bounds = batch->bounds();
302 if (batch->hasZeroArea()) {
303 if (batch->hasAABloat()) {
304 bounds->outset(0.5f, 0.5f);
305 } else {
306 // We don't know which way the particular GPU will snap lines or points at integer
307 // coords. So we ensure that the bounds is large enough for either snap.
308 SkRect before = *bounds;
309 bounds->roundOut(bounds);
310 if (bounds->fLeft == before.fLeft) {
311 bounds->fLeft -= 1;
312 }
313 if (bounds->fTop == before.fTop) {
314 bounds->fTop -= 1;
315 }
316 if (bounds->fRight == before.fRight) {
317 bounds->fRight += 1;
318 }
319 if (bounds->fBottom == before.fBottom) {
320 bounds->fBottom += 1;
321 }
322 }
323 }
324 }
325
// Records a draw batch: applies the clip, attaches a stencil buffer when needed, builds
// the GrPipeline args (including scissor and dst-read setup), installs the pipeline on
// the batch, and finally hands it to recordBatch(). Returns early (skipping the draw)
// whenever any stage determines nothing would be drawn or setup fails.
void GrDrawTarget::drawBatch(const GrPipelineBuilder& pipelineBuilder,
                             GrDrawContext* drawContext,
                             const GrClip& clip,
                             GrDrawBatch* batch) {
    // Setup clip
    SkRect bounds;
    batch_bounds(&bounds, batch);
    GrAppliedClip appliedClip(bounds);
    if (!clip.apply(fContext, drawContext, pipelineBuilder.isHWAntialias(),
                    pipelineBuilder.hasUserStencilSettings(), &appliedClip)) {
        // The draw is clipped out entirely.
        return;
    }

    // TODO: this is the only remaining usage of the AutoRestoreFragmentProcessorState - remove it
    GrPipelineBuilder::AutoRestoreFragmentProcessorState arfps;
    if (appliedClip.clipCoverageFragmentProcessor()) {
        arfps.set(&pipelineBuilder);
        arfps.addCoverageFragmentProcessor(sk_ref_sp(appliedClip.clipCoverageFragmentProcessor()));
    }

    // A stencil attachment is required both for user stencil settings and for stencil clipping.
    if (pipelineBuilder.hasUserStencilSettings() || appliedClip.hasStencilClip()) {
        if (!fResourceProvider->attachStencilAttachment(drawContext->accessRenderTarget())) {
            SkDebugf("ERROR creating stencil attachment. Draw skipped.\n");
            return;
        }
    }

    GrPipeline::CreateArgs args;
    args.fPipelineBuilder = &pipelineBuilder;
    args.fDrawContext = drawContext;
    args.fCaps = this->caps();
    batch->getPipelineOptimizations(&args.fOpts);
    if (args.fOpts.fOverrides.fUsePLSDstRead || fClipBatchToBounds) {
        // Add a scissor covering the batch's pixel-snapped bounds, pinned to the viewport.
        GrGLIRect viewport;
        viewport.fLeft = 0;
        viewport.fBottom = 0;
        viewport.fWidth = drawContext->width();
        viewport.fHeight = drawContext->height();
        SkIRect ibounds;
        // Floor the min edges and ceil the max edges so the scissor fully covers the draw.
        ibounds.fLeft = SkTPin(SkScalarFloorToInt(batch->bounds().fLeft), viewport.fLeft,
                               viewport.fWidth);
        ibounds.fTop = SkTPin(SkScalarFloorToInt(batch->bounds().fTop), viewport.fBottom,
                              viewport.fHeight);
        ibounds.fRight = SkTPin(SkScalarCeilToInt(batch->bounds().fRight), viewport.fLeft,
                                viewport.fWidth);
        ibounds.fBottom = SkTPin(SkScalarCeilToInt(batch->bounds().fBottom), viewport.fBottom,
                                 viewport.fHeight);
        if (!appliedClip.addScissor(ibounds)) {
            return;
        }
    }
    args.fOpts.fColorPOI.completeCalculations(
        sk_sp_address_as_pointer_address(pipelineBuilder.fColorFragmentProcessors.begin()),
        pipelineBuilder.numColorFragmentProcessors());
    args.fOpts.fCoveragePOI.completeCalculations(
        sk_sp_address_as_pointer_address(pipelineBuilder.fCoverageFragmentProcessors.begin()),
        pipelineBuilder.numCoverageFragmentProcessors());
    args.fScissor = &appliedClip.scissorState();
    args.fWindowRectsState = &appliedClip.windowRectsState();
    args.fHasStencilClip = appliedClip.hasStencilClip();
    // Arrange dst access if the XP reads the destination; bail if that determines a no-op.
    if (!this->setupDstReadIfNecessary(pipelineBuilder, drawContext->accessRenderTarget(),
                                       clip, args.fOpts,
                                       &args.fDstTexture, batch->bounds())) {
        return;
    }

    if (!batch->installPipeline(args)) {
        return;
    }

#ifdef ENABLE_MDB
    SkASSERT(fRenderTarget);
    batch->pipeline()->addDependenciesTo(fRenderTarget);
#endif
    this->recordBatch(batch, appliedClip.clippedDrawBounds());
}
402
// Records a batch that renders a path into the stencil buffer (the "stencil" half of
// stencil-then-cover path rendering). Requires GPU path-rendering support.
void GrDrawTarget::stencilPath(GrDrawContext* drawContext,
                               const GrClip& clip,
                               bool useHWAA,
                               const SkMatrix& viewMatrix,
                               const GrPath* path) {
    // TODO: extract portions of checkDraw that are relevant to path stenciling.
    SkASSERT(path);
    SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());

    // FIXME: Use path bounds instead of this WAR once
    // https://bugs.chromium.org/p/skia/issues/detail?id=5640 is resolved.
    SkRect bounds = SkRect::MakeIWH(drawContext->width(), drawContext->height());

    // Setup clip
    GrAppliedClip appliedClip(bounds);
    if (!clip.apply(fContext, drawContext, useHWAA, true, &appliedClip)) {
        return;
    }
    // TODO: respect fClipBatchToBounds if we ever start computing bounds here.

    // Coverage AA does not make sense when rendering to the stencil buffer. The caller should never
    // attempt this in a situation that would require coverage AA.
    SkASSERT(!appliedClip.clipCoverageFragmentProcessor());

    GrStencilAttachment* stencilAttachment = fResourceProvider->attachStencilAttachment(
                                                 drawContext->accessRenderTarget());
    if (!stencilAttachment) {
        SkDebugf("ERROR creating stencil attachment. Draw skipped.\n");
        return;
    }

    GrBatch* batch = GrStencilPathBatch::Create(viewMatrix,
                                                useHWAA,
                                                path->getFillType(),
                                                appliedClip.hasStencilClip(),
                                                stencilAttachment->bits(),
                                                appliedClip.scissorState(),
                                                drawContext->accessRenderTarget(),
                                                path);
    this->recordBatch(batch, appliedClip.clippedDrawBounds());
    batch->unref();  // recordBatch took its own ref
}
445
addBatch(sk_sp<GrBatch> batch)446 void GrDrawTarget::addBatch(sk_sp<GrBatch> batch) {
447 this->recordBatch(batch.get(), batch->bounds());
448 }
449
// Records a full-target clear. Consecutive full clears of the same render target are
// folded into one batch by just updating the previous clear's color.
void GrDrawTarget::fullClear(GrRenderTarget* renderTarget, GrColor color) {
    // Currently this just inserts or updates the last clear batch. However, once in MDB this can
    // remove all the previously recorded batches and change the load op to clear with supplied
    // color.
    if (fLastFullClearBatch &&
        fLastFullClearBatch->renderTargetUniqueID() == renderTarget->uniqueID()) {
        // As currently implemented, fLastFullClearBatch should be the last batch because we would
        // have cleared it when another batch was recorded.
        SkASSERT(fRecordedBatches.back().fBatch.get() == fLastFullClearBatch);
        // Fold into the previous full clear by just changing its color.
        fLastFullClearBatch->setColor(color);
        return;
    }
    sk_sp<GrClearBatch> batch(GrClearBatch::Make(GrFixedClip::Disabled(), color, renderTarget));
    // recordBatch may combine our batch into an earlier one; only cache it if ours survived.
    if (batch.get() == this->recordBatch(batch.get(), batch->bounds())) {
        fLastFullClearBatch = batch.get();
    }
}
467
discard(GrRenderTarget * renderTarget)468 void GrDrawTarget::discard(GrRenderTarget* renderTarget) {
469 // Currently this just inserts a discard batch. However, once in MDB this can remove all the
470 // previously recorded batches and change the load op to discard.
471 if (this->caps()->discardRenderTargetSupport()) {
472 GrBatch* batch = new GrDiscardBatch(renderTarget);
473 this->recordBatch(batch, batch->bounds());
474 batch->unref();
475 }
476 }
477
478 ////////////////////////////////////////////////////////////////////////////////
479
copySurface(GrSurface * dst,GrSurface * src,const SkIRect & srcRect,const SkIPoint & dstPoint)480 bool GrDrawTarget::copySurface(GrSurface* dst,
481 GrSurface* src,
482 const SkIRect& srcRect,
483 const SkIPoint& dstPoint) {
484 GrBatch* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
485 if (!batch) {
486 return false;
487 }
488 #ifdef ENABLE_MDB
489 this->addDependency(src);
490 #endif
491
492 this->recordBatch(batch, batch->bounds());
493 batch->unref();
494 return true;
495 }
496
can_reorder(const SkRect & a,const SkRect & b)497 static inline bool can_reorder(const SkRect& a, const SkRect& b) {
498 return a.fRight <= b.fLeft || a.fBottom <= b.fTop ||
499 b.fRight <= a.fLeft || b.fBottom <= a.fTop;
500 }
501
join(SkRect * out,const SkRect & a,const SkRect & b)502 static void join(SkRect* out, const SkRect& a, const SkRect& b) {
503 SkASSERT(a.fLeft <= a.fRight && a.fTop <= a.fBottom);
504 SkASSERT(b.fLeft <= b.fRight && b.fTop <= b.fBottom);
505 out->fLeft = SkTMin(a.fLeft, b.fLeft);
506 out->fTop = SkTMin(a.fTop, b.fTop);
507 out->fRight = SkTMax(a.fRight, b.fRight);
508 out->fBottom = SkTMax(a.fBottom, b.fBottom);
509 }
510
// Appends a batch, first trying to merge it into a recent prior batch. Returns the batch
// that ultimately holds the work: 'batch' itself if appended, or the earlier batch it
// was combined into.
GrBatch* GrDrawTarget::recordBatch(GrBatch* batch, const SkRect& clippedBounds) {
    // A closed drawTarget should never receive new/more batches
    SkASSERT(!this->isClosed());

    // Check if there is a Batch Draw we can batch with by linearly searching back until we either
    // 1) check every draw
    // 2) intersect with something
    // 3) find a 'blocker'
    GR_AUDIT_TRAIL_ADDBATCH(fAuditTrail, batch);
    GrBATCH_INFO("Re-Recording (%s, B%u)\n"
        "\tBounds LRTB (%f, %f, %f, %f)\n",
        batch->name(),
        batch->uniqueID(),
        batch->bounds().fLeft, batch->bounds().fRight,
        batch->bounds().fTop, batch->bounds().fBottom);
    GrBATCH_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
    GrBATCH_INFO("\tClipped Bounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
                 clippedBounds.fLeft, clippedBounds.fTop, clippedBounds.fRight,
                 clippedBounds.fBottom);
    GrBATCH_INFO("\tOutcome:\n");
    // Search at most fMaxBatchLookback batches back (bounded by how many exist).
    int maxCandidates = SkTMin(fMaxBatchLookback, fRecordedBatches.count());
    if (maxCandidates) {
        int i = 0;
        while (true) {
            GrBatch* candidate = fRecordedBatches.fromBack(i).fBatch.get();
            // We cannot continue to search backwards if the render target changes
            if (candidate->renderTargetUniqueID() != batch->renderTargetUniqueID()) {
                GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
                             candidate->name(), candidate->uniqueID());
                break;
            }
            if (candidate->combineIfPossible(batch, *this->caps())) {
                GrBATCH_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
                             candidate->uniqueID());
                GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(fAuditTrail, candidate, batch);
                // Grow the winning candidate's recorded bounds to cover the incoming draw.
                join(&fRecordedBatches.fromBack(i).fClippedBounds,
                     fRecordedBatches.fromBack(i).fClippedBounds, clippedBounds);
                return candidate;
            }
            // Stop going backwards if we would cause a painter's order violation.
            const SkRect& candidateBounds = fRecordedBatches.fromBack(i).fClippedBounds;
            if (!can_reorder(candidateBounds, clippedBounds)) {
                GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
                             candidate->uniqueID());
                break;
            }
            ++i;
            if (i == maxCandidates) {
                GrBATCH_INFO("\t\tReached max lookback or beginning of batch array %d\n", i);
                break;
            }
        }
    } else {
        GrBATCH_INFO("\t\tFirstBatch\n");
    }
    GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(fAuditTrail, batch);
    // Not combined: append as a new batch (taking a ref) and record its clipped bounds.
    fRecordedBatches.emplace_back(RecordedBatch{sk_ref_sp(batch), clippedBounds});
    // Any newly appended batch invalidates the cached "last batch is a full clear" state.
    fLastFullClearBatch = nullptr;
    return batch;
}
571
// Second batching pass: for each batch, look ahead (up to fMaxBatchLookahead slots) for a
// later compatible batch to merge into. On a merge, the earlier batch is moved into the
// later slot (keeping draw order) and its old slot is left null.
void GrDrawTarget::forwardCombine() {
    if (fMaxBatchLookahead <= 0) {
        return;
    }
    // NOTE(review): the loop bound is count() - 2, so the last two slots never start a
    // search. Presumably intentional — the final adjacent pair was already tried during
    // backward combining in recordBatch (see the SkASSERT below) — but confirm.
    for (int i = 0; i < fRecordedBatches.count() - 2; ++i) {
        GrBatch* batch = fRecordedBatches[i].fBatch.get();
        const SkRect& batchBounds = fRecordedBatches[i].fClippedBounds;
        int maxCandidateIdx = SkTMin(i + fMaxBatchLookahead, fRecordedBatches.count() - 1);
        int j = i + 1;
        while (true) {
            GrBatch* candidate = fRecordedBatches[j].fBatch.get();
            // We cannot continue to search if the render target changes
            if (candidate->renderTargetUniqueID() != batch->renderTargetUniqueID()) {
                GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
                             candidate->name(), candidate->uniqueID());
                break;
            }
            if (j == i +1) {
                // We assume batch would have combined with candidate when the candidate was added
                // via backwards combining in recordBatch.
                SkASSERT(!batch->combineIfPossible(candidate, *this->caps()));
            } else if (batch->combineIfPossible(candidate, *this->caps())) {
                GrBATCH_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
                             candidate->uniqueID());
                GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(fAuditTrail, batch, candidate);
                // Move the merged batch forward into slot j; slot i becomes null
                // ("<combined forward>" in dump()).
                fRecordedBatches[j].fBatch = std::move(fRecordedBatches[i].fBatch);
                join(&fRecordedBatches[j].fClippedBounds, fRecordedBatches[j].fClippedBounds,
                     batchBounds);
                break;
            }
            // Stop going traversing if we would cause a painter's order violation.
            const SkRect& candidateBounds = fRecordedBatches[j].fClippedBounds;
            if (!can_reorder(candidateBounds, batchBounds)) {
                GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
                             candidate->uniqueID());
                break;
            }
            ++j;
            if (j > maxCandidateIdx) {
                GrBATCH_INFO("\t\tReached max lookahead or end of batch array %d\n", i);
                break;
            }
        }
    }
}
617
618 ///////////////////////////////////////////////////////////////////////////////
619
clearStencilClip(const GrFixedClip & clip,bool insideStencilMask,GrRenderTarget * rt)620 void GrDrawTarget::clearStencilClip(const GrFixedClip& clip,
621 bool insideStencilMask,
622 GrRenderTarget* rt) {
623 GrBatch* batch = new GrClearStencilClipBatch(clip, insideStencilMask, rt);
624 this->recordBatch(batch, batch->bounds());
625 batch->unref();
626 }
627