1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "InstancedRendering.h"
9
10 #include "GrBatchFlushState.h"
11 #include "GrCaps.h"
12 #include "GrPipeline.h"
13 #include "GrResourceProvider.h"
14 #include "instanced/InstanceProcessor.h"
15
16 namespace gr_instanced {
17
InstancedRendering(GrGpu * gpu)18 InstancedRendering::InstancedRendering(GrGpu* gpu)
19 : fGpu(SkRef(gpu)),
20 fState(State::kRecordingDraws),
21 fDrawPool(1024 * sizeof(Batch::Draw), 1024 * sizeof(Batch::Draw)) {
22 }
23
recordRect(const SkRect & rect,const SkMatrix & viewMatrix,GrColor color,bool antialias,const GrInstancedPipelineInfo & info,bool * useHWAA)24 GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
25 GrColor color, bool antialias,
26 const GrInstancedPipelineInfo& info, bool* useHWAA) {
27 return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias, info,
28 useHWAA);
29 }
30
recordRect(const SkRect & rect,const SkMatrix & viewMatrix,GrColor color,const SkRect & localRect,bool antialias,const GrInstancedPipelineInfo & info,bool * useHWAA)31 GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
32 GrColor color, const SkRect& localRect, bool antialias,
33 const GrInstancedPipelineInfo& info, bool* useHWAA) {
34 return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, localRect, antialias, info,
35 useHWAA);
36 }
37
recordRect(const SkRect & rect,const SkMatrix & viewMatrix,GrColor color,const SkMatrix & localMatrix,bool antialias,const GrInstancedPipelineInfo & info,bool * useHWAA)38 GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
39 GrColor color, const SkMatrix& localMatrix,
40 bool antialias, const GrInstancedPipelineInfo& info,
41 bool* useHWAA) {
42 if (localMatrix.hasPerspective()) {
43 return nullptr; // Perspective is not yet supported in the local matrix.
44 }
45 if (Batch* batch = this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias,
46 info, useHWAA)) {
47 batch->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag;
48 batch->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
49 localMatrix.getTranslateX());
50 batch->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
51 localMatrix.getTranslateY());
52 batch->fInfo.fHasLocalMatrix = true;
53 return batch;
54 }
55 return nullptr;
56 }
57
recordOval(const SkRect & oval,const SkMatrix & viewMatrix,GrColor color,bool antialias,const GrInstancedPipelineInfo & info,bool * useHWAA)58 GrDrawBatch* InstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix,
59 GrColor color, bool antialias,
60 const GrInstancedPipelineInfo& info, bool* useHWAA) {
61 return this->recordShape(ShapeType::kOval, oval, viewMatrix, color, oval, antialias, info,
62 useHWAA);
63 }
64
recordRRect(const SkRRect & rrect,const SkMatrix & viewMatrix,GrColor color,bool antialias,const GrInstancedPipelineInfo & info,bool * useHWAA)65 GrDrawBatch* InstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix,
66 GrColor color, bool antialias,
67 const GrInstancedPipelineInfo& info, bool* useHWAA) {
68 if (Batch* batch = this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix, color,
69 rrect.rect(), antialias, info, useHWAA)) {
70 batch->appendRRectParams(rrect);
71 return batch;
72 }
73 return nullptr;
74 }
75
recordDRRect(const SkRRect & outer,const SkRRect & inner,const SkMatrix & viewMatrix,GrColor color,bool antialias,const GrInstancedPipelineInfo & info,bool * useHWAA)76 GrDrawBatch* InstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner,
77 const SkMatrix& viewMatrix, GrColor color,
78 bool antialias, const GrInstancedPipelineInfo& info,
79 bool* useHWAA) {
80 if (inner.getType() > SkRRect::kSimple_Type) {
81 return nullptr; // Complex inner round rects are not yet supported.
82 }
83 if (SkRRect::kEmpty_Type == inner.getType()) {
84 return this->recordRRect(outer, viewMatrix, color, antialias, info, useHWAA);
85 }
86 if (Batch* batch = this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix, color,
87 outer.rect(), antialias, info, useHWAA)) {
88 batch->appendRRectParams(outer);
89 ShapeType innerShapeType = GetRRectShapeType(inner);
90 batch->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType);
91 batch->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit);
92 batch->appendParamsTexel(inner.rect().asScalars(), 4);
93 batch->appendRRectParams(inner);
94 return batch;
95 }
96 return nullptr;
97 }
98
recordShape(ShapeType type,const SkRect & bounds,const SkMatrix & viewMatrix,GrColor color,const SkRect & localRect,bool antialias,const GrInstancedPipelineInfo & info,bool * useHWAA)99 InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const SkRect& bounds,
100 const SkMatrix& viewMatrix,
101 GrColor color, const SkRect& localRect,
102 bool antialias,
103 const GrInstancedPipelineInfo& info,
104 bool* useHWAA) {
105 SkASSERT(State::kRecordingDraws == fState);
106
107 if (info.fIsRenderingToFloat && fGpu->caps()->avoidInstancedDrawsToFPTargets()) {
108 return nullptr;
109 }
110
111 AntialiasMode antialiasMode;
112 if (!this->selectAntialiasMode(viewMatrix, antialias, info, useHWAA, &antialiasMode)) {
113 return nullptr;
114 }
115
116 Batch* batch = this->createBatch();
117 batch->fInfo.fAntialiasMode = antialiasMode;
118 batch->fInfo.fShapeTypes = GetShapeFlag(type);
119 batch->fInfo.fCannotDiscard = !info.fCanDiscard;
120
121 Instance& instance = batch->getSingleInstance();
122 instance.fInfo = (int)type << kShapeType_InfoBit;
123
124 Batch::HasAABloat aaBloat = (antialiasMode == AntialiasMode::kCoverage)
125 ? Batch::HasAABloat::kYes
126 : Batch::HasAABloat::kNo;
127 Batch::IsZeroArea zeroArea = (bounds.isEmpty()) ? Batch::IsZeroArea::kYes
128 : Batch::IsZeroArea::kNo;
129
130 // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
131 // will map this rectangle to the same device coordinates as "viewMatrix * bounds".
132 float sx = 0.5f * bounds.width();
133 float sy = 0.5f * bounds.height();
134 float tx = sx + bounds.fLeft;
135 float ty = sy + bounds.fTop;
136 if (!viewMatrix.hasPerspective()) {
137 float* m = instance.fShapeMatrix2x3;
138 m[0] = viewMatrix.getScaleX() * sx;
139 m[1] = viewMatrix.getSkewX() * sy;
140 m[2] = viewMatrix.getTranslateX() +
141 viewMatrix.getScaleX() * tx + viewMatrix.getSkewX() * ty;
142
143 m[3] = viewMatrix.getSkewY() * sx;
144 m[4] = viewMatrix.getScaleY() * sy;
145 m[5] = viewMatrix.getTranslateY() +
146 viewMatrix.getSkewY() * tx + viewMatrix.getScaleY() * ty;
147
148 // Since 'm' is a 2x3 matrix that maps the rect [-1, +1] into the shape's device-space quad,
149 // it's quite simple to find the bounding rectangle:
150 float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
151 float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
152 SkRect batchBounds;
153 batchBounds.fLeft = m[2] - devBoundsHalfWidth;
154 batchBounds.fRight = m[2] + devBoundsHalfWidth;
155 batchBounds.fTop = m[5] - devBoundsHalfHeight;
156 batchBounds.fBottom = m[5] + devBoundsHalfHeight;
157 batch->setBounds(batchBounds, aaBloat, zeroArea);
158
159 // TODO: Is this worth the CPU overhead?
160 batch->fInfo.fNonSquare =
161 fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out.
162 fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew?
163 fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > 1e-2f; // Diff. lengths?
164 } else {
165 SkMatrix shapeMatrix(viewMatrix);
166 shapeMatrix.preTranslate(tx, ty);
167 shapeMatrix.preScale(sx, sy);
168 instance.fInfo |= kPerspective_InfoFlag;
169
170 float* m = instance.fShapeMatrix2x3;
171 m[0] = SkScalarToFloat(shapeMatrix.getScaleX());
172 m[1] = SkScalarToFloat(shapeMatrix.getSkewX());
173 m[2] = SkScalarToFloat(shapeMatrix.getTranslateX());
174 m[3] = SkScalarToFloat(shapeMatrix.getSkewY());
175 m[4] = SkScalarToFloat(shapeMatrix.getScaleY());
176 m[5] = SkScalarToFloat(shapeMatrix.getTranslateY());
177
178 // Send the perspective column as a param.
179 batch->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1],
180 shapeMatrix[SkMatrix::kMPersp2]);
181 batch->fInfo.fHasPerspective = true;
182
183 batch->setBounds(bounds, aaBloat, zeroArea);
184 batch->fInfo.fNonSquare = true;
185 }
186
187 instance.fColor = color;
188
189 const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float.
190 memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));
191
192 batch->fPixelLoad = batch->bounds().height() * batch->bounds().width();
193 return batch;
194 }
195
/**
 * Chooses how a draw will be antialiased (or not), writing the chosen mode to 'antialiasMode' and
 * whether hardware MSAA should be enabled to 'useHWAA'. Returns false if no supported mode can
 * satisfy the request, in which case the caller must not use instanced rendering for the draw.
 */
inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
                                                    const GrInstancedPipelineInfo& info,
                                                    bool* useHWAA, AntialiasMode* antialiasMode) {
    SkASSERT(!info.fColorDisabled || info.fDrawingShapeToStencil);
    SkASSERT(!info.fIsMixedSampled || info.fIsMultisampled);
    SkASSERT(GrCaps::InstancedSupport::kNone != fGpu->caps()->instancedSupport());

    // Non-MSAA modes are only an option when the target isn't multisampled, or when MSAA can be
    // disabled on a per-draw basis.
    if (!info.fIsMultisampled || fGpu->caps()->multisampleDisableSupport()) {
        if (!antialias) {
            if (info.fDrawingShapeToStencil && !info.fCanDiscard) {
                // We can't draw to the stencil buffer without discard (or sample mask if MSAA).
                return false;
            }
            *antialiasMode = AntialiasMode::kNone;
            *useHWAA = false;
            return true;
        }

        // Analytic coverage AA additionally requires the view matrix to preserve right angles.
        if (info.canUseCoverageAA() && viewMatrix.preservesRightAngles()) {
            *antialiasMode = AntialiasMode::kCoverage;
            *useHWAA = false;
            return true;
        }
    }

    // Fall through to hardware multisampling when the target and caps support it.
    if (info.fIsMultisampled &&
        fGpu->caps()->instancedSupport() >= GrCaps::InstancedSupport::kMultisampled) {
        if (!info.fIsMixedSampled || info.fColorDisabled) {
            *antialiasMode = AntialiasMode::kMSAA;
            *useHWAA = true;
            return true;
        }
        // Mixed samples requires an even higher support level.
        if (fGpu->caps()->instancedSupport() >= GrCaps::InstancedSupport::kMixedSampled) {
            *antialiasMode = AntialiasMode::kMixedSamples;
            *useHWAA = true;
            return true;
        }
    }

    return false;
}
237
Batch(uint32_t classID,InstancedRendering * ir)238 InstancedRendering::Batch::Batch(uint32_t classID, InstancedRendering* ir)
239 : INHERITED(classID),
240 fInstancedRendering(ir),
241 fIsTracked(false),
242 fNumDraws(1),
243 fNumChangesInGeometry(0) {
244 fHeadDraw = fTailDraw = (Draw*)fInstancedRendering->fDrawPool.allocate(sizeof(Draw));
245 #ifdef SK_DEBUG
246 fHeadDraw->fGeometry = {-1, 0};
247 #endif
248 fHeadDraw->fNext = nullptr;
249 }
250
~Batch()251 InstancedRendering::Batch::~Batch() {
252 if (fIsTracked) {
253 fInstancedRendering->fTrackedBatches.remove(this);
254 }
255
256 Draw* draw = fHeadDraw;
257 while (draw) {
258 Draw* next = draw->fNext;
259 fInstancedRendering->fDrawPool.release(draw);
260 draw = next;
261 }
262 }
263
appendRRectParams(const SkRRect & rrect)264 void InstancedRendering::Batch::appendRRectParams(const SkRRect& rrect) {
265 SkASSERT(!fIsTracked);
266 switch (rrect.getType()) {
267 case SkRRect::kSimple_Type: {
268 const SkVector& radii = rrect.getSimpleRadii();
269 this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height());
270 return;
271 }
272 case SkRRect::kNinePatch_Type: {
273 float twoOverW = 2 / rrect.width();
274 float twoOverH = 2 / rrect.height();
275 const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
276 const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
277 this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW,
278 radiiTL.y() * twoOverH, radiiBR.y() * twoOverH);
279 return;
280 }
281 case SkRRect::kComplex_Type: {
282 /**
283 * The x and y radii of each arc are stored in separate vectors,
284 * in the following order:
285 *
286 * __x1 _ _ _ x3__
287 * y1 | | y2
288 *
289 * | |
290 *
291 * y3 |__ _ _ _ __| y4
292 * x2 x4
293 *
294 */
295 float twoOverW = 2 / rrect.width();
296 float twoOverH = 2 / rrect.height();
297 const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
298 const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner);
299 const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
300 const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner);
301 this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW,
302 radiiTR.x() * twoOverW, radiiBR.x() * twoOverW);
303 this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH,
304 radiiBL.y() * twoOverH, radiiBR.y() * twoOverH);
305 return;
306 }
307 default: return;
308 }
309 }
310
appendParamsTexel(const SkScalar * vals,int count)311 void InstancedRendering::Batch::appendParamsTexel(const SkScalar* vals, int count) {
312 SkASSERT(!fIsTracked);
313 SkASSERT(count <= 4 && count >= 0);
314 const float* valsAsFloats = vals; // Ensure SkScalar == float.
315 memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float));
316 fInfo.fHasParams = true;
317 }
318
appendParamsTexel(SkScalar x,SkScalar y,SkScalar z,SkScalar w)319 void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
320 SkASSERT(!fIsTracked);
321 ParamsTexel& texel = fParams.push_back();
322 texel.fX = SkScalarToFloat(x);
323 texel.fY = SkScalarToFloat(y);
324 texel.fZ = SkScalarToFloat(z);
325 texel.fW = SkScalarToFloat(w);
326 fInfo.fHasParams = true;
327 }
328
appendParamsTexel(SkScalar x,SkScalar y,SkScalar z)329 void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
330 SkASSERT(!fIsTracked);
331 ParamsTexel& texel = fParams.push_back();
332 texel.fX = SkScalarToFloat(x);
333 texel.fY = SkScalarToFloat(y);
334 texel.fZ = SkScalarToFloat(z);
335 fInfo.fHasParams = true;
336 }
337
computePipelineOptimizations(GrInitInvariantOutput * color,GrInitInvariantOutput * coverage,GrBatchToXPOverrides * overrides) const338 void InstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOutput* color,
339 GrInitInvariantOutput* coverage,
340 GrBatchToXPOverrides* overrides) const {
341 color->setKnownFourComponents(this->getSingleInstance().fColor);
342
343 if (AntialiasMode::kCoverage == fInfo.fAntialiasMode ||
344 (AntialiasMode::kNone == fInfo.fAntialiasMode &&
345 !fInfo.isSimpleRects() && fInfo.fCannotDiscard)) {
346 coverage->setUnknownSingleComponent();
347 } else {
348 coverage->setKnownSingleComponent(255);
349 }
350 }
351
// Finalizes a freshly recorded batch: resolves its index-buffer geometry, migrates its params
// into the renderer-wide array, applies XP color overrides, and adds it to the tracked list so
// it participates in the flush. Must run while the batch still holds a single draw/instance.
void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
    Draw& draw = this->getSingleDraw(); // This will assert if we have > 1 command.
    SkASSERT(draw.fGeometry.isEmpty());
    SkASSERT(SkIsPow2(fInfo.fShapeTypes)); // Exactly one shape type before batches combine.
    SkASSERT(!fIsTracked);

    // Pick the index range matching this batch's single shape type and AA mode.
    if (kRect_ShapeFlag == fInfo.fShapeTypes) {
        draw.fGeometry = InstanceProcessor::GetIndexRangeForRect(fInfo.fAntialiasMode);
    } else if (kOval_ShapeFlag == fInfo.fShapeTypes) {
        draw.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.fAntialiasMode,
                                                                 this->bounds());
    } else {
        draw.fGeometry = InstanceProcessor::GetIndexRangeForRRect(fInfo.fAntialiasMode);
    }

    if (!fParams.empty()) {
        // Move this batch's params into the renderer-wide array, recording their start index in
        // the instance's info bits. The index must fit within kParamsIdx_InfoMask.
        SkASSERT(fInstancedRendering->fParams.count() < (int)kParamsIdx_InfoMask); // TODO: cleaner.
        this->getSingleInstance().fInfo |= fInstancedRendering->fParams.count();
        fInstancedRendering->fParams.push_back_n(fParams.count(), fParams.begin());
    }

    // Apply a color override from the xfer processor, if one was set.
    GrColor overrideColor;
    if (overrides.getOverrideColorIfSet(&overrideColor)) {
        SkASSERT(State::kRecordingDraws == fInstancedRendering->fState);
        this->getSingleInstance().fColor = overrideColor;
    }
    fInfo.fUsesLocalCoords = overrides.readsLocalCoords();
    fInfo.fCannotTweakAlphaForCoverage = !overrides.canTweakAlphaForCoverage();

    // From here on the batch is tracked and will be drawn (and deleted) during the flush.
    fInstancedRendering->fTrackedBatches.addToTail(this);
    fIsTracked = true;
}
384
// Attempts to merge 'other' into this batch. On success, this batch adopts the other's draw
// list and combined info/bounds/pixel load, and returns true; on failure nothing is modified.
bool InstancedRendering::Batch::onCombineIfPossible(GrBatch* other, const GrCaps& caps) {
    Batch* that = static_cast<Batch*>(other);
    SkASSERT(fInstancedRendering == that->fInstancedRendering);
    SkASSERT(fTailDraw);
    SkASSERT(that->fTailDraw);

    // Both the instanced-draw infos and the pipelines must be compatible.
    if (!BatchInfo::CanCombine(fInfo, that->fInfo) ||
        !GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                *that->pipeline(), that->bounds(), caps)) {
        return false;
    }

    BatchInfo combinedInfo = fInfo | that->fInfo;
    if (!combinedInfo.isSimpleRects()) {
        // This threshold was chosen with the "shapes_mixed" bench on a MacBook with Intel graphics.
        // There seems to be a wide range where it doesn't matter if we combine or not. What matters
        // is that the itty bitty rects combine with other shapes and the giant ones don't.
        constexpr SkScalar kMaxPixelsToGeneralizeRects = 256 * 256;
        if (fInfo.isSimpleRects() && fPixelLoad > kMaxPixelsToGeneralizeRects) {
            return false;
        }
        if (that->fInfo.isSimpleRects() && that->fPixelLoad > kMaxPixelsToGeneralizeRects) {
            return false;
        }
    }

    this->joinBounds(*that);
    fInfo = combinedInfo;
    fPixelLoad += that->fPixelLoad;

    // Adopt the other batch's draws.
    fNumDraws += that->fNumDraws;
    fNumChangesInGeometry += that->fNumChangesInGeometry;
    if (fTailDraw->fGeometry != that->fHeadDraw->fGeometry) {
        ++fNumChangesInGeometry; // The splice point itself introduces a geometry change.
    }
    // Splice the other batch's draw list onto our tail, taking ownership of its nodes.
    fTailDraw->fNext = that->fHeadDraw;
    fTailDraw = that->fTailDraw;

    that->fHeadDraw = that->fTailDraw = nullptr; // 'that' no longer owns any draw nodes.

    return true;
}
428
// Transitions the renderer into flushing state and sets up the GPU buffers needed to draw the
// tracked batches. Any buffer-creation failure returns early, leaving the buffers unset; the
// subclass's onBeginFlush only runs when every required buffer exists.
void InstancedRendering::beginFlush(GrResourceProvider* rp) {
    SkASSERT(State::kRecordingDraws == fState);
    fState = State::kFlushing;

    if (fTrackedBatches.isEmpty()) {
        return; // Nothing was recorded for this flush.
    }

    // The shape vertex/index buffers are created lazily and retained across flushes
    // (see the note in endFlush).
    if (!fVertexBuffer) {
        fVertexBuffer.reset(InstanceProcessor::FindOrCreateVertexBuffer(fGpu));
        if (!fVertexBuffer) {
            return;
        }
    }

    if (!fIndexBuffer) {
        fIndexBuffer.reset(InstanceProcessor::FindOrCreateIndex8Buffer(fGpu));
        if (!fIndexBuffer) {
            return;
        }
    }

    // Upload this flush's accumulated params as a texel buffer, if any were recorded.
    if (!fParams.empty()) {
        fParamsBuffer.reset(rp->createBuffer(fParams.count() * sizeof(ParamsTexel),
                                             kTexel_GrBufferType, kDynamic_GrAccessPattern,
                                             GrResourceProvider::kNoPendingIO_Flag |
                                             GrResourceProvider::kRequireGpuMemory_Flag,
                                             fParams.begin()));
        if (!fParamsBuffer) {
            return;
        }
    }

    this->onBeginFlush(rp);
}
464
// Issues this batch's draw during the flush: handles context state, emits any xfer barrier the
// pipeline requires, then hands off to the backend-specific InstancedRendering::onDraw.
void InstancedRendering::Batch::onDraw(GrBatchFlushState* state) {
    SkASSERT(State::kFlushing == fInstancedRendering->fState);
    SkASSERT(state->gpu() == fInstancedRendering->gpu());

    state->gpu()->handleDirtyContext();
    // Emit a transfer barrier first if the pipeline's xfer processor needs one.
    if (GrXferBarrierType barrierType = this->pipeline()->xferBarrierType(*state->gpu()->caps())) {
        state->gpu()->xferBarrier(this->pipeline()->getRenderTarget(), barrierType);
    }

    InstanceProcessor instProc(fInfo, fInstancedRendering->fParamsBuffer);
    fInstancedRendering->onDraw(*this->pipeline(), instProc, this);
}
477
// Tears down per-flush state and returns the renderer to recording mode.
void InstancedRendering::endFlush() {
    // The caller is expected to delete all tracked batches (i.e. batches whose initBatchTracker
    // method has been called) before ending the flush.
    SkASSERT(fTrackedBatches.isEmpty());
    fParams.reset();
    fParamsBuffer.reset(); // Params are per-flush; a new buffer is built next beginFlush.
    this->onEndFlush();
    fState = State::kRecordingDraws;
    // Hold on to the shape coords and index buffers.
}
488
// Drops all GPU buffer resources held by this renderer, then lets the subclass release its own.
void InstancedRendering::resetGpuResources(ResetType resetType) {
    fVertexBuffer.reset();
    fIndexBuffer.reset();
    fParamsBuffer.reset();
    this->onResetGpuResources(resetType);
}
495
496 }
497