1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GLInstancedRendering.h"
9
10 #include "GrResourceProvider.h"
11 #include "gl/GrGLGpu.h"
12 #include "instanced/InstanceProcessor.h"
13
14 #define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
15
16 namespace gr_instanced {
17
// GL-specific batch bookkeeping: records where this batch's commands live within the
// flush-wide draw-command/instance arrays built by GLInstancedRendering::onBeginFlush.
class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
public:
    DEFINE_BATCH_CLASS_ID

    GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}

    // One GL draw command for the initial geometry, plus one per geometry change.
    int numGLCommands() const { return 1 + fNumChangesInGeometry; }

private:
    // First-instance index to use when the driver lacks base-instance support
    // (0 when baseInstanceSupport() is true). Assigned during onBeginFlush.
    int fEmulatedBaseInstance;
    // Index of this batch's first command in the flush-wide draw command list.
    int fGLDrawCmdsIdx;

    friend class GLInstancedRendering;

    typedef Batch INHERITED;
};
33
CheckSupport(const GrGLCaps & glCaps)34 GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) {
35 // This method is only intended to be used for initializing fInstancedSupport in the caps.
36 SkASSERT(GrCaps::InstancedSupport::kNone == glCaps.instancedSupport());
37 if (!glCaps.vertexArrayObjectSupport() ||
38 (!glCaps.drawIndirectSupport() && !glCaps.drawInstancedSupport())) {
39 return GrCaps::InstancedSupport::kNone;
40 }
41 return InstanceProcessor::CheckSupport(*glCaps.glslCaps(), glCaps);
42 }
43
GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu)
    : INHERITED(gpu),
      fVertexArrayID(0),  // VAO is created lazily on the first flush (see onBeginFlush).
      fGLDrawCmdsInfo(0),
      // Invalid id guarantees flushInstanceAttribs sets up attribs on the first draw.
      fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
    // The caller must have verified instanced support before constructing us.
    SkASSERT(GrCaps::InstancedSupport::kNone != this->gpu()->caps()->instancedSupport());
}
51
GLInstancedRendering::~GLInstancedRendering() {
    if (fVertexArrayID) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        // Tell the gpu object so it can drop any cached binding of this VAO.
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
}
58
// Downcasts the base class's GrGpu to the GL implementation this file requires.
inline GrGLGpu* GLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}
62
// Factory override: produce our GL-specific batch subclass for the base class to track.
InstancedRendering::Batch* GLInstancedRendering::createBatch() {
    return new GLBatch(this);
}
66
// Prepares GPU buffers for the flush: tallies the tracked batches, lazily builds the
// VAO with the fixed (non-instanced) attribs, then fills a vertex buffer of per-instance
// data and (when supported) a draw-indirect command buffer. Also assigns each batch's
// fEmulatedBaseInstance/fGLDrawCmdsIdx so onDraw can locate its commands later.
void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
    // Count what there is to draw.
    BatchList::Iter iter;
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    int numGLInstances = 0;
    int numGLDrawCmds = 0;
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        numGLInstances += batch->fNumDraws;
        numGLDrawCmds += batch->numGLCommands();
    }
    if (!numGLDrawCmds) {
        return; // Nothing to draw; onDraw guards against this state.
    }
    SkASSERT(numGLInstances);

    // Lazily create a vertex array object.
    if (!fVertexArrayID) {
        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
        if (!fVertexArrayID) {
            return;
        }
        this->glGpu()->bindVertexArray(fVertexArrayID);

        // Attach our index buffer to the vertex array.
        SkASSERT(!this->indexBuffer()->isCPUBacked());
        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));

        // Set up the non-instanced attribs. These never change, so they are recorded in the
        // VAO once here; the per-instance attribs are (re)bound in flushInstanceAttribs.
        this->glGpu()->bindBuffer(kVertex_GrBufferType, this->vertexBuffer());
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
        GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
                                     (void*) offsetof(ShapeVertex, fAttrs)));

        SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
    }

    // Create and map instance and draw-indirect buffers.
    SkASSERT(!fInstanceBuffer);
    fInstanceBuffer.reset(
        rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
                         kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag |
                         GrResourceProvider::kRequireGpuMemory_Flag));
    if (!fInstanceBuffer) {
        return;
    }

    SkASSERT(!fDrawIndirectBuffer);
    if (this->glGpu()->glCaps().drawIndirectSupport()) {
        fDrawIndirectBuffer.reset(
            rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
                             kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
                             GrResourceProvider::kNoPendingIO_Flag |
                             GrResourceProvider::kRequireGpuMemory_Flag));
        if (!fDrawIndirectBuffer) {
            return;
        }
    }

    Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
    SkASSERT(glMappedInstances);
    int glInstancesIdx = 0;

    GrGLDrawElementsIndirectCommand* glMappedCmds = nullptr;
    int glDrawCmdsIdx = 0;
    if (fDrawIndirectBuffer) {
        glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
        SkASSERT(glMappedCmds);
    }

    bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();
    SkASSERT(!baseInstanceSupport || fDrawIndirectBuffer);

    // CPU-side command info is only needed when logging, or when we must emulate base
    // instance by rebinding the instance attribs between draws (see onDraw).
    SkASSERT(!fGLDrawCmdsInfo);
    if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
        fGLDrawCmdsInfo.reset(numGLDrawCmds);
    }

    // Generate the instance and draw-indirect buffer contents based on the tracked batches.
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
        batch->fGLDrawCmdsIdx = glDrawCmdsIdx;

        const Batch::Draw* draw = batch->fHeadDraw;
        SkASSERT(draw);
        do {
            int instanceCount = 0;
            IndexRange geometry = draw->fGeometry;
            SkASSERT(!geometry.isEmpty());

            // Coalesce consecutive draws that share the same geometry into one
            // instanced command.
            do {
                glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
                draw = draw->fNext;
            } while (draw && draw->fGeometry == geometry);

            if (fDrawIndirectBuffer) {
                GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
                glCmd.fCount = geometry.fCount;
                glCmd.fInstanceCount = instanceCount;
                glCmd.fFirstIndex = geometry.fStart;
                glCmd.fBaseVertex = 0;
                // Without base-instance support this field is ignored; the offset is
                // emulated via fEmulatedBaseInstance instead.
                glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
            }

            if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
                GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glDrawCmdsIdx];
                cmdInfo.fGeometry = geometry;
                cmdInfo.fInstanceCount = instanceCount;
            }

            glInstancesIdx += instanceCount;
            ++glDrawCmdsIdx;
        } while (draw);
    }

    // Every counted command/instance must have been written exactly once.
    SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
    if (fDrawIndirectBuffer) {
        fDrawIndirectBuffer->unmap();
    }

    SkASSERT(glInstancesIdx == numGLInstances);
    fInstanceBuffer->unmap();
}
201
// Issues the GL draw calls for one batch, choosing among three paths:
//   1. a single glMultiDrawElementsIndirect (needs multi-draw + base-instance support),
//   2. one glDrawElementsIndirect per command,
//   3. one glDrawElementsInstanced per command (no draw-indirect support),
// and emulating base instance via flushInstanceAttribs when the driver lacks it.
void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
                                  const Batch* baseBatch) {
    if (!fDrawIndirectBuffer && !fGLDrawCmdsInfo) {
        return; // beginFlush was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc, false)) {
        return;
    }

    if (fDrawIndirectBuffer) {
        this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());
    }

    const GrGLCaps& glCaps = this->glGpu()->glCaps();
    const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
    int numCommands = batch->numGLCommands();

#if GR_GL_LOG_INSTANCED_BATCHES
    SkASSERT(fGLDrawCmdsInfo);
    SkDebugf("Instanced batch: [");
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = batch->fGLDrawCmdsIdx + i;
        SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
                 InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
    }
    SkDebugf("]\n");
#else
    // onBeginFlush only allocates the CPU-side command info when base instance must be
    // emulated (or when logging, which is off in this branch).
    SkASSERT(SkToBool(fGLDrawCmdsInfo) == !glCaps.baseInstanceSupport());
#endif

    if (numCommands > 1 && glCaps.multiDrawIndirectSupport() && glCaps.baseInstanceSupport()) {
        SkASSERT(fDrawIndirectBuffer);
        int glCmdsIdx = batch->fGLDrawCmdsIdx;
        this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
        // The pointer argument is really a byte offset into the bound DRAW_INDIRECT buffer.
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
                                          numCommands, 0));
        return;
    }

    int emulatedBaseInstance = batch->fEmulatedBaseInstance;
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = batch->fGLDrawCmdsIdx + i;
        // Rebinds the instance attribs at the new base instance when emulation is needed
        // (no-op otherwise, since emulatedBaseInstance stays 0).
        this->flushInstanceAttribs(emulatedBaseInstance);
        if (fDrawIndirectBuffer) {
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
        } else {
            const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
            GL_CALL(DrawElementsInstanced(GR_GL_TRIANGLES, cmdInfo.fGeometry.fCount,
                                          GR_GL_UNSIGNED_BYTE,
                                          (GrGLubyte*) nullptr + cmdInfo.fGeometry.fStart,
                                          cmdInfo.fInstanceCount));
        }
        if (!glCaps.baseInstanceSupport()) {
            const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
            emulatedBaseInstance += cmdInfo.fInstanceCount;
        }
    }
}
262
// Binds the per-instance vertex attribs (info, 2x3 shape matrix, color, local rect) at
// the given base instance. The setup is cached on (instance buffer id, base instance)
// and skipped when unchanged; emulating base instance works by re-pointing the attribs
// at a new offset within fInstanceBuffer.
void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    SkASSERT(fInstanceBuffer);
    if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->uniqueID() ||
        fInstanceAttribsBaseInstance != baseInstance) {
        // Null-pointer arithmetic: yields the byte offset of instance [baseInstance]
        // within the bound vertex buffer, and of each field via &offsetInBuffer->fField.
        Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;

        this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());

        // Info attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kInstanceInfo));
        GL_CALL(VertexAttribIPointer((int)Attrib::kInstanceInfo, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), &offsetInBuffer->fInfo));
        GL_CALL(VertexAttribDivisor((int)Attrib::kInstanceInfo, 1));

        // Shape matrix attrib. The 2x3 matrix is split across two vec3 attribs (row X, row Y).
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixX));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixY));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixX, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixY, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixX, 1));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixY, 1));

        // Color attrib, normalized from unsigned bytes.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kColor));
        GL_CALL(VertexAttribPointer((int)Attrib::kColor, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), &offsetInBuffer->fColor));
        GL_CALL(VertexAttribDivisor((int)Attrib::kColor, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kLocalRect));
        GL_CALL(VertexAttribPointer((int)Attrib::kLocalRect, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fLocalRect));
        GL_CALL(VertexAttribDivisor((int)Attrib::kLocalRect, 1));

        // Remember what is bound so unchanged draws can skip this work.
        fInstanceAttribsBufferUniqueId = fInstanceBuffer->uniqueID();
        fInstanceAttribsBaseInstance = baseInstance;
    }
}
306
// Releases the per-flush buffers and command info; they are rebuilt by the next
// onBeginFlush. The VAO and cached attrib state intentionally persist across flushes.
void GLInstancedRendering::onEndFlush() {
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fGLDrawCmdsInfo.reset(0);
}
312
// Drops all GPU resources. For kDestroy the VAO is deleted through GL; for other reset
// types (e.g. a lost context) the GL object is merely abandoned and only our handle is
// cleared. Invalidating the attribs-buffer id forces a full attrib rebind next flush.
void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
    if (fVertexArrayID && ResetType::kDestroy == resetType) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
    fVertexArrayID = 0;
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fInstanceAttribsBufferUniqueId = SK_InvalidUniqueID;
}
323
324 }
325