1 /*
2  * Copyright 2010 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 
9 #include "GrGpu.h"
10 
11 #include "GrBuffer.h"
12 #include "GrCaps.h"
13 #include "GrContext.h"
14 #include "GrGpuResourcePriv.h"
15 #include "GrMesh.h"
16 #include "GrPathRendering.h"
17 #include "GrPipeline.h"
18 #include "GrResourceCache.h"
19 #include "GrResourceProvider.h"
20 #include "GrRenderTargetPriv.h"
21 #include "GrStencilAttachment.h"
22 #include "GrSurfacePriv.h"
23 #include "GrTexturePriv.h"
24 #include "SkMathPriv.h"
25 
26 GrMesh& GrMesh::operator =(const GrMesh& di) {
27     fPrimitiveType  = di.fPrimitiveType;
28     fStartVertex    = di.fStartVertex;
29     fStartIndex     = di.fStartIndex;
30     fVertexCount    = di.fVertexCount;
31     fIndexCount     = di.fIndexCount;
32 
33     fInstanceCount          = di.fInstanceCount;
34     fVerticesPerInstance    = di.fVerticesPerInstance;
35     fIndicesPerInstance     = di.fIndicesPerInstance;
36     fMaxInstancesPerDraw    = di.fMaxInstancesPerDraw;
37 
38     fVertexBuffer.reset(di.vertexBuffer());
39     fIndexBuffer.reset(di.indexBuffer());
40 
41     return *this;
42 }
43 
44 ////////////////////////////////////////////////////////////////////////////////
45 
// Starts with all backend state marked dirty (kAll_GrBackendState) so the first use
// performs a full state reset.
GrGpu::GrGpu(GrContext* context)
    : fResetTimestamp(kExpiredTimestamp+1)
    , fResetBits(kAll_GrBackendState)
    , fContext(context) {
    fMultisampleSpecs.emplace_back(0, 0, nullptr); // Index 0 is an invalid unique id.
}
52 
GrGpu::~GrGpu() {}  // Out-of-line to anchor the vtable; no owned resources to release here.
54 
// Default is a no-op; backends override to drop references to the 3D API context.
void GrGpu::disconnect(DisconnectType) {}
56 
57 ////////////////////////////////////////////////////////////////////////////////
58 
59 bool GrGpu::makeCopyForTextureParams(int width, int height, const GrTextureParams& textureParams,
60                                      GrTextureProducer::CopyParams* copyParams) const {
61     const GrCaps& caps = *this->caps();
62     if (textureParams.isTiled() && !caps.npotTextureTileSupport() &&
63         (!SkIsPow2(width) || !SkIsPow2(height))) {
64         copyParams->fWidth = GrNextPow2(width);
65         copyParams->fHeight = GrNextPow2(height);
66         switch (textureParams.filterMode()) {
67             case GrTextureParams::kNone_FilterMode:
68                 copyParams->fFilter = GrTextureParams::kNone_FilterMode;
69                 break;
70             case GrTextureParams::kBilerp_FilterMode:
71             case GrTextureParams::kMipMap_FilterMode:
72                 // We are only ever scaling up so no reason to ever indicate kMipMap.
73                 copyParams->fFilter = GrTextureParams::kBilerp_FilterMode;
74                 break;
75         }
76         return true;
77     }
78     return false;
79 }
80 
81 static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
82     // By default, GrRenderTargets are GL's normal orientation so that they
83     // can be drawn to by the outside world without the client having
84     // to render upside down.
85     if (kDefault_GrSurfaceOrigin == origin) {
86         return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
87     } else {
88         return origin;
89     }
90 }
91 
92 /**
93  * Prior to creating a texture, make sure the type of texture being created is
94  * supported by calling check_texture_creation_params.
95  *
96  * @param caps The capabilities of the GL device.
97  * @param desc The descriptor of the texture to create.
98  * @param isRT Indicates if the texture can be a render target.
99  */
100 static bool check_texture_creation_params(const GrCaps& caps, const GrSurfaceDesc& desc,
101                                           bool* isRT, const SkTArray<GrMipLevel>& texels) {
102     if (!caps.isConfigTexturable(desc.fConfig)) {
103         return false;
104     }
105 
106     *isRT = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
107     if (*isRT && !caps.isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
108         return false;
109     }
110 
111     // We currently do not support multisampled textures
112     if (!*isRT && desc.fSampleCnt > 0) {
113         return false;
114     }
115 
116     if (*isRT) {
117         int maxRTSize = caps.maxRenderTargetSize();
118         if (desc.fWidth > maxRTSize || desc.fHeight > maxRTSize) {
119             return false;
120         }
121     } else {
122         int maxSize = caps.maxTextureSize();
123         if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
124             return false;
125         }
126     }
127 
128     for (int i = 0; i < texels.count(); ++i) {
129         if (!texels[i].fPixels) {
130             return false;
131         }
132     }
133     return true;
134 }
135 
// Creates a texture (possibly compressed, possibly a render target) described by
// 'origDesc', optionally initialized from 'texels'. Returns nullptr on failure.
GrTexture* GrGpu::createTexture(const GrSurfaceDesc& origDesc, SkBudgeted budgeted,
                                const SkTArray<GrMipLevel>& texels) {
    GrSurfaceDesc desc = origDesc;

    const GrCaps* caps = this->caps();
    bool isRT = false;
    bool textureCreationParamsValid = check_texture_creation_params(*caps, desc, &isRT, texels);
    if (!textureCreationParamsValid) {
        return nullptr;
    }

    // Clamp the requested sample count to what the backend supports.
    desc.fSampleCnt = SkTMin(desc.fSampleCnt, caps->maxSampleCount());
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);

    desc.fOrigin = resolve_origin(desc.fOrigin, isRT);

    GrTexture* tex = nullptr;

    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        // We shouldn't be rendering into this
        SkASSERT(!isRT);
        SkASSERT(0 == desc.fSampleCnt);

        // Reject NPOT compressed textures when the backend lacks NPOT tile support.
        if (!caps->npotTextureTileSupport() &&
            (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight))) {
            return nullptr;
        }

        this->handleDirtyContext();
        tex = this->onCreateCompressedTexture(desc, budgeted, texels);
    } else {
        this->handleDirtyContext();
        tex = this->onCreateTexture(desc, budgeted, texels);
    }
    if (tex) {
        // When scratch reuse is disabled for plain textures, drop the scratch key so the
        // resource cache will not recycle this texture.
        if (!caps->reuseScratchTextures() && !isRT) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (!texels.empty()) {
            if (texels[0].fPixels) {
                fStats.incTextureUploads();
            }
        }
        // This is a current work around to get discards into newly created textures. Once we are in
        // MDB world, we should remove this code and rely on the draw target having specified load
        // operations.
        if (isRT && texels.empty()) {
            GrRenderTarget* rt = tex->asRenderTarget();
            SkASSERT(rt);
            rt->discard();
        }
    }
    return tex;
}
192 
193 GrTexture* GrGpu::wrapBackendTexture(const GrBackendTextureDesc& desc, GrWrapOwnership ownership) {
194     this->handleDirtyContext();
195     if (!this->caps()->isConfigTexturable(desc.fConfig)) {
196         return nullptr;
197     }
198     if ((desc.fFlags & kRenderTarget_GrBackendTextureFlag) &&
199         !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
200         return nullptr;
201     }
202     int maxSize = this->caps()->maxTextureSize();
203     if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
204         return nullptr;
205     }
206     GrTexture* tex = this->onWrapBackendTexture(desc, ownership);
207     if (nullptr == tex) {
208         return nullptr;
209     }
210     // TODO: defer this and attach dynamically
211     GrRenderTarget* tgt = tex->asRenderTarget();
212     if (tgt && !fContext->resourceProvider()->attachStencilAttachment(tgt)) {
213         tex->unref();
214         return nullptr;
215     } else {
216         return tex;
217     }
218 }
219 
220 GrRenderTarget* GrGpu::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc,
221                                                GrWrapOwnership ownership) {
222     if (!this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
223         return nullptr;
224     }
225     this->handleDirtyContext();
226     return this->onWrapBackendRenderTarget(desc, ownership);
227 }
228 
229 GrRenderTarget* GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTextureDesc& desc) {
230     this->handleDirtyContext();
231     if (!(desc.fFlags & kRenderTarget_GrBackendTextureFlag)) {
232       return nullptr;
233     }
234     if (!this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
235         return nullptr;
236     }
237     int maxSize = this->caps()->maxTextureSize();
238     if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
239         return nullptr;
240     }
241     return this->onWrapBackendTextureAsRenderTarget(desc);
242 }
243 
244 GrBuffer* GrGpu::createBuffer(size_t size, GrBufferType intendedType,
245                               GrAccessPattern accessPattern, const void* data) {
246     this->handleDirtyContext();
247     GrBuffer* buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
248     if (!this->caps()->reuseScratchBuffers()) {
249         buffer->resourcePriv().removeScratchKey();
250     }
251     return buffer;
252 }
253 
// Creates the backend's instanced-rendering helper. Only legal to call when the caps
// report some level of instanced support.
gr_instanced::InstancedRendering* GrGpu::createInstancedRendering() {
    SkASSERT(GrCaps::InstancedSupport::kNone != this->caps()->instancedSupport());
    return this->onCreateInstancedRendering();
}
258 
// Copies 'srcRect' of 'src' into 'dst' at 'dstPoint', delegating to the backend.
// Returns false if the backend could not perform the copy.
bool GrGpu::copySurface(GrSurface* dst,
                        GrSurface* src,
                        const SkIRect& srcRect,
                        const SkIPoint& dstPoint) {
    SkASSERT(dst && src);
    this->handleDirtyContext();
    return this->onCopySurface(dst, src, srcRect, dstPoint);
}
267 
// Determines whether pixels can be read back from 'srcSurface' in 'readConfig', and
// whether a draw into a temporary surface should happen first. Delegates the
// backend-specific part to onGetReadPixelsInfo (which is expected to fill in
// 'tempDrawInfo'), then sanity-checks the resulting draw preference.
bool GrGpu::getReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                              GrPixelConfig readConfig, DrawPreference* drawPreference,
                              ReadPixelTempDrawInfo* tempDrawInfo) {
    SkASSERT(drawPreference);
    SkASSERT(tempDrawInfo);
    SkASSERT(kGpuPrefersDraw_DrawPreference != *drawPreference);

    // We currently do not support reading into a compressed buffer
    if (GrPixelConfigIsCompressed(readConfig)) {
        return false;
    }

    // We currently do not support reading into the packed formats 565 or 4444 as they are not
    // required to have read back support on all devices and backends.
    if (kRGB_565_GrPixelConfig == readConfig || kRGBA_4444_GrPixelConfig == readConfig) {
        return false;
    }

    if (!this->onGetReadPixelsInfo(srcSurface, width, height, rowBytes, readConfig, drawPreference,
                                   tempDrawInfo)) {
        return false;
    }

    // Check to see if we're going to request that the caller draw when drawing is not possible.
    if (!srcSurface->asTexture() ||
        !this->caps()->isConfigRenderable(tempDrawInfo->fTempSurfaceDesc.fConfig, false)) {
        // If we don't have a fallback to a straight read then fail.
        if (kRequireDraw_DrawPreference == *drawPreference) {
            return false;
        }
        *drawPreference = kNoDraw_DrawPreference;
    }

    return true;
}
// Determines whether pixel data in 'srcConfig' can be written to 'dstSurface', and
// whether the write should go through a draw from a temporary texture. May elevate
// '*drawPreference'; the backend's onGetWritePixelsInfo is expected to fill in
// 'tempDrawInfo'.
bool GrGpu::getWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                               GrPixelConfig srcConfig, DrawPreference* drawPreference,
                               WritePixelTempDrawInfo* tempDrawInfo) {
    SkASSERT(drawPreference);
    SkASSERT(tempDrawInfo);
    SkASSERT(kGpuPrefersDraw_DrawPreference != *drawPreference);

    // Writes to a compressed surface must supply data in that same compressed config.
    if (GrPixelConfigIsCompressed(dstSurface->desc().fConfig) &&
        dstSurface->desc().fConfig != srcConfig) {
        return false;
    }

    if (SkToBool(dstSurface->asRenderTarget())) {
        // Some backends prefer that writes to render targets (or partial writes) be
        // expressed as draws instead of direct uploads.
        if (this->caps()->useDrawInsteadOfAllRenderTargetWrites()) {
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        } else if (this->caps()->useDrawInsteadOfPartialRenderTargetWrite() &&
                   (width < dstSurface->width() || height < dstSurface->height())) {
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        }
    }

    if (!this->onGetWritePixelsInfo(dstSurface, width, height, srcConfig, drawPreference,
                                    tempDrawInfo)) {
        return false;
    }

    // Check to see if we're going to request that the caller draw when drawing is not possible.
    if (!dstSurface->asRenderTarget() ||
        !this->caps()->isConfigTexturable(tempDrawInfo->fTempSurfaceDesc.fConfig)) {
        // If we don't have a fallback to a straight upload then fail.
        if (kRequireDraw_DrawPreference == *drawPreference ||
            !this->caps()->isConfigTexturable(srcConfig)) {
            return false;
        }
        *drawPreference = kNoDraw_DrawPreference;
    }
    return true;
}
341 
342 bool GrGpu::readPixels(GrSurface* surface,
343                        int left, int top, int width, int height,
344                        GrPixelConfig config, void* buffer,
345                        size_t rowBytes) {
346     this->handleDirtyContext();
347 
348     // We cannot read pixels into a compressed buffer
349     if (GrPixelConfigIsCompressed(config)) {
350         return false;
351     }
352 
353     size_t bpp = GrBytesPerPixel(config);
354     if (!GrSurfacePriv::AdjustReadPixelParams(surface->width(), surface->height(), bpp,
355                                               &left, &top, &width, &height,
356                                               &buffer,
357                                               &rowBytes)) {
358         return false;
359     }
360 
361     return this->onReadPixels(surface,
362                               left, top, width, height,
363                               config, buffer,
364                               rowBytes);
365 }
366 
367 bool GrGpu::writePixels(GrSurface* surface,
368                         int left, int top, int width, int height,
369                         GrPixelConfig config, const SkTArray<GrMipLevel>& texels) {
370     if (!surface) {
371         return false;
372     }
373     for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
374         if (!texels[currentMipLevel].fPixels ) {
375             return false;
376         }
377     }
378 
379     this->handleDirtyContext();
380     if (this->onWritePixels(surface, left, top, width, height, config, texels)) {
381         SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
382         this->didWriteToSurface(surface, &rect, texels.count());
383         fStats.incTextureUploads();
384         return true;
385     }
386     return false;
387 }
388 
389 bool GrGpu::writePixels(GrSurface* surface,
390                         int left, int top, int width, int height,
391                         GrPixelConfig config, const void* buffer,
392                         size_t rowBytes) {
393     GrMipLevel mipLevel;
394     mipLevel.fPixels = buffer;
395     mipLevel.fRowBytes = rowBytes;
396     SkSTArray<1, GrMipLevel> texels;
397     texels.push_back(mipLevel);
398 
399     return this->writePixels(surface, left, top, width, height, config, texels);
400 }
401 
402 bool GrGpu::transferPixels(GrSurface* surface,
403                            int left, int top, int width, int height,
404                            GrPixelConfig config, GrBuffer* transferBuffer,
405                            size_t offset, size_t rowBytes, GrFence* fence) {
406     SkASSERT(transferBuffer);
407     SkASSERT(fence);
408 
409     this->handleDirtyContext();
410     if (this->onTransferPixels(surface, left, top, width, height, config,
411                                transferBuffer, offset, rowBytes)) {
412         SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
413         this->didWriteToSurface(surface, &rect);
414         fStats.incTransfersToTexture();
415 
416         if (*fence) {
417             this->deleteFence(*fence);
418         }
419         *fence = this->insertFence();
420 
421         return true;
422     }
423     return false;
424 }
425 
// Resolves the multisampled contents of 'target' via the backend implementation.
void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target);
}
431 
432 void GrGpu::didWriteToSurface(GrSurface* surface, const SkIRect* bounds, uint32_t mipLevels) const {
433     SkASSERT(surface);
434     // Mark any MIP chain and resolve buffer as dirty if and only if there is a non-empty bounds.
435     if (nullptr == bounds || !bounds->isEmpty()) {
436         if (GrRenderTarget* target = surface->asRenderTarget()) {
437             target->flagAsNeedingResolve(bounds);
438         }
439         GrTexture* texture = surface->asTexture();
440         if (texture && 1 == mipLevels) {
441             texture->texturePriv().dirtyMipMaps(true);
442         }
443     }
444 }
445 
// Returns the effective sample count and (if supported) sample locations for 'rt',
// caching the result under a small id stored on the render target. Index 0 of
// fMultisampleSpecs is reserved as the invalid id (see the constructor).
const GrGpu::MultisampleSpecs& GrGpu::getMultisampleSpecs(GrRenderTarget* rt,
                                                          const GrStencilSettings& stencil) {
    SkASSERT(rt->desc().fSampleCnt > 1);

#ifndef SK_DEBUG
    // In debug mode we query the multisample info every time to verify the caching is correct.
    // (This cached fast path is compiled out under SK_DEBUG; the asserts near the bottom
    // then check the recomputed id against the cached one.)
    if (uint8_t id = rt->renderTargetPriv().accessMultisampleSpecsID()) {
        SkASSERT(id > 0 && id < fMultisampleSpecs.count());
        return fMultisampleSpecs[id];
    }
#endif

    int effectiveSampleCnt;
    SkSTArray<16, SkPoint, true> pattern;
    this->onGetMultisampleSpecs(rt, stencil, &effectiveSampleCnt, &pattern);
    SkASSERT(effectiveSampleCnt >= rt->desc().fSampleCnt);

    uint8_t id;
    if (this->caps()->sampleLocationsSupport()) {
        SkASSERT(pattern.count() == effectiveSampleCnt);
        // Deduplicate sample patterns: each unique pattern maps to a stable id. The id
        // is capped at 255 because it is stored in a uint8_t.
        const auto& insertResult = fMultisampleSpecsIdMap.insert(
            MultisampleSpecsIdMap::value_type(pattern, SkTMin(fMultisampleSpecs.count(), 255)));
        id = insertResult.first->second;
        if (insertResult.second) {
            // This means the insert did not find the pattern in the map already, and therefore an
            // actual insertion took place. (We don't expect to see many unique sample patterns.)
            const SkPoint* sampleLocations = insertResult.first->first.begin();
            SkASSERT(id == fMultisampleSpecs.count());
            fMultisampleSpecs.emplace_back(id, effectiveSampleCnt, sampleLocations);
        }
    } else {
        // Without sample-location support, the effective sample count itself is the id;
        // lazily fill in spec entries up to that count (with no locations).
        id = effectiveSampleCnt;
        for (int i = fMultisampleSpecs.count(); i <= id; ++i) {
            fMultisampleSpecs.emplace_back(i, i, nullptr);
        }
    }
    SkASSERT(id > 0);
    // Verify that a freshly computed id agrees with any previously cached id for this rt.
    SkASSERT(!rt->renderTargetPriv().accessMultisampleSpecsID() ||
             rt->renderTargetPriv().accessMultisampleSpecsID() == id);

    rt->renderTargetPriv().accessMultisampleSpecsID() = id;
    return fMultisampleSpecs[id];
}
489 
490 bool GrGpu::SamplePatternComparator::operator()(const SamplePattern& a,
491                                                 const SamplePattern& b) const {
492     if (a.count() != b.count()) {
493         return a.count() < b.count();
494     }
495     for (int i = 0; i < a.count(); ++i) {
496         // This doesn't have geometric meaning. We just need to define an ordering for std::map.
497         if (a[i].x() != b[i].x()) {
498             return a[i].x() < b[i].x();
499         }
500         if (a[i].y() != b[i].y()) {
501             return a[i].y() < b[i].y();
502         }
503     }
504     return false; // Equal.
505 }
506