/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrResourceProvider.h"

#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrGpu.h"
#include "GrPathRendering.h"
#include "GrRenderTarget.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceKey.h"
#include "GrStencilAttachment.h"
#include "SkMathPriv.h"

GR_DECLARE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);

GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
    : INHERITED(gpu, cache, owner) {
    GR_DEFINE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
    fQuadIndexBufferKey = gQuadIndexBufferKey;
}
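
// Builds an index buffer by repeating a fixed index pattern 'reps' times, offsetting the
// indices of each repetition by 'vertCount' (the number of vertices one instance of the
// pattern addresses). The indices are written through map()/unmap() when the driver allows
// it, otherwise through a temporary CPU array and updateData(). The resulting buffer is
// registered in the cache under 'key'.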
const GrBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16_t* pattern,
                                                               int patternSize,
                                                               int reps,
                                                               int vertCount,
                                                               const GrUniqueKey& key) {
    size_t bufferSize = patternSize * reps * sizeof(uint16_t);

    // This is typically used in GrBatches, so we assume kNoPendingIO.
    GrBuffer* buffer = this->createBuffer(bufferSize, kIndex_GrBufferType, kStatic_GrAccessPattern,
                                          kNoPendingIO_Flag);
    if (!buffer) {
        return nullptr;
    }
    uint16_t* data = (uint16_t*) buffer->map();
    bool useTempData = (nullptr == data);
    if (useTempData) {
        data = new uint16_t[reps * patternSize];
    }
    for (int i = 0; i < reps; ++i) {
        int baseIdx = i * patternSize;
        uint16_t baseVert = (uint16_t)(i * vertCount);
        for (int j = 0; j < patternSize; ++j) {
            data[baseIdx+j] = baseVert + pattern[j];
        }
    }
    if (useTempData) {
        bool updated = buffer->updateData(data, bufferSize);
        delete[] data;
        if (!updated) {
            buffer->unref();
            return nullptr;
        }
    } else {
        buffer->unmap();
    }
    this->assignUniqueKeyToResource(key, buffer);
    return buffer;
}
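
// Builds the shared index buffer used to draw quads as pairs of triangles. Each quad uses
// 4 vertices and the 6-index pattern {0, 1, 2, 0, 2, 3}, so quad i addresses vertices
// 4*i .. 4*i+3. kMaxQuads is capped so that the largest index (4 * kMaxQuads - 1) still
// fits in a uint16_t.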
const GrBuffer* GrResourceProvider::createQuadIndexBuffer() {
    static const int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1;
    GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
    static const uint16_t kPattern[] = { 0, 1, 2, 0, 2, 3 };

    return this->createInstancedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
}

GrPath* GrResourceProvider::createPath(const SkPath& path, const GrStyle& style) {
    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createPath(path, style);
}

GrPathRange* GrResourceProvider::createPathRange(GrPathRange::PathGenerator* gen,
                                                 const GrStyle& style) {
    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createPathRange(gen, style);
}

GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf,
                                              const SkScalerContextEffects& effects,
                                              const SkDescriptor* desc,
                                              const GrStyle& style) {
    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createGlyphs(tf, effects, desc, style);
}
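
// Creates (or recycles) a GrBuffer. Non-dynamic buffers are created directly on the GPU.
// Dynamic vertex/index buffers may be CPU-backed when the caps prefer client-side buffers
// and the caller did not require GPU memory. Otherwise the requested size is rounded up to
// a power-of-two bin (with a minimum) and the scratch cache is searched for a reusable
// buffer of that bin before a new one is created.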
GrBuffer* GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
                                           GrAccessPattern accessPattern, uint32_t flags,
                                           const void* data) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    if (kDynamic_GrAccessPattern != accessPattern) {
        return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
    }
    if (!(flags & kRequireGpuMemory_Flag) &&
        this->gpu()->caps()->preferClientSideDynamicBuffers() &&
        GrBufferTypeIsVertexOrIndex(intendedType) &&
        kDynamic_GrAccessPattern == accessPattern) {
        return GrBuffer::CreateCPUBacked(this->gpu(), size, intendedType, data);
    }

    // bin by pow2 with a reasonable min
    static const size_t MIN_SIZE = 1 << 12;
    size_t allocSize = size > (1u << 31)
                       ? size_t(SkTMin(uint64_t(SIZE_MAX),
                                       uint64_t(GrNextPow2(uint32_t(uint64_t(size) >> 32))) << 32))
                       : size_t(GrNextPow2(uint32_t(size)));
    allocSize = SkTMax(allocSize, MIN_SIZE);
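    // For example: a 5000-byte dynamic request bins to 8192 bytes, and any request smaller
    // than 4 KiB bins to MIN_SIZE (4096 bytes), so requests of many different sizes can
    // reuse the same scratch buffers.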

    GrScratchKey key;
    GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
    uint32_t scratchFlags = 0;
    if (flags & kNoPendingIO_Flag) {
        scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
    } else {
        scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
    }
    GrBuffer* buffer = static_cast<GrBuffer*>(
        this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags));
    if (!buffer) {
        buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
        if (!buffer) {
            return nullptr;
        }
    }
    if (data) {
        buffer->updateData(data, size);
    }
    SkASSERT(!buffer->isCPUBacked()); // We should only cache real VBOs.
    return buffer;
}
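
// Creates a GrBatchAtlas backed by an approx-fit texture of the requested size and config,
// subdivided into numPlotsX * numPlotsY plots. 'func' and 'data' are registered as the
// atlas's eviction callback.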
GrBatchAtlas* GrResourceProvider::createAtlas(GrPixelConfig config,
                                              int width, int height,
                                              int numPlotsX, int numPlotsY,
                                              GrBatchAtlas::EvictionFunc func, void* data) {
    GrSurfaceDesc desc;
    desc.fFlags = kNone_GrSurfaceFlags;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = config;

    // We don't want to flush the context, so we claim we're in the middle of flushing in order
    // to guarantee we do not receive a texture with pending IO.
    // TODO: Determine how to avoid having to do this. (https://bug.skia.org/4156)
    static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
    GrTexture* texture = this->createApproxTexture(desc, kFlags);
    if (!texture) {
        return nullptr;
    }
    GrBatchAtlas* atlas = new GrBatchAtlas(texture, numPlotsX, numPlotsY);
    atlas->registerEvictionCallback(func, data);
    return atlas;
}
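
// Returns the render target's stencil attachment, creating and attaching one if it does not
// already have one. Stencil attachments are shared between render targets of the same
// dimensions and sample count via a unique key, and a newly created attachment is cleared
// once when it is first attached.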
GrStencilAttachment* GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt) {
    SkASSERT(rt);
    if (rt->renderTargetPriv().getStencilAttachment()) {
        return rt->renderTargetPriv().getStencilAttachment();
    }

    if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment()) {
        GrUniqueKey sbKey;

        int width = rt->width();
        int height = rt->height();
#if 0
        if (this->caps()->oversizedStencilSupport()) {
            width  = SkNextPow2(width);
            height = SkNextPow2(height);
        }
#endif
        bool newStencil = false;
        GrStencilAttachment::ComputeSharedStencilAttachmentKey(width, height,
                                                               rt->numStencilSamples(), &sbKey);
        GrStencilAttachment* stencil = static_cast<GrStencilAttachment*>(
            this->findAndRefResourceByUniqueKey(sbKey));
        if (!stencil) {
            // Need to try and create a new stencil
            stencil = this->gpu()->createStencilAttachmentForRenderTarget(rt, width, height);
            if (stencil) {
                stencil->resourcePriv().setUniqueKey(sbKey);
                newStencil = true;
            }
        }
        if (rt->renderTargetPriv().attachStencilAttachment(stencil)) {
            if (newStencil) {
                // Right now we're clearing the stencil attachment here after it is
                // attached to a RT for the first time. When we start matching
                // stencil buffers with smaller color targets this will no longer
                // be correct because it won't be guaranteed to clear the entire
                // sb.
                // We used to clear down in the GL subclass using a special purpose
                // FBO. But iOS doesn't allow a stencil-only FBO. It reports unsupported
                // FBO status.
                this->gpu()->clearStencil(rt);
            }
        }
    }
    return rt->renderTargetPriv().getStencilAttachment();
}

GrRenderTarget* GrResourceProvider::wrapBackendTextureAsRenderTarget(
        const GrBackendTextureDesc& desc) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    return this->gpu()->wrapBackendTextureAsRenderTarget(desc);
}