/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/GrContext.h"
#include "src/core/SkMipMap.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
#include "src/gpu/glsl/GrGLSLXferProcessor.h"
#include "src/gpu/vk/GrVkBufferView.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkDescriptorPool.h"
#include "src/gpu/vk/GrVkDescriptorSet.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkSampler.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUniformBuffer.h"

GrVkPipelineState::GrVkPipelineState(
        GrVkGpu* gpu,
        GrVkPipeline* pipeline,
        const GrVkDescriptorSetManager::Handle& samplerDSHandle,
        const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
        const UniformInfoArray& uniforms,
        uint32_t uniformSize,
        const UniformInfoArray& samplers,
        std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
        std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
        std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
        int fragmentProcessorCnt)
        : fPipeline(pipeline)
        , fUniformDescriptorSet(nullptr)
        , fSamplerDescriptorSet(nullptr)
        , fSamplerDSHandle(samplerDSHandle)
        , fBuiltinUniformHandles(builtinUniformHandles)
        , fGeometryProcessor(std::move(geometryProcessor))
        , fXferProcessor(std::move(xferProcessor))
        , fFragmentProcessors(std::move(fragmentProcessors))
        , fFragmentProcessorCnt(fragmentProcessorCnt)
        , fDataManager(uniforms, uniformSize) {
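    // Descriptor sets are acquired lazily in setAndBindUniforms/setAndBindTextures, so start
    // every slot out as a null handle.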
    fDescriptorSets[0] = VK_NULL_HANDLE;
    fDescriptorSets[1] = VK_NULL_HANDLE;
    fDescriptorSets[2] = VK_NULL_HANDLE;

    fUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, uniformSize));

    fNumSamplers = samplers.count();

    for (int i = 0; i < fNumSamplers; ++i) {
        // We store the immutable samplers here and take ownership of the ref from the
        // GrVkUniformHandler.
        fImmutableSamplers.push_back(samplers[i].fImmutableSampler);
    }
}

GrVkPipelineState::~GrVkPipelineState() {
    // Must have freed all GPU resources before this is destroyed
    SkASSERT(!fPipeline);
}

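// Releases this pipeline state's GPU objects while the VkDevice is still valid: the pipeline and
// uniform buffer are released and the descriptor sets are recycled for reuse.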
void GrVkPipelineState::freeGPUResources(GrVkGpu* gpu) {
    if (fPipeline) {
        fPipeline->unref(gpu);
        fPipeline = nullptr;
    }

    if (fUniformBuffer) {
        fUniformBuffer->release(gpu);
        fUniformBuffer.reset();
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
        fUniformDescriptorSet = nullptr;
    }

    if (fSamplerDescriptorSet) {
        fSamplerDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
        fSamplerDescriptorSet = nullptr;
    }
}

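// Drops all references without calling into Vulkan; used when the context has been abandoned and
// the underlying device may no longer be usable.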
void GrVkPipelineState::abandonGPUResources() {
    if (fPipeline) {
        fPipeline->unrefAndAbandon();
        fPipeline = nullptr;
    }

    if (fUniformBuffer) {
        fUniformBuffer->abandon();
        fUniformBuffer.reset();
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->unrefAndAbandon();
        fUniformDescriptorSet = nullptr;
    }

    if (fSamplerDescriptorSet) {
        fSamplerDescriptorSet->unrefAndAbandon();
        fSamplerDescriptorSet = nullptr;
    }
}

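// Gathers uniform data from the geometry, fragment, and xfer processors into fDataManager,
// uploads it to the uniform buffer when anything has changed, and binds the uniform descriptor
// set on the command buffer.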
void GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
                                           const GrRenderTarget* renderTarget,
                                           const GrProgramInfo& programInfo,
                                           GrVkCommandBuffer* commandBuffer) {
    this->setRenderTargetState(renderTarget, programInfo.origin());

    fGeometryProcessor->setData(fDataManager, programInfo.primProc(),
                                GrFragmentProcessor::CoordTransformIter(programInfo.pipeline()));
    GrFragmentProcessor::Iter iter(programInfo.pipeline());
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        glslFP->setData(fDataManager, *fp);
        fp = iter.next();
        glslFP = glslIter.next();
    }
    SkASSERT(!fp && !glslFP);

    {
        SkIPoint offset;
        GrTexture* dstTexture = programInfo.pipeline().peekDstTexture(&offset);

        fXferProcessor->setData(fDataManager, programInfo.pipeline().getXferProcessor(),
                                dstTexture, offset);
    }

    // Get new descriptor set
    if (fUniformBuffer) {
        int uniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
        if (fDataManager.uploadUniformBuffers(gpu, fUniformBuffer.get()) ||
            !fUniformDescriptorSet) {
            if (fUniformDescriptorSet) {
                fUniformDescriptorSet->recycle(gpu);
            }
            fUniformDescriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
            fDescriptorSets[uniformDSIdx] = fUniformDescriptorSet->descriptorSet();
            this->writeUniformBuffers(gpu);
        }
        commandBuffer->bindDescriptorSets(gpu, this, fPipeline->layout(), uniformDSIdx, 1,
                                          &fDescriptorSets[uniformDSIdx], 0, nullptr);
        if (fUniformDescriptorSet) {
            commandBuffer->addRecycledResource(fUniformDescriptorSet);
        }
        if (fUniformBuffer) {
            commandBuffer->addRecycledResource(fUniformBuffer->resource());
        }
    }
}

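// Collects every texture/sampler binding (primitive processor textures, then fragment processor
// textures, then the dst-copy texture), writes them into a freshly acquired sampler descriptor
// set, and binds that set on the command buffer.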
void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
                                           const GrPrimitiveProcessor& primProc,
                                           const GrPipeline& pipeline,
                                           const GrTextureProxy* const primProcTextures[],
                                           GrVkCommandBuffer* commandBuffer) {
    SkASSERT(primProcTextures || !primProc.numTextureSamplers());

    struct SamplerBindings {
        GrSamplerState fState;
        GrVkTexture* fTexture;
    };
    SkAutoSTMalloc<8, SamplerBindings> samplerBindings(fNumSamplers);
    int currTextureBinding = 0;

    fGeometryProcessor->setData(fDataManager, primProc,
                                GrFragmentProcessor::CoordTransformIter(pipeline));
    for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
        const auto& sampler = primProc.textureSampler(i);
        auto texture = static_cast<GrVkTexture*>(primProcTextures[i]->peekTexture());
        samplerBindings[currTextureBinding++] = {sampler.samplerState(), texture};
    }

    GrFragmentProcessor::Iter iter(pipeline);
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const auto& sampler = fp->textureSampler(i);
            samplerBindings[currTextureBinding++] =
                    {sampler.samplerState(), static_cast<GrVkTexture*>(sampler.peekTexture())};
        }
        fp = iter.next();
        glslFP = glslIter.next();
    }
    SkASSERT(!fp && !glslFP);

    if (GrTextureProxy* dstTextureProxy = pipeline.dstTextureProxy()) {
        samplerBindings[currTextureBinding++] = {
                GrSamplerState::ClampNearest(),
                static_cast<GrVkTexture*>(dstTextureProxy->peekTexture())};
    }

    // Get new descriptor set
    SkASSERT(fNumSamplers == currTextureBinding);
    if (fNumSamplers) {
        if (fSamplerDescriptorSet) {
            fSamplerDescriptorSet->recycle(gpu);
        }
        fSamplerDescriptorSet = gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
        int samplerDSIdx = GrVkUniformHandler::kSamplerDescSet;
        fDescriptorSets[samplerDSIdx] = fSamplerDescriptorSet->descriptorSet();
        for (int i = 0; i < fNumSamplers; ++i) {
            const GrSamplerState& state = samplerBindings[i].fState;
            GrVkTexture* texture = samplerBindings[i].fTexture;

            const GrVkImageView* textureView = texture->textureView();
            const GrVkSampler* sampler = nullptr;
            if (fImmutableSamplers[i]) {
                sampler = fImmutableSamplers[i];
            } else {
                sampler = gpu->resourceProvider().findOrCreateCompatibleSampler(
                        state, texture->ycbcrConversionInfo());
            }
            SkASSERT(sampler);

            VkDescriptorImageInfo imageInfo;
            memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
            imageInfo.sampler = sampler->sampler();
            imageInfo.imageView = textureView->imageView();
            imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

            VkWriteDescriptorSet writeInfo;
            memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
            writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            writeInfo.pNext = nullptr;
            writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
            writeInfo.dstBinding = i;
            writeInfo.dstArrayElement = 0;
            writeInfo.descriptorCount = 1;
            writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            writeInfo.pImageInfo = &imageInfo;
            writeInfo.pBufferInfo = nullptr;
            writeInfo.pTexelBufferView = nullptr;

            GR_VK_CALL(gpu->vkInterface(),
                       UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
            commandBuffer->addResource(sampler);
            if (!fImmutableSamplers[i]) {
                sampler->unref(gpu);
            }
            commandBuffer->addResource(samplerBindings[i].fTexture->textureView());
            commandBuffer->addResource(samplerBindings[i].fTexture->resource());
        }

        commandBuffer->bindDescriptorSets(gpu, this, fPipeline->layout(), samplerDSIdx, 1,
                                          &fDescriptorSets[samplerDSIdx], 0, nullptr);
        commandBuffer->addRecycledResource(fSamplerDescriptorSet);
    }
}

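// Fills out a VkWriteDescriptorSet/VkDescriptorBufferInfo pair that points the uniform-buffer
// binding of the given descriptor set at the given GrVkUniformBuffer.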
void set_uniform_descriptor_writes(VkWriteDescriptorSet* descriptorWrite,
                                   VkDescriptorBufferInfo* bufferInfo,
                                   const GrVkUniformBuffer* buffer,
                                   VkDescriptorSet descriptorSet) {

    memset(bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
    bufferInfo->buffer = buffer->buffer();
    bufferInfo->offset = buffer->offset();
    bufferInfo->range = buffer->size();

    memset(descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
    descriptorWrite->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite->pNext = nullptr;
    descriptorWrite->dstSet = descriptorSet;
    descriptorWrite->dstBinding = GrVkUniformHandler::kUniformBinding;
    descriptorWrite->dstArrayElement = 0;
    descriptorWrite->descriptorCount = 1;
    descriptorWrite->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrite->pImageInfo = nullptr;
    descriptorWrite->pBufferInfo = bufferInfo;
    descriptorWrite->pTexelBufferView = nullptr;
}

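// Updates the uniform descriptor set so that it references the current uniform buffer.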
void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) {
    VkWriteDescriptorSet descriptorWrites[3];
    VkDescriptorBufferInfo bufferInfos[3];

    uint32_t writeCount = 0;

    if (fUniformBuffer.get()) {
        set_uniform_descriptor_writes(&descriptorWrites[writeCount],
                                      &bufferInfos[writeCount],
                                      fUniformBuffer.get(),
                                      fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]);
        ++writeCount;
    }

    if (writeCount) {
        GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
                                                            writeCount,
                                                            descriptorWrites,
                                                            0, nullptr));
    }
}

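// Refreshes the built-in RT-height and RT-adjustment uniforms whenever the render target's size
// or origin changes.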
void GrVkPipelineState::setRenderTargetState(const GrRenderTarget* rt, GrSurfaceOrigin origin) {

    // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
    if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
        fRenderTargetState.fRenderTargetSize.fHeight != rt->height()) {
        fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(rt->height()));
    }

    // set RT adjustment
    SkISize size;
    size.set(rt->width(), rt->height());
    SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
    if (fRenderTargetState.fRenderTargetOrigin != origin ||
        fRenderTargetState.fRenderTargetSize != size) {
        fRenderTargetState.fRenderTargetSize = size;
        fRenderTargetState.fRenderTargetOrigin = origin;

        float rtAdjustmentVec[4];
        fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
        fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
    }
}

void GrVkPipelineState::bindPipeline(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
    commandBuffer->bindPipeline(gpu, fPipeline);
}