/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkOpsRenderPass.h"

#include "include/core/SkDrawable.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrBackendDrawableInfo.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrFixedClip.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"

/////////////////////////////////////////////////////////////////////////////

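// Map Ganesh load/store ops onto their Vulkan attachment equivalents. An
// unrecognized op asserts and falls back to the most conservative choice.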
void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}

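// Transitions the color (and, if present, stencil) attachments to their
// attachment layouts, finds or creates a GrVkRenderPass compatible with the
// requested load/store ops, and begins the render pass, recording into a
// secondary command buffer when the driver prefers one.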
bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                             const SkPMColor4f& clearColor) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp,
                          &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp,
                          &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    GrVkImage* targetImage = vkRT->msaaImage() ? vkRT->msaaImage() : vkRT;

    // Change layout of our render target so it can be used as the color attachment.
    // TODO: If we know that we will never be blending or loading the attachment we could drop the
    // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
    targetImage->setImageLayout(fGpu,
                                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                false);

    // If we are using a stencil attachment we also need to update its layout.
    if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) {
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }

    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            vkRT->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }
    if (!fCurrentRenderPass) {
        return false;
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = clearColor[0];
    vkClearColor.color.float32[1] = clearColor[1];
    vkClearColor.color.float32[2] = clearColor[2];
    vkClearColor.color.float32[3] = clearColor[3];

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, vkRT->getFramebuffer(), fCurrentRenderPass);
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, &vkClearColor, vkRT, fOrigin, fBounds,
                               SkToBool(fCurrentSecondaryCommandBuffer))) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }
    return true;
}

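// For render targets that wrap a client-provided secondary command buffer we
// reuse the client's external render pass and record straight into the imported
// command buffer instead of beginning a render pass of our own.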
bool GrVkOpsRenderPass::initWrapped() {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    SkASSERT(vkRT->wrapsSecondaryCommandBuffer());
    fCurrentRenderPass = vkRT->externalRenderPass();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->ref();

    fCurrentSecondaryCommandBuffer.reset(
            GrVkSecondaryCommandBuffer::Create(vkRT->getExternalSecondaryCommandBuffer()));
    if (!fCurrentSecondaryCommandBuffer) {
        return false;
    }
    fCurrentSecondaryCommandBuffer->begin(fGpu, nullptr, fCurrentRenderPass);
    return true;
}

GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    this->reset();
}

GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }

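// Returns the command buffer that draws should currently be recorded into: the
// secondary command buffer when we are using one, otherwise the GrVkGpu's
// primary command buffer.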
GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
    if (fCurrentSecondaryCommandBuffer) {
        return fCurrentSecondaryCommandBuffer.get();
    }
    return fGpu->currentCommandBuffer();
}

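// Submits the recorded work to the GPU. A wrapped secondary command buffer is
// handed back to the render target instead, since the client will execute it.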
void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass ownership of the GrVkSecondaryCommandBuffer to the special wrapped
        // GrVkRenderTarget since its lifetime matches how long we need to keep the
        // GrManagedResources on the GrVkSecondaryCommandBuffer alive.
        static_cast<GrVkRenderTarget*>(fRenderTarget)->addWrappedGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}

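// Per-render-pass setup: records the target, origin, and bounds, transitions
// every instantiated sampled texture to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
// then defers to initWrapped() or init() depending on whether the target wraps a
// client secondary command buffer.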
bool GrVkOpsRenderPass::set(GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    this->INHERITED::set(rt, origin);

    for (int i = 0; i < sampledProxies.count(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            vkTex->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(bounds.isEmpty() || SkIRect::MakeWH(rt->width(), rt->height()).contains(bounds));
    fBounds = bounds;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, stencilInfo, colorInfo.fClearColor);
}

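// Recycles the secondary command buffer and releases our render pass reference
// so this GrVkOpsRenderPass can be reused for a later render pass.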
void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref();
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}

bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    return vkRT->wrapsSecondaryCommandBuffer();
}

////////////////////////////////////////////////////////////////////////////////

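// Clears the stencil clip value. Like onClear() below, this must go through
// vkCmdClearAttachments because we are inside an active render pass.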
void GrVkOpsRenderPass::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    SkASSERT(!clip.hasWindowRectangles());

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0;  // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

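// Clears the color attachment, again with vkCmdClearAttachments since we are
// inside an active render pass.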
void GrVkOpsRenderPass::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // The parent class should never let us get here with no render target.
    SkASSERT(!clip.hasWindowRectangles());

    VkClearColorValue vkColor = {{color.fR, color.fG, color.fB, color.fA}};

    // If we end up calling a clear without a scissor, it generally means we missed an
    // opportunity higher up the stack to set the load op to be a clear. However, there are
    // situations where higher up we couldn't discard the previous ops and set a clear load op
    // (e.g. if we needed to execute a wait op). Thus we also allow an empty command buffer here.
    // TODO: Make the waitOp a RenderTask instead so we can clear out the GrOpsTask for a clear.
    // We can then re-enable this assert assuming we can't get messed up by a waitOp.
    //SkASSERT(!fCurrentCBIsEmpty || clip.scissorEnabled());

    // We always do a sub rect clear with clearAttachments since we are inside a render pass.
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

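// Begins a follow-up render pass over the same target (used after an inline
// upload or an external drawable). The new pass loads and stores both
// attachments so the results of the earlier pass are preserved.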
void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            vkRT->compatibleRenderPassHandle();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    if (rpHandle.isValid()) {
        fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }
    if (!fCurrentRenderPass) {
        return;
    }

    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, vkRT->getFramebuffer(), fCurrentRenderPass);
    }

    // We use the same fBounds as the whole GrVkOpsRenderPass since we have no way of tracking the
    // bounds in GrOpsTask for parts before and after inline uploads separately.
    if (!fGpu->beginRenderPass(fCurrentRenderPass, &vkClearColor, vkRT, fOrigin, fBounds,
                               SkToBool(fCurrentSecondaryCommandBuffer))) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
    }
}

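// Texture uploads cannot be recorded while a render pass is active, so we end
// the current pass, perform the upload, and then begin a new pass that loads
// the attachment contents.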
void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the upload texture's
    // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onEnd() {
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}

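// Finds or creates a pipeline state compatible with the current render pass and
// binds it, along with the dynamic viewport, scissor, and blend-constant state.
// The draw bounds are intersected with fBounds to form fCurrentPipelineBounds,
// which subsequent scissor rects are clipped against.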
bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();

    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, fRenderTarget, programInfo, currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(fGpu, currentCB, fRenderTarget, fOrigin,
                                                 fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, fRenderTarget);
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}

void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkIRect combinedScissorRect;
    if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
        combinedScissorRect = SkIRect::MakeEmpty();
    }
    GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(), fRenderTarget,
                                             fOrigin, combinedScissorRect);
}

#ifdef SK_DEBUG
void check_sampled_texture(GrTexture* tex, GrRenderTarget* rt, GrVkGpu* gpu) {
    SkASSERT(!tex->isProtected() || (rt->isProtected() && gpu->protectedContext()));
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(tex);
    SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
#endif

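// Binds all textures used by the primitive processor and pipeline. In debug
// builds we first verify that every sampled texture was already transitioned to
// VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL (set() is responsible for that).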
bool GrVkOpsRenderPass::onBindTextures(const GrPrimitiveProcessor& primProc,
                                       const GrSurfaceProxy* const primProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    SkASSERT(fCurrentPipelineState);
    for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
        check_sampled_texture(primProcTextures[i]->peekTexture(), fRenderTarget, fGpu);
    }
    for (auto entry : GrFragmentProcessor::PipelineTextureSamplerRange(pipeline)) {
        const GrFragmentProcessor::TextureSampler& sampler = entry.first;
        check_sampled_texture(sampler.peekTexture(), fRenderTarget, fGpu);
    }
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, fRenderTarget, fGpu);
    }
#endif
    return fCurrentPipelineState->setAndBindTextures(fGpu, primProc, pipeline, primProcTextures,
                                                     this->currentCommandBuffer());
}

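// Binds the index, instance, and vertex buffers for the upcoming draws. Any of
// the three may be null, in which case that binding is skipped.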
void GrVkOpsRenderPass::onBindBuffers(const GrBuffer* indexBuffer, const GrBuffer* instanceBuffer,
                                      const GrBuffer* vertexBuffer,
                                      GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    SkASSERT(!fGpu->caps()->usePrimitiveRestart());  // Ignore the primRestart parameter.

    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    SkASSERT(currCmdBuf);

    // There is no need to put any memory barriers here to make sure host writes have finished.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;
    if (auto* vkVertexBuffer = static_cast<const GrVkVertexBuffer*>(vertexBuffer)) {
        SkASSERT(!vkVertexBuffer->isCpuBuffer());
        SkASSERT(!vkVertexBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, vkVertexBuffer);
    }
    if (auto* vkInstanceBuffer = static_cast<const GrVkVertexBuffer*>(instanceBuffer)) {
        SkASSERT(!vkInstanceBuffer->isCpuBuffer());
        SkASSERT(!vkInstanceBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, vkInstanceBuffer);
    }
    if (auto* vkIndexBuffer = static_cast<const GrVkIndexBuffer*>(indexBuffer)) {
        SkASSERT(!vkIndexBuffer->isCpuBuffer());
        SkASSERT(!vkIndexBuffer->isMapped());
        currCmdBuf->bindIndexBuffer(fGpu, vkIndexBuffer);
    }
}

void GrVkOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance, int vertexCount,
                                        int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
                                               int baseInstance, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
                                              baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

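// Lets a client SkDrawable record its own Vulkan commands into a secondary
// command buffer. If we are not already recording into one, we restart the
// render pass with a secondary command buffer, since the drawable requires it.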
void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(fRenderTarget);

    GrVkImage* targetImage = target->msaaImage() ? target->msaaImage() : target;

    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

    if (!fCurrentSecondaryCommandBuffer) {
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass.
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = targetImage->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fImage = targetImage->image();
#else
    vkInfo.fImage = VK_NULL_HANDLE;
#endif  // SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));
}