/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

8 #include "src/gpu/vk/GrVkUtil.h"
9
10 #include "include/gpu/GrDirectContext.h"
11 #include "src/gpu/GrDataUtils.h"
12 #include "src/gpu/GrDirectContextPriv.h"
13 #include "src/gpu/vk/GrVkGpu.h"
14 #include "src/sksl/SkSLCompiler.h"
GrVkFormatIsSupported(VkFormat format)16 bool GrVkFormatIsSupported(VkFormat format) {
17 switch (format) {
18 case VK_FORMAT_R8G8B8A8_UNORM:
19 case VK_FORMAT_B8G8R8A8_UNORM:
20 case VK_FORMAT_R8G8B8A8_SRGB:
21 case VK_FORMAT_R8G8B8_UNORM:
22 case VK_FORMAT_R8G8_UNORM:
23 case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
24 case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
25 case VK_FORMAT_R5G6B5_UNORM_PACK16:
26 case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
27 case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
28 case VK_FORMAT_R8_UNORM:
29 case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
30 case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
31 case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
32 case VK_FORMAT_R16G16B16A16_SFLOAT:
33 case VK_FORMAT_R16_SFLOAT:
34 case VK_FORMAT_R16_UNORM:
35 case VK_FORMAT_R16G16_UNORM:
36 case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
37 case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
38 case VK_FORMAT_R16G16B16A16_UNORM:
39 case VK_FORMAT_R16G16_SFLOAT:
40 case VK_FORMAT_S8_UINT:
41 case VK_FORMAT_D24_UNORM_S8_UINT:
42 case VK_FORMAT_D32_SFLOAT_S8_UINT:
43 return true;
44 default:
45 return false;
46 }
47 }
GrVkFormatNeedsYcbcrSampler(VkFormat format)49 bool GrVkFormatNeedsYcbcrSampler(VkFormat format) {
50 return format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM ||
51 format == VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
52 }
GrSampleCountToVkSampleCount(uint32_t samples,VkSampleCountFlagBits * vkSamples)54 bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples) {
55 SkASSERT(samples >= 1);
56 switch (samples) {
57 case 1:
58 *vkSamples = VK_SAMPLE_COUNT_1_BIT;
59 return true;
60 case 2:
61 *vkSamples = VK_SAMPLE_COUNT_2_BIT;
62 return true;
63 case 4:
64 *vkSamples = VK_SAMPLE_COUNT_4_BIT;
65 return true;
66 case 8:
67 *vkSamples = VK_SAMPLE_COUNT_8_BIT;
68 return true;
69 case 16:
70 *vkSamples = VK_SAMPLE_COUNT_16_BIT;
71 return true;
72 default:
73 return false;
74 }
75 }
vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage)77 SkSL::Program::Kind vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage) {
78 if (VK_SHADER_STAGE_VERTEX_BIT == stage) {
79 return SkSL::Program::kVertex_Kind;
80 }
81 if (VK_SHADER_STAGE_GEOMETRY_BIT == stage) {
82 return SkSL::Program::kGeometry_Kind;
83 }
84 SkASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == stage);
85 return SkSL::Program::kFragment_Kind;
86 }
GrCompileVkShaderModule(GrVkGpu * gpu,const SkSL::String & shaderString,VkShaderStageFlagBits stage,VkShaderModule * shaderModule,VkPipelineShaderStageCreateInfo * stageInfo,const SkSL::Program::Settings & settings,SkSL::String * outSPIRV,SkSL::Program::Inputs * outInputs)88 bool GrCompileVkShaderModule(GrVkGpu* gpu,
89 const SkSL::String& shaderString,
90 VkShaderStageFlagBits stage,
91 VkShaderModule* shaderModule,
92 VkPipelineShaderStageCreateInfo* stageInfo,
93 const SkSL::Program::Settings& settings,
94 SkSL::String* outSPIRV,
95 SkSL::Program::Inputs* outInputs) {
96 auto errorHandler = gpu->getContext()->priv().getShaderErrorHandler();
97 std::unique_ptr<SkSL::Program> program = gpu->shaderCompiler()->convertProgram(
98 vk_shader_stage_to_skiasl_kind(stage), shaderString, settings);
99 if (!program) {
100 errorHandler->compileError(shaderString.c_str(),
101 gpu->shaderCompiler()->errorText().c_str());
102 return false;
103 }
104 *outInputs = program->fInputs;
105 if (!gpu->shaderCompiler()->toSPIRV(*program, outSPIRV)) {
106 errorHandler->compileError(shaderString.c_str(),
107 gpu->shaderCompiler()->errorText().c_str());
108 return false;
109 }
110
111 return GrInstallVkShaderModule(gpu, *outSPIRV, stage, shaderModule, stageInfo);
112 }
GrInstallVkShaderModule(GrVkGpu * gpu,const SkSL::String & spirv,VkShaderStageFlagBits stage,VkShaderModule * shaderModule,VkPipelineShaderStageCreateInfo * stageInfo)114 bool GrInstallVkShaderModule(GrVkGpu* gpu,
115 const SkSL::String& spirv,
116 VkShaderStageFlagBits stage,
117 VkShaderModule* shaderModule,
118 VkPipelineShaderStageCreateInfo* stageInfo) {
119 VkShaderModuleCreateInfo moduleCreateInfo;
120 memset(&moduleCreateInfo, 0, sizeof(VkShaderModuleCreateInfo));
121 moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
122 moduleCreateInfo.pNext = nullptr;
123 moduleCreateInfo.flags = 0;
124 moduleCreateInfo.codeSize = spirv.size();
125 moduleCreateInfo.pCode = (const uint32_t*)spirv.c_str();
126
127 VkResult err;
128 GR_VK_CALL_RESULT(gpu, err, CreateShaderModule(gpu->device(), &moduleCreateInfo, nullptr,
129 shaderModule));
130 if (err) {
131 return false;
132 }
133
134 memset(stageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
135 stageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
136 stageInfo->pNext = nullptr;
137 stageInfo->flags = 0;
138 stageInfo->stage = stage;
139 stageInfo->module = *shaderModule;
140 stageInfo->pName = "main";
141 stageInfo->pSpecializationInfo = nullptr;
142
143 return true;
144 }
GrVkFormatIsCompressed(VkFormat vkFormat)146 bool GrVkFormatIsCompressed(VkFormat vkFormat) {
147 switch (vkFormat) {
148 case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
149 case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
150 case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
151 return true;
152 default:
153 return false;
154 }
155 SkUNREACHABLE;
156 }