1 // Copyright 2018 The Dawn Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "dawn_native/vulkan/TextureVk.h"
16 
17 #include "common/Assert.h"
18 #include "common/Math.h"
19 #include "dawn_native/DynamicUploader.h"
20 #include "dawn_native/EnumMaskIterator.h"
21 #include "dawn_native/Error.h"
22 #include "dawn_native/VulkanBackend.h"
23 #include "dawn_native/vulkan/AdapterVk.h"
24 #include "dawn_native/vulkan/BufferVk.h"
25 #include "dawn_native/vulkan/CommandRecordingContext.h"
26 #include "dawn_native/vulkan/DeviceVk.h"
27 #include "dawn_native/vulkan/FencedDeleter.h"
28 #include "dawn_native/vulkan/ResourceHeapVk.h"
29 #include "dawn_native/vulkan/StagingBufferVk.h"
30 #include "dawn_native/vulkan/UtilsVulkan.h"
31 #include "dawn_native/vulkan/VulkanError.h"
32 
33 namespace dawn_native { namespace vulkan {
34 
35     namespace {
        // Converts a Dawn texture dimension to a Vulkan image view type.
        // Contrary to image types, image view types include arrayness and cubemapness
VulkanImageViewType(wgpu::TextureViewDimension dimension)38         VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
39             switch (dimension) {
40                 case wgpu::TextureViewDimension::e2D:
41                     return VK_IMAGE_VIEW_TYPE_2D;
42                 case wgpu::TextureViewDimension::e2DArray:
43                     return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
44                 case wgpu::TextureViewDimension::Cube:
45                     return VK_IMAGE_VIEW_TYPE_CUBE;
46                 case wgpu::TextureViewDimension::CubeArray:
47                     return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
48 
49                 case wgpu::TextureViewDimension::e1D:
50                 case wgpu::TextureViewDimension::e3D:
51                 case wgpu::TextureViewDimension::Undefined:
52                     UNREACHABLE();
53             }
54         }
55 
56         // Computes which vulkan access type could be required for the given Dawn usage.
57         // TODO(cwallez@chromium.org): We shouldn't need any access usages for srcAccessMask when
58         // the previous usage is readonly because an execution dependency is sufficient.
VulkanAccessFlags(wgpu::TextureUsage usage,const Format & format)59         VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
60             VkAccessFlags flags = 0;
61 
62             if (usage & wgpu::TextureUsage::CopySrc) {
63                 flags |= VK_ACCESS_TRANSFER_READ_BIT;
64             }
65             if (usage & wgpu::TextureUsage::CopyDst) {
66                 flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
67             }
68             if (usage & wgpu::TextureUsage::Sampled) {
69                 flags |= VK_ACCESS_SHADER_READ_BIT;
70             }
71             if (usage & wgpu::TextureUsage::Storage) {
72                 flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
73             }
74             if (usage & wgpu::TextureUsage::RenderAttachment) {
75                 if (format.HasDepthOrStencil()) {
76                     flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
77                              VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
78                 } else {
79                     flags |=
80                         VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
81                 }
82             }
83             if (usage & kPresentTextureUsage) {
84                 // The present usage is only used internally by the swapchain and is never used in
85                 // combination with other usages.
86                 ASSERT(usage == kPresentTextureUsage);
87                 // The Vulkan spec has the following note:
88                 //
89                 //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
90                 //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
91                 //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
92                 //   automatic visibility operations). To achieve this, the dstAccessMask member of
93                 //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
94                 //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
95                 //
96                 // So on the transition to Present we don't need an access flag. The other
97                 // direction doesn't matter because swapchain textures always start a new frame
98                 // as uninitialized.
99                 flags |= 0;
100             }
101 
102             return flags;
103         }
104 
        // Chooses which Vulkan image layout should be used for the given Dawn usage.
        VkImageLayout VulkanImageLayout(wgpu::TextureUsage usage, const Format& format) {
            // No usage: the previous contents don't need to be preserved or made visible.
            if (usage == wgpu::TextureUsage::None) {
                return VK_IMAGE_LAYOUT_UNDEFINED;
            }

            // Only the GENERAL layout can satisfy several usages at the same time.
            if (!wgpu::HasZeroOrOneBits(usage)) {
                return VK_IMAGE_LAYOUT_GENERAL;
            }

            // Usage has a single bit so we can switch on its value directly.
            switch (usage) {
                case wgpu::TextureUsage::CopyDst:
                    return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
                case wgpu::TextureUsage::Sampled:
                    return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
                // Vulkan texture copy functions require the image to be in _one_ known layout.
                // Depending on whether parts of the texture have been transitioned to only
                // CopySrc or a combination with something else, the texture could be in a
                // combination of GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we
                // make CopySrc use GENERAL.
                case wgpu::TextureUsage::CopySrc:
                // Read-only and write-only storage textures must use general layout because load
                // and store operations on storage images can only be done on the images in
                // VK_IMAGE_LAYOUT_GENERAL layout.
                case wgpu::TextureUsage::Storage:
                case kReadonlyStorageTexture:
                    return VK_IMAGE_LAYOUT_GENERAL;
                case wgpu::TextureUsage::RenderAttachment:
                    if (format.HasDepthOrStencil()) {
                        return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
                    } else {
                        return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
                    }
                case kPresentTextureUsage:
                    return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

                case wgpu::TextureUsage::None:
                    // Already handled by the early return above.
                    UNREACHABLE();
            }
        }
146 
147         // Computes which Vulkan pipeline stage can access a texture in the given Dawn usage
VulkanPipelineStage(wgpu::TextureUsage usage,const Format & format)148         VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
149             VkPipelineStageFlags flags = 0;
150 
151             if (usage == wgpu::TextureUsage::None) {
152                 // This only happens when a texture is initially created (and for srcAccessMask) in
153                 // which case there is no need to wait on anything to stop accessing this texture.
154                 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
155             }
156             if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
157                 flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
158             }
159             if (usage & (wgpu::TextureUsage::Sampled | kReadonlyStorageTexture)) {
160                 // TODO(cwallez@chromium.org): Only transition to the usage we care about to avoid
161                 // introducing FS -> VS dependencies that would prevent parallelization on tiler
162                 // GPUs
163                 flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
164                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
165                          VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
166             }
167             if (usage & wgpu::TextureUsage::Storage) {
168                 flags |=
169                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
170             }
171             if (usage & wgpu::TextureUsage::RenderAttachment) {
172                 if (format.HasDepthOrStencil()) {
173                     flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
174                              VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
175                     // TODO(cwallez@chromium.org): This is missing the stage where the depth and
176                     // stencil values are written, but it isn't clear which one it is.
177                 } else {
178                     flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
179                 }
180             }
181             if (usage & kPresentTextureUsage) {
182                 // The present usage is only used internally by the swapchain and is never used in
183                 // combination with other usages.
184                 ASSERT(usage == kPresentTextureUsage);
185                 // The Vulkan spec has the following note:
186                 //
187                 //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
188                 //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
189                 //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
190                 //   automatic visibility operations). To achieve this, the dstAccessMask member of
191                 //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
192                 //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
193                 //
194                 // So on the transition to Present we use the "bottom of pipe" stage. The other
195                 // direction doesn't matter because swapchain textures always start a new frame
196                 // as uninitialized.
197                 flags |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
198             }
199 
200             // A zero value isn't a valid pipeline stage mask
201             ASSERT(flags != 0);
202             return flags;
203         }
204 
BuildMemoryBarrier(const Format & format,const VkImage & image,wgpu::TextureUsage lastUsage,wgpu::TextureUsage usage,const SubresourceRange & range)205         VkImageMemoryBarrier BuildMemoryBarrier(const Format& format,
206                                                 const VkImage& image,
207                                                 wgpu::TextureUsage lastUsage,
208                                                 wgpu::TextureUsage usage,
209                                                 const SubresourceRange& range) {
210             VkImageMemoryBarrier barrier;
211             barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
212             barrier.pNext = nullptr;
213             barrier.srcAccessMask = VulkanAccessFlags(lastUsage, format);
214             barrier.dstAccessMask = VulkanAccessFlags(usage, format);
215             barrier.oldLayout = VulkanImageLayout(lastUsage, format);
216             barrier.newLayout = VulkanImageLayout(usage, format);
217             barrier.image = image;
218             barrier.subresourceRange.aspectMask = VulkanAspectMask(format.aspects);
219             barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
220             barrier.subresourceRange.levelCount = range.levelCount;
221             barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
222             barrier.subresourceRange.layerCount = range.layerCount;
223 
224             barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
225             barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
226             return barrier;
227         }
228 
FillVulkanCreateInfoSizesAndType(const Texture & texture,VkImageCreateInfo * info)229         void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
230             const Extent3D& size = texture.GetSize();
231 
232             info->mipLevels = texture.GetNumMipLevels();
233             info->samples = VulkanSampleCount(texture.GetSampleCount());
234 
235             // Fill in the image type, and paper over differences in how the array layer count is
236             // specified between WebGPU and Vulkan.
237             switch (texture.GetDimension()) {
238                 case wgpu::TextureDimension::e2D:
239                     info->imageType = VK_IMAGE_TYPE_2D;
240                     info->extent = {size.width, size.height, 1};
241                     info->arrayLayers = size.depth;
242                     break;
243 
244                 case wgpu::TextureDimension::e1D:
245                 case wgpu::TextureDimension::e3D:
246                     UNREACHABLE();
247             }
248         }
249 
250     }  // namespace
251 
    // Converts Dawn texture format to Vulkan formats. The device is needed because the mapping
    // of Depth24PlusStencil8 depends on the VulkanUseD32S8 toggle.
    VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
        switch (format) {
            case wgpu::TextureFormat::R8Unorm:
                return VK_FORMAT_R8_UNORM;
            case wgpu::TextureFormat::R8Snorm:
                return VK_FORMAT_R8_SNORM;
            case wgpu::TextureFormat::R8Uint:
                return VK_FORMAT_R8_UINT;
            case wgpu::TextureFormat::R8Sint:
                return VK_FORMAT_R8_SINT;

            case wgpu::TextureFormat::R16Uint:
                return VK_FORMAT_R16_UINT;
            case wgpu::TextureFormat::R16Sint:
                return VK_FORMAT_R16_SINT;
            case wgpu::TextureFormat::R16Float:
                return VK_FORMAT_R16_SFLOAT;
            case wgpu::TextureFormat::RG8Unorm:
                return VK_FORMAT_R8G8_UNORM;
            case wgpu::TextureFormat::RG8Snorm:
                return VK_FORMAT_R8G8_SNORM;
            case wgpu::TextureFormat::RG8Uint:
                return VK_FORMAT_R8G8_UINT;
            case wgpu::TextureFormat::RG8Sint:
                return VK_FORMAT_R8G8_SINT;

            case wgpu::TextureFormat::R32Uint:
                return VK_FORMAT_R32_UINT;
            case wgpu::TextureFormat::R32Sint:
                return VK_FORMAT_R32_SINT;
            case wgpu::TextureFormat::R32Float:
                return VK_FORMAT_R32_SFLOAT;
            case wgpu::TextureFormat::RG16Uint:
                return VK_FORMAT_R16G16_UINT;
            case wgpu::TextureFormat::RG16Sint:
                return VK_FORMAT_R16G16_SINT;
            case wgpu::TextureFormat::RG16Float:
                return VK_FORMAT_R16G16_SFLOAT;
            case wgpu::TextureFormat::RGBA8Unorm:
                return VK_FORMAT_R8G8B8A8_UNORM;
            case wgpu::TextureFormat::RGBA8UnormSrgb:
                return VK_FORMAT_R8G8B8A8_SRGB;
            case wgpu::TextureFormat::RGBA8Snorm:
                return VK_FORMAT_R8G8B8A8_SNORM;
            case wgpu::TextureFormat::RGBA8Uint:
                return VK_FORMAT_R8G8B8A8_UINT;
            case wgpu::TextureFormat::RGBA8Sint:
                return VK_FORMAT_R8G8B8A8_SINT;
            case wgpu::TextureFormat::BGRA8Unorm:
                return VK_FORMAT_B8G8R8A8_UNORM;
            case wgpu::TextureFormat::BGRA8UnormSrgb:
                return VK_FORMAT_B8G8R8A8_SRGB;
            case wgpu::TextureFormat::RGB10A2Unorm:
                return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
            case wgpu::TextureFormat::RG11B10Ufloat:
                return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
            case wgpu::TextureFormat::RGB9E5Ufloat:
                return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;

            case wgpu::TextureFormat::RG32Uint:
                return VK_FORMAT_R32G32_UINT;
            case wgpu::TextureFormat::RG32Sint:
                return VK_FORMAT_R32G32_SINT;
            case wgpu::TextureFormat::RG32Float:
                return VK_FORMAT_R32G32_SFLOAT;
            case wgpu::TextureFormat::RGBA16Uint:
                return VK_FORMAT_R16G16B16A16_UINT;
            case wgpu::TextureFormat::RGBA16Sint:
                return VK_FORMAT_R16G16B16A16_SINT;
            case wgpu::TextureFormat::RGBA16Float:
                return VK_FORMAT_R16G16B16A16_SFLOAT;

            case wgpu::TextureFormat::RGBA32Uint:
                return VK_FORMAT_R32G32B32A32_UINT;
            case wgpu::TextureFormat::RGBA32Sint:
                return VK_FORMAT_R32G32B32A32_SINT;
            case wgpu::TextureFormat::RGBA32Float:
                return VK_FORMAT_R32G32B32A32_SFLOAT;

            case wgpu::TextureFormat::Depth32Float:
                return VK_FORMAT_D32_SFLOAT;
            case wgpu::TextureFormat::Depth24Plus:
                // Depth24Plus only guarantees at least 24 bits of depth, so implementing it with
                // a 32-bit float depth format is acceptable.
                return VK_FORMAT_D32_SFLOAT;
            case wgpu::TextureFormat::Depth24PlusStencil8:
                // Depth24PlusStencil8 maps to either of these two formats because Vulkan only
                // requires that one of the two be present. The VulkanUseD32S8 toggle combines the
                // wish of the environment (defaulting to using D32S8) with availability
                // information, so we know the chosen format is available.
                if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
                    return VK_FORMAT_D32_SFLOAT_S8_UINT;
                } else {
                    return VK_FORMAT_D24_UNORM_S8_UINT;
                }

            case wgpu::TextureFormat::BC1RGBAUnorm:
                return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
                return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
            case wgpu::TextureFormat::BC2RGBAUnorm:
                return VK_FORMAT_BC2_UNORM_BLOCK;
            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
                return VK_FORMAT_BC2_SRGB_BLOCK;
            case wgpu::TextureFormat::BC3RGBAUnorm:
                return VK_FORMAT_BC3_UNORM_BLOCK;
            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
                return VK_FORMAT_BC3_SRGB_BLOCK;
            case wgpu::TextureFormat::BC4RSnorm:
                return VK_FORMAT_BC4_SNORM_BLOCK;
            case wgpu::TextureFormat::BC4RUnorm:
                return VK_FORMAT_BC4_UNORM_BLOCK;
            case wgpu::TextureFormat::BC5RGSnorm:
                return VK_FORMAT_BC5_SNORM_BLOCK;
            case wgpu::TextureFormat::BC5RGUnorm:
                return VK_FORMAT_BC5_UNORM_BLOCK;
            case wgpu::TextureFormat::BC6HRGBFloat:
                return VK_FORMAT_BC6H_SFLOAT_BLOCK;
            case wgpu::TextureFormat::BC6HRGBUfloat:
                return VK_FORMAT_BC6H_UFLOAT_BLOCK;
            case wgpu::TextureFormat::BC7RGBAUnorm:
                return VK_FORMAT_BC7_UNORM_BLOCK;
            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
                return VK_FORMAT_BC7_SRGB_BLOCK;

            case wgpu::TextureFormat::Undefined:
                UNREACHABLE();
        }
    }
380 
381     // Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
382     // between color and depth attachment usages.
VulkanImageUsage(wgpu::TextureUsage usage,const Format & format)383     VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
384         VkImageUsageFlags flags = 0;
385 
386         if (usage & wgpu::TextureUsage::CopySrc) {
387             flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
388         }
389         if (usage & wgpu::TextureUsage::CopyDst) {
390             flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
391         }
392         if (usage & wgpu::TextureUsage::Sampled) {
393             flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
394         }
395         if (usage & wgpu::TextureUsage::Storage) {
396             flags |= VK_IMAGE_USAGE_STORAGE_BIT;
397         }
398         if (usage & wgpu::TextureUsage::RenderAttachment) {
399             if (format.HasDepthOrStencil()) {
400                 flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
401             } else {
402                 flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
403             }
404         }
405 
406         return flags;
407     }
408 
VulkanSampleCount(uint32_t sampleCount)409     VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
410         switch (sampleCount) {
411             case 1:
412                 return VK_SAMPLE_COUNT_1_BIT;
413             case 4:
414                 return VK_SAMPLE_COUNT_4_BIT;
415             default:
416                 UNREACHABLE();
417         }
418     }
419 
ValidateVulkanImageCanBeWrapped(const DeviceBase *,const TextureDescriptor * descriptor)420     MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*,
421                                                const TextureDescriptor* descriptor) {
422         if (descriptor->dimension != wgpu::TextureDimension::e2D) {
423             return DAWN_VALIDATION_ERROR("Texture must be 2D");
424         }
425 
426         if (descriptor->mipLevelCount != 1) {
427             return DAWN_VALIDATION_ERROR("Mip level count must be 1");
428         }
429 
430         if (descriptor->size.depth != 1) {
431             return DAWN_VALIDATION_ERROR("Array layer count must be 1");
432         }
433 
434         if (descriptor->sampleCount != 1) {
435             return DAWN_VALIDATION_ERROR("Sample count must be 1");
436         }
437 
438         return {};
439     }
440 
IsSampleCountSupported(const dawn_native::vulkan::Device * device,const VkImageCreateInfo & imageCreateInfo)441     bool IsSampleCountSupported(const dawn_native::vulkan::Device* device,
442                                 const VkImageCreateInfo& imageCreateInfo) {
443         ASSERT(device);
444 
445         VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
446         VkImageFormatProperties properties;
447         if (device->fn.GetPhysicalDeviceImageFormatProperties(
448                 physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
449                 imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
450                 &properties) != VK_SUCCESS) {
451             UNREACHABLE();
452         }
453 
454         return properties.sampleCounts & imageCreateInfo.samples;
455     }
456 
457     // static
Create(Device * device,const TextureDescriptor * descriptor,VkImageUsageFlags extraUsages)458     ResultOrError<Ref<Texture>> Texture::Create(Device* device,
459                                                 const TextureDescriptor* descriptor,
460                                                 VkImageUsageFlags extraUsages) {
461         Ref<Texture> texture =
462             AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
463         DAWN_TRY(texture->InitializeAsInternalTexture(extraUsages));
464         return std::move(texture);
465     }
466 
467     // static
CreateFromExternal(Device * device,const ExternalImageDescriptorVk * descriptor,const TextureDescriptor * textureDescriptor,external_memory::Service * externalMemoryService)468     ResultOrError<Texture*> Texture::CreateFromExternal(
469         Device* device,
470         const ExternalImageDescriptorVk* descriptor,
471         const TextureDescriptor* textureDescriptor,
472         external_memory::Service* externalMemoryService) {
473         Ref<Texture> texture =
474             AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
475         DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
476         return texture.Detach();
477     }
478 
479     // static
CreateForSwapChain(Device * device,const TextureDescriptor * descriptor,VkImage nativeImage)480     Ref<Texture> Texture::CreateForSwapChain(Device* device,
481                                              const TextureDescriptor* descriptor,
482                                              VkImage nativeImage) {
483         Ref<Texture> texture =
484             AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
485         texture->InitializeForSwapChain(nativeImage);
486         return texture;
487     }
488 
    // Creates the VkImage for this texture, allocates and binds its device memory, and
    // optionally clears it for testing. |extraUsages| lets internal callers add Vulkan usage
    // bits beyond those implied by the WebGPU usage.
    MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
        Device* device = ToBackend(GetDevice());

        // Create the Vulkan image "container". We don't need to check that the format supports the
        // combination of sample, usage etc. because validation should have been done in the Dawn
        // frontend already based on the minimum supported formats in the Vulkan spec
        VkImageCreateInfo createInfo = {};
        FillVulkanCreateInfoSizesAndType(*this, &createInfo);

        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;
        createInfo.format = VulkanImageFormat(device, GetFormat().format);
        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
        createInfo.usage = VulkanImageUsage(GetUsage(), GetFormat()) | extraUsages;
        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        createInfo.queueFamilyIndexCount = 0;
        createInfo.pQueueFamilyIndices = nullptr;
        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

        ASSERT(IsSampleCountSupported(device, createInfo));

        // Square textures with at least 6 layers are marked cube-compatible so that cube (array)
        // views can be created from them.
        if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
            createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
        }

        // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally because the Vulkan images
        // that are used in vkCmdClearColorImage() must have been created with this flag, which is
        // also required for the implementation of robust resource initialization.
        createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

        DAWN_TRY(CheckVkSuccess(
            device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
            "CreateImage"));

        // Create the image memory and associate it with the container
        VkMemoryRequirements requirements;
        device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);

        DAWN_TRY_ASSIGN(mMemoryAllocation, device->AllocateMemory(requirements, false));

        DAWN_TRY(CheckVkSuccess(
            device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
                                       ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
                                       mMemoryAllocation.GetOffset()),
            "BindImageMemory"));

        // For testing, clear new textures to a non-zero value so that reads of uninitialized
        // data are noticeable.
        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
            DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
                                  GetAllSubresources(), TextureBase::ClearValue::NonZero));
        }

        return {};
    }
543 
    // Internally managed, but imported from external handle. Creates the VkImage through the
    // external memory service; the memory itself is bound later in BindExternalMemory().
    MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
                                               external_memory::Service* externalMemoryService) {
        // Check up-front that the service can create an image with this format/usage combination.
        VkFormat format = VulkanImageFormat(ToBackend(GetDevice()), GetFormat().format);
        VkImageUsageFlags usage = VulkanImageUsage(GetUsage(), GetFormat());
        if (!externalMemoryService->SupportsCreateImage(descriptor, format, usage)) {
            return DAWN_VALIDATION_ERROR("Creating an image from external memory is not supported");
        }

        // Record the layouts the producer released the image with; the acquire transition is
        // applied later while the texture is in the PendingAcquire state.
        mExternalState = ExternalState::PendingAcquire;

        mPendingAcquireOldLayout = descriptor->releasedOldLayout;
        mPendingAcquireNewLayout = descriptor->releasedNewLayout;

        VkImageCreateInfo baseCreateInfo = {};
        FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);

        baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        baseCreateInfo.pNext = nullptr;
        baseCreateInfo.format = format;
        baseCreateInfo.usage = usage;
        baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        baseCreateInfo.queueFamilyIndexCount = 0;
        baseCreateInfo.pQueueFamilyIndices = nullptr;

        // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally because the Vulkan images
        // that are used in vkCmdClearColorImage() must have been created with this flag, which is
        // also required for the implementation of robust resource initialization.
        baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

        DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
        return {};
    }
577 
    void Texture::InitializeForSwapChain(VkImage nativeImage) {
        // Swapchain images are created elsewhere; this texture only wraps the provided handle
        // and allocates or binds no memory of its own.
        mHandle = nativeImage;
    }
581 
    // Binds the externally-allocated memory to this texture's VkImage and, on success, takes
    // ownership of the external allocation and semaphores.
    MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
                                           VkSemaphore signalSemaphore,
                                           VkDeviceMemory externalMemoryAllocation,
                                           std::vector<VkSemaphore> waitSemaphores) {
        Device* device = ToBackend(GetDevice());
        DAWN_TRY(CheckVkSuccess(
            device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
            "BindImageMemory (external)"));

        // Don't clear imported texture if already initialized
        if (descriptor->isInitialized) {
            SetIsSubresourceContentInitialized(true, GetAllSubresources());
        }

        // Success, acquire all the external objects.
        mExternalAllocation = externalMemoryAllocation;
        mSignalSemaphore = signalSemaphore;
        mWaitRequirements = std::move(waitSemaphores);
        return {};
    }
602 
    // Releases an imported texture back to its external owner: records a queue-family
    // release barrier to VK_QUEUE_FAMILY_EXTERNAL_KHR (optionally transitioning to
    // |desiredLayout|), submits pending commands signaling |*signalSemaphore|, writes
    // out the old/new layouts used for the release so the importer can pair the
    // acquire barrier, then destroys the texture so it cannot be used again.
    // Fails if the texture was already released or is not backed by an external
    // allocation.
    MaybeError Texture::ExportExternalTexture(VkImageLayout desiredLayout,
                                              VkSemaphore* signalSemaphore,
                                              VkImageLayout* releasedOldLayout,
                                              VkImageLayout* releasedNewLayout) {
        Device* device = ToBackend(GetDevice());

        if (mExternalState == ExternalState::Released) {
            return DAWN_VALIDATION_ERROR("Can't export signal semaphore from signaled texture");
        }

        if (mExternalAllocation == VK_NULL_HANDLE) {
            return DAWN_VALIDATION_ERROR(
                "Can't export signal semaphore from destroyed / non-external texture");
        }

        ASSERT(mSignalSemaphore != VK_NULL_HANDLE);
        // External textures are restricted to a single subresource, so one barrier
        // covers the whole image.
        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);

        // Release the texture
        mExternalState = ExternalState::Released;

        wgpu::TextureUsage usage = mSubresourceLastUsages[0];

        VkImageMemoryBarrier barrier;
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.pNext = nullptr;
        barrier.image = GetHandle();
        barrier.subresourceRange.aspectMask = VulkanAspectMask(GetFormat().aspects);
        barrier.subresourceRange.baseMipLevel = 0;
        barrier.subresourceRange.levelCount = 1;
        barrier.subresourceRange.baseArrayLayer = 0;
        barrier.subresourceRange.layerCount = 1;

        barrier.srcAccessMask = VulkanAccessFlags(usage, GetFormat());
        barrier.dstAccessMask = 0;  // The barrier must be paired with another barrier that will
                                    // specify the dst access mask on the importing queue.

        barrier.oldLayout = VulkanImageLayout(usage, GetFormat());
        if (desiredLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
            // VK_IMAGE_LAYOUT_UNDEFINED is invalid here. We use it as a
            // special value to indicate no layout transition should be done.
            barrier.newLayout = barrier.oldLayout;
        } else {
            barrier.newLayout = desiredLayout;
        }

        // Queue ownership transfer: release from the graphics queue to the external queue.
        barrier.srcQueueFamilyIndex = device->GetGraphicsQueueFamily();
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;

        VkPipelineStageFlags srcStages = VulkanPipelineStage(usage, GetFormat());
        VkPipelineStageFlags dstStages =
            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;  // We don't know when the importing queue will need
                                                // the texture, so pass
                                                // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT to ensure
                                                // the barrier happens-before any usage in the
                                                // importing queue.

        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
        device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
                                      nullptr, 0, nullptr, 1, &barrier);

        // Queue submit to signal we are done with the texture
        recordingContext->signalSemaphores.push_back(mSignalSemaphore);
        DAWN_TRY(device->SubmitPendingCommands());

        // Write out the layouts and signal semaphore
        *releasedOldLayout = barrier.oldLayout;
        *releasedNewLayout = barrier.newLayout;
        *signalSemaphore = mSignalSemaphore;

        // Ownership of the semaphore handle passed to the caller.
        mSignalSemaphore = VK_NULL_HANDLE;

        // Destroy the texture so it can't be used again
        DestroyInternal();
        return {};
    }
679 
    // Destruction funnels through DestroyInternal so internal/external resources are
    // released the same way as on explicit Destroy().
    Texture::~Texture() {
        DestroyInternal();
    }
683 
    // Releases the Vulkan objects owned by this texture. Deletions go through the
    // FencedDeleter so they only happen once the GPU has finished using the resources.
    void Texture::DestroyImpl() {
        if (GetTextureState() == TextureState::OwnedInternal) {
            Device* device = ToBackend(GetDevice());

            // For textures created from a VkImage, the allocation is kInvalid so the Device knows
            // to skip the deallocation of the (absence of) VkDeviceMemory.
            device->DeallocateMemory(&mMemoryAllocation);

            if (mHandle != VK_NULL_HANDLE) {
                device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
            }

            if (mExternalAllocation != VK_NULL_HANDLE) {
                device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
            }

            mHandle = VK_NULL_HANDLE;
            mExternalAllocation = VK_NULL_HANDLE;
            // If a signal semaphore exists it should be requested before we delete the texture
            ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
        }
    }
706 
    // Returns the backing VkImage (VK_NULL_HANDLE after destruction).
    VkImage Texture::GetHandle() const {
        return mHandle;
    }
710 
    // Converts a wgpu::TextureAspect selection on this texture into the matching
    // VkImageAspectFlags. Depth/Stencil-only selections assert that the format
    // actually has that aspect.
    VkImageAspectFlags Texture::GetVkAspectMask(wgpu::TextureAspect aspect) const {
        // TODO(enga): These masks could be precomputed.
        switch (aspect) {
            case wgpu::TextureAspect::All:
                return VulkanAspectMask(GetFormat().aspects);
            case wgpu::TextureAspect::DepthOnly:
                ASSERT(GetFormat().aspects & Aspect::Depth);
                return VulkanAspectMask(Aspect::Depth);
            case wgpu::TextureAspect::StencilOnly:
                ASSERT(GetFormat().aspects & Aspect::Stencil);
                return VulkanAspectMask(Aspect::Stencil);
        }
    }
724 
    // Patches the barrier(s) generated for the current transition so they also perform
    // the queue-ownership acquire from VK_QUEUE_FAMILY_EXTERNAL_KHR when the texture
    // was imported, honoring the layouts recorded at import time, and makes the
    // recording context wait on the semaphores required by the import.
    void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
                                                  std::vector<VkImageMemoryBarrier>* barriers,
                                                  size_t transitionBarrierStart) {
        // External textures are restricted to a single subresource.
        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);

        // transitionBarrierStart specifies the index where barriers for current transition start in
        // the vector. barriers->size() - transitionBarrierStart is the number of barriers that we
        // have already added into the vector during current transition.
        ASSERT(barriers->size() - transitionBarrierStart <= 1);

        if (mExternalState == ExternalState::PendingAcquire) {
            // If the transition didn't need a barrier of its own, add a no-op barrier
            // we can turn into the acquire barrier below.
            if (barriers->size() == transitionBarrierStart) {
                barriers->push_back(BuildMemoryBarrier(
                    GetFormat(), mHandle, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
                    SubresourceRange::SingleMipAndLayer(0, 0, GetFormat().aspects)));
            }

            VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
            // Transfer texture from external queue to graphics queue
            barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
            barrier->dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();

            // srcAccessMask means nothing when importing. Queue transfers require a barrier on
            // both the importing and exporting queues. The exporting queue should have specified
            // this.
            barrier->srcAccessMask = 0;

            // This should be the first barrier after import.
            ASSERT(barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED);

            // Save the desired layout. We may need to transition through an intermediate
            // |mPendingAcquireLayout| first.
            VkImageLayout desiredLayout = barrier->newLayout;

            bool isInitialized = IsSubresourceContentInitialized(GetAllSubresources());

            // We don't care about the pending old layout if the texture is uninitialized. The
            // driver is free to discard it. Likewise, we don't care about the pending new layout if
            // the texture is uninitialized. We can skip the layout transition.
            if (!isInitialized) {
                barrier->oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
                barrier->newLayout = desiredLayout;
            } else {
                barrier->oldLayout = mPendingAcquireOldLayout;
                barrier->newLayout = mPendingAcquireNewLayout;
            }

            // If these are unequal, we need another barrier to transition the layout.
            if (barrier->newLayout != desiredLayout) {
                VkImageMemoryBarrier layoutBarrier;
                layoutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
                layoutBarrier.pNext = nullptr;
                layoutBarrier.image = GetHandle();
                layoutBarrier.subresourceRange = barrier->subresourceRange;

                // Transition from the acquired new layout to the desired layout.
                layoutBarrier.oldLayout = barrier->newLayout;
                layoutBarrier.newLayout = desiredLayout;

                // We already transitioned these.
                layoutBarrier.srcAccessMask = 0;
                layoutBarrier.dstAccessMask = 0;
                layoutBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
                layoutBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;

                barriers->push_back(layoutBarrier);
            }

            mExternalState = ExternalState::Acquired;
        }

        mLastExternalState = mExternalState;

        // Make this submission wait on all semaphores required by the import.
        recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
                                                mWaitRequirements.begin(), mWaitRequirements.end());
        mWaitRequirements.clear();
    }
802 
CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage,wgpu::TextureUsage usage)803     bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
804         // Reuse the texture directly and avoid encoding barriers when it isn't needed.
805         bool lastReadOnly = IsSubset(lastUsage, kReadOnlyTextureUsages);
806         if (lastReadOnly && lastUsage == usage && mLastExternalState == mExternalState) {
807             return true;
808         }
809         return false;
810     }
811 
    // Convenience wrapper: transitions every subresource of the texture to |usage|.
    void Texture::TransitionFullUsage(CommandRecordingContext* recordingContext,
                                      wgpu::TextureUsage usage) {
        TransitionUsageNow(recordingContext, usage, GetAllSubresources());
    }
816 
    // Appends to |imageBarriers| the barriers needed to transition the texture's
    // subresources to the usages recorded for a pass, updates the tracked
    // per-subresource last usages, and ORs the required pipeline stages into
    // |srcStages|/|dstStages|. Does not record any commands itself.
    void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
                                         const PassTextureUsage& textureUsages,
                                         std::vector<VkImageMemoryBarrier>* imageBarriers,
                                         VkPipelineStageFlags* srcStages,
                                         VkPipelineStageFlags* dstStages) {
        size_t transitionBarrierStart = imageBarriers->size();
        const Format& format = GetFormat();

        wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
        wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;

        uint32_t subresourceCount = GetSubresourceCount();
        ASSERT(textureUsages.subresourceUsages.size() == subresourceCount);
        // This transition assumes it is a 2D texture
        ASSERT(GetDimension() == wgpu::TextureDimension::e2D);

        // If new usages of all subresources are the same and old usages of all subresources are
        // the same too, we can use one barrier to do state transition for all subresources.
        // Note that if the texture has only one mip level and one array slice, it will fall into
        // this category.
        if (textureUsages.sameUsagesAcrossSubresources && mSameLastUsagesAcrossSubresources) {
            if (CanReuseWithoutBarrier(mSubresourceLastUsages[0], textureUsages.usage)) {
                return;
            }

            imageBarriers->push_back(BuildMemoryBarrier(format, mHandle, mSubresourceLastUsages[0],
                                                        textureUsages.usage, GetAllSubresources()));
            allLastUsages = mSubresourceLastUsages[0];
            allUsages = textureUsages.usage;
            for (uint32_t i = 0; i < subresourceCount; ++i) {
                mSubresourceLastUsages[i] = textureUsages.usage;
            }
        } else {
            // Otherwise emit one barrier per (layer, level) that actually needs one.
            for (uint32_t arrayLayer = 0; arrayLayer < GetArrayLayers(); ++arrayLayer) {
                for (uint32_t mipLevel = 0; mipLevel < GetNumMipLevels(); ++mipLevel) {
                    wgpu::TextureUsage lastUsage = wgpu::TextureUsage::None;
                    wgpu::TextureUsage usage = wgpu::TextureUsage::None;

                    // Accumulate usage for all format aspects because we cannot transition
                    // separately.
                    // TODO(enga): Use VK_KHR_separate_depth_stencil_layouts.
                    for (Aspect aspect : IterateEnumMask(GetFormat().aspects)) {
                        uint32_t index = GetSubresourceIndex(mipLevel, arrayLayer, aspect);

                        usage |= textureUsages.subresourceUsages[index];
                        lastUsage |= mSubresourceLastUsages[index];
                    }

                    // Avoid encoding barriers when it isn't needed.
                    if (usage == wgpu::TextureUsage::None) {
                        continue;
                    }

                    if (CanReuseWithoutBarrier(lastUsage, usage)) {
                        continue;
                    }

                    allLastUsages |= lastUsage;
                    allUsages |= usage;

                    for (Aspect aspect : IterateEnumMask(GetFormat().aspects)) {
                        uint32_t index = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
                        mSubresourceLastUsages[index] = usage;
                    }

                    imageBarriers->push_back(
                        BuildMemoryBarrier(format, mHandle, lastUsage, usage,
                                           SubresourceRange::SingleMipAndLayer(
                                               mipLevel, arrayLayer, GetFormat().aspects)));
                }
            }
        }

        // Imported textures additionally need queue-ownership acquire tweaks.
        if (mExternalState != ExternalState::InternalOnly) {
            TweakTransitionForExternalUsage(recordingContext, imageBarriers,
                                            transitionBarrierStart);
        }

        *srcStages |= VulkanPipelineStage(allLastUsages, format);
        *dstStages |= VulkanPipelineStage(allUsages, format);
        mSameLastUsagesAcrossSubresources = textureUsages.sameUsagesAcrossSubresources;
    }
899 
    // Immediately records a pipeline barrier in |recordingContext| transitioning
    // |range| of the texture to |usage|, applying external-import tweaks when the
    // texture was imported. No-op when no barrier is required.
    void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
                                     wgpu::TextureUsage usage,
                                     const SubresourceRange& range) {
        std::vector<VkImageMemoryBarrier> barriers;

        VkPipelineStageFlags srcStages = 0;
        VkPipelineStageFlags dstStages = 0;

        TransitionUsageAndGetResourceBarrier(usage, range, &barriers, &srcStages, &dstStages);

        if (mExternalState != ExternalState::InternalOnly) {
            // Barriers for this transition start at index 0 of the local vector.
            TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
        }

        if (!barriers.empty()) {
            ASSERT(srcStages != 0 && dstStages != 0);
            ToBackend(GetDevice())
                ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
                                        nullptr, 0, nullptr, barriers.size(), barriers.data());
        }
    }
921 
    // Computes the image memory barriers needed to transition |range| to |usage|,
    // appending them to |imageBarriers| and ORing the required pipeline stages into
    // |srcStages|/|dstStages|. Updates the tracked per-subresource last usages but
    // does not record any commands.
    void Texture::TransitionUsageAndGetResourceBarrier(
        wgpu::TextureUsage usage,
        const SubresourceRange& range,
        std::vector<VkImageMemoryBarrier>* imageBarriers,
        VkPipelineStageFlags* srcStages,
        VkPipelineStageFlags* dstStages) {
        ASSERT(imageBarriers != nullptr);

        const Format& format = GetFormat();

        wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;

        // This transition assumes it is a 2D texture
        ASSERT(GetDimension() == wgpu::TextureDimension::e2D);

        // If the usages transitions can cover all subresources, and old usages of all subresources
        // are the same, then we can use one barrier to do state transition for all subresources.
        // Note that if the texture has only one mip level and one array slice, it will fall into
        // this category.
        bool areAllSubresourcesCovered = (range.levelCount == GetNumMipLevels() &&  //
                                          range.layerCount == GetArrayLayers() &&   //
                                          range.aspects == format.aspects);
        if (mSameLastUsagesAcrossSubresources && areAllSubresourcesCovered) {
            ASSERT(range.baseMipLevel == 0 && range.baseArrayLayer == 0);
            if (CanReuseWithoutBarrier(mSubresourceLastUsages[0], usage)) {
                return;
            }
            imageBarriers->push_back(
                BuildMemoryBarrier(format, mHandle, mSubresourceLastUsages[0], usage, range));
            allLastUsages = mSubresourceLastUsages[0];
            for (uint32_t i = 0; i < GetSubresourceCount(); ++i) {
                mSubresourceLastUsages[i] = usage;
            }
        } else {
            // Otherwise emit one barrier per (layer, level) in the range that needs one.
            for (uint32_t layer = range.baseArrayLayer;
                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
                for (uint32_t level = range.baseMipLevel;
                     level < range.baseMipLevel + range.levelCount; ++level) {
                    // Accumulate usage for all format aspects because we cannot transition
                    // separately.
                    // TODO(enga): Use VK_KHR_separate_depth_stencil_layouts.
                    wgpu::TextureUsage lastUsage = wgpu::TextureUsage::None;
                    for (Aspect aspect : IterateEnumMask(format.aspects)) {
                        uint32_t index = GetSubresourceIndex(level, layer, aspect);
                        lastUsage |= mSubresourceLastUsages[index];
                    }

                    if (CanReuseWithoutBarrier(lastUsage, usage)) {
                        continue;
                    }

                    allLastUsages |= lastUsage;

                    for (Aspect aspect : IterateEnumMask(format.aspects)) {
                        uint32_t index = GetSubresourceIndex(level, layer, aspect);
                        mSubresourceLastUsages[index] = usage;
                    }

                    imageBarriers->push_back(BuildMemoryBarrier(
                        format, mHandle, lastUsage, usage,
                        SubresourceRange::SingleMipAndLayer(level, layer, format.aspects)));
                }
            }
        }

        *srcStages |= VulkanPipelineStage(allLastUsages, format);
        *dstStages |= VulkanPipelineStage(usage, format);

        mSameLastUsagesAcrossSubresources = areAllSubresourcesCovered;
    }
992 
    // Clears |range| of the texture to all-zeros or all-ones depending on
    // |clearValue|, using vkCmdClearDepthStencilImage for depth/stencil aspects and
    // vkCmdClearColorImage otherwise. For Zero (lazy) clears, subresources that are
    // already initialized are skipped, and the cleared range is marked initialized
    // afterwards. The caller must not be inside a render pass.
    MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
                                     const SubresourceRange& range,
                                     TextureBase::ClearValue clearValue) {
        Device* device = ToBackend(GetDevice());

        // Pre-compute the clear value in each component type the format might use.
        const bool isZero = clearValue == TextureBase::ClearValue::Zero;
        uint32_t uClearColor = isZero ? 0 : 1;
        int32_t sClearColor = isZero ? 0 : 1;
        float fClearColor = isZero ? 0.f : 1.f;

        // Clear commands require the image to be in TRANSFER_DST_OPTIMAL.
        TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);

        VkImageSubresourceRange imageRange = {};
        imageRange.levelCount = 1;
        imageRange.layerCount = 1;

        for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
             ++level) {
            imageRange.baseMipLevel = level;
            for (uint32_t layer = range.baseArrayLayer;
                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
                // Collect the aspects of this (level, layer) that actually need clearing.
                Aspect aspects = Aspect::None;
                for (Aspect aspect : IterateEnumMask(range.aspects)) {
                    if (clearValue == TextureBase::ClearValue::Zero &&
                        IsSubresourceContentInitialized(
                            SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
                        // Skip lazy clears if already initialized.
                        continue;
                    }
                    aspects |= aspect;
                }

                if (aspects == Aspect::None) {
                    continue;
                }

                imageRange.aspectMask = VulkanAspectMask(aspects);
                imageRange.baseArrayLayer = layer;

                if (aspects & (Aspect::Depth | Aspect::Stencil)) {
                    VkClearDepthStencilValue clearDepthStencilValue[1];
                    clearDepthStencilValue[0].depth = fClearColor;
                    clearDepthStencilValue[0].stencil = uClearColor;
                    device->fn.CmdClearDepthStencilImage(recordingContext->commandBuffer,
                                                         GetHandle(),
                                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                         clearDepthStencilValue, 1, &imageRange);
                } else {
                    ASSERT(aspects == Aspect::Color);
                    // Choose the clear-value union member matching the format's base type.
                    VkClearColorValue clearColorValue;
                    switch (GetFormat().GetAspectInfo(Aspect::Color).baseType) {
                        case wgpu::TextureComponentType::Float:
                            clearColorValue.float32[0] = fClearColor;
                            clearColorValue.float32[1] = fClearColor;
                            clearColorValue.float32[2] = fClearColor;
                            clearColorValue.float32[3] = fClearColor;
                            break;
                        case wgpu::TextureComponentType::Sint:
                            clearColorValue.int32[0] = sClearColor;
                            clearColorValue.int32[1] = sClearColor;
                            clearColorValue.int32[2] = sClearColor;
                            clearColorValue.int32[3] = sClearColor;
                            break;
                        case wgpu::TextureComponentType::Uint:
                            clearColorValue.uint32[0] = uClearColor;
                            clearColorValue.uint32[1] = uClearColor;
                            clearColorValue.uint32[2] = uClearColor;
                            clearColorValue.uint32[3] = uClearColor;
                            break;
                        case wgpu::TextureComponentType::DepthComparison:
                            UNREACHABLE();
                    }
                    device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
                                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                  &clearColorValue, 1, &imageRange);
                }
            }
        }

        if (clearValue == TextureBase::ClearValue::Zero) {
            SetIsSubresourceContentInitialized(true, range);
            device->IncrementLazyClearCountForTesting();
        }
        return {};
    }
1078 
EnsureSubresourceContentInitialized(CommandRecordingContext * recordingContext,const SubresourceRange & range)1079     void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
1080                                                       const SubresourceRange& range) {
1081         if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
1082             return;
1083         }
1084         if (!IsSubresourceContentInitialized(range)) {
1085             // TODO(jiawei.shao@intel.com): initialize textures in BC formats with Buffer-to-Texture
1086             // copies.
1087             if (GetFormat().isCompressed) {
1088                 return;
1089             }
1090 
1091             // If subresource has not been initialized, clear it to black as it could contain dirty
1092             // bits from recycled memory
1093             GetDevice()->ConsumedError(
1094                 ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
1095         }
1096     }
1097 
    // Returns the Vulkan layout implied by the texture's last tracked usage. Only
    // valid for swapchain textures, which have exactly one subresource.
    VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
        ASSERT(mSubresourceLastUsages.size() == 1);
        return VulkanImageLayout(mSubresourceLastUsages[0], GetFormat());
    }
1102 
    // static
    // Creates and initializes a TextureView; returns an error if creating the
    // underlying VkImageView fails. The caller takes ownership of the reference.
    ResultOrError<TextureView*> TextureView::Create(TextureBase* texture,
                                                    const TextureViewDescriptor* descriptor) {
        Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
        DAWN_TRY(view->Initialize(descriptor));
        return view.Detach();
    }
1110 
    // Creates the VkImageView backing this view from |descriptor| and the view's
    // subresource range. Intentionally skipped (mHandle stays null) for copy-only
    // textures, which can never be attached or sampled.
    MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
        if ((GetTexture()->GetUsage() &
             ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
            // If the texture view has no other usage than CopySrc and CopyDst, then it can't
            // actually be used as a render pass attachment or sampled/storage texture. The Vulkan
            // validation errors warn if you create such a vkImageView, so return early.
            return {};
        }

        Device* device = ToBackend(GetTexture()->GetDevice());

        VkImageViewCreateInfo createInfo;
        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;
        createInfo.image = ToBackend(GetTexture())->GetHandle();
        createInfo.viewType = VulkanImageViewType(descriptor->dimension);
        createInfo.format = VulkanImageFormat(device, descriptor->format);
        // Identity swizzle; WebGPU does not expose component remapping.
        createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
                                                   VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};

        const SubresourceRange& subresources = GetSubresourceRange();
        createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
        createInfo.subresourceRange.levelCount = subresources.levelCount;
        createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
        createInfo.subresourceRange.layerCount = subresources.layerCount;
        createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);

        return CheckVkSuccess(
            device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
            "CreateImageView");
    }
1143 
    // Defers deletion of the VkImageView (if one was created) until the GPU has
    // finished using it.
    TextureView::~TextureView() {
        Device* device = ToBackend(GetTexture()->GetDevice());

        if (mHandle != VK_NULL_HANDLE) {
            device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
            mHandle = VK_NULL_HANDLE;
        }
    }
1152 
    // Returns the backing VkImageView (VK_NULL_HANDLE for copy-only textures).
    VkImageView TextureView::GetHandle() const {
        return mHandle;
    }
1156 
1157 }}  // namespace dawn_native::vulkan
1158