// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/vulkan/init/gr_vk_memory_allocator_impl.h"

#include <vk_mem_alloc.h>

#include "base/feature_list.h"
#include "base/trace_event/trace_event.h"
#include "gpu/vulkan/vma_wrapper.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"

namespace gpu {

namespace {

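// When enabled, buffers allocated for BufferUsage::kCpuWritesGpuReads require
// host-cached memory in addition to host-visible memory; allocateBufferMemory()
// below falls back to host-visible only if that allocation fails.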
const base::Feature kCpuWritesGpuReadsCached{"CpuWritesGpuReadsCached",
                                             base::FEATURE_ENABLED_BY_DEFAULT};

class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
 public:
  explicit GrVkMemoryAllocatorImpl(VmaAllocator allocator)
      : allocator_(allocator) {}
  ~GrVkMemoryAllocatorImpl() override = default;

  GrVkMemoryAllocatorImpl(const GrVkMemoryAllocatorImpl&) = delete;
  GrVkMemoryAllocatorImpl& operator=(const GrVkMemoryAllocatorImpl&) = delete;

 private:
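  // GrVkMemoryAllocator implementation. Image memory is always allocated as
  // device-local through VMA, and the resulting VmaAllocation handle is
  // returned to Skia as an opaque GrVkBackendMemory.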
  VkResult allocateImageMemory(VkImage image,
                               AllocationPropertyFlags flags,
                               GrVkBackendMemory* backend_memory) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
                 "GrVkMemoryAllocatorImpl::allocateMemoryForImage");
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    info.preferredFlags = 0;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
      info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if (AllocationPropertyFlags::kLazyAllocation & flags) {
      info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kProtected & flags) {
      info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vma::AllocateMemoryForImage(allocator_, image, &info,
                                                  &allocation, nullptr);
    if (VK_SUCCESS == result)
      *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);
    return result;
  }

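  // Maps Skia's BufferUsage to Vulkan memory property requirements before
  // delegating to VMA: GPU-only buffers must be device-local, while
  // CPU-visible usages require host-visible memory with coherency and caching
  // preferences chosen per usage below.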
  VkResult allocateBufferMemory(VkBuffer buffer,
                                BufferUsage usage,
                                AllocationPropertyFlags flags,
                                GrVkBackendMemory* backend_memory) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
                 "GrVkMemoryAllocatorImpl::allocateMemoryForBuffer");
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    switch (usage) {
      case BufferUsage::kGpuOnly:
        info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
        info.preferredFlags = 0;
        break;
      case BufferUsage::kCpuOnly:
        info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                             VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        break;
      case BufferUsage::kCpuWritesGpuReads:
        info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        if (base::FeatureList::IsEnabled(kCpuWritesGpuReadsCached))
          info.requiredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;

        info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
        break;
      case BufferUsage::kGpuWritesCpuReads:
        info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        info.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                              VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        break;
    }

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
      info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if ((AllocationPropertyFlags::kLazyAllocation & flags) &&
        BufferUsage::kGpuOnly == usage) {
      info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kPersistentlyMapped & flags) {
      SkASSERT(BufferUsage::kGpuOnly != usage);
      info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vma::AllocateMemoryForBuffer(allocator_, buffer, &info,
                                                   &allocation, nullptr);
    if (VK_SUCCESS != result) {
      if (usage == BufferUsage::kCpuWritesGpuReads) {
        // The host-cached requirement could not be satisfied, so try again
        // requiring only host-visible memory.
        info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        result = vma::AllocateMemoryForBuffer(allocator_, buffer, &info,
                                              &allocation, nullptr);
      }
    }

    if (VK_SUCCESS == result)
      *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);

    return result;
  }

  void freeMemory(const GrVkBackendMemory& memory) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
                 "GrVkMemoryAllocatorImpl::freeMemory");
    vma::FreeMemory(allocator_, reinterpret_cast<const VmaAllocation>(memory));
  }

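  // Translates VMA's allocation info (backing VkDeviceMemory, offset, size,
  // and memory type properties) into the GrVkAlloc struct Skia expects.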
  void getAllocInfo(const GrVkBackendMemory& memory,
                    GrVkAlloc* alloc) const override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
                 "GrVkMemoryAllocatorImpl::getAllocInfo");
    const VmaAllocation allocation =
        reinterpret_cast<const VmaAllocation>(memory);
    VmaAllocationInfo vma_info;
    vma::GetAllocationInfo(allocator_, allocation, &vma_info);

    VkMemoryPropertyFlags mem_flags;
    vma::GetMemoryTypeProperties(allocator_, vma_info.memoryType, &mem_flags);

    uint32_t flags = 0;
    if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & mem_flags) {
      flags |= GrVkAlloc::kMappable_Flag;
    }
    if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & mem_flags)) {
      flags |= GrVkAlloc::kNoncoherent_Flag;
    }

    alloc->fMemory = vma_info.deviceMemory;
    alloc->fOffset = vma_info.offset;
    alloc->fSize = vma_info.size;
    alloc->fFlags = flags;
    alloc->fBackendMemory = memory;
  }

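  // Map/unmap forward directly to VMA; mapping is only valid for allocations
  // made in host-visible memory (see GrVkAlloc::kMappable_Flag above).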
  VkResult mapMemory(const GrVkBackendMemory& memory, void** data) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
                 "GrVkMemoryAllocatorImpl::mapMemory");
    const VmaAllocation allocation =
        reinterpret_cast<const VmaAllocation>(memory);
    return vma::MapMemory(allocator_, allocation, data);
  }

  void unmapMemory(const GrVkBackendMemory& memory) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
                 "GrVkMemoryAllocatorImpl::unmapMemory");
    const VmaAllocation allocation =
        reinterpret_cast<const VmaAllocation>(memory);
    vma::UnmapMemory(allocator_, allocation);
  }

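  // Flush and invalidate are required for non-host-coherent memory: flush
  // makes CPU writes visible to the device, invalidate makes device writes
  // visible to the CPU.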
  VkResult flushMemory(const GrVkBackendMemory& memory,
                       VkDeviceSize offset,
                       VkDeviceSize size) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
                 "GrVkMemoryAllocatorImpl::flushMappedMemory");
    const VmaAllocation allocation =
        reinterpret_cast<const VmaAllocation>(memory);
    return vma::FlushAllocation(allocator_, allocation, offset, size);
  }

  VkResult invalidateMemory(const GrVkBackendMemory& memory,
                            VkDeviceSize offset,
                            VkDeviceSize size) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
                 "GrVkMemoryAllocatorImpl::invalidateMappedMemory");
    const VmaAllocation allocation =
        reinterpret_cast<const VmaAllocation>(memory);
    return vma::InvalidateAllocation(allocator_, allocation, offset, size);
  }

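  // Memory statistics are computed by asking VMA to walk all of its heaps and
  // blocks: usedBytes counts live allocations, unusedBytes counts space
  // reserved in VMA's blocks but not currently allocated.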
  uint64_t totalUsedMemory() const override {
    VmaStats stats;
    vma::CalculateStats(allocator_, &stats);
    return stats.total.usedBytes;
  }

  uint64_t totalAllocatedMemory() const override {
    VmaStats stats;
    vma::CalculateStats(allocator_, &stats);
    return stats.total.usedBytes + stats.total.unusedBytes;
  }

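  // Not owned by this class; obtained from the VulkanDeviceQueue in
  // CreateGrVkMemoryAllocator() below.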
  const VmaAllocator allocator_;
};

}  // namespace

sk_sp<GrVkMemoryAllocator> CreateGrVkMemoryAllocator(
    VulkanDeviceQueue* device_queue) {
  return sk_make_sp<GrVkMemoryAllocatorImpl>(device_queue->vma_allocator());
}

}  // namespace gpu