/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkMemory.h"

#include "include/gpu/vk/GrVkMemoryAllocator.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkUtil.h"

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

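// Maps a GrVkBuffer type (and whether it is dynamically updated) onto the allocator's
// BufferUsage categories.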
static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
    switch (type) {
        case GrVkBuffer::kVertex_Type: // fall through
        case GrVkBuffer::kIndex_Type: // fall through
        case GrVkBuffer::kTexel_Type:
            return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
        case GrVkBuffer::kUniform_Type:
            SkASSERT(dynamic);
            return BufferUsage::kCpuWritesGpuReads;
        case GrVkBuffer::kCopyRead_Type: // fall through
        case GrVkBuffer::kCopyWrite_Type:
            return BufferUsage::kCpuOnly;
    }
    SK_ABORT("Invalid GrVkBuffer::Type");
}

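// Allocates device memory suitable for 'buffer' through the GrVkMemoryAllocator and binds the
// buffer to it. On failure nothing is left allocated and false is returned.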
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    GrVkMemoryAllocator::BufferUsage usage = get_buffer_usage(type, dynamic);

    AllocationPropertyFlags propFlags;
    if (usage == GrVkMemoryAllocator::BufferUsage::kCpuWritesGpuReads) {
        // In general it is always fine (and often better) to keep buffers persistently mapped.
        // TODO: AMD's guide for the VulkanMemoryAllocator suggests two cases where keeping a
        // buffer persistently mapped can hurt. The first is when running on Windows 7 or 8
        // (Windows 10 is fine), which is increasingly rare for Vulkan clients. The second is when
        // running on an AMD card and using the special GPU-local, host-mappable memory type.
        // However, we generally don't pick that memory type since we've found it slower than
        // cached host-visible memory. If we ever need to special-case either of these issues, we
        // can add checks for them here.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForBuffer(buffer, usage, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
                                                                   alloc->fMemory,
                                                                   alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, type, *alloc);
        return false;
    }

    return true;
}

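// Frees buffer memory. Allocations that came from the GrVkMemoryAllocator are returned to it;
// otherwise the raw VkDeviceMemory is freed directly.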
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

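// Images larger than this (or any image on drivers that require it) get a dedicated allocation
// rather than being suballocated from a larger block of device memory.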
const VkDeviceSize kMaxSmallImageSize = 256 * 1024;

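// Allocates device memory for 'image' via the GrVkMemoryAllocator and binds the image to it.
// Only optimally tiled images are expected here (linear tiling is asserted against).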
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    SkASSERT(!linearTiling);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    if (memReqs.size > kMaxSmallImageSize ||
        gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (gpu->protectedContext()) {
        propFlags |= AllocationPropertyFlags::kProtected;
    }

    if (!allocator->allocateMemoryForImage(image, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind image
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
                                                                  alloc->fMemory, alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, linearTiling, *alloc);
        return false;
    }

    return true;
}

void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

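// Maps the allocation into host address space, returning a pointer to it or nullptr on failure.
// The allocation must be host visible (kMappable_Flag), and noncoherent allocations must be
// aligned to the device's nonCoherentAtomSize (asserted in debug builds). A typical CPU-write
// sequence looks like the following sketch, where 'src' and 'size' are placeholders:
//
//     void* ptr = GrVkMemory::MapAlloc(gpu, alloc);
//     if (ptr) {
//         memcpy(ptr, src, size);
//         GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, size);  // no-op for coherent memory
//         GrVkMemory::UnmapAlloc(gpu, alloc);
//     }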
void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
#ifdef SK_DEBUG
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (alloc.fOffset & (alignment - 1)));
        SkASSERT(0 == (alloc.fSize & (alignment - 1)));
    }
#endif
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        return allocator->mapMemory(alloc.fBackendMemory);
    }

    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
                                                            alloc.fOffset,
                                                            alloc.fSize, 0, &mapPtr));
    if (err) {
        mapPtr = nullptr;
    }
    return mapPtr;
}

void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->unmapMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    }
}

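// Fills out 'range' to cover [offset, offset + size) within the allocation. Vulkan requires a
// VkMappedMemoryRange's offset and size to be multiples of the device's nonCoherentAtomSize, so
// the offset is rounded down and the size rounded up to 'alignment'.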
void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

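// Flushes host writes in [offset, offset + size) of a mapped, noncoherent allocation so that
// they become visible to the device. A no-op for coherent allocations. Callers currently always
// pass an offset of 0 (asserted below).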
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
                                                                   &mappedMemoryRange));
        }
    }
}

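// The counterpart to FlushMappedAlloc: invalidates the host's view of [offset, offset + size) so
// that device writes to a noncoherent allocation become visible to the host. A no-op for
// coherent allocations.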
void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
                                                                        &mappedMemoryRange));
        }
    }
}