1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/vk/GrVkBuffer.h"
9 #include "src/gpu/vk/GrVkGpu.h"
10 #include "src/gpu/vk/GrVkMemory.h"
11 #include "src/gpu/vk/GrVkTransferBuffer.h"
12 #include "src/gpu/vk/GrVkUtil.h"
13
14 #define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
15
16 #ifdef SK_DEBUG
17 #define VALIDATE() this->validate()
18 #else
19 #define VALIDATE() do {} while(false)
20 #endif
21
Create(GrVkGpu * gpu,const Desc & desc)22 const GrVkBuffer::Resource* GrVkBuffer::Create(GrVkGpu* gpu, const Desc& desc) {
23 SkASSERT(!gpu->protectedContext() || (gpu->protectedContext() == desc.fDynamic));
24 VkBuffer buffer;
25 GrVkAlloc alloc;
26
27 // create the buffer object
28 VkBufferCreateInfo bufInfo;
29 memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
30 bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
31 bufInfo.flags = 0;
32 bufInfo.size = desc.fSizeInBytes;
33 switch (desc.fType) {
34 case kVertex_Type:
35 bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
36 break;
37 case kIndex_Type:
38 bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
39 break;
40 case kIndirect_Type:
41 bufInfo.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
42 break;
43 case kUniform_Type:
44 bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
45 break;
46 case kCopyRead_Type:
47 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
48 break;
49 case kCopyWrite_Type:
50 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
51 break;
52 case kTexel_Type:
53 bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
54 }
55 if (!desc.fDynamic) {
56 bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
57 }
58
59 bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
60 bufInfo.queueFamilyIndexCount = 0;
61 bufInfo.pQueueFamilyIndices = nullptr;
62
63 VkResult err;
64 err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
65 if (err) {
66 return nullptr;
67 }
68
69 if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
70 buffer,
71 desc.fType,
72 desc.fDynamic,
73 &alloc)) {
74 return nullptr;
75 }
76
77 const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(gpu, buffer, alloc, desc.fType);
78 if (!resource) {
79 VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
80 GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc);
81 return nullptr;
82 }
83
84 return resource;
85 }
86
addMemoryBarrier(const GrVkGpu * gpu,VkAccessFlags srcAccessMask,VkAccessFlags dstAccesMask,VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,bool byRegion) const87 void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
88 VkAccessFlags srcAccessMask,
89 VkAccessFlags dstAccesMask,
90 VkPipelineStageFlags srcStageMask,
91 VkPipelineStageFlags dstStageMask,
92 bool byRegion) const {
93 VkBufferMemoryBarrier bufferMemoryBarrier = {
94 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
95 nullptr, // pNext
96 srcAccessMask, // srcAccessMask
97 dstAccesMask, // dstAccessMask
98 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
99 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
100 this->buffer(), // buffer
101 0, // offset
102 fDesc.fSizeInBytes, // size
103 };
104
105 // TODO: restrict to area of buffer we're interested in
106 gpu->addBufferMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
107 &bufferMemoryBarrier);
108 }
109
// Releases the Vulkan objects owned by this Resource: destroys the VkBuffer,
// then frees/returns its backing memory allocation. Called when the resource's
// ref count reaches zero; both handles must still be valid at that point.
void GrVkBuffer::Resource::freeGPUData() const {
    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory);
    VK_CALL(fGpu, DestroyBuffer(fGpu->device(), fBuffer, nullptr));
    GrVkMemory::FreeBufferMemory(fGpu, fType, fAlloc);
}
116
vkRelease(GrVkGpu * gpu)117 void GrVkBuffer::vkRelease(GrVkGpu* gpu) {
118 VALIDATE();
119 if (this->vkIsMapped()) {
120 // Only unmap resources that are not backed by a CPU buffer. Otherwise we may end up
121 // creating a new transfer buffer resources that sends us into a spiral of creating and
122 // destroying resources if we are at our budget limit. Also there really isn't a need to
123 // upload the CPU data if we are deleting this buffer.
124 if (fDesc.fDynamic) {
125 this->vkUnmap(gpu);
126 }
127 }
128 fResource->recycle();
129 fResource = nullptr;
130 if (!fDesc.fDynamic) {
131 delete[] (unsigned char*)fMapPtr;
132 }
133 fMapPtr = nullptr;
134 VALIDATE();
135 }
136
buffer_type_to_access_flags(GrVkBuffer::Type type)137 VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
138 switch (type) {
139 case GrVkBuffer::kIndex_Type:
140 return VK_ACCESS_INDEX_READ_BIT;
141 case GrVkBuffer::kVertex_Type:
142 return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
143 default:
144 // This helper is only called for static buffers so we should only ever see index or
145 // vertex buffers types
146 SkASSERT(false);
147 return 0;
148 }
149 }
150
// Prepares fMapPtr so `size` bytes can be written into the buffer.
// Dynamic buffers map the Vulkan allocation directly; static buffers use a
// lazily-allocated CPU shadow array that is uploaded later in internalUnmap().
// If the underlying resource is still referenced by in-flight command buffers,
// a dynamic buffer gets a fresh resource (reporting via *createdNewBuffer),
// while a static buffer inserts a barrier so the pending reads finish before
// the upcoming transfer write.
void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());

    if (!fResource->unique()) {
        if (fDesc.fDynamic) {
            // in use by the command buffer, so we need to create a new one
            fResource->recycle();
            // NOTE(review): createResource's failure behavior isn't visible
            // here — if it can return nullptr, the alloc() below would
            // dereference it. TODO confirm against createResource.
            fResource = this->createResource(gpu, fDesc);
            if (createdNewBuffer) {
                *createdNewBuffer = true;
            }
        } else {
            // Static buffer already has a CPU shadow copy; just make the GPU's
            // prior vertex/index reads visible before the transfer write that
            // internalUnmap() will issue.
            SkASSERT(fMapPtr);
            this->addMemoryBarrier(gpu,
                                   buffer_type_to_access_flags(fDesc.fType),
                                   VK_ACCESS_TRANSFER_WRITE_BIT,
                                   VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
                                   false);
        }
    }

    if (fDesc.fDynamic) {
        // Map the whole allocation; fOffset is currently always 0.
        const GrVkAlloc& alloc = this->alloc();
        SkASSERT(alloc.fSize > 0);
        SkASSERT(alloc.fSize >= size);
        SkASSERT(0 == fOffset);

        fMapPtr = GrVkMemory::MapAlloc(gpu, alloc);
    } else {
        if (!fMapPtr) {
            // Lazily allocate the CPU shadow copy for static buffers.
            fMapPtr = new unsigned char[this->size()];
        }
    }

    VALIDATE();
}
189
copyCpuDataToGpuBuffer(GrVkGpu * gpu,const void * src,size_t size)190 void GrVkBuffer::copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* src, size_t size) {
191 SkASSERT(src);
192 // We should never call this method in protected contexts.
193 SkASSERT(!gpu->protectedContext());
194 // The vulkan api restricts the use of vkCmdUpdateBuffer to updates that are less than or equal
195 // to 65536 bytes and a size the is 4 byte aligned.
196 if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
197 gpu->updateBuffer(this, src, this->offset(), size);
198 } else {
199 sk_sp<GrVkTransferBuffer> transferBuffer =
200 GrVkTransferBuffer::Make(gpu, size, GrVkBuffer::kCopyRead_Type);
201 if (!transferBuffer) {
202 return;
203 }
204
205 char* buffer = (char*) transferBuffer->map();
206 memcpy (buffer, src, size);
207 transferBuffer->unmap();
208
209 gpu->copyBuffer(transferBuffer.get(), this, 0, this->offset(), size);
210 }
211 this->addMemoryBarrier(gpu,
212 VK_ACCESS_TRANSFER_WRITE_BIT,
213 buffer_type_to_access_flags(fDesc.fType),
214 VK_PIPELINE_STAGE_TRANSFER_BIT,
215 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
216 false);
217 }
218
internalUnmap(GrVkGpu * gpu,size_t size)219 void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
220 VALIDATE();
221 SkASSERT(this->vkIsMapped());
222
223 if (fDesc.fDynamic) {
224 const GrVkAlloc& alloc = this->alloc();
225 SkASSERT(alloc.fSize > 0);
226 SkASSERT(alloc.fSize >= size);
227 // We currently don't use fOffset
228 SkASSERT(0 == fOffset);
229
230 GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, size);
231 GrVkMemory::UnmapAlloc(gpu, alloc);
232 fMapPtr = nullptr;
233 } else {
234 SkASSERT(fMapPtr);
235 this->copyCpuDataToGpuBuffer(gpu, fMapPtr, size);
236 }
237 }
238
vkIsMapped() const239 bool GrVkBuffer::vkIsMapped() const {
240 VALIDATE();
241 return SkToBool(fMapPtr);
242 }
243
vkUpdateData(GrVkGpu * gpu,const void * src,size_t srcSizeInBytes,bool * createdNewBuffer)244 bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
245 bool* createdNewBuffer) {
246 if (srcSizeInBytes > fDesc.fSizeInBytes) {
247 return false;
248 }
249
250 if (fDesc.fDynamic) {
251 this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
252 if (!fMapPtr) {
253 return false;
254 }
255
256 memcpy(fMapPtr, src, srcSizeInBytes);
257 this->internalUnmap(gpu, srcSizeInBytes);
258 } else {
259 this->copyCpuDataToGpuBuffer(gpu, src, srcSizeInBytes);
260 }
261
262
263 return true;
264 }
265
// Debug-only sanity check (invoked via the VALIDATE() macro): if a resource is
// attached, the buffer's type must be one of the known Type enum values.
void GrVkBuffer::validate() const {
    SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType ||
             kIndirect_Type == fDesc.fType || kTexel_Type == fDesc.fType ||
             kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType ||
             kUniform_Type == fDesc.fType);
}
272