diff --git a/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h b/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
index a2f7a1b..fbe6f9e 100644
--- a/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
+++ b/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
@@ -3661,7 +3661,7 @@ static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
 {
     uint32_t* pDst = (uint32_t*)((char*)pData + offset);
     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-    for(size_t i = 0; i < numberCount; ++i, ++pDst)
+    for(size_t i = 0; i != numberCount; ++i, ++pDst)
     {
         *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
     }
@@ -3671,7 +3671,7 @@ static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
 {
     const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
+    for(size_t i = 0; i != numberCount; ++i, ++pSrc)
     {
         if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
         {
@@ -3866,7 +3866,7 @@ public:
     template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
 
     T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
-    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+    void deallocate(T* p, size_t /*n*/) { VmaFree(m_pCallbacks, p); }
 
     template<typename U>
     bool operator==(const VmaStlAllocator<U>& rhs) const
@@ -5214,7 +5214,7 @@ public:
     virtual void FreeAtOffset(VkDeviceSize offset) = 0;
 
     // Tries to resize (grow or shrink) space for given allocation, in place.
-    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
+    virtual bool ResizeAllocation(const VmaAllocation /*alloc*/, VkDeviceSize /*newSize*/) { return false; }
 
 protected:
     const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
@@ -5574,7 +5574,7 @@ public:
 
     virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
 
-    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
+    virtual VkResult CheckCorruption(const void* /*pBlockData*/) { return VK_ERROR_FEATURE_NOT_PRESENT; }
 
     virtual void Alloc(
         const VmaAllocationRequest& request,
@@ -6133,7 +6133,7 @@ public:
         bool overlappingMoveSupported);
     virtual ~VmaDefragmentationAlgorithm_Fast();
 
-    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
+    virtual void AddAllocation(VmaAllocation /*hAlloc*/, VkBool32* /*pChanged*/) { ++m_AllocationCount; }
     virtual void AddAll() { m_AllAllocations = true; }
 
     virtual VkResult Defragment(
@@ -6318,7 +6318,7 @@ private:
     // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
     const uint32_t m_CurrFrameIndex;
-    const uint32_t m_AlgorithmFlags;
+    /*const uint32_t m_AlgorithmFlags;*/
     // Owner of this object.
     VmaDefragmentationAlgorithm* m_pAlgorithm;
 
@@ -7073,6 +7073,7 @@ void VmaJsonWriter::BeginValue(bool isString)
         if(currItem.type == COLLECTION_TYPE_OBJECT &&
             currItem.valueCount % 2 == 0)
         {
+            (void) isString;
             VMA_ASSERT(isString);
         }
 
@@ -7660,7 +7661,9 @@ bool VmaBlockMetadata_Generic::Validate() const
         }
 
             // Margin required between allocations - every free space must be at least that large.
+#if VMA_DEBUG_MARGIN
             VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
+#endif
         }
         else
         {
@@ -7806,6 +7809,7 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest(
 {
     VMA_ASSERT(allocSize > 0);
     VMA_ASSERT(!upperAddress);
+    (void) upperAddress;
     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
     VMA_ASSERT(pAllocationRequest != VMA_NULL);
     VMA_HEAVY_ASSERT(Validate());
@@ -8033,6 +8037,7 @@ void VmaBlockMetadata_Generic::Alloc(
     VmaAllocation hAllocation)
 {
     VMA_ASSERT(!upperAddress);
+    (void) upperAddress;
     VMA_ASSERT(request.item != m_Suballocations.end());
     VmaSuballocation& suballoc = *request.item;
     // Given suballocation is a free block.
@@ -9609,7 +9614,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
     bool upperAddress,
     VmaSuballocationType allocType,
     bool canMakeOtherLost,
-    uint32_t strategy,
+    uint32_t /*strategy*/,
     VmaAllocationRequest* pAllocationRequest)
 {
     VMA_ASSERT(allocSize > 0);
@@ -9651,10 +9656,12 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
         // Apply VMA_DEBUG_MARGIN at the end.
         if(VMA_DEBUG_MARGIN > 0)
         {
+#if VMA_DEBUG_MARGIN
             if(resultOffset < VMA_DEBUG_MARGIN)
             {
                 return false;
             }
+#endif
             resultOffset -= VMA_DEBUG_MARGIN;
         }
 
@@ -10542,18 +10549,19 @@ void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
 #endif // #if VMA_STATS_STRING_ENABLED
 
 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
-    uint32_t currentFrameIndex,
-    uint32_t frameInUseCount,
+    uint32_t /*currentFrameIndex*/,
+    uint32_t /*frameInUseCount*/,
     VkDeviceSize bufferImageGranularity,
     VkDeviceSize allocSize,
     VkDeviceSize allocAlignment,
     bool upperAddress,
     VmaSuballocationType allocType,
-    bool canMakeOtherLost,
-    uint32_t strategy,
+    bool /*canMakeOtherLost*/,
+    uint32_t /*strategy*/,
     VmaAllocationRequest* pAllocationRequest)
 {
     VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+    (void) upperAddress;
 
     // Simple way to respect bufferImageGranularity. May be optimized some day.
     // Whenever it might be an OPTIMAL image...
@@ -10593,8 +10601,8 @@ bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
 }
 
 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
-    uint32_t currentFrameIndex,
-    uint32_t frameInUseCount,
+    uint32_t /*currentFrameIndex*/,
+    uint32_t /*frameInUseCount*/,
     VmaAllocationRequest* pAllocationRequest)
 {
     /*
@@ -10604,7 +10612,7 @@ bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
     return pAllocationRequest->itemsToMakeLostCount == 0;
 }
 
-uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t /*currentFrameIndex*/, uint32_t /*frameInUseCount*/)
 {
     /*
     Lost allocations are not supported in buddy allocator at the moment.
@@ -10615,9 +10623,9 @@ uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex,
 
 void VmaBlockMetadata_Buddy::Alloc(
     const VmaAllocationRequest& request,
-    VmaSuballocationType type,
+    VmaSuballocationType /*type*/,
     VkDeviceSize allocSize,
-    bool upperAddress,
+    bool /*upperAddress*/,
     VmaAllocation hAllocation)
 {
     const uint32_t targetLevel = AllocSizeToLevel(allocSize);
@@ -10941,7 +10949,7 @@ void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, con
 ////////////////////////////////////////////////////////////////////////////////
 // class VmaDeviceMemoryBlock
 
-VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
+VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator /*hAllocator*/) :
     m_pMetadata(VMA_NULL),
     m_MemoryTypeIndex(UINT32_MAX),
     m_Id(0),
@@ -11691,6 +11699,7 @@ VkResult VmaBlockVector::AllocatePage(
             if(IsCorruptionDetectionEnabled())
             {
                 VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
+                (void) res;
                 VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
             }
             return VK_SUCCESS;
@@ -11729,6 +11738,7 @@ void VmaBlockVector::Free(
         if(IsCorruptionDetectionEnabled())
         {
             VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+            (void) res;
             VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
         }
 
@@ -11894,6 +11904,7 @@ VkResult VmaBlockVector::AllocateFromBlock(
         if(IsCorruptionDetectionEnabled())
         {
             VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
+            (void) res;
             VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
         }
         return VK_SUCCESS;
@@ -11903,7 +11914,8 @@ VkResult VmaBlockVector::AllocateFromBlock(
 
 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
 {
-    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+    VkMemoryAllocateInfo allocInfo = {};
+    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
     allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
     allocInfo.allocationSize = blockSize;
     VkDeviceMemory mem = VK_NULL_HANDLE;
@@ -11991,7 +12003,8 @@ void VmaBlockVector::ApplyDefragmentationMovesCpu(
     if(pDefragCtx->res == VK_SUCCESS)
     {
         const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+        VkMappedMemoryRange memRange = {};
+        memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
 
         for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
         {
@@ -12076,7 +12089,8 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu(
 
     // Go over all blocks. Create and bind buffer for whole block if necessary.
     {
-        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+        VkBufferCreateInfo bufCreateInfo = {};
+        bufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
         bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
             VK_BUFFER_USAGE_TRANSFER_DST_BIT;
 
@@ -12101,8 +12115,9 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu(
     // Go over all moves. Post data transfer commands to command buffer.
     if(pDefragCtx->res == VK_SUCCESS)
     {
-        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+        /*const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+        VkMappedMemoryRange memRange = {};
+        memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;*/
 
         for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
         {
@@ -12435,10 +12450,10 @@ VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
     VmaAllocator hAllocator,
     VmaBlockVector* pBlockVector,
     uint32_t currentFrameIndex,
-    bool overlappingMoveSupported) :
+    bool /*overlappingMoveSupported*/) :
     VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
-    m_AllAllocations(false),
     m_AllocationCount(0),
+    m_AllAllocations(false),
     m_BytesMoved(0),
     m_AllocationsMoved(0),
     m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
@@ -12813,7 +12828,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
             size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
             VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
             VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
-            VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
+            /*VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();*/
 
             // Same block
             if(freeSpaceInfoIndex == srcBlockInfoIndex)
@@ -13098,7 +13113,7 @@ VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
     VmaPool hCustomPool,
     VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex,
-    uint32_t algorithmFlags) :
+    uint32_t /*algorithmFlags*/) :
     res(VK_SUCCESS),
     mutexLocked(false),
     blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
@@ -13106,7 +13121,7 @@ VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
     m_hCustomPool(hCustomPool),
     m_pBlockVector(pBlockVector),
     m_CurrFrameIndex(currFrameIndex),
-    m_AlgorithmFlags(algorithmFlags),
+    /*m_AlgorithmFlags(algorithmFlags),*/
     m_pAlgorithm(VMA_NULL),
     m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
     m_AllAllocations(false)
@@ -14311,19 +14326,21 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
     bool map,
     bool isUserDataString,
     void* pUserData,
-    VkBuffer dedicatedBuffer,
-    VkImage dedicatedImage,
+    VkBuffer /*dedicatedBuffer*/,
+    VkImage /*dedicatedImage*/,
     size_t allocationCount,
     VmaAllocation* pAllocations)
 {
     VMA_ASSERT(allocationCount > 0 && pAllocations);
 
-    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+    VkMemoryAllocateInfo allocInfo = {};
+    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
     allocInfo.memoryTypeIndex = memTypeIndex;
     allocInfo.allocationSize = size;
 
 #if VMA_DEDICATED_ALLOCATION
-    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
+    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = {};
+    dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR;
     if(m_UseKhrDedicatedAllocation)
     {
         if(dedicatedBuffer != VK_NULL_HANDLE)
@@ -14341,7 +14358,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
 #endif // #if VMA_DEDICATED_ALLOCATION
 
     size_t allocIndex;
-    VkResult res;
+    VkResult res = VK_SUCCESS;
     for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
     {
         res = AllocateDedicatedMemoryPage(
@@ -14460,12 +14477,15 @@ void VmaAllocator_T::GetBufferMemoryRequirements(
 #if VMA_DEDICATED_ALLOCATION
     if(m_UseKhrDedicatedAllocation)
     {
-        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+        VkBufferMemoryRequirementsInfo2KHR memReqInfo = {};
+        memReqInfo.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR;
         memReqInfo.buffer = hBuffer;
 
-        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+        VkMemoryDedicatedRequirementsKHR memDedicatedReq = {};
+        memDedicatedReq.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR;
 
-        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+        VkMemoryRequirements2KHR memReq2 = {};
+        memReq2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR;
         memReq2.pNext = &memDedicatedReq;
 
         (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
@@ -14492,12 +14512,15 @@ void VmaAllocator_T::GetImageMemoryRequirements(
 #if VMA_DEDICATED_ALLOCATION
     if(m_UseKhrDedicatedAllocation)
    {
-        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
+        VkImageMemoryRequirementsInfo2KHR memReqInfo = {};
+        memReqInfo.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR;
         memReqInfo.image = hImage;
 
-        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+        VkMemoryDedicatedRequirementsKHR memDedicatedReq = {};
+        memDedicatedReq.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR;
 
-        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+        VkMemoryRequirements2KHR memReq2 = {};
+        memReq2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR;
         memReq2.pNext = &memDedicatedReq;
 
         (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
@@ -14734,7 +14757,7 @@ VkResult VmaAllocator_T::ResizeAllocation(
             }
             else
            {
-                return VK_ERROR_OUT_OF_POOL_MEMORY;
+                return VkResult(-1000069000); // VK_ERROR_OUT_OF_POOL_MEMORY
             }
         default:
             VMA_ASSERT(0);
@@ -15000,6 +15023,7 @@ void VmaAllocator_T::DestroyPool(VmaPool pool)
     {
         VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
         bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
+        (void) success;
         VMA_ASSERT(success && "Pool not found in Allocator.");
     }
 
@@ -15248,7 +15272,8 @@ void VmaAllocator_T::FlushOrInvalidateAllocation(
 
     const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
 
-    VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+    VkMappedMemoryRange memRange = {};
+    memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
     memRange.memory = hAllocation->GetMemory();
 
     switch(hAllocation->GetType())
@@ -15321,6 +15346,7 @@ void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
         AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
         VMA_ASSERT(pDedicatedAllocations);
         bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
+        (void) success;
         VMA_ASSERT(success);
     }
 