path: root/src/3rdparty/VulkanMemoryAllocator/patches/0001-Avoid-compiler-warnings.patch
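The hunks below suppress unused-parameter, unused-variable and missing-field-initializer warnings in the bundled vk_mem_alloc.h without changing behaviour: names of deliberately unused parameters are commented out, values that are only consumed by VMA_ASSERT are cast to void so they do not become unused in release builds, and Vulkan structs are zero-initialized with sType assigned separately instead of brace-initializing only the first member. A minimal standalone sketch of these idioms follows; all names in it are illustrative and are not taken from vk_mem_alloc.h.

    #include <cassert>

    // 1. Comment out the name of a parameter that is intentionally unused.
    bool resizeStub(int /*newSize*/) { return false; }

    // 2. Cast to void a value that is only read inside an assert(), so it
    //    does not trigger an unused warning when NDEBUG compiles the
    //    assert away.
    void checkStub(bool ok)
    {
        (void) ok;
        assert(ok);
    }

    // 3. Zero-initialize a struct and assign sType separately instead of
    //    brace-initializing only the first member.
    struct InfoStub { int sType; const void* pNext; unsigned size; };

    InfoStub makeInfoStub()
    {
        InfoStub info = {};
        info.sType = 42; // placeholder value, not a real Vulkan sType
        return info;
    }
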
diff --git a/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h b/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
index a2f7a1b..fbe6f9e 100644
--- a/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
+++ b/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
@@ -3661,7 +3661,7 @@ static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
 {
     uint32_t* pDst = (uint32_t*)((char*)pData + offset);
     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-    for(size_t i = 0; i < numberCount; ++i, ++pDst)
+    for(size_t i = 0; i != numberCount; ++i, ++pDst)
     {
         *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
     }
@@ -3671,7 +3671,7 @@ static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
 {
     const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
+    for(size_t i = 0; i != numberCount; ++i, ++pSrc)
     {
         if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
         {
@@ -3866,7 +3866,7 @@ public:
     template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
 
     T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
-    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+    void deallocate(T* p, size_t /*n*/) { VmaFree(m_pCallbacks, p); }
 
     template<typename U>
     bool operator==(const VmaStlAllocator<U>& rhs) const
@@ -5214,7 +5214,7 @@ public:
     virtual void FreeAtOffset(VkDeviceSize offset) = 0;
 
     // Tries to resize (grow or shrink) space for given allocation, in place.
-    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
+    virtual bool ResizeAllocation(const VmaAllocation /*alloc*/, VkDeviceSize /*newSize*/) { return false; }
 
 protected:
     const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
@@ -5574,7 +5574,7 @@ public:
 
     virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
 
-    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
+    virtual VkResult CheckCorruption(const void* /*pBlockData*/) { return VK_ERROR_FEATURE_NOT_PRESENT; }
 
     virtual void Alloc(
         const VmaAllocationRequest& request,
@@ -6133,7 +6133,7 @@ public:
         bool overlappingMoveSupported);
     virtual ~VmaDefragmentationAlgorithm_Fast();
 
-    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
+    virtual void AddAllocation(VmaAllocation /*hAlloc*/, VkBool32* /*pChanged*/) { ++m_AllocationCount; }
     virtual void AddAll() { m_AllAllocations = true; }
 
     virtual VkResult Defragment(
@@ -6318,7 +6318,7 @@ private:
     // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
     VmaBlockVector* const m_pBlockVector;
     const uint32_t m_CurrFrameIndex;
-    const uint32_t m_AlgorithmFlags;
+    /*const uint32_t m_AlgorithmFlags;*/
     // Owner of this object.
     VmaDefragmentationAlgorithm* m_pAlgorithm;
 
@@ -7073,6 +7073,7 @@ void VmaJsonWriter::BeginValue(bool isString)
         if(currItem.type == COLLECTION_TYPE_OBJECT &&
             currItem.valueCount % 2 == 0)
         {
+            (void) isString;
             VMA_ASSERT(isString);
         }
 
@@ -7660,7 +7661,9 @@ bool VmaBlockMetadata_Generic::Validate() const
             }
 
             // Margin required between allocations - every free space must be at least that large.
+#if VMA_DEBUG_MARGIN
             VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
+#endif
         }
         else
         {
@@ -7806,6 +7809,7 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest(
 {
     VMA_ASSERT(allocSize > 0);
     VMA_ASSERT(!upperAddress);
+    (void) upperAddress;
     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
     VMA_ASSERT(pAllocationRequest != VMA_NULL);
     VMA_HEAVY_ASSERT(Validate());
@@ -8033,6 +8037,7 @@ void VmaBlockMetadata_Generic::Alloc(
     VmaAllocation hAllocation)
 {
     VMA_ASSERT(!upperAddress);
+    (void) upperAddress;
     VMA_ASSERT(request.item != m_Suballocations.end());
     VmaSuballocation& suballoc = *request.item;
     // Given suballocation is a free block.
@@ -9609,7 +9614,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
     bool upperAddress,
     VmaSuballocationType allocType,
     bool canMakeOtherLost,
-    uint32_t strategy,
+    uint32_t /*strategy*/,
     VmaAllocationRequest* pAllocationRequest)
 {
     VMA_ASSERT(allocSize > 0);
@@ -9651,10 +9656,12 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
         // Apply VMA_DEBUG_MARGIN at the end.
         if(VMA_DEBUG_MARGIN > 0)
         {
+#if VMA_DEBUG_MARGIN
             if(resultOffset < VMA_DEBUG_MARGIN)
             {
                 return false;
             }
+#endif
             resultOffset -= VMA_DEBUG_MARGIN;
         }
 
@@ -10542,18 +10549,19 @@ void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
 #endif // #if VMA_STATS_STRING_ENABLED
 
 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
-    uint32_t currentFrameIndex,
-    uint32_t frameInUseCount,
+    uint32_t /*currentFrameIndex*/,
+    uint32_t /*frameInUseCount*/,
     VkDeviceSize bufferImageGranularity,
     VkDeviceSize allocSize,
     VkDeviceSize allocAlignment,
     bool upperAddress,
     VmaSuballocationType allocType,
-    bool canMakeOtherLost,
-    uint32_t strategy,
+    bool /*canMakeOtherLost*/,
+    uint32_t /*strategy*/,
     VmaAllocationRequest* pAllocationRequest)
 {
     VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+    (void) upperAddress;
 
     // Simple way to respect bufferImageGranularity. May be optimized some day.
     // Whenever it might be an OPTIMAL image...
@@ -10593,8 +10601,8 @@ bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
 }
 
 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
-    uint32_t currentFrameIndex,
-    uint32_t frameInUseCount,
+    uint32_t /*currentFrameIndex*/,
+    uint32_t /*frameInUseCount*/,
     VmaAllocationRequest* pAllocationRequest)
 {
     /*
@@ -10604,7 +10612,7 @@ bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
     return pAllocationRequest->itemsToMakeLostCount == 0;
 }
 
-uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t /*currentFrameIndex*/, uint32_t /*frameInUseCount*/)
 {
     /*
     Lost allocations are not supported in buddy allocator at the moment.
@@ -10615,9 +10623,9 @@ uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex,
 
 void VmaBlockMetadata_Buddy::Alloc(
     const VmaAllocationRequest& request,
-    VmaSuballocationType type,
+    VmaSuballocationType /*type*/,
     VkDeviceSize allocSize,
-    bool upperAddress,
+    bool /*upperAddress*/,
     VmaAllocation hAllocation)
 {
     const uint32_t targetLevel = AllocSizeToLevel(allocSize);
@@ -10941,7 +10949,7 @@ void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, con
 ////////////////////////////////////////////////////////////////////////////////
 // class VmaDeviceMemoryBlock
 
-VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
+VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator /*hAllocator*/) :
     m_pMetadata(VMA_NULL),
     m_MemoryTypeIndex(UINT32_MAX),
     m_Id(0),
@@ -11691,6 +11699,7 @@ VkResult VmaBlockVector::AllocatePage(
                     if(IsCorruptionDetectionEnabled())
                     {
                         VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
+                        (void) res;
                         VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
                     }
                     return VK_SUCCESS;
@@ -11729,6 +11738,7 @@ void VmaBlockVector::Free(
         if(IsCorruptionDetectionEnabled())
         {
             VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+            (void) res;
             VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
         }
 
@@ -11894,6 +11904,7 @@ VkResult VmaBlockVector::AllocateFromBlock(
         if(IsCorruptionDetectionEnabled())
         {
             VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
+            (void) res;
             VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
         }
         return VK_SUCCESS;
@@ -11903,7 +11914,8 @@ VkResult VmaBlockVector::AllocateFromBlock(
 
 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
 {
-    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+    VkMemoryAllocateInfo allocInfo = {};
+    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
     allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
     allocInfo.allocationSize = blockSize;
     VkDeviceMemory mem = VK_NULL_HANDLE;
@@ -11991,7 +12003,8 @@ void VmaBlockVector::ApplyDefragmentationMovesCpu(
     if(pDefragCtx->res == VK_SUCCESS)
     {
         const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+        VkMappedMemoryRange memRange = {};
+        memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
 
         for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
         {
@@ -12076,7 +12089,8 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu(
 
     // Go over all blocks. Create and bind buffer for whole block if necessary.
     {
-        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+        VkBufferCreateInfo bufCreateInfo = {};
+        bufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
         bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
             VK_BUFFER_USAGE_TRANSFER_DST_BIT;
 
@@ -12101,8 +12115,9 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu(
     // Go over all moves. Post data transfer commands to command buffer.
     if(pDefragCtx->res == VK_SUCCESS)
     {
-        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+        /*const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+        VkMappedMemoryRange memRange = {};
+        memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;*/
 
         for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
         {
@@ -12435,10 +12450,10 @@ VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
     VmaAllocator hAllocator,
     VmaBlockVector* pBlockVector,
     uint32_t currentFrameIndex,
-    bool overlappingMoveSupported) :
+    bool /*overlappingMoveSupported*/) :
     VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
-    m_AllAllocations(false),
     m_AllocationCount(0),
+    m_AllAllocations(false),
     m_BytesMoved(0),
     m_AllocationsMoved(0),
     m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
@@ -12813,7 +12828,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
                 size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                 VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                 VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
-                VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
+                /*VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();*/
 
                 // Same block
                 if(freeSpaceInfoIndex == srcBlockInfoIndex)
@@ -13098,7 +13113,7 @@ VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
     VmaPool hCustomPool,
     VmaBlockVector* pBlockVector,
     uint32_t currFrameIndex,
-    uint32_t algorithmFlags) :
+    uint32_t /*algorithmFlags*/) :
     res(VK_SUCCESS),
     mutexLocked(false),
     blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
@@ -13106,7 +13121,7 @@ VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
     m_hCustomPool(hCustomPool),
     m_pBlockVector(pBlockVector),
     m_CurrFrameIndex(currFrameIndex),
-    m_AlgorithmFlags(algorithmFlags),
+    /*m_AlgorithmFlags(algorithmFlags),*/
     m_pAlgorithm(VMA_NULL),
     m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
     m_AllAllocations(false)
@@ -14311,19 +14326,21 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
     bool map,
     bool isUserDataString,
     void* pUserData,
-    VkBuffer dedicatedBuffer,
-    VkImage dedicatedImage,
+    VkBuffer /*dedicatedBuffer*/,
+    VkImage /*dedicatedImage*/,
     size_t allocationCount,
     VmaAllocation* pAllocations)
 {
     VMA_ASSERT(allocationCount > 0 && pAllocations);
 
-    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+    VkMemoryAllocateInfo allocInfo = {};
+    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
     allocInfo.memoryTypeIndex = memTypeIndex;
     allocInfo.allocationSize = size;
 
 #if VMA_DEDICATED_ALLOCATION
-    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
+    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = {};
+    dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR;
     if(m_UseKhrDedicatedAllocation)
     {
         if(dedicatedBuffer != VK_NULL_HANDLE)
@@ -14341,7 +14358,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
 #endif // #if VMA_DEDICATED_ALLOCATION
 
     size_t allocIndex;
-    VkResult res;
+    VkResult res = VK_SUCCESS;
     for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
     {
         res = AllocateDedicatedMemoryPage(
@@ -14460,12 +14477,15 @@ void VmaAllocator_T::GetBufferMemoryRequirements(
 #if VMA_DEDICATED_ALLOCATION
     if(m_UseKhrDedicatedAllocation)
     {
-        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+        VkBufferMemoryRequirementsInfo2KHR memReqInfo = {};
+        memReqInfo.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR;
         memReqInfo.buffer = hBuffer;
 
-        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+        VkMemoryDedicatedRequirementsKHR memDedicatedReq = {};
+        memDedicatedReq.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR;
 
-        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+        VkMemoryRequirements2KHR memReq2 = {};
+        memReq2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR;
         memReq2.pNext = &memDedicatedReq;
 
         (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
@@ -14492,12 +14512,15 @@ void VmaAllocator_T::GetImageMemoryRequirements(
 #if VMA_DEDICATED_ALLOCATION
     if(m_UseKhrDedicatedAllocation)
     {
-        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
+        VkImageMemoryRequirementsInfo2KHR memReqInfo = {};
+        memReqInfo.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR;
         memReqInfo.image = hImage;
 
-        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+        VkMemoryDedicatedRequirementsKHR memDedicatedReq = {};
+        memDedicatedReq.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR;
 
-        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+        VkMemoryRequirements2KHR memReq2 = {};
+        memReq2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR;
         memReq2.pNext = &memDedicatedReq;
 
         (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
@@ -14734,7 +14757,7 @@ VkResult VmaAllocator_T::ResizeAllocation(
         }
         else
         {
-            return VK_ERROR_OUT_OF_POOL_MEMORY;
+            return VkResult(-1000069000); // VK_ERROR_OUT_OF_POOL_MEMORY
         }
     default:
         VMA_ASSERT(0);
@@ -15000,6 +15023,7 @@ void VmaAllocator_T::DestroyPool(VmaPool pool)
     {
         VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
         bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
+        (void) success;
         VMA_ASSERT(success && "Pool not found in Allocator.");
     }
 
@@ -15248,7 +15272,8 @@ void VmaAllocator_T::FlushOrInvalidateAllocation(
 
         const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
 
-        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+        VkMappedMemoryRange memRange = {};
+        memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
         memRange.memory = hAllocation->GetMemory();
         
         switch(hAllocation->GetType())
@@ -15321,6 +15346,7 @@ void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
         AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
         VMA_ASSERT(pDedicatedAllocations);
         bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
+        (void) success;
         VMA_ASSERT(success);
     }