Diffstat (limited to 'src/3rdparty/assimp/contrib/rapidjson/include/rapidjson/allocators.h')
-rw-r--r--  src/3rdparty/assimp/contrib/rapidjson/include/rapidjson/allocators.h  40
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/src/3rdparty/assimp/contrib/rapidjson/include/rapidjson/allocators.h b/src/3rdparty/assimp/contrib/rapidjson/include/rapidjson/allocators.h
index 24e6044b3..655f4a385 100644
--- a/src/3rdparty/assimp/contrib/rapidjson/include/rapidjson/allocators.h
+++ b/src/3rdparty/assimp/contrib/rapidjson/include/rapidjson/allocators.h
@@ -179,7 +179,8 @@ public:
 
         size = RAPIDJSON_ALIGN(size);
         if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
-            AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size);
+            if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
+                return NULL;
 
         void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size;
         chunkHead_->size += size;
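With this hunk, MemoryPoolAllocator::Malloc() reports a failed chunk allocation by returning NULL instead of dereferencing a null chunkHead_. A minimal caller-side sketch, assuming only the public rapidjson API touched by this patch:

    #include <cstdio>
    #include "rapidjson/allocators.h"

    int main() {
        rapidjson::MemoryPoolAllocator<> allocator;   // default CrtAllocator base

        // After this patch, a failed AddChunk() surfaces here as NULL
        // rather than as a crash inside Malloc().
        void* p = allocator.Malloc(1024);
        if (p == NULL) {
            std::fprintf(stderr, "out of memory\n");
            return 1;
        }
        return 0;
    }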
@@ -194,14 +195,16 @@ public:
         if (newSize == 0)
             return NULL;
 
+        originalSize = RAPIDJSON_ALIGN(originalSize);
+        newSize = RAPIDJSON_ALIGN(newSize);
+
         // Do not shrink if new size is smaller than original
         if (originalSize >= newSize)
             return originalPtr;
 
         // Simply expand it if it is the last allocation and there is sufficient space
-        if (originalPtr == (char *)(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
+        if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
             size_t increment = static_cast<size_t>(newSize - originalSize);
-            increment = RAPIDJSON_ALIGN(increment);
             if (chunkHead_->size + increment <= chunkHead_->capacity) {
                 chunkHead_->size += increment;
                 return originalPtr;
@@ -209,11 +212,13 @@ public:
         }
 
         // Realloc process: allocate and copy memory, do not free original buffer.
-        void* newBuffer = Malloc(newSize);
-        RAPIDJSON_ASSERT(newBuffer != 0); // Do not handle out-of-memory explicitly.
-        if (originalSize)
-            std::memcpy(newBuffer, originalPtr, originalSize);
-        return newBuffer;
+        if (void* newBuffer = Malloc(newSize)) {
+            if (originalSize)
+                std::memcpy(newBuffer, originalPtr, originalSize);
+            return newBuffer;
+        }
+        else
+            return NULL;
     }
 
     //! Frees a memory block (concept Allocator)
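The two RAPIDJSON_ALIGN calls added at the top of Realloc() matter because Malloc() records the aligned size in chunkHead_->size, so the "is this the last allocation?" pointer test only matches when originalSize is rounded the same way. A sketch of the in-place fast path under the default RAPIDJSON_ALIGN (an assumption; the macro is a customization point):

    #include <cassert>
    #include "rapidjson/allocators.h"

    int main() {
        rapidjson::MemoryPoolAllocator<> allocator;

        // Malloc(13) books 16 aligned bytes; Realloc to 21 (aligned: 24)
        // finds the block is still the last allocation and grows it in
        // place, so the pointer is unchanged and nothing is copied.
        void* p = allocator.Malloc(13);
        void* q = allocator.Realloc(p, 13, 21);
        assert(p == q);
        return 0;
    }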
@@ -227,15 +232,20 @@ private:
 
     //! Creates a new chunk.
     /*! \param capacity Capacity of the chunk in bytes.
+        \return true if success.
     */
-    void AddChunk(size_t capacity) {
+    bool AddChunk(size_t capacity) {
         if (!baseAllocator_)
-            ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator());
-        ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity));
-        chunk->capacity = capacity;
-        chunk->size = 0;
-        chunk->next = chunkHead_;
-        chunkHead_ = chunk;
+            ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
+        if (ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) {
+            chunk->capacity = capacity;
+            chunk->size = 0;
+            chunk->next = chunkHead_;
+            chunkHead_ = chunk;
+            return true;
+        }
+        else
+            return false;
    }
 
     static const int kDefaultChunkCapacity = 64 * 1024; //!< Default chunk capacity.
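Since AddChunk() now reports failure instead of writing through a null pointer, an out-of-memory condition in the base allocator propagates cleanly through Malloc() to the caller. The RAPIDJSON_NEW(BaseAllocator)() spelling follows the newer form of that macro, which takes only the type name and leaves the constructor call to the call site. A sketch using a hypothetical FailingAllocator (not part of rapidjson) to exercise the failure path:

    #include <cassert>
    #include <cstddef>
    #include "rapidjson/allocators.h"

    // Hypothetical base allocator that always reports out-of-memory;
    // it models the minimal Allocator concept rapidjson expects.
    class FailingAllocator {
    public:
        static const bool kNeedFree = false;
        void* Malloc(size_t) { return 0; }
        void* Realloc(void*, size_t, size_t) { return 0; }
        static void Free(void*) {}
    };

    int main() {
        FailingAllocator base;
        rapidjson::MemoryPoolAllocator<FailingAllocator> allocator(64 * 1024, &base);

        // AddChunk() returns false, so Malloc() yields NULL instead of
        // dereferencing a null chunk header.
        void* p = allocator.Malloc(32);
        assert(p == 0);
        return 0;
    }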