Diffstat (limited to 'chromium/third_party/skia/src/gpu/GrBufferAllocPool.cpp')
-rw-r--r--  chromium/third_party/skia/src/gpu/GrBufferAllocPool.cpp  86
1 file changed, 43 insertions(+), 43 deletions(-)
diff --git a/chromium/third_party/skia/src/gpu/GrBufferAllocPool.cpp b/chromium/third_party/skia/src/gpu/GrBufferAllocPool.cpp
index b34fe8a8e34..03d43c9b932 100644
--- a/chromium/third_party/skia/src/gpu/GrBufferAllocPool.cpp
+++ b/chromium/third_party/skia/src/gpu/GrBufferAllocPool.cpp
@@ -28,7 +28,7 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
bool frequentResetHint,
size_t blockSize,
int preallocBufferCnt) :
- fBlocks(GrMax(8, 2*preallocBufferCnt)) {
+ fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
SkASSERT(NULL != gpu);
fGpu = gpu;
@@ -38,7 +38,7 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
fBufferType = bufferType;
fFrequentResetHint = frequentResetHint;
fBufferPtr = NULL;
- fMinBlockSize = GrMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
+ fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
fBytesInUse = 0;
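
These first two hunks retire the Ganesh-local GrMax in favor of Skia's templated SkTMax. A minimal standalone sketch of the same pattern follows; the TMax stand-in and all values are illustrative, not taken from the patch:

    #include <cstddef>

    // Illustrative stand-in for Skia's SkTMax, defined here only so the
    // snippet compiles on its own. Both arguments must share one type T.
    template <typename T> const T& TMax(const T& a, const T& b) {
        return (b < a) ? a : b;
    }

    int main() {
        const size_t kMinBlockSize = 1 << 12;  // hypothetical floor
        size_t blockSize = 1 << 15;            // hypothetical caller request
        int preallocBufferCnt = 2;             // hypothetical

        size_t minBlock = TMax(kMinBlockSize, blockSize);  // clamp block size
        int blocks = TMax(8, 2 * preallocBufferCnt);       // size block list
        return (minBlock >= kMinBlockSize && blocks >= 8) ? 0 : 1;
    }

Because the template deduces a single T, each call site keeps both arguments the same type, as the constructor's int and size_t calls above do.
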
@@ -56,8 +56,8 @@ GrBufferAllocPool::~GrBufferAllocPool() {
VALIDATE();
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
- if (buffer->isLocked()) {
- buffer->unlock();
+ if (buffer->isMapped()) {
+ buffer->unmap();
}
}
while (!fBlocks.empty()) {
@@ -79,8 +79,8 @@ void GrBufferAllocPool::reset() {
fBytesInUse = 0;
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
- if (buffer->isLocked()) {
- buffer->unlock();
+ if (buffer->isMapped()) {
+ buffer->unmap();
}
}
// fPreallocBuffersInUse will be decremented down to zero in the while loop
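
The destructor and reset() hunks follow the same teardown rule: only the most recently created block can still be mapped, so one unmap on the back buffer suffices before the pool releases its blocks. A hedged sketch of that shape, with a hypothetical Buffer type standing in for GrGeometryBuffer:

    #include <vector>

    // Hypothetical stand-in for GrGeometryBuffer's map state.
    struct Buffer {
        void* mapPtr = nullptr;
        bool  isMapped() const { return mapPtr != nullptr; }
        void  unmap()          { mapPtr = nullptr; }
    };

    void teardown(std::vector<Buffer*>& blocks) {
        // Only the back block is ever left mapped, so one unmap suffices.
        if (!blocks.empty() && blocks.back()->isMapped()) {
            blocks.back()->unmap();
        }
        while (!blocks.empty()) {
            blocks.pop_back();  // the real pool also unrefs each buffer here
        }
    }
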
@@ -101,16 +101,16 @@ void GrBufferAllocPool::reset() {
VALIDATE();
}
-void GrBufferAllocPool::unlock() {
+void GrBufferAllocPool::unmap() {
VALIDATE();
if (NULL != fBufferPtr) {
BufferBlock& block = fBlocks.back();
- if (block.fBuffer->isLocked()) {
- block.fBuffer->unlock();
+ if (block.fBuffer->isMapped()) {
+ block.fBuffer->unmap();
} else {
- size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree;
- flushCpuData(fBlocks.back().fBuffer, flushSize);
+ size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
+ this->flushCpuData(fBlocks.back().fBuffer, flushSize);
}
fBufferPtr = NULL;
}
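
The renamed unmap() keeps the method's two write-back paths: a driver-mapped buffer only needs unmap(), while a CPU-staged buffer must flush exactly the bytes written so far, now computed from gpuMemorySize() rather than sizeInBytes(). A compilable sketch of the branch; Buffer and flushCpuData are stand-ins, not Skia API:

    #include <cstddef>

    struct Buffer {                 // hypothetical driver buffer
        void*  mapPtr = nullptr;
        size_t sizeBytes = 0;
        bool   isMapped() const { return mapPtr != nullptr; }
        void   unmap()          { mapPtr = nullptr; }
        size_t gpuMemorySize() const { return sizeBytes; }
    };
    void flushCpuData(Buffer&, size_t flushSize);  // defined elsewhere

    void unmapPool(Buffer& block, size_t bytesFree, void*& fBufferPtr) {
        if (block.isMapped()) {
            block.unmap();          // driver-mapped: unmap is the write-back
        } else {
            // CPU-staged: copy up only the bytes actually written so far.
            size_t flushSize = block.gpuMemorySize() - bytesFree;
            flushCpuData(block, flushSize);
        }
        fBufferPtr = nullptr;
    }
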
@@ -121,21 +121,21 @@ void GrBufferAllocPool::unlock() {
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
if (NULL != fBufferPtr) {
SkASSERT(!fBlocks.empty());
- if (fBlocks.back().fBuffer->isLocked()) {
+ if (fBlocks.back().fBuffer->isMapped()) {
GrGeometryBuffer* buf = fBlocks.back().fBuffer;
- SkASSERT(buf->lockPtr() == fBufferPtr);
+ SkASSERT(buf->mapPtr() == fBufferPtr);
} else {
SkASSERT(fCpuData.get() == fBufferPtr);
}
} else {
- SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
+ SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
}
size_t bytesInUse = 0;
for (int i = 0; i < fBlocks.count() - 1; ++i) {
- SkASSERT(!fBlocks[i].fBuffer->isLocked());
+ SkASSERT(!fBlocks[i].fBuffer->isMapped());
}
for (int i = 0; i < fBlocks.count(); ++i) {
- size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
+ size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
bytesInUse += bytes;
SkASSERT(bytes || unusedBlockAllowed);
}
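
validate() restates the pool's invariants under the new names: at most the back block may be mapped, and each block's in-use byte count is its gpuMemorySize() minus its free bytes. A hypothetical restatement of the two assertion loops:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Block { bool mapped; size_t gpuSize; size_t bytesFree; };

    // Mirrors the two loops in validate(); names are illustrative.
    void validatePool(const std::vector<Block>& blocks, bool unusedBlockAllowed) {
        for (size_t i = 0; i + 1 < blocks.size(); ++i) {
            assert(!blocks[i].mapped);  // only the back block may be mapped
        }
        for (const Block& b : blocks) {
            size_t bytes = b.gpuSize - b.bytesFree;
            assert(bytes || unusedBlockAllowed);  // no empty interior blocks
        }
    }
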
@@ -161,7 +161,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
if (NULL != fBufferPtr) {
BufferBlock& back = fBlocks.back();
- size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
+ size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
size_t pad = GrSizeAlignUpPad(usedBytes,
alignment);
if ((size + pad) <= back.fBytesFree) {
@@ -201,7 +201,7 @@ int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
VALIDATE();
if (NULL != fBufferPtr) {
const BufferBlock& back = fBlocks.back();
- size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
+ size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
return static_cast<int>((back.fBytesFree - pad) / itemSize);
} else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
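
Both of these call sites compute the back block's used bytes and then a pad that rounds the next write up to the requested alignment. A standalone sketch of the arithmetic; alignUpPad is a stand-in for GrSizeAlignUpPad, whose definition is not shown in this patch:

    #include <cstddef>

    // Stand-in for GrSizeAlignUpPad: bytes needed to round x up to the
    // next multiple of alignment (alignment must be nonzero).
    size_t alignUpPad(size_t x, size_t alignment) {
        return (alignment - x % alignment) % alignment;
    }

    // Mirrors the makeSpace() fast path: the request fits in the back block
    // if the padded size still fits in the block's free tail.
    bool fitsInBack(size_t gpuSize, size_t bytesFree,
                    size_t size, size_t alignment) {
        size_t usedBytes = gpuSize - bytesFree;
        size_t pad = alignUpPad(usedBytes, alignment);
        return (size + pad) <= bytesFree;
    }

For example, with 100 bytes already used and 16-byte alignment, the pad is 12, so a 50-byte request needs 62 free bytes in the block.
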
@@ -231,14 +231,14 @@ void GrBufferAllocPool::putBack(size_t bytes) {
// caller shouldn't try to put back more than they've taken
SkASSERT(!fBlocks.empty());
BufferBlock& block = fBlocks.back();
- size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
+ size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
if (bytes >= bytesUsed) {
bytes -= bytesUsed;
fBytesInUse -= bytesUsed;
// if we locked a vb to satisfy the make space and we're releasing
- // beyond it, then unlock it.
- if (block.fBuffer->isLocked()) {
- block.fBuffer->unlock();
+ // beyond it, then unmap it.
+ if (block.fBuffer->isMapped()) {
+ block.fBuffer->unmap();
}
this->destroyBlock();
} else {
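
putBack() retires whole blocks from the back while the returned byte count still covers them, then shrinks the final block in place. A hedged sketch of the loop; the prealloc-buffer bookkeeping and fBufferPtr reset are omitted, and the caller must not put back more than it took (asserted in the real code):

    #include <cstddef>
    #include <vector>

    struct Block { bool mapped; size_t gpuSize; size_t bytesFree; };

    void putBack(std::vector<Block>& blocks, size_t& bytesInUse, size_t bytes) {
        while (bytes) {
            Block& block = blocks.back();
            size_t bytesUsed = block.gpuSize - block.bytesFree;
            if (bytes >= bytesUsed) {
                // The give-back spans this whole block: unmap if needed,
                // then destroy the block and keep going.
                bytes -= bytesUsed;
                bytesInUse -= bytesUsed;
                block.mapped = false;   // stands in for buffer->unmap()
                blocks.pop_back();      // stands in for destroyBlock()
            } else {
                block.bytesFree += bytes;
                bytesInUse -= bytes;
                break;
            }
        }
    }
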
@@ -258,7 +258,7 @@ void GrBufferAllocPool::putBack(size_t bytes) {
bool GrBufferAllocPool::createBlock(size_t requestSize) {
- size_t size = GrMax(requestSize, fMinBlockSize);
+ size_t size = SkTMax(requestSize, fMinBlockSize);
SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
VALIDATE();
@@ -286,33 +286,33 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
if (NULL != fBufferPtr) {
SkASSERT(fBlocks.count() > 1);
BufferBlock& prev = fBlocks.fromBack(1);
- if (prev.fBuffer->isLocked()) {
- prev.fBuffer->unlock();
+ if (prev.fBuffer->isMapped()) {
+ prev.fBuffer->unmap();
} else {
flushCpuData(prev.fBuffer,
- prev.fBuffer->sizeInBytes() - prev.fBytesFree);
+ prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
}
fBufferPtr = NULL;
}
SkASSERT(NULL == fBufferPtr);
- // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy.
- // Otherwise when buffer locking is supported:
- // a) If the frequently reset hint is set we only lock when the requested size meets a
+ // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
+ // Otherwise when buffer mapping is supported:
+ // a) If the frequently reset hint is set we only map when the requested size meets a
// threshold (since we don't expect it is likely that we will see more vertex data)
- // b) If the hint is not set we lock if the buffer size is greater than the threshold.
- bool attemptLock = block.fBuffer->isCPUBacked();
- if (!attemptLock && fGpu->caps()->bufferLockSupport()) {
+ // b) If the hint is not set we map if the buffer size is greater than the threshold.
+ bool attemptMap = block.fBuffer->isCPUBacked();
+ if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
if (fFrequentResetHint) {
- attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD;
+ attemptMap = requestSize > GR_GEOM_BUFFER_MAP_THRESHOLD;
} else {
- attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD;
+ attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD;
}
}
- if (attemptLock) {
- fBufferPtr = block.fBuffer->lock();
+ if (attemptMap) {
+ fBufferPtr = block.fBuffer->map();
}
if (NULL == fBufferPtr) {
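
This hunk is the core of the rename: the boolean caps query bufferLockSupport() becomes a flags query, so "can we map at all" is now mapBufferFlags() != kNone_MapFlags. The decision logic itself is unchanged: CPU-backed buffers are always mapped, and driver buffers are mapped only above a size threshold, judged by the request when the pool resets frequently and by the whole block otherwise. A hedged restatement; the enum values and threshold are illustrative:

    #include <cstddef>

    enum MapFlags { kNone_MapFlags = 0, kCanMap_MapFlag = 1 };  // illustrative

    bool shouldMap(bool cpuBacked, MapFlags caps, bool frequentResetHint,
                   size_t requestSize, size_t blockSize, size_t threshold) {
        if (cpuBacked) {
            return true;   // mapping CPU memory is free and saves a copy
        }
        if (kNone_MapFlags == caps) {
            return false;  // driver offers no mapping path
        }
        // Frequent resets: more data after this request is unlikely, so
        // judge by the request alone. Otherwise judge by the block size.
        return (frequentResetHint ? requestSize : blockSize) > threshold;
    }
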
@@ -337,7 +337,7 @@ void GrBufferAllocPool::destroyBlock() {
--fPreallocBuffersInUse;
}
}
- SkASSERT(!block.fBuffer->isLocked());
+ SkASSERT(!block.fBuffer->isMapped());
block.fBuffer->unref();
fBlocks.pop_back();
fBufferPtr = NULL;
@@ -346,17 +346,17 @@ void GrBufferAllocPool::destroyBlock() {
void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
size_t flushSize) {
SkASSERT(NULL != buffer);
- SkASSERT(!buffer->isLocked());
+ SkASSERT(!buffer->isMapped());
SkASSERT(fCpuData.get() == fBufferPtr);
- SkASSERT(flushSize <= buffer->sizeInBytes());
+ SkASSERT(flushSize <= buffer->gpuMemorySize());
VALIDATE(true);
- if (fGpu->caps()->bufferLockSupport() &&
- flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
- void* data = buffer->lock();
+ if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
+ flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
+ void* data = buffer->map();
if (NULL != data) {
memcpy(data, fBufferPtr, flushSize);
- buffer->unlock();
+ buffer->unmap();
return;
}
}
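
flushCpuData() gets the same treatment: when the caps report a mapping path and the flush is large enough, the staged bytes go up through map/memcpy/unmap; otherwise control falls through to the update path past the end of this hunk. A standalone sketch, with Buffer, MapFlags, and the threshold as stand-ins:

    #include <cstddef>
    #include <cstring>

    enum MapFlags { kNone_MapFlags = 0, kCanMap_MapFlag = 1 };  // illustrative

    struct Buffer {                 // hypothetical driver buffer
        void* storage = nullptr;
        void* map()   { return storage; }
        void  unmap() {}
    };

    // Returns true if the data went up via map/memcpy/unmap; on false the
    // caller falls back to an updateData()-style upload.
    bool flushViaMap(Buffer& buffer, MapFlags caps, const void* src,
                     size_t flushSize, size_t threshold) {
        if (kNone_MapFlags != caps && flushSize > threshold) {
            if (void* dst = buffer.map()) {
                std::memcpy(dst, src, flushSize);
                buffer.unmap();
                return true;
            }
        }
        return false;
    }
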