Fixing populated / allocated tracking for textures

Brad Davis 2018-05-21 11:51:05 -07:00
parent fb101cf036
commit 6daffba18e
6 changed files with 44 additions and 49 deletions

View file

@@ -159,9 +159,13 @@ GLExternalTexture::~GLExternalTexture() {
}
GLVariableAllocationSupport::GLVariableAllocationSupport() {
Backend::textureResourceCount.increment();
}
GLVariableAllocationSupport::~GLVariableAllocationSupport() {
Backend::textureResourceCount.decrement();
Backend::textureResourceGPUMemSize.update(_size, 0);
Backend::textureResourcePopulatedGPUMemSize.update(_populatedSize, 0);
}
void GLVariableAllocationSupport::incrementPopulatedSize(Size delta) const {
@@ -235,7 +239,6 @@ TransferJob::TransferJob(const Texture& texture,
_transferLambda = [=](const TexturePointer& texture) {
if (_mipData) {
auto gltexture = Backend::getGPUObject<GLTexture>(*texture);
;
gltexture->copyMipFaceLinesFromTexture(targetMip, face, transferDimensions, lineOffset, internalFormat, format,
type, _mipData->size(), _mipData->readData());
_mipData.reset();
@@ -246,8 +249,8 @@ TransferJob::TransferJob(const Texture& texture,
};
}
TransferJob::TransferJob(const std::function<void()>& transferLambda) :
_bufferingRequired(false), _transferLambda([=](const TexturePointer&) { transferLambda(); }) {}
TransferJob::TransferJob(uint16_t sourceMip, const std::function<void()>& transferLambda) :
_sourceMip(sourceMip), _bufferingRequired(false), _transferLambda([=](const TexturePointer&) { transferLambda(); }) {}
TransferJob::~TransferJob() {
Backend::texturePendingGPUTransferMemSize.update(_transferSize, 0);
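Taken together with the GL41, GL45, and GLES hunks further down, the change at the top of this file centralizes the texture resource accounting in the shared GLVariableAllocationSupport constructor and destructor, so the backend subclasses no longer touch the counters themselves. A minimal standalone sketch of that RAII-style pattern, assuming a hypothetical Counter type in place of the real Backend statics and simplified class names in place of GLVariableAllocationSupport and its subclasses:

#include <atomic>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for the Backend counters used above
// (textureResourceCount, textureResourceGPUMemSize, textureResourcePopulatedGPUMemSize).
struct Counter {
    std::atomic<int64_t> value{ 0 };
    void increment() { ++value; }
    void decrement() { --value; }
    // update(before, after) applies the delta, mirroring the two-argument update() calls above.
    void update(size_t before, size_t after) { value += int64_t(after) - int64_t(before); }
};

static Counter textureResourceCount;
static Counter textureResourceGPUMemSize;
static Counter textureResourcePopulatedGPUMemSize;

// The base class owns the lifetime accounting, so every variable-allocation texture
// is counted exactly once, no matter which backend created it.
class VariableAllocationSupport {
public:
    VariableAllocationSupport() { textureResourceCount.increment(); }
    virtual ~VariableAllocationSupport() {
        textureResourceCount.decrement();
        textureResourceGPUMemSize.update(_size, 0);                    // release the allocated bytes
        textureResourcePopulatedGPUMemSize.update(_populatedSize, 0);  // release the populated bytes
    }
protected:
    size_t _size{ 0 };
    size_t _populatedSize{ 0 };
};

// Backend-specific subclasses (GL41/GL45/GLES in the files below) now only manage storage.
class BackendTexture : public VariableAllocationSupport { /* allocation details elided */ };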

View file

@@ -70,14 +70,16 @@ private:
Texture::PixelsPointer _mipData;
size_t _transferOffset{ 0 };
size_t _transferSize{ 0 };
uint16_t _sourceMip{ 0 };
bool _bufferingRequired{ true };
Lambda _transferLambda{ [](const TexturePointer&) {} };
Lambda _bufferingLambda{ [](const TexturePointer&) {} };
public:
TransferJob(const TransferJob& other) = delete;
TransferJob(const std::function<void()>& transferLambda);
TransferJob(uint16_t sourceMip, const std::function<void()>& transferLambda);
TransferJob(const Texture& texture, uint16_t sourceMip, uint16_t targetMip, uint8_t face, uint32_t lines = 0, uint32_t lineOffset = 0);
~TransferJob();
const uint16_t& sourceMip() const { return _sourceMip; }
const size_t& size() const { return _transferSize; }
bool bufferingRequired() const { return _bufferingRequired; }
void buffer(const TexturePointer& texture) { _bufferingLambda(texture); }
@@ -96,6 +98,7 @@ public:
virtual void populateTransferQueue(TransferQueue& pendingTransfers) = 0;
void sanityCheck() const;
uint16 populatedMip() const { return _populatedMip; }
bool canPromote() const { return _allocatedMip > _minAllocatedMip; }
bool canDemote() const { return _allocatedMip < _maxAllocatedMip; }
bool hasPendingTransfers() const { return _populatedMip > _allocatedMip; }
@@ -109,7 +112,6 @@ public:
static const size_t MAX_BUFFER_SIZE;
protected:
// The amount of memory currently allocated
Size _size { 0 };

View file

@@ -43,7 +43,6 @@ using QueuePair = std::pair<TextureWeakPointer, float>;
// Uses a weak pointer to the texture to avoid keeping it in scope if the client stops using it
using WorkQueue = std::priority_queue<QueuePair, std::vector<QueuePair>, LessPairSecond<QueuePair>>;
using ImmediateQueuePair = std::pair<TexturePointer, float>;
// Contains a priority sorted list of textures on which work is to be done in the current frame
using ImmediateWorkQueue = std::priority_queue<ImmediateQueuePair, std::vector<ImmediateQueuePair>, LessPairSecond<ImmediateQueuePair>>;
@@ -53,9 +52,10 @@ using TransferMap = std::map<TextureWeakPointer, TransferQueue, std::owner_less<
class GLTextureTransferEngineDefault : public GLTextureTransferEngine {
using Parent = GLTextureTransferEngine;
public:
// Called once per frame by the GLBackend to manage texture memory
// Will deallocate textures if oversubscribed,
void manageMemory() override;
void shutdown() override;
@@ -92,11 +92,11 @@ protected:
private:
std::atomic<bool> _shutdown{ false };
// Contains a priority sorted list of weak texture pointers that have been determined to be eligible for additional allocation
// While the memory state is 'undersubscribed', items will be removed from this list and processed, allocating additional memory
// per frame
WorkQueue _promoteQueue;
// This queue contains jobs that will buffer data from the texture backing store (ideally a memory mapped KTX file)
// to a CPU memory buffer. This queue is populated on the main GPU thread, and drained on a dedicated thread.
// When an item on the _activeBufferQueue is completed it is put into the _activeTransferQueue
ActiveTransferQueue _activeBufferQueue;
// This queue contains jobs that will upload data from a CPU buffer into a GPU. This queue is populated on the background
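The comments in this hunk describe a two-stage hand-off: buffering jobs are queued by the GPU thread, drained by a dedicated background thread that reads mip data from the backing store into CPU memory, and the completed jobs are then handed back to the GPU thread for upload. A simplified sketch of that flow, assuming illustrative names (Job, backgroundBufferLoop, processTransfers) in place of the real TransferJob, ActiveTransferQueue, and worker-thread code:

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <utility>

// Illustrative job type: buffer() runs on the background thread, transfer() on the GPU thread.
struct Job {
    std::function<void()> buffer;
    std::function<void()> transfer;
};

std::mutex bufferMutex;
std::condition_variable bufferCondition;
std::deque<Job> activeBufferQueue;    // filled by the GPU thread, drained by the background thread
std::mutex transferMutex;
std::deque<Job> activeTransferQueue;  // filled by the background thread, drained by the GPU thread
bool shuttingDown = false;            // written under bufferMutex, followed by a notify

// Background thread: read from the backing store (ideally a memory-mapped KTX file) into CPU buffers.
void backgroundBufferLoop() {
    for (;;) {
        std::deque<Job> work;
        {
            std::unique_lock<std::mutex> lock(bufferMutex);
            bufferCondition.wait(lock, [] { return shuttingDown || !activeBufferQueue.empty(); });
            if (shuttingDown) { return; }
            work.swap(activeBufferQueue);
        }
        for (auto& job : work) {
            job.buffer();
        }
        // Completed jobs become visible to the GPU thread in the order they were queued.
        std::lock_guard<std::mutex> lock(transferMutex);
        for (auto& job : work) {
            activeTransferQueue.push_back(std::move(job));
        }
    }
}

// GPU thread, once per frame: upload whatever buffering has already completed.
void processTransfers() {
    std::deque<Job> ready;
    {
        std::lock_guard<std::mutex> lock(transferMutex);
        ready.swap(activeTransferQueue);
    }
    for (auto& job : ready) {
        job.transfer();
    }
}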
@@ -129,16 +129,19 @@ void GLBackend::killTextureManagementStage() {
}
std::vector<TexturePointer> GLTextureTransferEngine::getAllTextures() {
    std::remove_if(_registeredTextures.begin(), _registeredTextures.end(), [&](const std::weak_ptr<Texture>& weak) -> bool {
        return weak.expired();
    });
    std::vector<TexturePointer> result;
    result.reserve(_registeredTextures.size());
    std::remove_if(_registeredTextures.begin(), _registeredTextures.end(), [&](const std::weak_ptr<Texture>& weak)->bool {
        auto strong = weak.lock();
        bool strongResult = strong.operator bool();
        if (strongResult) {
            result.push_back(strong);
        }
        return strongResult;
    });
    for (const auto& weak : _registeredTextures) {
        auto strong = weak.lock();
        if (!strong) {
            continue;
        }
        result.push_back(strong);
    }
return result;
}
@@ -158,13 +161,12 @@ void GLTextureTransferEngineDefault::shutdown() {
#endif
}
void GLTextureTransferEngineDefault::manageMemory() {
PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
// reset the count used to limit the number of textures created per frame
resetFrameTextureCreated();
// Determine the current memory management state. It will be either idle (no work to do),
// undersubscribed (need to do more allocation) or transfer (need to upload content from the
// backing store to the GPU)
updateMemoryPressure();
if (MemoryPressureState::Undersubscribed == _memoryPressureState) {
@@ -176,7 +178,7 @@ void GLTextureTransferEngineDefault::manageMemory() {
}
}
// Each frame we will check if our memory pressure state has changed.
void GLTextureTransferEngineDefault::updateMemoryPressure() {
PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
@@ -225,7 +227,6 @@ void GLTextureTransferEngineDefault::updateMemoryPressure() {
return;
}
auto newState = MemoryPressureState::Idle;
if (pressure < UNDERSUBSCRIBED_PRESSURE_VALUE && (unallocated != 0 && canPromote)) {
newState = MemoryPressureState::Undersubscribed;
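The comment in the manageMemory hunk above lists the three memory-management states, and the lines just above show the Undersubscribed branch of that decision. A hedged sketch of the full selection logic; only the Undersubscribed condition and the UNDERSUBSCRIBED_PRESSURE_VALUE name are taken from this hunk, while the threshold value, the parameter list, and the Transfer/Idle branches are assumptions:

#include <cstddef>

enum class MemoryPressureState { Idle, Undersubscribed, Transfer };

// Illustrative threshold; the real constant is defined elsewhere in this file.
constexpr float UNDERSUBSCRIBED_PRESSURE_VALUE = 0.85f;

// pressure:    rough ratio of allocated texture memory to the allowed budget
// unallocated: bytes that demoted textures could still reclaim by promoting
// canPromote / hasTransfers: whether any registered texture can grow or still has pending uploads
MemoryPressureState pickMemoryPressureState(float pressure, size_t unallocated, bool canPromote, bool hasTransfers) {
    if (pressure < UNDERSUBSCRIBED_PRESSURE_VALUE && (unallocated != 0 && canPromote)) {
        return MemoryPressureState::Undersubscribed;  // room in the budget: allocate larger mips
    }
    if (hasTransfers) {
        return MemoryPressureState::Transfer;         // content still needs to reach the GPU
    }
    return MemoryPressureState::Idle;                 // nothing to do this frame
}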
@@ -270,11 +271,10 @@ void GLTextureTransferEngineDefault::processTransferQueues() {
}
#endif
// From the pendingTransferMap, queue jobs into the _activeBufferQueue
// Doing so will lock the weak texture pointer so that it can't be destroyed
// while the background thread is working.
//
// This will queue jobs until _queuedBufferSize can't be increased without exceeding
// GLVariableAllocationTexture::MAX_BUFFER_SIZE or there is no more work to be done
populateActiveBufferQueue();
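The call above moves jobs from the per-texture pending map into the active buffer queue under the byte budget the comments describe. A hedged sketch of that budgeted enqueue; MAX_BUFFER_SIZE keeps the real constant's name but not necessarily its value, and the SimpleJob/SimpleQueue/SimpleMap types are stand-ins for the real TransferJob, TransferQueue, and TransferMap:

#include <cstddef>
#include <deque>
#include <map>
#include <memory>

struct SimpleJob { size_t size{ 0 }; };                      // stand-in for TransferJob
using SimpleQueue = std::deque<std::shared_ptr<SimpleJob>>;  // stand-in for a texture's TransferQueue
using SimpleMap = std::map<int, SimpleQueue>;                // keyed by a texture id instead of a weak pointer

constexpr size_t MAX_BUFFER_SIZE = 50 * 1024 * 1024;  // illustrative budget only

// Move jobs into the active buffer queue until the outstanding buffered bytes would exceed the budget.
void populateActiveBufferQueue(SimpleMap& pendingTransfers, SimpleQueue& activeBufferQueue, size_t& queuedBufferSize) {
    for (auto itr = pendingTransfers.begin(); itr != pendingTransfers.end();) {
        auto& textureQueue = itr->second;
        while (!textureQueue.empty()) {
            const auto& job = textureQueue.front();
            if (queuedBufferSize + job->size > MAX_BUFFER_SIZE) {
                return;  // budget exhausted; resume next frame
            }
            queuedBufferSize += job->size;
            activeBufferQueue.push_back(job);
            textureQueue.pop_front();
        }
        itr = pendingTransfers.erase(itr);  // this texture has no pending work left
    }
}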
@@ -294,15 +294,19 @@ void GLTextureTransferEngineDefault::processTransferQueues() {
while (!activeTransferQueue.empty()) {
const auto& activeTransferJob = activeTransferQueue.front();
const auto& texturePointer = activeTransferJob.first;
GLTexture* gltexture = Backend::getGPUObject<GLTexture>(*texturePointer);
GLVariableAllocationSupport* vargltexture = dynamic_cast<GLVariableAllocationSupport*>(gltexture);
const auto& tranferJob = activeTransferJob.second;
tranferJob->transfer(texturePointer);
if (tranferJob->sourceMip() < vargltexture->populatedMip()) {
tranferJob->transfer(texturePointer);
}
// The pop_front MUST be the last call since all of these variables in scope are
// references that will be invalid after the pop
activeTransferQueue.pop_front();
}
}
// If we have no more work in any of the structures, reset the memory state to idle to
// force reconstruction of the _pendingTransfersMap if necessary
{
Lock lock(_bufferMutex);
@@ -322,7 +326,7 @@ void GLTextureTransferEngineDefault::populateActiveBufferQueue() {
ActiveTransferQueue newBufferJobs;
size_t newTransferSize{ 0 };
for (auto itr = _pendingTransfersMap.begin(); itr != _pendingTransfersMap.end();) {
const auto& weakTexture = itr->first;
const auto texture = weakTexture.lock();
@@ -384,7 +388,7 @@ bool GLTextureTransferEngineDefault::processActiveBufferQueue() {
for (const auto& activeJob : activeBufferQueue) {
const auto& texture = activeJob.first;
const auto& transferJob = activeJob.second;
// Some jobs don't require buffering, but they pass through this queue to ensure that we don't re-order
// the buffering & transfer operations. All jobs in the queue must be processed in order.
if (!transferJob->bufferingRequired()) {
continue;
@@ -488,14 +492,14 @@ void GLTextureTransferEngineDefault::processDemotes(size_t reliefRequired, const
// FIXME hack for stats display
QString getTextureMemoryPressureModeString() {
switch (_memoryPressureState) {
case MemoryPressureState::Undersubscribed:
    return "Undersubscribed";
case MemoryPressureState::Transfer:
    return "Transfer";
case MemoryPressureState::Idle:
    return "Idle";
}
Q_UNREACHABLE();
return "Unknown";

View file

@@ -279,8 +279,6 @@ using GL41VariableAllocationTexture = GL41Backend::GL41VariableAllocationTexture
GL41VariableAllocationTexture::GL41VariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) :
GL41Texture(backend, texture)
{
Backend::textureResourceCount.increment();
auto mipLevels = texture.getNumMips();
_allocatedMip = mipLevels;
_maxAllocatedMip = _populatedMip = mipLevels;
@@ -303,9 +301,6 @@ GL41VariableAllocationTexture::GL41VariableAllocationTexture(const std::weak_ptr
}
GL41VariableAllocationTexture::~GL41VariableAllocationTexture() {
Backend::textureResourceCount.decrement();
Backend::textureResourceGPUMemSize.update(_size, 0);
Backend::textureResourcePopulatedGPUMemSize.update(_populatedSize, 0);
}
void GL41VariableAllocationTexture::allocateStorage(uint16 allocatedMip) {
@@ -605,7 +600,7 @@ void GL41VariableAllocationTexture::populateTransferQueue(TransferQueue& pending
}
// queue up the sampler and populated mip change for after the transfer has completed
pendingTransfers.emplace(new TransferJob([=] {
pendingTransfers.emplace(new TransferJob(sourceMip, [=] {
_populatedMip = sourceMip;
incrementPopulatedSize(_gpuObject.evalMipSize(sourceMip));
sanityCheck();
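On the producer side (here for GL41, and identically for GL45 and GLES in the files below), each mip's batch of line uploads already ends with a bookkeeping job that advances _populatedMip and the populated-size counter; the change is to build that job with the new TransferJob(sourceMip, lambda) overload so it carries the mip index the guard in processTransferQueues compares against. A hedged sketch of the pattern, with Job, JobQueue, TextureState, and queueMipEpilogue as illustrative names and evalMipSize mirroring the call above:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <queue>

// Illustrative job type: carries the mip it finalizes plus the work to run after the transfer.
struct Job {
    uint16_t sourceMip{ 0 };
    std::function<void()> run;
};
using JobQueue = std::queue<std::unique_ptr<Job>>;

struct TextureState {
    uint16_t populatedMip{ 0 };
    size_t populatedSize{ 0 };
    size_t evalMipSize(uint16_t) const { return 64 * 1024; }  // placeholder for the real per-mip size
};

// After queuing the per-line uploads for sourceMip, queue the bookkeeping job, tagged with
// the same mip so stale duplicates can be filtered when the transfer queue is drained.
void queueMipEpilogue(JobQueue& pending, std::shared_ptr<TextureState> texture, uint16_t sourceMip) {
    pending.emplace(new Job{ sourceMip, [texture, sourceMip] {
        texture->populatedMip = sourceMip;
        texture->populatedSize += texture->evalMipSize(sourceMip);  // populated-size accounting
    } });
}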

View file

@@ -31,13 +31,9 @@ using GL45Texture = GL45Backend::GL45Texture;
using GL45VariableAllocationTexture = GL45Backend::GL45VariableAllocationTexture;
GL45VariableAllocationTexture::GL45VariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45Texture(backend, texture) {
Backend::textureResourceCount.increment();
}
GL45VariableAllocationTexture::~GL45VariableAllocationTexture() {
Backend::textureResourceCount.decrement();
Backend::textureResourceGPUMemSize.update(_size, 0);
Backend::textureResourcePopulatedGPUMemSize.update(_populatedSize, 0);
}
#if GPU_BINDLESS_TEXTURES
@@ -286,7 +282,7 @@ void GL45ResourceTexture::populateTransferQueue(TransferQueue& pendingTransfers)
}
// queue up the sampler and populated mip change for after the transfer has completed
pendingTransfers.emplace(new TransferJob([=] {
pendingTransfers.emplace(new TransferJob(sourceMip, [=] {
_populatedMip = sourceMip;
incrementPopulatedSize(_gpuObject.evalMipSize(sourceMip));
sanityCheck();

View file

@@ -341,8 +341,6 @@ using GLESVariableAllocationTexture = GLESBackend::GLESVariableAllocationTexture
GLESVariableAllocationTexture::GLESVariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) :
GLESTexture(backend, texture)
{
Backend::textureResourceCount.increment();
auto mipLevels = texture.getNumMips();
_allocatedMip = mipLevels;
_maxAllocatedMip = _populatedMip = mipLevels;
@@ -366,9 +364,6 @@ GLESVariableAllocationTexture::GLESVariableAllocationTexture(const std::weak_ptr
}
GLESVariableAllocationTexture::~GLESVariableAllocationTexture() {
Backend::textureResourceCount.decrement();
Backend::textureResourceGPUMemSize.update(_size, 0);
Backend::textureResourcePopulatedGPUMemSize.update(_populatedSize, 0);
}
void GLESVariableAllocationTexture::allocateStorage(uint16 allocatedMip) {
@@ -669,7 +664,7 @@ void GLESVariableAllocationTexture::populateTransferQueue(TransferJob::Queue& qu
}
// queue up the sampler and populated mip change for after the transfer has completed
queue.emplace(new TransferJob([=] {
queue.emplace(new TransferJob(sourceMip, [=] {
_populatedMip = sourceMip;
incrementPopulatedSize(_gpuObject.evalMipSize(sourceMip));
sanityCheck();