Force all sparse allocation and deallocation onto one thread / context

Brad Davis 2016-09-20 19:47:31 -07:00
parent 255e9e6435
commit 28bb82c8a3


@@ -94,7 +94,7 @@ SparseInfo::SparseInfo(GL45Texture& texture)
 void SparseInfo::maybeMakeSparse() {
     // Don't enable sparse for objects with explicitly managed mip levels
     if (!_texture._gpuObject.isAutogenerateMips()) {
-        qCDebug(gpugl45logging) << "Don't enable sparse texture for explicitly generated mipmaps on texture " << _texture._gpuObject.source().c_str();
+        qCDebug(gpugl45logging) << "Don't enable sparse texture for explicitly generated mipmaps on texture " << _texture._source.c_str();
         return;
     }
@@ -106,7 +106,7 @@ void SparseInfo::maybeMakeSparse() {
         _pageDimensions = allowedPageDimensions[i];
         // Is this texture an integer multiple of page dimensions?
         if (uvec3(0) == (dimensions % _pageDimensions)) {
-            qCDebug(gpugl45logging) << "Enabling sparse for texture " << _texture._gpuObject.source().c_str();
+            qCDebug(gpugl45logging) << "Enabling sparse for texture " << _texture._source.c_str();
             _sparse = true;
             break;
         }
@@ -117,7 +117,7 @@ void SparseInfo::maybeMakeSparse() {
         glTextureParameteri(_texture._id, GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, _pageDimensionsIndex);
     } else {
         qCDebug(gpugl45logging) << "Size " << dimensions.x << " x " << dimensions.y <<
-            " is not supported by any sparse page size for texture" << _texture._gpuObject.source().c_str();
+            " is not supported by any sparse page size for texture" << _texture._source.c_str();
     }
 }
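The hunks above only decide whether a texture can be made sparse at all: its dimensions must be an integer multiple of one of the virtual page sizes advertised for its format, otherwise sparse commitment is skipped. Below is a minimal, self-contained sketch of that selection logic, assuming glm for the vector math; selectPageDimensions and candidatePages are illustrative names, not identifiers from this codebase.

// Sketch: pick the first sparse page size that tiles the texture exactly,
// mirroring the `uvec3(0) == (dimensions % _pageDimensions)` test above.
#include <glm/glm.hpp>
#include <cstdio>
#include <vector>

// Returns the index of the first candidate page size that divides the texture
// dimensions evenly, or -1 if none does (in which case sparse stays disabled).
int selectPageDimensions(const glm::uvec3& dimensions,
                         const std::vector<glm::uvec3>& candidatePages) {
    for (size_t i = 0; i < candidatePages.size(); ++i) {
        if (glm::uvec3(0) == (dimensions % candidatePages[i])) {
            return static_cast<int>(i);
        }
    }
    return -1;
}

int main() {
    // A 1024x1024 2D texture checked against two hypothetical page sizes.
    std::vector<glm::uvec3> candidatePages = { glm::uvec3(256, 128, 1), glm::uvec3(128, 128, 1) };
    int index = selectPageDimensions(glm::uvec3(1024, 1024, 1), candidatePages);
    std::printf("selected page size index: %d\n", index); // prints 0
    return 0;
}

In the real code the chosen index is then handed back to the driver with glTextureParameteri(..., GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, ...), as shown in the hunk above.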
@@ -255,12 +255,11 @@ GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture&
 }
 
 GL45Texture::~GL45Texture() {
+    qCDebug(gpugl45logging) << "Destroying texture " << _id << " from source " << _source.c_str();
     if (_sparseInfo._sparse) {
-        auto backend = _backend.lock();
-        if (backend) {
-            auto id = _id;
-            auto mipLevels = usedMipLevels();
+        // Remove this texture from the candidate list of derezzable textures
         {
+            auto mipLevels = usedMipLevels();
             Lock lock(texturesByMipCountsMutex);
             if (texturesByMipCounts.count(mipLevels)) {
                 auto& textures = texturesByMipCounts[mipLevels];
@@ -271,15 +270,23 @@ GL45Texture::~GL45Texture() {
             }
         }
-        auto maxSparseMip = std::min<uint16_t>(_maxMip, _sparseInfo._maxSparseLevel);
+        // Experimentation suggests that allocating sparse textures on one context/thread and deallocating
+        // them on another is buggy. So for sparse textures we need to queue a lambda with the deallocation
+        // calls to the transfer thread
+        auto id = _id;
+        // Set the class _id to 0 so we don't try to double delete
+        const_cast<GLuint&>(_id) = 0;
+        std::list<std::function<void()>> destructionFunctions;
+        auto minMip = _minMip;
         uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
+        auto maxSparseMip = std::min<uint16_t>(_maxMip, _sparseInfo._maxSparseLevel);
         for (uint16_t mipLevel = _minMip; mipLevel <= maxSparseMip; ++mipLevel) {
             auto mipDimensions = _gpuObject.evalMipDimensions(mipLevel);
-            // Destructors get called on the main thread, potentially without a context active.
-            // We need to queue the deallocation of the sparse pages for this content.
-            backend->releaseLambda([=] {
+            destructionFunctions.push_back([id, maxFace, mipLevel, mipDimensions] {
                 glTexturePageCommitmentEXT(id, mipLevel, 0, 0, 0, mipDimensions.x, mipDimensions.y, maxFace, GL_FALSE);
             });
             auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions) * maxFace;
             assert(deallocatedPages <= _allocatedPages);
             _allocatedPages -= deallocatedPages;
@@ -288,7 +295,16 @@ GL45Texture::~GL45Texture() {
         if (0 != _allocatedPages) {
             qCWarning(gpugl45logging) << "Allocated pages remaining " << _id << " " << _allocatedPages;
         }
+        auto size = _size;
+        _textureTransferHelper->queueExecution([id, size, destructionFunctions] {
+            for (auto function : destructionFunctions) {
+                function();
+            }
+            glDeleteTextures(1, &id);
+            Backend::decrementTextureGPUCount();
+            Backend::updateTextureGPUMemoryUsage(size, 0);
+        });
     }
 }
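The destructor above never issues GL calls on the calling thread: it captures the texture name by value, zeroes the member so nothing double-deletes it, collects one page-decommit closure per mip level, and queues all of them plus the final glDeleteTextures to the transfer thread that owns the sparse allocations. Below is a minimal sketch of that deferral pattern under simplified assumptions; GLWorkQueue and SparseTexture are stand-ins invented for illustration, not the hifi classes, and the GL calls are left as comments so the snippet compiles without a context.

// Sketch of the "defer destruction to the owning thread" pattern used above.
// All names here are hypothetical; only the structure mirrors the diff.
#include <cstdint>
#include <functional>
#include <list>
#include <mutex>
#include <queue>

class GLWorkQueue {
public:
    void enqueue(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(_mutex);
        _tasks.push(std::move(task));
    }
    // Drained only by the thread that owns the GL context.
    void drain() {
        std::lock_guard<std::mutex> lock(_mutex);
        while (!_tasks.empty()) {
            _tasks.front()();
            _tasks.pop();
        }
    }
private:
    std::mutex _mutex;
    std::queue<std::function<void()>> _tasks;
};

struct SparseTexture {
    uint32_t id = 0;               // GL texture name
    uint16_t minMip = 0;
    uint16_t maxMip = 4;
    GLWorkQueue* transferQueue = nullptr;

    ~SparseTexture() {
        // Take the name by value and zero the member so no other path
        // tries to delete the same texture a second time.
        uint32_t name = id;
        id = 0;

        // One closure per mip level, mirroring the per-level
        // glTexturePageCommitmentEXT(..., GL_FALSE) calls in the diff.
        std::list<std::function<void()>> destructionFunctions;
        for (uint16_t mip = minMip; mip <= maxMip; ++mip) {
            destructionFunctions.push_back([name, mip] {
                // glTexturePageCommitmentEXT(name, mip, ..., GL_FALSE);
                (void)name; (void)mip;
            });
        }

        // The decommits and the final delete all run on the owning thread.
        if (transferQueue) {
            transferQueue->enqueue([name, destructionFunctions] {
                for (const auto& fn : destructionFunctions) {
                    fn();
                }
                // glDeleteTextures(1, &name);
            });
        }
    }
};

Because everything is captured by value, the queued closures stay valid after the owning object is gone; the only state the transfer thread needs is the raw GL name and the per-mip dimensions.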
@@ -297,7 +313,7 @@ void GL45Texture::withPreservedTexture(std::function<void()> f) const {
 }
 
 void GL45Texture::generateMips() const {
-    qCDebug(gpugl45logging) << "Generating mipmaps for " << _gpuObject.source().c_str();
+    qCDebug(gpugl45logging) << "Generating mipmaps for " << _source.c_str();
     glGenerateTextureMipmap(_id);
     (void)CHECK_GL_ERROR();
 }