Automatic texture memory based on free memory
This commit is contained in:
parent 349ba16a3c
commit 991cfdae69
3 changed files with 53 additions and 8 deletions
@@ -148,6 +148,8 @@ void GLBackend::init() {
     GL_GET_INTEGER(MAX_UNIFORM_BLOCK_SIZE);
     GL_GET_INTEGER(UNIFORM_BUFFER_OFFSET_ALIGNMENT);
 
+    GPUIdent* gpu = GPUIdent::getInstance(vendor, renderer);
+
     if (vendor.contains("NVIDIA") ) {
         qCDebug(gpugllogging) << "NVIDIA card detected";
         GL_GET_INTEGER(GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX);
@@ -178,12 +180,17 @@ void GLBackend::init() {
             _totalMemory = mem * BYTES_PER_MIB;
             _dedicatedMemory = _totalMemory;
             _videoCard = Intel;
+        } else {
+            qCWarning(gpugllogging) << "Intel card on non-Linux system, trying GPUIdent fallback";
+            _videoCard = Unknown;
+            _dedicatedMemory = gpu->getMemory();
+            _totalMemory = _dedicatedMemory;
         }
     } else {
         qCCritical(gpugllogging) << "Don't know how to get memory for OpenGL vendor " << vendor;
         _videoCard = Unknown;
-        _dedicatedMemory = 0;
-        _totalMemory = 0;
+        _dedicatedMemory = gpu->getMemory();
+        _totalMemory = _dedicatedMemory;
     }
 
     qCDebug(gpugllogging) << "dedicated: " << _dedicatedMemory;
@@ -191,7 +198,7 @@ void GLBackend::init() {
 
     LOG_GL_CONTEXT_INFO(gpugllogging, contextInfo);
-    GPUIdent* gpu = GPUIdent::getInstance(vendor, renderer);
+
 
     // From here on, GPUIdent::getInstance()->getMumble() should efficiently give the same answers.
     qCDebug(gpugllogging) << "GPU:";
 
@@ -233,6 +240,17 @@ size_t GLBackend::getAvailableMemory() {
 
 }
 
+bool GLBackend::availableMemoryKnown() {
+    switch( _videoCard ) {
+        case NVIDIA: return true;
+        case ATI: return true;
+        case Intel: return false;
+        case Unknown: return false;
+    }
+
+    return false;
+}
+
 GLBackend::GLBackend(bool syncCache) {
     _pipeline._cameraCorrectionBuffer._buffer->flush();
     initShaderBinaryCache();
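availableMemoryKnown() reports whether the driver can be asked for free VRAM at all: the GL_NVX_gpu_memory_info and GL_ATI_meminfo extensions exist only on NVIDIA and ATI drivers, so Intel and unrecognized cards return false. A minimal caller sketch, assuming only the GLBackend API added above; pickTextureBudget() is a hypothetical helper, not part of the commit:

    // Hypothetical helper (not in this commit): choose a texture budget,
    // preferring the live free-VRAM query when the driver exposes one.
    size_t pickTextureBudget() {
        if (GLBackend::availableMemoryKnown()) {
            // NVIDIA/ATI: the driver reports free VRAM directly.
            return GLBackend::getAvailableMemory();
        }
        // Intel/unknown: no free-memory query, so fall back to a fixed
        // fraction of total VRAM (see MAX_AUTO_FRACTION_OF_TOTAL_MEMORY below).
        return (size_t)(GLBackend::getTotalMemory() * 0.8f);
    }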
@@ -111,6 +111,7 @@ public:
     static size_t getDedicatedMemory() { return _dedicatedMemory; }
 
     static size_t getAvailableMemory();
+    static bool availableMemoryKnown();
 
@@ -19,6 +19,8 @@
 #define MAX_RESOURCE_TEXTURES_PER_FRAME 2
 #define NO_BUFFER_WORK_SLEEP_TIME_MS 2
 #define THREADED_TEXTURE_BUFFERING 1
+#define MAX_AUTO_FRACTION_OF_TOTAL_MEMORY 0.8f
+#define AUTO_RESERVE_TEXTURE_MEMORY MB_TO_BYTES(64)
 
 static const size_t DEFAULT_ALLOWED_TEXTURE_MEMORY = MB_TO_BYTES(DEFAULT_ALLOWED_TEXTURE_MEMORY_MB);
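The two new constants size the automatic budget: the fraction caps textures at 80% of total VRAM when no free-memory query exists, and the reserve holds 64 MiB back when one does. A worked example with hypothetical numbers (a 4096 MiB card, no user-set limit):

    // No free-memory query available: budget 80% of total VRAM.
    size_t budget = (size_t)(MB_TO_BYTES(4096) * MAX_AUTO_FRACTION_OF_TOTAL_MEMORY);  // ~3276 MiB
    // Free-memory query available: hold 64 MiB back from the reported free amount.
    size_t spare = AUTO_RESERVE_TEXTURE_MEMORY;  // MB_TO_BYTES(64)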
@@ -183,12 +185,24 @@ void GLTextureTransferEngineDefault::manageMemory() {
 void GLTextureTransferEngineDefault::updateMemoryPressure() {
     PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
 
+    bool useAvailableGlMemory = false;
     size_t allowedMemoryAllocation = gpu::Texture::getAllowedGPUMemoryUsage();
 
     if (0 == allowedMemoryAllocation) {
-        allowedMemoryAllocation = GLBackend::getTotalMemory();
-        if ( 0 == allowedMemoryAllocation ) {
-            // Last resort, if we failed to detect
-            allowedMemoryAllocation = DEFAULT_ALLOWED_TEXTURE_MEMORY;
+        // Automatic allocation
+
+        if ( GLBackend::availableMemoryKnown() ) {
+            // If we know how much is free, then we use that
+            useAvailableGlMemory = true;
+        } else {
+            // We don't know how much is free, so leave some reasonable spare room
+            // and hope it works.
+            allowedMemoryAllocation = GLBackend::getTotalMemory() * MAX_AUTO_FRACTION_OF_TOTAL_MEMORY;
+
+            if ( 0 == allowedMemoryAllocation ) {
+                // Last resort, if we failed to detect
+                allowedMemoryAllocation = DEFAULT_ALLOWED_TEXTURE_MEMORY;
+            }
+        }
     }
 
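This automatic path only engages when no explicit budget exists: a nonzero user limit comes back from gpu::Texture::getAllowedGPUMemoryUsage() and the 0 == allowedMemoryAllocation test fails. A sketch of the override, assuming the setter that pairs with that getter:

    // Pin the texture budget to 2 GiB; updateMemoryPressure() then skips
    // both the free-memory and the fraction-of-total heuristics.
    gpu::Texture::setAllowedGPUMemoryUsage(MB_TO_BYTES(2048));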
@@ -222,7 +236,19 @@ void GLTextureTransferEngineDefault::updateMemoryPressure() {
 
     Backend::textureResourceIdealGPUMemSize.set(idealMemoryAllocation);
     size_t unallocated = idealMemoryAllocation - totalVariableMemoryAllocation;
-    float pressure = (float)totalVariableMemoryAllocation / (float)allowedMemoryAllocation;
+    float pressure = 0;
+
+    if ( useAvailableGlMemory ) {
+        float total_mem = GLBackend::getTotalMemory();
+        float avail_mem = (float)GLBackend::getAvailableMemory() - (float)AUTO_RESERVE_TEXTURE_MEMORY;
+        if ( avail_mem < 0 ) {
+            avail_mem = 0;
+        }
+
+        pressure = (total_mem - avail_mem) / total_mem;
+    } else {
+        pressure = (float)totalVariableMemoryAllocation / (float)allowedMemoryAllocation;
+    }
 
     // If we're oversubscribed we need to demote textures IMMEDIATELY
     if (pressure > OVERSUBSCRIBED_PRESSURE_VALUE && canDemote) {
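The available-memory branch rephrases pressure as occupancy of the whole card rather than of the texture budget. Note the casts before the subtraction: getAvailableMemory() returns size_t, and unsigned arithmetic would wrap below zero instead of tripping the < 0 clamp. A worked example with hypothetical numbers:

    // 8192 MiB card, driver reports 2048 MiB free, 64 MiB held in reserve:
    float total_mem = MB_TO_BYTES(8192);
    float avail_mem = MB_TO_BYTES(2048) - MB_TO_BYTES(64);  // 1984 MiB usable
    float pressure  = (total_mem - avail_mem) / total_mem;  // 6208 / 8192 ≈ 0.76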