diff --git a/libraries/gl/src/gl/Config.cpp b/libraries/gl/src/gl/Config.cpp
index 94bb91a3e9..ab1dfac97c 100644
--- a/libraries/gl/src/gl/Config.cpp
+++ b/libraries/gl/src/gl/Config.cpp
@@ -74,6 +74,10 @@ static void* getGlProcessAddress(const char *namez) {
 #else
+
+typedef Bool (*PFNGLXQUERYCURRENTRENDERERINTEGERMESAPROC) (int attribute, unsigned int *value);
+PFNGLXQUERYCURRENTRENDERERINTEGERMESAPROC QueryCurrentRendererIntegerMESA;
+
 static void* getGlProcessAddress(const char *namez) {
     return (void*)glXGetProcAddressARB((const GLubyte*)namez);
 }
@@ -92,6 +96,10 @@ void gl::initModuleGl() {
     wglCreateContextAttribsARB = (PFNWGLCREATECONTEXTATTRIBSARBPROC)getGlProcessAddress("wglCreateContextAttribsARB");
 #endif
+#if defined(Q_OS_LINUX)
+    QueryCurrentRendererIntegerMESA = (PFNGLXQUERYCURRENTRENDERERINTEGERMESAPROC)getGlProcessAddress("glXQueryCurrentRendererIntegerMESA");
+#endif
+
 
 #if defined(USE_GLES)
     gladLoadGLES2Loader(getGlProcessAddress);
 #else
@@ -124,3 +132,14 @@ void gl::setSwapInterval(int interval) {
     Q_UNUSED(interval);
 #endif
 }
+
+bool gl::queryCurrentRendererIntegerMESA(int attr, unsigned int *value) {
+    #if defined(Q_OS_LINUX)
+    if (QueryCurrentRendererIntegerMESA) {
+        return QueryCurrentRendererIntegerMESA(attr, value);
+    }
+    #endif
+
+    *value = 0;
+    return false;
+}
diff --git a/libraries/gl/src/gl/Config.h b/libraries/gl/src/gl/Config.h
index aad000a242..1c80cf4a10 100644
--- a/libraries/gl/src/gl/Config.h
+++ b/libraries/gl/src/gl/Config.h
@@ -52,6 +52,7 @@ namespace gl {
     void initModuleGl();
     int getSwapInterval();
     void setSwapInterval(int swapInterval);
+    bool queryCurrentRendererIntegerMESA(int attr, unsigned int *value);
 }
 
 #endif // hifi_gpu_GPUConfig_h
diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp
index 3e5043003b..602ab1c320 100644
--- a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp
+++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp
@@ -10,11 +10,13 @@
 //
 
 #include "GLBackend.h"
+
 #include
 #include
 #include
 #include
 #include
+#include "gl/Config.h"
 
 #if defined(NSIGHT_FOUND)
 #include "nvToolsExt.h"
@@ -105,13 +107,27 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
 };
 
 #define GL_GET_INTEGER(NAME) glGetIntegerv(GL_##NAME, &const_cast<GLint&>(NAME));
-
+
+#define BYTES_PER_KIB 1024L
+#define BYTES_PER_MIB (1024L * BYTES_PER_KIB)
+
 GLint GLBackend::MAX_TEXTURE_IMAGE_UNITS{ 0 };
 GLint GLBackend::MAX_UNIFORM_BUFFER_BINDINGS{ 0 };
 GLint GLBackend::MAX_COMBINED_UNIFORM_BLOCKS{ 0 };
 GLint GLBackend::MAX_COMBINED_TEXTURE_IMAGE_UNITS{ 0 };
 GLint GLBackend::MAX_UNIFORM_BLOCK_SIZE{ 0 };
 GLint GLBackend::UNIFORM_BUFFER_OFFSET_ALIGNMENT{ 1 };
+GLint GLBackend::GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX{ 0 };
+GLint GLBackend::GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX{ 0 };
+GLint GLBackend::GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX{ 0 };
+GLint GLBackend::TEXTURE_FREE_MEMORY_ATI{ 0 };
+
+size_t GLBackend::_totalMemory{ 0 };
+size_t GLBackend::_dedicatedMemory{ 0 };
+GLBackend::VideoCardType GLBackend::_videoCard{ GLBackend::Unknown };
+
+
+#define GLX_RENDERER_VIDEO_MEMORY_MESA 0x8187
 
 void GLBackend::init() {
     static std::once_flag once;
@@ -132,13 +148,59 @@ void GLBackend::init() {
         GL_GET_INTEGER(MAX_UNIFORM_BLOCK_SIZE);
         GL_GET_INTEGER(UNIFORM_BUFFER_OFFSET_ALIGNMENT);
+        GPUIdent* gpu = GPUIdent::getInstance(vendor, renderer);
+        unsigned int mem;
+
+        if (vendor.contains("NVIDIA") ) {
+            qCDebug(gpugllogging) << "NVIDIA card detected";
+            GL_GET_INTEGER(GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX);
+            GL_GET_INTEGER(GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX);
+            GL_GET_INTEGER(GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX);
+
+            qCDebug(gpugllogging) << "GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX: " << GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX;
+            qCDebug(gpugllogging) << "GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX: " << GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX;
+            qCDebug(gpugllogging) << "GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX: " << GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX;
+
+            _totalMemory = GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX * BYTES_PER_KIB;
+            _dedicatedMemory = GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX * BYTES_PER_KIB;
+            _videoCard = NVIDIA;
+
+
+        } else if (vendor.contains("ATI")) {
+            qCDebug(gpugllogging) << "ATI card detected";
+            GL_GET_INTEGER(TEXTURE_FREE_MEMORY_ATI);
+
+            _totalMemory = TEXTURE_FREE_MEMORY_ATI * BYTES_PER_KIB;
+            _dedicatedMemory = _totalMemory;
+            _videoCard = ATI;
+        } else if ( ::gl::queryCurrentRendererIntegerMESA(GLX_RENDERER_VIDEO_MEMORY_MESA, &mem) ) {
+            // This works only on Linux. queryCurrentRendererIntegerMESA will return false if the
+            // function is not supported because we're not on Linux, or for any other reason.
+            qCDebug(gpugllogging) << "MESA card detected";
+            _totalMemory = mem * BYTES_PER_MIB;
+            _dedicatedMemory = _totalMemory;
+            _videoCard = MESA;
+        } else {
+            qCCritical(gpugllogging) << "Don't know how to get memory for OpenGL vendor " << vendor << "; renderer " << renderer << ", trying fallback";
+            _videoCard = Unknown;
+            _dedicatedMemory = gpu->getMemory();
+            _totalMemory = _dedicatedMemory;
+        }
+
+        qCDebug(gpugllogging) << "dedicated: " << _dedicatedMemory;
+        qCDebug(gpugllogging) << "total: " << _totalMemory;
+
+
 
         LOG_GL_CONTEXT_INFO(gpugllogging, contextInfo);
-        GPUIdent* gpu = GPUIdent::getInstance(vendor, renderer);
+
+
         // From here on, GPUIdent::getInstance()->getMumble() should efficiently give the same answers.
         qCDebug(gpugllogging) << "GPU:";
         qCDebug(gpugllogging) << "\tcard:" << gpu->getName();
         qCDebug(gpugllogging) << "\tdriver:" << gpu->getDriver();
-        qCDebug(gpugllogging) << "\tdedicated memory:" << gpu->getMemory() << "MB";
+        qCDebug(gpugllogging) << "\ttotal memory:" << (_totalMemory / BYTES_PER_KIB) << "KB";
+        qCDebug(gpugllogging) << "\tdedicated memory:" << (_dedicatedMemory / BYTES_PER_KIB) << "KB";
+        qCDebug(gpugllogging) << "\tavailable memory:" << (getAvailableMemory() / BYTES_PER_KIB) << "KB";
         qCDebug(gpugllogging) << "Limits:";
         qCDebug(gpugllogging) << "\tmax textures:" << MAX_TEXTURE_IMAGE_UNITS;
         qCDebug(gpugllogging) << "\tmax texture binding:" << MAX_COMBINED_TEXTURE_IMAGE_UNITS;
@@ -152,6 +214,41 @@ void GLBackend::init() {
     });
 }
 
+size_t GLBackend::getAvailableMemory() {
+    GLint mem;
+
+    switch( _videoCard ) {
+        case NVIDIA:
+            glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &mem);
+            return mem * BYTES_PER_KIB;
+        case ATI:
+            glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, &mem);
+            return mem * BYTES_PER_KIB;
+        case MESA:
+            return 0; // Don't know the current value
+        case Unknown:
+            break;
+    }
+
+    return 0;
+
+}
+
+bool GLBackend::availableMemoryKnown() {
+    switch( _videoCard ) {
+        case NVIDIA:
+            return true;
+        case ATI:
+            return true;
+        case MESA:
+            return false;
+        case Unknown:
+            return false;
+    }
+
+    return false;
+}
+
 GLBackend::GLBackend(bool syncCache) {
     _pipeline._cameraCorrectionBuffer._buffer->flush();
     initShaderBinaryCache();
diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h
index a13718f5a2..0c8676493b 100644
--- a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h
+++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h
@@ -67,6 +67,13 @@ protected:
     GLBackend();
 
 public:
+    enum VideoCardType {
+        ATI,
+        NVIDIA,
+        MESA,
+        Unknown
+    };
+
 #if defined(USE_GLES)
     // https://www.khronos.org/registry/OpenGL-Refpages/es3/html/glGet.xhtml
     static const GLint MIN_REQUIRED_TEXTURE_IMAGE_UNITS = 16;
@@ -89,6 +96,25 @@ public:
     static GLint MAX_COMBINED_TEXTURE_IMAGE_UNITS;
     static GLint MAX_UNIFORM_BLOCK_SIZE;
     static GLint UNIFORM_BUFFER_OFFSET_ALIGNMENT;
+    static GLint GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX;
+    static GLint GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX;
+    static GLint GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX;
+    static GLint TEXTURE_FREE_MEMORY_ATI;
+
+
+    static size_t _totalMemory;
+    static size_t _dedicatedMemory;
+    static VideoCardType _videoCard;
+
+
+    static size_t getTotalMemory() { return _totalMemory; }
+    static size_t getDedicatedMemory() { return _dedicatedMemory; }
+
+    static size_t getAvailableMemory();
+    static bool availableMemoryKnown();
+
+
+
     virtual ~GLBackend();
diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLTextureTransfer.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLTextureTransfer.cpp
index c9a5856a8d..af865b3ad7 100644
--- a/libraries/gpu-gl-common/src/gpu/gl/GLTextureTransfer.cpp
+++ b/libraries/gpu-gl-common/src/gpu/gl/GLTextureTransfer.cpp
@@ -19,6 +19,8 @@
 #define MAX_RESOURCE_TEXTURES_PER_FRAME 2
 #define NO_BUFFER_WORK_SLEEP_TIME_MS 2
 #define THREADED_TEXTURE_BUFFERING 1
+#define MAX_AUTO_FRACTION_OF_TOTAL_MEMORY 0.8f
+#define AUTO_RESERVE_TEXTURE_MEMORY MB_TO_BYTES(64)
 
 static const size_t DEFAULT_ALLOWED_TEXTURE_MEMORY = MB_TO_BYTES(DEFAULT_ALLOWED_TEXTURE_MEMORY_MB);
@@ -183,9 +185,25 @@ void GLTextureTransferEngineDefault::manageMemory() {
 
 void GLTextureTransferEngineDefault::updateMemoryPressure() {
     PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
+    bool useAvailableGlMemory = false;
     size_t allowedMemoryAllocation = gpu::Texture::getAllowedGPUMemoryUsage();
+
     if (0 == allowedMemoryAllocation) {
-        allowedMemoryAllocation = DEFAULT_ALLOWED_TEXTURE_MEMORY;
+        // Automatic allocation
+
+        if (GLBackend::availableMemoryKnown()) {
+            // If we know how much is free, then we use that
+            useAvailableGlMemory = true;
+        } else {
+            // We don't know how much is free, so leave some reasonable spare room
+            // and hope it works.
+            allowedMemoryAllocation = GLBackend::getTotalMemory() * MAX_AUTO_FRACTION_OF_TOTAL_MEMORY;
+
+            if (0 == allowedMemoryAllocation) {
+                // Last resort, if we failed to detect
+                allowedMemoryAllocation = DEFAULT_ALLOWED_TEXTURE_MEMORY;
+            }
+        }
     }
 
     // Clear any defunct textures (weak pointers that no longer have a valid texture)
@@ -205,7 +223,7 @@ void GLTextureTransferEngineDefault::updateMemoryPressure() {
         idealMemoryAllocation += texture->evalTotalSize();
         // Track how much we're actually using
         totalVariableMemoryAllocation += gltexture->size();
-        if (vartexture->canDemote()) {
+        if (!gltexture->_gpuObject.getImportant() && vartexture->canDemote()) {
             canDemote |= true;
         }
         if (vartexture->canPromote()) {
@@ -218,7 +236,22 @@ void GLTextureTransferEngineDefault::updateMemoryPressure() {
     Backend::textureResourceIdealGPUMemSize.set(idealMemoryAllocation);
 
     size_t unallocated = idealMemoryAllocation - totalVariableMemoryAllocation;
-    float pressure = (float)totalVariableMemoryAllocation / (float)allowedMemoryAllocation;
+    float pressure = 0;
+
+    if (useAvailableGlMemory) {
+        size_t totalMem = GLBackend::getTotalMemory();
+        size_t availMem = GLBackend::getAvailableMemory();
+
+        if (availMem >= AUTO_RESERVE_TEXTURE_MEMORY) {
+            availMem -= AUTO_RESERVE_TEXTURE_MEMORY;
+        } else {
+            availMem = 0;
+        }
+
+        pressure = ((float)totalMem - (float)availMem) / (float)totalMem;
+    } else {
+        pressure = (float)totalVariableMemoryAllocation / (float)allowedMemoryAllocation;
+    }
 
     // If we're oversubscribed we need to demote textures IMMEDIATELY
     if (pressure > OVERSUBSCRIBED_PRESSURE_VALUE && canDemote) {
@@ -470,7 +503,7 @@ void GLTextureTransferEngineDefault::processDemotes(size_t reliefRequired, const
     for (const auto& texture : strongTextures) {
         GLTexture* gltexture = Backend::getGPUObject<GLTexture>(*texture);
         GLVariableAllocationSupport* vargltexture = dynamic_cast<GLVariableAllocationSupport*>(gltexture);
-        if (vargltexture->canDemote()) {
+        if (!gltexture->_gpuObject.getImportant() && vargltexture->canDemote()) {
            demoteQueue.push({ texture, (float)gltexture->size() });
         }
     }
diff --git a/libraries/gpu/src/gpu/Texture.h b/libraries/gpu/src/gpu/Texture.h
index debedf02a5..54c7d49421 100755
--- a/libraries/gpu/src/gpu/Texture.h
+++ b/libraries/gpu/src/gpu/Texture.h
@@ -571,6 +571,9 @@ public:
     void setExternalRecycler(const ExternalRecycler& recycler);
     ExternalRecycler getExternalRecycler() const;
+    bool getImportant() const { return _important; }
+    void setImportant(bool important) { _important = important; }
+
     const GPUObjectPointer gpuObject {};
 
     ExternalUpdates getUpdates() const;
@@ -632,6 +635,7 @@ protected:
     bool _autoGenerateMips = false;
     bool _isIrradianceValid = false;
     bool _defined = false;
+    bool _important = false;
 
     static TexturePointer create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth,
                                  uint16 numSamples, uint16 numSlices, uint16 numMips, const Sampler& sampler);
diff --git a/libraries/render-utils/src/text/Font.cpp b/libraries/render-utils/src/text/Font.cpp
index 024be6598d..a30bbad0e5 100644
--- a/libraries/render-utils/src/text/Font.cpp
+++ b/libraries/render-utils/src/text/Font.cpp
@@ -260,6 +260,7 @@ void Font::read(QIODevice& in) {
         gpu::Sampler(gpu::Sampler::FILTER_MIN_POINT_MAG_LINEAR));
     _texture->setStoredMipFormat(formatMip);
     _texture->assignStoredMip(0, image.sizeInBytes(), image.constBits());
+    _texture->setImportant(true);
 }
 
 void Font::setupGPU() {
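
A quick illustration of the automatic texture-budget path added to GLTextureTransferEngineDefault::updateMemoryPressure() above: when the backend can report free VRAM (the NVIDIA and ATI cases), pressure is derived from total versus currently-available memory instead of a fixed texture budget. The standalone sketch below only mirrors that arithmetic with made-up numbers; the 64 MB headroom matches AUTO_RESERVE_TEXTURE_MEMORY, while the sample values and the main() wrapper are illustrative and not part of the patch.

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical card for illustration only: 4 GiB total, 1 GiB currently free.
        uint64_t totalMem = 4096ull * 1024 * 1024;
        uint64_t availMem = 1024ull * 1024 * 1024;
        const uint64_t reserve = 64ull * 1024 * 1024;   // mirrors AUTO_RESERVE_TEXTURE_MEMORY

        // Keep a little headroom before computing pressure, as the patch does.
        availMem = (availMem >= reserve) ? (availMem - reserve) : 0;

        // Same formula as the useAvailableGlMemory branch in updateMemoryPressure().
        float pressure = ((float)totalMem - (float)availMem) / (float)totalMem;
        printf("pressure = %.3f\n", pressure);   // ~0.766 for these numbers
        return 0;
    }

In the patch itself those numbers come from GLBackend::getTotalMemory() and GLBackend::getAvailableMemory(), demotion starts once pressure exceeds OVERSUBSCRIBED_PRESSURE_VALUE, and textures flagged with setImportant(true) (as Font::read() now does for the font atlas) are skipped when demotion candidates are collected.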