Check the total GPU memory usage from the card as well as our computed memory usage
parent 26a06b2914
commit 12de36a534
4 changed files with 43 additions and 2 deletions
@@ -39,6 +39,10 @@ static GLBackend* INSTANCE{ nullptr };
 static const char* GL_BACKEND_PROPERTY_NAME = "com.highfidelity.gl.backend";
 
 BackendPointer GLBackend::createBackend() {
+    // The ATI memory info extension only exposes 'free memory' so we want to force it to
+    // cache the value as early as possible
+    getDedicatedMemory();
+
     // FIXME provide a mechanism to override the backend for testing
     // Where the gpuContext is initialized and where the TRUE Backend is created and assigned
     auto version = QOpenGLContextWrapper::currentContextVersion();
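The early getDedicatedMemory() call exists because GL_ATI_meminfo only reports free memory, so sampling it before the application allocates anything makes the first reading a usable stand-in for the card's total. NVIDIA's GL_NVX_gpu_memory_info extension, by contrast, does expose a true total; a minimal sketch of that query (not part of this diff; the token value comes from the NVX extension spec):

    // From GL_NVX_gpu_memory_info; glGetIntegerv reports the value in kilobytes.
    #ifndef GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX
    #define GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX 0x9047
    #endif

    GLint dedicatedKb { 0 };
    glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &dedicatedKb);
    if (GL_NO_ERROR == glGetError()) {
        // dedicatedKb now holds the card's total dedicated memory,
        // independent of how much is currently in use.
    }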
@ -60,6 +60,32 @@ bool checkGLErrorDebug(const char* name) {
|
|||
#endif
|
||||
}
|
||||
|
||||
gpu::Size getFreeDedicatedMemory() {
|
||||
Size result { 0 };
|
||||
static bool nvidiaMemorySupported { true };
|
||||
static bool atiMemorySupported { true };
|
||||
if (nvidiaMemorySupported) {
|
||||
|
||||
GLint nvGpuMemory { 0 };
|
||||
glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &nvGpuMemory);
|
||||
if (GL_NO_ERROR == glGetError()) {
|
||||
result = KB_TO_BYTES(nvGpuMemory);
|
||||
} else {
|
||||
nvidiaMemorySupported = false;
|
||||
}
|
||||
} else if (atiMemorySupported) {
|
||||
GLint atiGpuMemory[4];
|
||||
// not really total memory, but close enough if called early enough in the application lifecycle
|
||||
glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, atiGpuMemory);
|
||||
if (GL_NO_ERROR == glGetError()) {
|
||||
result = KB_TO_BYTES(atiGpuMemory[0]);
|
||||
} else {
|
||||
atiMemorySupported = false;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
gpu::Size getDedicatedMemory() {
|
||||
static Size dedicatedMemory { 0 };
|
||||
static std::once_flag once;
|
||||
|
|
|
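The hunk is cut off right after the once_flag, so getDedicatedMemory()'s initializer is not visible here. For orientation, the usual shape of this call_once caching idiom; the lambda body below is an assumption, not the commit's actual code:

    gpu::Size getDedicatedMemory() {
        static Size dedicatedMemory { 0 };
        static std::once_flag once;
        // Hypothetical completion: the real initializer is truncated in this hunk.
        std::call_once(once, [&] {
            // Run the vendor query exactly once and cache the result; at startup,
            // "free" approximates "total" on the ATI path (see the comment above).
            dedicatedMemory = getFreeDedicatedMemory();
        });
        return dedicatedMemory;
    }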
@ -25,6 +25,7 @@ void serverWait();
|
|||
void clientWait();
|
||||
|
||||
gpu::Size getDedicatedMemory();
|
||||
gpu::Size getFreeDedicatedMemory();
|
||||
ComparisonFunction comparisonFuncFromGL(GLenum func);
|
||||
State::StencilOp stencilOpFromGL(GLenum stencilOp);
|
||||
State::BlendOp blendOpFromGL(GLenum blendOp);
|
||||
|
|
|
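With both declarations exposed in the header, any GL-side caller can derive the same free-memory percentage that GLTexture uses below. A small illustrative fragment (assuming these functions live in the gpu::gl namespace of this library, which the diff does not show):

    // Illustrative caller: log how full the card is using the two queries.
    auto total = gpu::gl::getDedicatedMemory();
    auto free = gpu::gl::getFreeDedicatedMemory();
    if (total && free) {
        qDebug() << "GPU memory free:" << (100.0f * (float)free / (float)total) << "%";
    }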
@ -92,6 +92,7 @@ const std::vector<GLenum>& GLTexture::getFaceTargets(GLenum target) {
|
|||
return faceTargets;
|
||||
}
|
||||
|
||||
#define MIN_FREE_GPU_MEMORY_PERCENTAGE 0.25f
|
||||
float GLTexture::getMemoryPressure() {
|
||||
// Check for an explicit memory limit
|
||||
auto availableTextureMemory = Texture::getAllowedGPUMemoryUsage();
|
||||
|
@ -100,10 +101,19 @@ float GLTexture::getMemoryPressure() {
|
|||
if (!availableTextureMemory) {
|
||||
auto totalGpuMemory = getDedicatedMemory();
|
||||
|
||||
// If no limit has been explicitly set, and the dedicated memory can't be determined,
|
||||
// just use a fallback fixed value of 256 MB
|
||||
if (!totalGpuMemory) {
|
||||
// If we can't query the dedicated memory just use a fallback fixed value of 256 MB
|
||||
totalGpuMemory = MB_TO_BYTES(DEFAULT_MAX_MEMORY_MB);
|
||||
} else {
|
||||
// Check the global free GPU memory
|
||||
auto freeGpuMemory = getFreeDedicatedMemory();
|
||||
if (freeGpuMemory) {
|
||||
auto freePercentage = (float)freeGpuMemory / (float)totalGpuMemory;
|
||||
if (freePercentage < MIN_FREE_GPU_MEMORY_PERCENTAGE) {
|
||||
qDebug() << "Exceeded max GPU memory";
|
||||
return 2.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allow 75% of all available GPU memory to be consumed by textures
|
||||
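To make the new threshold concrete: with MIN_FREE_GPU_MEMORY_PERCENTAGE at 0.25f, an 8 GiB card must keep at least 2 GiB free, and getMemoryPressure() short-circuits to 2.0 once the driver reports less than that. A standalone sketch of just that check (interpreting values above 1.0 as "over budget" is an inference from the sentinel, not something the diff states):

    #include <cstdint>
    #include <iostream>

    constexpr float MIN_FREE_GPU_MEMORY_PERCENTAGE = 0.25f; // same floor as the hunk above

    // Returns the same 2.0 sentinel as GLTexture::getMemoryPressure() when the
    // card-reported free memory drops below a quarter of the total.
    float freeMemoryPressure(uint64_t freeBytes, uint64_t totalBytes) {
        float freePercentage = (float)freeBytes / (float)totalBytes;
        return (freePercentage < MIN_FREE_GPU_MEMORY_PERCENTAGE) ? 2.0f : 0.0f;
    }

    int main() {
        const uint64_t GiB = 1024ull * 1024ull * 1024ull;
        std::cout << freeMemoryPressure(3 * GiB, 8 * GiB) << "\n"; // 0 (37.5% free)
        std::cout << freeMemoryPressure(1 * GiB, 8 * GiB) << "\n"; // 2 (12.5% free)
    }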