Fix automatic texture memory bug

Fixes an int32 overflow in the automatic texture memory procedure.
ksuprynowicz 2021-11-24 22:20:52 +01:00
parent bfbbb2f528
commit 65576ba99e
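
The overflow: the GL memory queries report sizes in KiB (or MiB) as 32-bit GLint values, and the old code multiplied them by BYTES_PER_KIB / BYTES_PER_MIB in 32-bit arithmetic before assigning to _totalMemory / _dedicatedMemory, so any card with more than about 2 GiB of VRAM wrapped. The diff below widens to size_t before multiplying. A minimal standalone sketch of the failure mode (not part of the commit; the constant and variable names here are illustrative only):

#include <cstdint>
#include <cstdio>

int main() {
    // An 8 GiB card reported by the driver in KiB, held in a 32-bit GL-style integer.
    int32_t totalKib = 8 * 1024 * 1024;
    const int32_t BYTES_PER_KIB = 1024;

    // Pre-fix behaviour: the product is computed in 32 bits and wraps.
    // (Shown with uint32_t because signed overflow is undefined behaviour.)
    size_t wrapped = (uint32_t)totalKib * (uint32_t)BYTES_PER_KIB;  // 0 instead of 8 GiB

    // Post-fix behaviour: widen to size_t first, so the multiply happens in 64 bits.
    size_t correct = (size_t)totalKib * BYTES_PER_KIB;              // 8589934592

    printf("wrapped: %zu bytes, correct: %zu bytes\n", wrapped, correct);
    return 0;
}

On a 64-bit build this prints 0 for the wrapped value and 8589934592 for the widened one, which is the difference the casts in the hunks below make.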


@@ -155,7 +155,7 @@ void GLBackend::init() {
 #if defined(Q_OS_ANDROID) || defined(USE_GLES) || defined(Q_OS_DARWIN)
         qCDebug(gpugllogging) << "Automatic texture memory not supported in this configuration";
         _videoCard = Unknown;
-        _dedicatedMemory = gpu->getMemory() * BYTES_PER_MIB;
+        _dedicatedMemory = (size_t)(gpu->getMemory()) * BYTES_PER_MIB;
         _totalMemory = _dedicatedMemory;
 #endif
@@ -171,8 +171,8 @@ void GLBackend::init() {
             qCDebug(gpugllogging) << "GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX: " << GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX;
             qCDebug(gpugllogging) << "GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX: " << GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX;
-            _totalMemory = GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX * BYTES_PER_KIB;
-            _dedicatedMemory = GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX * BYTES_PER_KIB;
+            _totalMemory = (size_t)(GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX) * BYTES_PER_KIB;
+            _dedicatedMemory = (size_t)(GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX) * BYTES_PER_KIB;
             _videoCard = NVIDIA;
@@ -182,20 +182,20 @@ void GLBackend::init() {
             GL_GET_INTEGER(TEXTURE_FREE_MEMORY_ATI);
             // We are actually getting free memory instead of total memory
-            _totalMemory = TEXTURE_FREE_MEMORY_ATI * BYTES_PER_KIB;
+            _totalMemory = (size_t)(TEXTURE_FREE_MEMORY_ATI) * BYTES_PER_KIB;
             _dedicatedMemory = _totalMemory;
             _videoCard = ATI;
         } else if ( ::gl::queryCurrentRendererIntegerMESA(GLX_RENDERER_VIDEO_MEMORY_MESA, &mem) ) {
             // This works only on Linux. queryCurrentRendererIntegerMESA will return false if the
             // function is not supported because we're not on Linux, or for any other reason.
             qCDebug(gpugllogging) << "MESA card detected";
-            _totalMemory = mem * BYTES_PER_MIB;
+            _totalMemory = (size_t)(mem) * BYTES_PER_MIB;
             _dedicatedMemory = _totalMemory;
             _videoCard = MESA;
         } else {
             qCCritical(gpugllogging) << "Don't know how to get memory for OpenGL vendor " << vendor << "; renderer " << renderer << ", trying fallback";
             _videoCard = Unknown;
-            _dedicatedMemory = gpu->getMemory() * BYTES_PER_MIB;
+            _dedicatedMemory = (size_t)(gpu->getMemory()) * BYTES_PER_MIB;
             _totalMemory = _dedicatedMemory;
         }
 #endif
@@ -237,12 +237,12 @@ size_t GLBackend::getAvailableMemory() {
 #if !defined(Q_OS_ANDROID) && !defined(USE_GLES)
             glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &mem[0]);
 #endif
-            return mem[0] * BYTES_PER_KIB;
+            return (size_t)(mem[0]) * BYTES_PER_KIB;
         case ATI:
 #if !defined(Q_OS_ANDROID) && !defined(USE_GLES)
             glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, &mem[0]);
 #endif
-            return mem[0] * BYTES_PER_KIB;
+            return (size_t)(mem[0]) * BYTES_PER_KIB;
         case MESA:
             return 0; // Don't know the current value
         case Unknown:
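
The same widen-before-multiply pattern is used in getAvailableMemory() above. A possible follow-up (not in this commit) would be to centralize the conversion in a small helper so new call sites cannot reintroduce the 32-bit multiply; the helper name and constant below are assumptions for illustration:

#include <cstddef>
#include <cstdint>

// Assumed constant for the sketch; the backend defines its own BYTES_PER_KIB.
constexpr size_t BYTES_PER_KIB = 1024;

// Hypothetical helper: widen to size_t before multiplying, and clamp
// non-positive driver reports to 0 instead of letting them sign-extend.
constexpr size_t kibToBytes(int32_t kib) {
    return kib > 0 ? (size_t)kib * BYTES_PER_KIB : 0;
}

static_assert(kibToBytes(8 * 1024 * 1024) == 8ull * 1024 * 1024 * 1024,
              "an 8 GiB value reported in KiB must convert without wrapping");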