Mirror of https://github.com/HifiExperiments/overte.git (synced 2025-08-09 17:58:45 +02:00)

Merge pull request #599 from daleglass/fix-auto-texture-memory

Initial version of automatic texture memory size

Commit 82a0815964, 7 changed files with 188 additions and 7 deletions

@@ -74,6 +74,10 @@ static void* getGlProcessAddress(const char *namez) {
 #else
 
+typedef Bool (*PFNGLXQUERYCURRENTRENDERERINTEGERMESAPROC) (int attribute, unsigned int *value);
+
+PFNGLXQUERYCURRENTRENDERERINTEGERMESAPROC QueryCurrentRendererIntegerMESA;
+
 static void* getGlProcessAddress(const char *namez) {
     return (void*)glXGetProcAddressARB((const GLubyte*)namez);
 }

@@ -92,6 +96,10 @@ void gl::initModuleGl() {
     wglCreateContextAttribsARB = (PFNWGLCREATECONTEXTATTRIBSARBPROC)getGlProcessAddress("wglCreateContextAttribsARB");
 #endif
 
+#if defined(Q_OS_LINUX)
+    QueryCurrentRendererIntegerMESA = (PFNGLXQUERYCURRENTRENDERERINTEGERMESAPROC)getGlProcessAddress("glXQueryCurrentRendererIntegerMESA");
+#endif
+
 #if defined(USE_GLES)
     gladLoadGLES2Loader(getGlProcessAddress);
 #else

@@ -124,3 +132,14 @@ void gl::setSwapInterval(int interval) {
     Q_UNUSED(interval);
 #endif
 }
+
+bool gl::queryCurrentRendererIntegerMESA(int attr, unsigned int *value) {
+#if defined(Q_OS_LINUX)
+    if (QueryCurrentRendererIntegerMESA) {
+        return QueryCurrentRendererIntegerMESA(attr, value);
+    }
+#endif
+
+    *value = 0;
+    return false;
+}

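Usage note: the new gl::queryCurrentRendererIntegerMESA() wraps the GLX_MESA_query_renderer entry point behind a safe fallback, so callers get one code path on every platform. A minimal sketch of a call site (not from the commit), reusing the GLX_RENDERER_VIDEO_MEMORY_MESA constant that this commit defines later in GLBackend.cpp:

    #define GLX_RENDERER_VIDEO_MEMORY_MESA 0x8187  // from GLX_MESA_query_renderer

    unsigned int videoMemoryMiB = 0;
    if (::gl::queryCurrentRendererIntegerMESA(GLX_RENDERER_VIDEO_MEMORY_MESA, &videoMemoryMiB)) {
        // Mesa reports video memory in MiB on success.
        size_t totalBytes = (size_t)videoMemoryMiB * 1024 * 1024;
    } else {
        // Returns false (and writes 0 into the out-parameter) off Linux,
        // or when the extension is unavailable.
    }
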
@@ -52,6 +52,7 @@ namespace gl {
     void initModuleGl();
     int getSwapInterval();
     void setSwapInterval(int swapInterval);
+    bool queryCurrentRendererIntegerMESA(int attr, unsigned int *value);
 }
 
 #endif // hifi_gpu_GPUConfig_h

@@ -10,11 +10,13 @@
 //
 #include "GLBackend.h"
 
 #include <mutex>
 #include <queue>
 #include <list>
 #include <functional>
 #include <glm/gtc/type_ptr.hpp>
 
+#include "gl/Config.h"
+
 #if defined(NSIGHT_FOUND)
 #include "nvToolsExt.h"

@@ -105,13 +107,27 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
 };
 
 #define GL_GET_INTEGER(NAME) glGetIntegerv(GL_##NAME, &const_cast<GLint&>(NAME));
 
+#define BYTES_PER_KIB 1024L
+#define BYTES_PER_MIB (1024L * BYTES_PER_KIB)
+
 GLint GLBackend::MAX_TEXTURE_IMAGE_UNITS{ 0 };
 GLint GLBackend::MAX_UNIFORM_BUFFER_BINDINGS{ 0 };
 GLint GLBackend::MAX_COMBINED_UNIFORM_BLOCKS{ 0 };
 GLint GLBackend::MAX_COMBINED_TEXTURE_IMAGE_UNITS{ 0 };
 GLint GLBackend::MAX_UNIFORM_BLOCK_SIZE{ 0 };
 GLint GLBackend::UNIFORM_BUFFER_OFFSET_ALIGNMENT{ 1 };
+GLint GLBackend::GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX{ 0 };
+GLint GLBackend::GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX{ 0 };
+GLint GLBackend::GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX{ 0 };
+GLint GLBackend::TEXTURE_FREE_MEMORY_ATI{ 0 };
+
+size_t GLBackend::_totalMemory{ 0 };
+size_t GLBackend::_dedicatedMemory{ 0 };
+GLBackend::VideoCardType GLBackend::_videoCard{ GLBackend::Unknown };
+
+#define GLX_RENDERER_VIDEO_MEMORY_MESA 0x8187
+
 void GLBackend::init() {
     static std::once_flag once;

@@ -132,13 +148,59 @@ void GLBackend::init() {
         GL_GET_INTEGER(MAX_UNIFORM_BLOCK_SIZE);
         GL_GET_INTEGER(UNIFORM_BUFFER_OFFSET_ALIGNMENT);
 
+        GPUIdent* gpu = GPUIdent::getInstance(vendor, renderer);
+        unsigned int mem;
+
+        if (vendor.contains("NVIDIA") ) {
+            qCDebug(gpugllogging) << "NVIDIA card detected";
+            GL_GET_INTEGER(GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX);
+            GL_GET_INTEGER(GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX);
+            GL_GET_INTEGER(GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX);
+
+            qCDebug(gpugllogging) << "GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX: " << GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX;
+            qCDebug(gpugllogging) << "GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX: " << GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX;
+            qCDebug(gpugllogging) << "GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX: " << GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX;
+
+            _totalMemory = GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX * BYTES_PER_KIB;
+            _dedicatedMemory = GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX * BYTES_PER_KIB;
+            _videoCard = NVIDIA;
+
+
+        } else if (vendor.contains("ATI")) {
+            qCDebug(gpugllogging) << "ATI card detected";
+            GL_GET_INTEGER(TEXTURE_FREE_MEMORY_ATI);
+
+            _totalMemory = TEXTURE_FREE_MEMORY_ATI * BYTES_PER_KIB;
+            _dedicatedMemory = _totalMemory;
+            _videoCard = ATI;
+        } else if ( ::gl::queryCurrentRendererIntegerMESA(GLX_RENDERER_VIDEO_MEMORY_MESA, &mem) ) {
+            // This works only on Linux. queryCurrentRendererIntegerMESA will return false if the
+            // function is not supported because we're not on Linux, or for any other reason.
+            qCDebug(gpugllogging) << "MESA card detected";
+            _totalMemory = mem * BYTES_PER_MIB;
+            _dedicatedMemory = _totalMemory;
+            _videoCard = MESA;
+        } else {
+            qCCritical(gpugllogging) << "Don't know how to get memory for OpenGL vendor " << vendor << "; renderer " << renderer << ", trying fallback";
+            _videoCard = Unknown;
+            _dedicatedMemory = gpu->getMemory();
+            _totalMemory = _dedicatedMemory;
+        }
+
+        qCDebug(gpugllogging) << "dedicated: " << _dedicatedMemory;
+        qCDebug(gpugllogging) << "total: " << _totalMemory;
+
         LOG_GL_CONTEXT_INFO(gpugllogging, contextInfo);
-        GPUIdent* gpu = GPUIdent::getInstance(vendor, renderer);
+
         // From here on, GPUIdent::getInstance()->getMumble() should efficiently give the same answers.
         qCDebug(gpugllogging) << "GPU:";
         qCDebug(gpugllogging) << "\tcard:" << gpu->getName();
         qCDebug(gpugllogging) << "\tdriver:" << gpu->getDriver();
-        qCDebug(gpugllogging) << "\tdedicated memory:" << gpu->getMemory() << "MB";
+        qCDebug(gpugllogging) << "\ttotal memory:" << (_totalMemory / BYTES_PER_KIB) << "KB";
+        qCDebug(gpugllogging) << "\tdedicated memory:" << (_dedicatedMemory / BYTES_PER_KIB) << "KB";
+        qCDebug(gpugllogging) << "\tavailable memory:" << (getAvailableMemory() / BYTES_PER_KIB) << "KB";
         qCDebug(gpugllogging) << "Limits:";
         qCDebug(gpugllogging) << "\tmax textures:" << MAX_TEXTURE_IMAGE_UNITS;
         qCDebug(gpugllogging) << "\tmax texture binding:" << MAX_COMBINED_TEXTURE_IMAGE_UNITS;

@@ -152,6 +214,41 @@ void GLBackend::init() {
     });
 }
 
+size_t GLBackend::getAvailableMemory() {
+    GLint mem;
+
+    switch( _videoCard ) {
+        case NVIDIA:
+            glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &mem);
+            return mem * BYTES_PER_KIB;
+        case ATI:
+            glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, &mem);
+            return mem * BYTES_PER_KIB;
+        case MESA:
+            return 0; // Don't know the current value
+        case Unknown:
+            break;
+    }
+
+    return 0;
+
+}
+
+bool GLBackend::availableMemoryKnown() {
+    switch( _videoCard ) {
+        case NVIDIA:
+            return true;
+        case ATI:
+            return true;
+        case MESA:
+            return false;
+        case Unknown:
+            return false;
+    }
+
+    return false;
+}
+
 GLBackend::GLBackend(bool syncCache) {
     _pipeline._cameraCorrectionBuffer._buffer->flush();
     initShaderBinaryCache();

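The unit handling above is easy to miss, and is why the NVIDIA/ATI paths multiply by BYTES_PER_KIB while the Mesa path multiplies by BYTES_PER_MIB: NVX_gpu_memory_info and ATI_meminfo report sizes in kibibytes, whereas GLX_MESA_query_renderer reports video memory in mebibytes. A quick sanity check of the conversion arithmetic, using an assumed 8 GiB card (values are illustrative only, not from the commit):

    GLint nvxTotalKiB = 8 * 1024 * 1024;                      // NVX reports KiB: 8388608
    size_t fromNvx = (size_t)nvxTotalKiB * BYTES_PER_KIB;     // 8589934592 bytes

    unsigned int mesaTotalMiB = 8 * 1024;                     // Mesa reports MiB: 8192
    size_t fromMesa = (size_t)mesaTotalMiB * BYTES_PER_MIB;   // same 8589934592 bytes

Both paths land on the same byte count, so _totalMemory is comparable regardless of which vendor branch filled it in.
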
@@ -67,6 +67,13 @@ protected:
     GLBackend();
 
 public:
+    enum VideoCardType {
+        ATI,
+        NVIDIA,
+        MESA,
+        Unknown
+    };
+
 #if defined(USE_GLES)
     // https://www.khronos.org/registry/OpenGL-Refpages/es3/html/glGet.xhtml
     static const GLint MIN_REQUIRED_TEXTURE_IMAGE_UNITS = 16;

@@ -89,6 +96,25 @@ public:
     static GLint MAX_COMBINED_TEXTURE_IMAGE_UNITS;
     static GLint MAX_UNIFORM_BLOCK_SIZE;
     static GLint UNIFORM_BUFFER_OFFSET_ALIGNMENT;
+
+    static GLint GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX;
+    static GLint GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX;
+    static GLint GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX;
+    static GLint TEXTURE_FREE_MEMORY_ATI;
+
+    static size_t _totalMemory;
+    static size_t _dedicatedMemory;
+    static VideoCardType _videoCard;
+
+    static size_t getTotalMemory() { return _totalMemory; }
+    static size_t getDedicatedMemory() { return _dedicatedMemory; }
+
+    static size_t getAvailableMemory();
+    static bool availableMemoryKnown();
+
+
+
     virtual ~GLBackend();

@@ -19,6 +19,8 @@
 #define MAX_RESOURCE_TEXTURES_PER_FRAME 2
 #define NO_BUFFER_WORK_SLEEP_TIME_MS 2
 #define THREADED_TEXTURE_BUFFERING 1
+#define MAX_AUTO_FRACTION_OF_TOTAL_MEMORY 0.8f
+#define AUTO_RESERVE_TEXTURE_MEMORY MB_TO_BYTES(64)
 
 static const size_t DEFAULT_ALLOWED_TEXTURE_MEMORY = MB_TO_BYTES(DEFAULT_ALLOWED_TEXTURE_MEMORY_MB);

@@ -183,9 +185,25 @@ void GLTextureTransferEngineDefault::manageMemory() {
 void GLTextureTransferEngineDefault::updateMemoryPressure() {
     PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
 
+    bool useAvailableGlMemory = false;
     size_t allowedMemoryAllocation = gpu::Texture::getAllowedGPUMemoryUsage();
+
     if (0 == allowedMemoryAllocation) {
-        allowedMemoryAllocation = DEFAULT_ALLOWED_TEXTURE_MEMORY;
+        // Automatic allocation
+
+        if (GLBackend::availableMemoryKnown()) {
+            // If we know how much is free, then we use that
+            useAvailableGlMemory = true;
+        } else {
+            // We don't know how much is free, so leave some reasonable spare room
+            // and hope it works.
+            allowedMemoryAllocation = GLBackend::getTotalMemory() * MAX_AUTO_FRACTION_OF_TOTAL_MEMORY;
+
+            if (0 == allowedMemoryAllocation) {
+                // Last resort, if we failed to detect
+                allowedMemoryAllocation = DEFAULT_ALLOWED_TEXTURE_MEMORY;
+            }
+        }
     }
 
     // Clear any defunct textures (weak pointers that no longer have a valid texture)

@@ -205,7 +223,7 @@ void GLTextureTransferEngineDefault::updateMemoryPressure() {
         idealMemoryAllocation += texture->evalTotalSize();
         // Track how much we're actually using
         totalVariableMemoryAllocation += gltexture->size();
-        if (vartexture->canDemote()) {
+        if (!gltexture->_gpuObject.getImportant() && vartexture->canDemote()) {
             canDemote |= true;
         }
         if (vartexture->canPromote()) {

@@ -218,7 +236,22 @@ void GLTextureTransferEngineDefault::updateMemoryPressure() {
 
     Backend::textureResourceIdealGPUMemSize.set(idealMemoryAllocation);
     size_t unallocated = idealMemoryAllocation - totalVariableMemoryAllocation;
-    float pressure = (float)totalVariableMemoryAllocation / (float)allowedMemoryAllocation;
+    float pressure = 0;
+
+    if (useAvailableGlMemory) {
+        size_t totalMem = GLBackend::getTotalMemory();
+        size_t availMem = GLBackend::getAvailableMemory();
+
+        if (availMem >= AUTO_RESERVE_TEXTURE_MEMORY) {
+            availMem -= AUTO_RESERVE_TEXTURE_MEMORY;
+        } else {
+            availMem = 0;
+        }
+
+        pressure = ((float)totalMem - (float)availMem) / (float)totalMem;
+    } else {
+        pressure = (float)totalVariableMemoryAllocation / (float)allowedMemoryAllocation;
+    }
+
     // If we're oversubscribed we need to demote textures IMMEDIATELY
     if (pressure > OVERSUBSCRIBED_PRESSURE_VALUE && canDemote) {

@@ -470,7 +503,7 @@ void GLTextureTransferEngineDefault::processDemotes(size_t reliefRequired, const
     for (const auto& texture : strongTextures) {
         GLTexture* gltexture = Backend::getGPUObject<GLTexture>(*texture);
         GLVariableAllocationSupport* vargltexture = dynamic_cast<GLVariableAllocationSupport*>(gltexture);
-        if (vargltexture->canDemote()) {
+        if (!gltexture->_gpuObject.getImportant() && vargltexture->canDemote()) {
            demoteQueue.push({ texture, (float)gltexture->size() });
         }
     }

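Worked example of the new pressure path (numbers assumed for illustration, not from the commit): with a 4096 MiB card whose driver currently reports 1024 MiB free, the 64 MB AUTO_RESERVE_TEXTURE_MEMORY headroom is subtracted first, leaving 960 MiB usable, so pressure = (4096 - 960) / 4096 ≈ 0.77. As a sketch:

    // Assumed: 4 GiB total, driver reports 1 GiB currently available.
    size_t totalMem = (size_t)4096 * 1024 * 1024;
    size_t availMem = (size_t)1024 * 1024 * 1024;

    availMem -= AUTO_RESERVE_TEXTURE_MEMORY;   // hold back 64 MB of headroom -> 960 MiB

    float pressure = ((float)totalMem - (float)availMem) / (float)totalMem;
    // (4096 - 960) / 4096 = 0.765625, i.e. ~77% of VRAM treated as in use

The result is then compared against OVERSUBSCRIBED_PRESSURE_VALUE exactly as the fixed-budget path was before, so only the input to the threshold changes, not the demote/promote machinery.
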
@@ -571,6 +571,9 @@ public:
     void setExternalRecycler(const ExternalRecycler& recycler);
     ExternalRecycler getExternalRecycler() const;
 
+    bool getImportant() const { return _important; }
+    void setImportant(bool important) { _important = important; }
+
     const GPUObjectPointer gpuObject {};
 
     ExternalUpdates getUpdates() const;

@@ -632,6 +635,7 @@ protected:
     bool _autoGenerateMips = false;
     bool _isIrradianceValid = false;
     bool _defined = false;
+    bool _important = false;
 
     static TexturePointer create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips, const Sampler& sampler);

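The _important flag is the opt-out from automatic demotion: both canDemote() call sites above now skip any texture whose getImportant() returns true. The Font change below uses it for the glyph atlas; a hypothetical sketch for protecting some other must-stay-crisp texture (format, width, height, numMips, and sampler are placeholders, and create2D is assumed to match the existing gpu::Texture factory):

    // Hypothetical: keep a UI texture out of the memory-pressure demote queue.
    gpu::TexturePointer uiTexture = gpu::Texture::create2D(format, width, height, numMips, sampler);
    uiTexture->setImportant(true);   // never demoted, even when pressure is high
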
@@ -260,6 +260,7 @@ void Font::read(QIODevice& in) {
                                gpu::Sampler(gpu::Sampler::FILTER_MIN_POINT_MAG_LINEAR));
     _texture->setStoredMipFormat(formatMip);
     _texture->assignStoredMip(0, image.sizeInBytes(), image.constBits());
+    _texture->setImportant(true);
 }
 
 void Font::setupGPU() {