Dynamic texture memory management

This commit is contained in:
Brad Davis 2016-09-10 13:26:30 -07:00
parent 3c82a489fa
commit a7f0ba24ed
15 changed files with 733 additions and 244 deletions

View file

@ -58,7 +58,10 @@ void CachesSizeDialog::confirmClicked(bool checked) {
DependencyManager::get<AnimationCache>()->setUnusedResourceCacheSize(_animations->value() * BYTES_PER_MEGABYTES);
DependencyManager::get<ModelCache>()->setUnusedResourceCacheSize(_geometries->value() * BYTES_PER_MEGABYTES);
DependencyManager::get<SoundCache>()->setUnusedResourceCacheSize(_sounds->value() * BYTES_PER_MEGABYTES);
// Disabling the texture cache because it's a liability in cases where we're overcommitting GPU memory
#if 0
DependencyManager::get<TextureCache>()->setUnusedResourceCacheSize(_textures->value() * BYTES_PER_MEGABYTES);
#endif
QDialog::close();
}
@ -78,4 +81,4 @@ void CachesSizeDialog::reject() {
// Forward the close event to the base class, then notify listeners
// (via the closed() signal) that this dialog is going away.
void CachesSizeDialog::closeEvent(QCloseEvent* event) {
    QDialog::closeEvent(event);
    emit closed();
}
}

View file

@ -365,6 +365,7 @@ protected:
typedef void (GLBackend::*CommandCall)(const Batch&, size_t);
static CommandCall _commandCalls[Batch::NUM_COMMANDS];
friend class GLState;
friend class GLTexture;
};
} }

View file

@ -12,9 +12,244 @@ using namespace gpu;
using namespace gpu::gl;
GLTexelFormat GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
GLTexelFormat texel = { GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE };
return texel;
// Map a gpu::Element (dimension + semantic + numeric type) to the
// corresponding OpenGL sized internal format enum. Any combination
// without an explicit mapping is logged and falls back to the GL_RGBA8
// default.
GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
    GLenum result = GL_RGBA8;
    switch (dstFormat.getDimension()) {
        case gpu::SCALAR: {
            switch (dstFormat.getSemantic()) {
                case gpu::RGB:
                case gpu::RGBA:
                case gpu::SRGB:
                case gpu::SRGBA:
                    // Single-channel color: choose the red-channel format
                    // matching the element's numeric type.
                    switch (dstFormat.getType()) {
                        case gpu::UINT32:
                            result = GL_R32UI;
                            break;
                        case gpu::INT32:
                            result = GL_R32I;
                            break;
                        case gpu::NUINT32:
                            result = GL_R8;
                            break;
                        case gpu::NINT32:
                            result = GL_R8_SNORM;
                            break;
                        case gpu::FLOAT:
                            result = GL_R32F;
                            break;
                        case gpu::UINT16:
                            result = GL_R16UI;
                            break;
                        case gpu::INT16:
                            result = GL_R16I;
                            break;
                        case gpu::NUINT16:
                            result = GL_R16;
                            break;
                        case gpu::NINT16:
                            result = GL_R16_SNORM;
                            break;
                        case gpu::HALF:
                            result = GL_R16F;
                            break;
                        case gpu::UINT8:
                            result = GL_R8UI;
                            break;
                        case gpu::INT8:
                            result = GL_R8I;
                            break;
                        case gpu::NUINT8:
                            // sRGB single channel is expressed as a luminance format.
                            // NOTE(review): GL_SLUMINANCE8 is deprecated in core GL
                            // profiles — confirm the targeted profile accepts it.
                            if ((dstFormat.getSemantic() == gpu::SRGB || dstFormat.getSemantic() == gpu::SRGBA)) {
                                result = GL_SLUMINANCE8;
                            } else {
                                result = GL_R8;
                            }
                            break;
                        case gpu::NINT8:
                            result = GL_R8_SNORM;
                            break;
                        default:
                            Q_UNREACHABLE();
                            break;
                    }
                    break;
                case gpu::COMPRESSED_R:
                    result = GL_COMPRESSED_RED_RGTC1;
                    break;
                case gpu::R11G11B10:
                    // the type should be float
                    result = GL_R11F_G11F_B10F;
                    break;
                case gpu::DEPTH:
                    // Default depth precision; refined by the numeric type below.
                    result = GL_DEPTH_COMPONENT32;
                    switch (dstFormat.getType()) {
                        case gpu::UINT32:
                        case gpu::INT32:
                        case gpu::NUINT32:
                        case gpu::NINT32:
                            result = GL_DEPTH_COMPONENT32;
                            break;
                        case gpu::FLOAT:
                            result = GL_DEPTH_COMPONENT32F;
                            break;
                        case gpu::UINT16:
                        case gpu::INT16:
                        case gpu::NUINT16:
                        case gpu::NINT16:
                        case gpu::HALF:
                            result = GL_DEPTH_COMPONENT16;
                            break;
                        case gpu::UINT8:
                        case gpu::INT8:
                        case gpu::NUINT8:
                        case gpu::NINT8:
                            result = GL_DEPTH_COMPONENT24;
                            break;
                        default:
                            Q_UNREACHABLE();
                            break;
                    }
                    break;
                case gpu::DEPTH_STENCIL:
                    result = GL_DEPTH24_STENCIL8;
                    break;
                default:
                    qCDebug(gpugllogging) << "Unknown combination of texel format";
            }
            break;
        }
        case gpu::VEC2: {
            switch (dstFormat.getSemantic()) {
                case gpu::RGB:
                case gpu::RGBA:
                    result = GL_RG8;
                    break;
                default:
                    qCDebug(gpugllogging) << "Unknown combination of texel format";
            }
            break;
        }
        case gpu::VEC3: {
            switch (dstFormat.getSemantic()) {
                case gpu::RGB:
                case gpu::RGBA:
                    result = GL_RGB8;
                    break;
                case gpu::SRGB:
                case gpu::SRGBA:
                    result = GL_SRGB8; // standard 2.2 gamma correction color
                    break;
                case gpu::COMPRESSED_RGB:
                    result = GL_COMPRESSED_RGB;
                    break;
                case gpu::COMPRESSED_SRGB:
                    result = GL_COMPRESSED_SRGB;
                    break;
                default:
                    qCDebug(gpugllogging) << "Unknown combination of texel format";
            }
            break;
        }
        case gpu::VEC4: {
            switch (dstFormat.getSemantic()) {
                case gpu::RGB:
                    result = GL_RGB8;
                    break;
                case gpu::RGBA:
                    switch (dstFormat.getType()) {
                        case gpu::UINT32:
                            result = GL_RGBA32UI;
                            break;
                        case gpu::INT32:
                            result = GL_RGBA32I;
                            break;
                        case gpu::FLOAT:
                            result = GL_RGBA32F;
                            break;
                        case gpu::UINT16:
                            result = GL_RGBA16UI;
                            break;
                        case gpu::INT16:
                            result = GL_RGBA16I;
                            break;
                        case gpu::NUINT16:
                            result = GL_RGBA16;
                            break;
                        case gpu::NINT16:
                            result = GL_RGBA16_SNORM;
                            break;
                        case gpu::HALF:
                            result = GL_RGBA16F;
                            break;
                        case gpu::UINT8:
                            result = GL_RGBA8UI;
                            break;
                        case gpu::INT8:
                            result = GL_RGBA8I;
                            break;
                        case gpu::NUINT8:
                            result = GL_RGBA8;
                            break;
                        case gpu::NINT8:
                            result = GL_RGBA8_SNORM;
                            break;
                        case gpu::NUINT32:
                        case gpu::NINT32:
                        case gpu::NUM_TYPES: // quiet compiler
                            Q_UNREACHABLE();
                    }
                    break;
                case gpu::SRGB:
                    // NOTE(review): VEC4 sRGB maps to the three-channel GL_SRGB8
                    // (alpha dropped) — confirm this is intentional.
                    result = GL_SRGB8;
                    break;
                case gpu::SRGBA:
                    result = GL_SRGB8_ALPHA8; // standard 2.2 gamma correction color
                    break;
                case gpu::COMPRESSED_RGBA:
                    result = GL_COMPRESSED_RGBA;
                    break;
                case gpu::COMPRESSED_SRGBA:
                    result = GL_COMPRESSED_SRGB_ALPHA;
                    break;
                // FIXME: We will want to support this later
                /*
                case gpu::COMPRESSED_BC3_RGBA:
                    result = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
                    break;
                case gpu::COMPRESSED_BC3_SRGBA:
                    result = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
                    break;
                case gpu::COMPRESSED_BC7_RGBA:
                    result = GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
                    break;
                case gpu::COMPRESSED_BC7_SRGBA:
                    result = GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM;
                    break;
                */
                default:
                    qCDebug(gpugllogging) << "Unknown combination of texel format";
            }
            break;
        }
        default:
            qCDebug(gpugllogging) << "Unknown combination of texel format";
    }
    return result;
}
GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const Element& srcFormat) {

View file

@ -21,7 +21,7 @@ public:
// Convenience overload: evaluate the GL texel format when the source
// and destination element formats are identical.
static GLTexelFormat evalGLTexelFormat(const Element& dstFormat) {
    return evalGLTexelFormat(dstFormat, dstFormat);
}
static GLTexelFormat evalGLTexelFormatInternal(const Element& dstFormat);
static GLenum evalGLTexelFormatInternal(const Element& dstFormat);
static GLTexelFormat evalGLTexelFormat(const Element& dstFormat, const Element& srcFormat);
};

View file

@ -118,34 +118,19 @@ float GLTexture::getMemoryPressure() {
return (float)consumedGpuMemory / (float)availableTextureMemory;
}
// Capture the GL id, size, and mip range of an existing texture so its
// pixels can seed a downsampled replacement. Takes ownership of the GL
// object via takeOwnership(), which zeroes the old wrapper's id. A null
// oldTexture produces an empty (id 0) source.
GLTexture::DownsampleSource::DownsampleSource(const std::weak_ptr<GLBackend>& backend, GLTexture* oldTexture) :
    _backend(backend),
    _size(oldTexture ? oldTexture->_size : 0),
    _texture(oldTexture ? oldTexture->takeOwnership() : 0),
    _minMip(oldTexture ? oldTexture->_minMip : 0),
    _maxMip(oldTexture ? oldTexture->_maxMip : 0)
{
}
// Hand the owned GL texture (if any) back to the backend so it is
// deleted and its memory accounting updated; a no-op if the backend has
// already been destroyed.
GLTexture::DownsampleSource::~DownsampleSource() {
    if (_texture) {
        auto backend = _backend.lock();
        if (backend) {
            backend->releaseTexture(_texture, _size);
        }
    }
}
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const gpu::Texture& texture, GLuint id, GLTexture* originalTexture, bool transferrable) :
// Create the texture and allocate storage
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id, bool transferrable) :
GLObject(backend, texture, id),
_storageStamp(texture.getStamp()),
_target(getGLTextureType(texture)),
_maxMip(texture.maxMip()),
_minMip(texture.minMip()),
_virtualSize(texture.evalTotalSize()),
_transferrable(transferrable),
_downsampleSource(backend, originalTexture)
_transferrable(transferrable)
{
auto strongBackend = _backend.lock();
strongBackend->recycle();
if (_transferrable) {
uint16 mipCount = usedMipLevels();
_currentMaxMipCount = std::max(_currentMaxMipCount, mipCount);
@ -154,27 +139,9 @@ GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const gpu::Texture
} else {
++_textureCountByMips[mipCount];
}
}
}
Backend::incrementTextureGPUCount();
Backend::updateTextureGPUVirtualMemoryUsage(0, _virtualSize);
}
// Create the texture and allocate storage
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id, bool transferrable) :
GLTexture(backend, texture, id, nullptr, transferrable)
{
// FIXME, do during allocation
//Backend::updateTextureGPUMemoryUsage(0, _size);
Backend::setGPUObject(texture, this);
}
// Create the texture and copy from the original higher resolution version
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const gpu::Texture& texture, GLuint id, GLTexture* originalTexture) :
GLTexture(backend, texture, id, originalTexture, originalTexture->_transferrable)
{
Q_ASSERT(_minMip >= originalTexture->_minMip);
// Set the GPU object last because that implicitly destroys the originalTexture object
Backend::setGPUObject(texture, this);
}
@ -196,6 +163,7 @@ GLTexture::~GLTexture() {
auto backend = _backend.lock();
if (backend) {
backend->releaseTexture(_id, _size);
backend->recycle();
}
}
Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, 0);
@ -210,6 +178,28 @@ void GLTexture::createTexture() {
});
}
// Run `f` with this texture bound to its target, then restore whatever
// texture was previously bound so the caller's GL binding state is
// unaffected. Aborts for targets other than 2D and cube map.
void GLTexture::withPreservedTexture(std::function<void()> f) const {
    GLint boundTex = -1;
    switch (_target) {
        case GL_TEXTURE_2D:
            glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
            break;
        case GL_TEXTURE_CUBE_MAP:
            glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
            break;
        default:
            qFatal("Unsupported texture type");
    }
    (void)CHECK_GL_ERROR();
    // NOTE(review): binds `_texture` rather than the `_id` member used
    // elsewhere in this class — confirm both name the same GL object.
    glBindTexture(_target, _texture);
    f();
    glBindTexture(_target, boundTex);
    (void)CHECK_GL_ERROR();
}
void GLTexture::setSize(GLuint size) const {
Backend::updateTextureGPUMemoryUsage(_size, size);
const_cast<GLuint&>(_size) = size;
@ -257,11 +247,6 @@ void GLTexture::postTransfer() {
setSyncState(GLSyncState::Idle);
++_transferCount;
//// The public gltexture becomes available
//_id = _privateTexture;
_downsampleSource.reset();
// At this point the mip pixels have been loaded, we can notify the gpu texture to abandon it's memory
switch (_gpuObject.getType()) {
case Texture::TEX_2D:

View file

@ -73,14 +73,7 @@ public:
return nullptr;
}
// Do we need to reduce texture memory usage?
if (object->isOverMaxMemory() && texturePointer->incremementMinMip()) {
// WARNING, this code path will essentially `delete this`,
// so no dereferencing of this instance should be done past this point
object = new GLTextureType(backend.shared_from_this(), texture, object);
_textureTransferHelper->transferTexture(texturePointer);
return nullptr;
}
((GLTexture*)object)->updateMips();
return object;
}
@ -96,30 +89,23 @@ public:
} else {
object = Backend::getGPUObject<GLTextureType>(*texture);
}
if (!object) {
return 0;
}
GLuint result = object->_id;
if (!shouldSync) {
return object->_id;
}
// Don't return textures that are in transfer state
if (shouldSync) {
if ((object->getSyncState() != GLSyncState::Idle) ||
// Don't return transferrable textures that have never completed transfer
(!object->_transferrable || 0 != object->_transferCount)) {
// Will be either 0 or the original texture being downsampled.
result = object->_downsampleSource._texture;
}
if ((object->getSyncState() != GLSyncState::Idle) ||
// Don't return transferrable textures that have never completed transfer
(!object->_transferrable || 0 != object->_transferCount)) {
return 0;
}
return result;
}
// Used by derived classes and helpers to ensure the actual GL object exceeds the lifetime of `this`
GLuint takeOwnership() {
GLuint result = _id;
const_cast<GLuint&>(_id) = 0;
return result;
return object->_id;
}
~GLTexture();
@ -128,25 +114,11 @@ public:
const Stamp _storageStamp;
const GLenum _target;
const uint16 _maxMip;
const uint16 _minMip;
uint16 _minMip;
const GLuint _virtualSize; // theoretical size as expected
Stamp _contentStamp { 0 };
const bool _transferrable;
Size _transferCount { 0 };
struct DownsampleSource {
using Pointer = std::shared_ptr<DownsampleSource>;
DownsampleSource(const std::weak_ptr<gl::GLBackend>& backend) : _backend(backend), _size(0), _texture(0), _minMip(0), _maxMip(0) {}
DownsampleSource(const std::weak_ptr<gl::GLBackend>& backend, GLTexture* originalTexture);
~DownsampleSource();
void reset() const { const_cast<GLuint&>(_texture) = 0; }
const std::weak_ptr<gl::GLBackend>& _backend;
const GLuint _size { 0 };
const GLuint _texture { 0 };
const uint16 _minMip { 0 };
const uint16 _maxMip { 0 };
} _downsampleSource;
GLuint size() const { return _size; }
GLSyncState getSyncState() const { return _syncState; }
@ -160,7 +132,7 @@ public:
bool isReady() const;
// Execute any post-move operations that must occur only on the main thread
void postTransfer();
virtual void postTransfer();
bool isOverMaxMemory() const;
@ -170,33 +142,34 @@ public:
static const GLenum CUBE_FACE_LAYOUT[6];
static const GLFilterMode FILTER_MODES[Sampler::NUM_FILTERS];
static const GLenum WRAP_MODES[Sampler::NUM_WRAP_MODES];
protected:
static const std::vector<GLenum>& getFaceTargets(GLenum textureType);
static GLenum getGLTextureType(const Texture& texture);
// Return a floating point value indicating how much of the allowed
// texture memory we are currently consuming. A value of 0 indicates
// no texture memory usage, while a value of 1 indicates all available / allowed memory
// is consumed. A value above 1 indicates that there is a problem.
static float getMemoryPressure();
protected:
static const std::vector<GLenum>& getFaceTargets(GLenum textureType);
static GLenum getGLTextureType(const Texture& texture);
const GLuint _size { 0 }; // true size as reported by the gl api
std::atomic<GLSyncState> _syncState { GLSyncState::Idle };
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id, bool transferrable);
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id, GLTexture* originalTexture);
void setSyncState(GLSyncState syncState) { _syncState = syncState; }
void createTexture();
virtual void updateMips() {}
virtual void allocateStorage() const = 0;
virtual void updateSize() const = 0;
virtual void syncSampler() const = 0;
virtual void generateMips() const = 0;
virtual void withPreservedTexture(std::function<void()> f) const = 0;
virtual void withPreservedTexture(std::function<void()> f) const;
protected:
void setSize(GLuint size) const;
@ -207,9 +180,6 @@ protected:
virtual void finishTransfer();
private:
GLTexture(const std::weak_ptr<GLBackend>& backend, const gpu::Texture& gpuTexture, GLuint id, GLTexture* originalTexture, bool transferrable);
friend class GLTextureTransferHelper;
friend class GLBackend;
};

View file

@ -144,9 +144,9 @@ bool GLTextureTransferHelper::process() {
}
gltexture->finishTransfer();
glNamedFramebufferTexture(_readFramebuffer, GL_COLOR_ATTACHMENT0, gltexture->_id, 0);
glBlitNamedFramebuffer(_readFramebuffer, _drawFramebuffer, 0, 0, 1, 1, 0, 0, 1, 1, GL_COLOR_BUFFER_BIT, GL_NEAREST);
clientWait();
//glNamedFramebufferTexture(_readFramebuffer, GL_COLOR_ATTACHMENT0, gltexture->_id, 0);
//glBlitNamedFramebuffer(_readFramebuffer, _drawFramebuffer, 0, 0, 1, 1, 0, 0, 1, 1, GL_COLOR_BUFFER_BIT, GL_NEAREST);
//clientWait();
gltexture->_contentStamp = gltexture->_gpuObject.getDataStamp();
gltexture->updateSize();
gltexture->setSyncState(gpu::gl::GLSyncState::Transferred);

View file

@ -43,7 +43,6 @@ public:
GLuint allocate();
public:
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer, bool transferrable);
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer, GL41Texture* original);
protected:
void transferMip(uint16_t mipLevel, uint8_t face) const;
@ -52,7 +51,6 @@ public:
void updateSize() const override;
void syncSampler() const override;
void generateMips() const override;
void withPreservedTexture(std::function<void()> f) const override;
};

View file

@ -40,30 +40,6 @@ GLTexture* GL41Backend::syncGPUObject(const TexturePointer& texture, bool transf
// Construct a GL 4.1 texture wrapper, allocating the server-side GL object immediately.
GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable) : GLTexture(backend, texture, allocate(), transferrable) {}
// Construct a replacement texture seeded from `original` (downsample path).
GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GL41Texture* original) : GLTexture(backend, texture, allocate(), original) {}
// Run `f` with this texture bound to its target, then restore the
// previous binding so the caller's GL state is unaffected. Aborts for
// targets other than 2D and cube map.
void GL41Texture::withPreservedTexture(std::function<void()> f) const {
    GLint boundTex = -1;
    switch (_target) {
        case GL_TEXTURE_2D:
            glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
            break;
        case GL_TEXTURE_CUBE_MAP:
            glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
            break;
        default:
            qFatal("Unsupported texture type");
    }
    (void)CHECK_GL_ERROR();
    // NOTE(review): binds `_texture` rather than `_id` — confirm both
    // name the same GL object.
    glBindTexture(_target, _texture);
    f();
    glBindTexture(_target, boundTex);
    (void)CHECK_GL_ERROR();
}
void GL41Texture::generateMips() const {
withPreservedTexture([&] {
glGenerateMipmap(_target);
@ -147,35 +123,12 @@ void GL41Texture::startTransfer() {
glBindTexture(_target, _id);
(void)CHECK_GL_ERROR();
if (_downsampleSource._texture) {
GLuint fbo { 0 };
glGenFramebuffers(1, &fbo);
(void)CHECK_GL_ERROR();
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
(void)CHECK_GL_ERROR();
// Find the distance between the old min mip and the new one
uint16 mipOffset = _minMip - _downsampleSource._minMip;
for (uint16 i = _minMip; i <= _maxMip; ++i) {
uint16 targetMip = i - _minMip;
uint16 sourceMip = targetMip + mipOffset;
Vec3u dimensions = _gpuObject.evalMipDimensions(i);
for (GLenum target : getFaceTargets(_target)) {
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, _downsampleSource._texture, sourceMip);
(void)CHECK_GL_ERROR();
glCopyTexSubImage2D(target, targetMip, 0, 0, 0, 0, dimensions.x, dimensions.y);
(void)CHECK_GL_ERROR();
}
}
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);
} else {
// transfer pixels from each face
uint8_t numFaces = (Texture::TEX_CUBE == _gpuObject.getType()) ? CUBE_NUM_FACES : 1;
for (uint8_t f = 0; f < numFaces; f++) {
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
transferMip(i, f);
}
// transfer pixels from each face
uint8_t numFaces = (Texture::TEX_CUBE == _gpuObject.getType()) ? CUBE_NUM_FACES : 1;
for (uint8_t f = 0; f < numFaces; f++) {
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
transferMip(i, f);
}
}
}

View file

@ -147,3 +147,8 @@ void GL45Backend::do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOf
_stats._DSNumAPIDrawcalls++;
(void)CHECK_GL_ERROR();
}
// Recycle hook: run the base class housekeeping, then reclaim GPU
// memory by demoting sparse textures when over budget.
void GL45Backend::recycle() const {
    Parent::recycle();
    derezTextures();
}

View file

@ -18,29 +18,6 @@ namespace gpu { namespace gl45 {
using namespace gpu::gl;
// Tracks progress of an incremental, page-at-a-time texture upload:
// which face/mip is currently being transferred and the geometry of the
// sparse pages used to chunk the data.
struct TransferState {
    GLTexture& _texture;                     // texture being populated
    GLenum _internalFormat { GL_RGBA8 };     // sized GL internal format of the texture
    GLTexelFormat _texelFormat;              // GL format/type pair for uploads
    uint8_t _face { 0 };                     // current cube face (0 for 2D textures)
    uint16_t _mipLevel { 0 };                // current mip being transferred
    uint32_t _bytesPerLine { 0 };
    uint32_t _bytesPerPixel { 0 };
    uint32_t _bytesPerPage { 0 };
    GLuint _maxSparseLevel { 0 };            // deepest mip supporting sparse page commitment
    uvec3 _mipDimensions;                    // dimensions of the current mip
    uvec3 _mipOffset;                        // offset of the current page within the mip
    uvec3 _pageSize;                         // sparse virtual page dimensions
    const uint8_t* _srcPointer { nullptr };  // source pixel data for the current mip/face
    // Size of the (possibly clipped) page at the current offset.
    uvec3 currentPageSize() const;
    void updateSparse();
    void updateMip();
    void populatePage(std::vector<uint8_t>& dest);
    // Advance to the next page/mip/face; returns false when done.
    bool increment();
    TransferState(GLTexture& texture);
};
class GL45Backend : public GLBackend {
using Parent = GLBackend;
// Context Backend static interface required
@ -55,12 +32,47 @@ public:
static GLuint allocate(const Texture& texture);
public:
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable);
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLTexture* original);
~GL45Texture();
void postTransfer() override;
struct SparseInfo {
GL45Texture& _texture;
uvec3 _pageDimensions;
GLuint _maxSparseLevel { 0 };
uint32_t _maxPages { 0 };
uint32_t _pageBytes { 0 };
SparseInfo(GL45Texture& texture);
void update();
uvec3 getPageCounts(const uvec3& dimensions) const;
uint32_t getPageCount(const uvec3& dimensions) const;
};
struct TransferState {
GL45Texture& _texture;
GLTexelFormat _texelFormat;
uint8_t _face { 0 };
uint16_t _mipLevel { 0 };
uint32_t _bytesPerLine { 0 };
uint32_t _bytesPerPixel { 0 };
uint32_t _bytesPerPage { 0 };
uvec3 _mipDimensions;
uvec3 _mipOffset;
const uint8_t* _srcPointer { nullptr };
uvec3 currentPageSize() const;
void updateMip();
void populatePage(std::vector<uint8_t>& dest);
bool increment();
TransferState(GL45Texture& texture);
};
protected:
void updateMips() override;
void stripToMip(uint16_t newMinMip);
void startTransfer() override;
bool continueTransfer() override;
void finishTransfer() override;
void incrementalTransfer(const uvec3& size, const gpu::Texture::PixelsPointer& mip, std::function<void(const ivec3& offset, const uvec3& size)> f) const;
void transferMip(uint16_t mipLevel, uint8_t face = 0) const;
void allocateMip(uint16_t mipLevel, uint8_t face = 0) const;
@ -69,12 +81,21 @@ public:
void syncSampler() const override;
void generateMips() const override;
void withPreservedTexture(std::function<void()> f) const override;
void derez();
SparseInfo _sparseInfo;
TransferState _transferState;
uint32_t _allocatedPages { 0 };
uint32_t _lastMipAllocatedPages { 0 };
bool _sparse { false };
friend class GL45Backend;
};
protected:
void recycle() const override;
void derezTextures() const;
GLuint getFramebufferID(const FramebufferPointer& framebuffer) override;
GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) override;

View file

@ -24,28 +24,66 @@ using namespace gpu;
using namespace gpu::gl;
using namespace gpu::gl45;
#ifdef THREADED_TEXTURE_TRANSFER
#define SPARSE_TEXTURES 1
#else
#define SPARSE_TEXTURES 0
#endif
// Allocate 1 MB of buffer space for paged transfers
#define DEFAULT_PAGE_BUFFER_SIZE (1024*1024)
using GL45Texture = GL45Backend::GL45Texture;
static std::map<uint16_t, std::unordered_set<GL45Texture*>> texturesByMipCounts;
GLTexture* GL45Backend::syncGPUObject(const TexturePointer& texture, bool transfer) {
return GL45Texture::sync<GL45Texture>(*this, texture, transfer);
}
TransferState::TransferState(GLTexture& texture) : _texture(texture) {
using SparseInfo = GL45Backend::GL45Texture::SparseInfo;
// Bind the sparse-allocation bookkeeping to its owning texture; the
// actual page-size and sparse-level queries happen later in update().
SparseInfo::SparseInfo(GL45Texture& texture)
    : _texture(texture) {
}
void TransferState::updateSparse() {
void SparseInfo::update() {
glGetTextureParameterIuiv(_texture._id, GL_NUM_SPARSE_LEVELS_ARB, &_maxSparseLevel);
_internalFormat = gl::GLTexelFormat::evalGLTexelFormat(_texture._gpuObject.getTexelFormat(), _texture._gpuObject.getTexelFormat()).internalFormat;
GLenum internalFormat = gl::GLTexelFormat::evalGLTexelFormat(_texture._gpuObject.getTexelFormat(), _texture._gpuObject.getTexelFormat()).internalFormat;
ivec3 pageSize;
glGetInternalformativ(_texture._target, _internalFormat, GL_VIRTUAL_PAGE_SIZE_X_ARB, 1, &pageSize.x);
glGetInternalformativ(_texture._target, _internalFormat, GL_VIRTUAL_PAGE_SIZE_Y_ARB, 1, &pageSize.y);
glGetInternalformativ(_texture._target, _internalFormat, GL_VIRTUAL_PAGE_SIZE_Z_ARB, 1, &pageSize.z);
_pageSize = uvec3(pageSize);
glGetInternalformativ(_texture._target, internalFormat, GL_VIRTUAL_PAGE_SIZE_X_ARB, 1, &pageSize.x);
glGetInternalformativ(_texture._target, internalFormat, GL_VIRTUAL_PAGE_SIZE_Y_ARB, 1, &pageSize.y);
glGetInternalformativ(_texture._target, internalFormat, GL_VIRTUAL_PAGE_SIZE_Z_ARB, 1, &pageSize.z);
_pageDimensions = uvec3(pageSize);
_pageBytes = _texture._gpuObject.getTexelFormat().getSize();
_pageBytes *= _pageDimensions.x * _pageDimensions.y * _pageDimensions.z;
for (uint16_t mipLevel = 0; mipLevel <= _maxSparseLevel; ++mipLevel) {
auto mipDimensions = _texture._gpuObject.evalMipDimensions(mipLevel);
auto mipPageCount = getPageCount(mipDimensions);
_maxPages += mipPageCount;
}
if (_texture._target == GL_TEXTURE_CUBE_MAP) {
_maxPages *= GLTexture::CUBE_NUM_FACES;
}
}
// Number of sparse pages needed along each axis to cover `dimensions`,
// i.e. a per-component ceiling division by the page dimensions.
uvec3 SparseInfo::getPageCounts(const uvec3& dimensions) const {
    const uvec3 wholePages = dimensions / _pageDimensions;
    const uvec3 partialPages = glm::clamp(dimensions % _pageDimensions, glm::uvec3(0), glm::uvec3(1));
    return wholePages + partialPages;
}
// Total number of sparse pages needed to cover a volume of `dimensions` texels.
uint32_t SparseInfo::getPageCount(const uvec3& dimensions) const {
    const auto counts = getPageCounts(dimensions);
    return counts.x * counts.y * counts.z;
}
using TransferState = GL45Backend::GL45Texture::TransferState;
// Bind the transfer-progress state to its owning texture.
TransferState::TransferState(GL45Texture& texture) : _texture(texture) {
}
void TransferState::updateMip() {
@ -64,18 +102,19 @@ void TransferState::updateMip() {
}
bool TransferState::increment() {
if ((_mipOffset.x + _pageSize.x) < _mipDimensions.x) {
_mipOffset.x += _pageSize.x;
const SparseInfo& sparse = _texture._sparseInfo;
if ((_mipOffset.x + sparse._pageDimensions.x) < _mipDimensions.x) {
_mipOffset.x += sparse._pageDimensions.x;
return true;
}
if ((_mipOffset.y + _pageSize.y) < _mipDimensions.y) {
if ((_mipOffset.y + sparse._pageDimensions.y) < _mipDimensions.y) {
_mipOffset.x = 0;
_mipOffset.y += _pageSize.y;
_mipOffset.y += sparse._pageDimensions.y;
return true;
}
if (_mipOffset.z + _pageSize.z < _mipDimensions.z) {
if (_mipOffset.z + sparse._pageDimensions.z < _mipDimensions.z) {
_mipOffset.x = 0;
_mipOffset.y = 0;
++_mipOffset.z;
@ -125,7 +164,7 @@ void TransferState::populatePage(std::vector<uint8_t>& buffer) {
}
uvec3 TransferState::currentPageSize() const {
return glm::clamp(_mipDimensions - _mipOffset, uvec3(1), _pageSize);
return glm::clamp(_mipDimensions - _mipOffset, uvec3(1), _texture._sparseInfo._pageDimensions);
}
GLuint GL45Texture::allocate(const Texture& texture) {
@ -139,25 +178,44 @@ GLuint GL45Backend::getTextureID(const TexturePointer& texture, bool transfer) {
}
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable)
: GLTexture(backend, texture, allocate(texture), transferrable), _transferState(*this) {
: GLTexture(backend, texture, allocate(texture), transferrable), _sparseInfo(*this), _transferState(*this) {
#if SPARSE_TEXTURES
if (transferrable) {
_sparse = _transferrable;
#endif
if (_sparse) {
glTextureParameteri(_id, GL_TEXTURE_SPARSE_ARB, GL_TRUE);
}
#endif
}
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLTexture* original)
: GLTexture(backend, texture, allocate(texture), original), _transferState(*this) { }
// Tear down a GL 4.5 texture. For sparse textures this (a) removes the
// texture from the global mip-count index used by derezTextures(), and
// (b) explicitly decommits every resident sparse page so the driver can
// reclaim the physical memory, warning if the page accounting does not
// reach zero.
GL45Texture::~GL45Texture() {
    // FIXME do we need to explicitly deallocate the virtual memory here?
    //if (_transferrable) {
    //    for (uint16_t mipLevel = 0; mipLevel < usedMipLevels(); ++i) {
    //        glTexturePageCommitmentEXT(_id, mipLevel, offset.x, offset.y, offset.z, size.x, size.y, size.z, GL_TRUE);
    //    }
    //}
    if (_sparse) {
        // Drop this texture from the derez candidate index.
        auto mipLevels = usedMipLevels();
        if (texturesByMipCounts.count(mipLevels)) {
            auto& textures = texturesByMipCounts[mipLevels];
            textures.erase(this);
            if (textures.empty()) {
                texturesByMipCounts.erase(mipLevels);
            }
        }
        auto originalAllocatedPages = _allocatedPages;
        // Only mips up to the sparse level can have committed pages.
        auto maxSparseMip = std::min<uint16_t>(_maxMip, _sparseInfo._maxSparseLevel);
        uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
        for (uint16_t mipLevel = _minMip; mipLevel <= maxSparseMip; ++mipLevel) {
            auto mipDimensions = _gpuObject.evalMipDimensions(mipLevel);
            auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions);
            for (uint8_t face = 0; face < maxFace; ++face) {
                // GL_FALSE decommits the pages covering this mip/face.
                glTexturePageCommitmentEXT(_id, mipLevel, 0, 0, face, mipDimensions.x, mipDimensions.y, mipDimensions.z, GL_FALSE);
                assert(deallocatedPages <= _allocatedPages);
                _allocatedPages -= deallocatedPages;
            }
        }
        if (0 != _allocatedPages) {
            // NOTE(review): maxSize is unused — candidate for removal.
            auto maxSize = _gpuObject.evalMipDimensions(0);
            qDebug() << "Allocated pages remaining " << _id << " " << _allocatedPages;
            qDebug() << originalAllocatedPages;
        }
    }
}
void GL45Texture::withPreservedTexture(std::function<void()> f) const {
@ -183,19 +241,20 @@ void GL45Texture::allocateStorage() const {
}
void GL45Texture::updateSize() const {
setSize(_virtualSize);
if (!_id) {
return;
}
if (_gpuObject.getTexelFormat().isCompressed()) {
qFatal("Compressed textures not yet supported");
}
if (_transferrable) {
setSize(_allocatedPages * _sparseInfo._pageBytes);
} else {
setSize(_virtualSize);
}
}
void GL45Texture::startTransfer() {
Parent::startTransfer();
_transferState.updateSparse();
_sparseInfo.update();
_transferState.updateMip();
}
@ -207,14 +266,20 @@ bool GL45Texture::continueTransfer() {
uvec3 pageSize = _transferState.currentPageSize();
uvec3 offset = _transferState._mipOffset;
#if SPARSE_TEXTURES
if (_transferState._mipLevel <= _transferState._maxSparseLevel) {
glTexturePageCommitmentEXT(_id, _transferState._mipLevel,
// FIXME we should be using the DSA for all of this
if (_sparse && _transferState._mipLevel <= _sparseInfo._maxSparseLevel) {
if (_allocatedPages > _sparseInfo._maxPages) {
qDebug() << "Exceeded max page allocation!";
}
glBindTexture(_target, _id);
// FIXME we should be using glTexturePageCommitmentEXT, but for some reason it causes out of memory errors.
// Either I'm not understanding how it should work or there's a driver bug.
glTexPageCommitmentARB(_target, _transferState._mipLevel,
offset.x, offset.y, _transferState._face,
pageSize.x, pageSize.y, pageSize.z,
GL_TRUE);
++_allocatedPages;
}
#endif
if (_transferState._srcPointer) {
// Transfer the mip data
@ -236,16 +301,31 @@ bool GL45Texture::continueTransfer() {
}
serverWait();
return _transferState.increment();
auto currentMip = _transferState._mipLevel;
auto result = _transferState.increment();
if (_transferState._mipLevel != currentMip && currentMip <= _sparseInfo._maxSparseLevel) {
auto mipDimensions = _gpuObject.evalMipDimensions(currentMip);
auto mipExpectedPages = _sparseInfo.getPageCount(mipDimensions);
auto newPages = _allocatedPages - _lastMipAllocatedPages;
if (newPages != mipExpectedPages) {
qWarning() << "Unexpected page allocation size... " << newPages << " " << mipExpectedPages;
}
_lastMipAllocatedPages = _allocatedPages;
}
return result;
}
void GL45Backend::GL45Texture::syncSampler() const {
// Finalize the upload: no GL45-specific work beyond the base class hook.
void GL45Texture::finishTransfer() {
    Parent::finishTransfer();
}
void GL45Texture::syncSampler() const {
const Sampler& sampler = _gpuObject.getSampler();
const auto& fm = FILTER_MODES[sampler.getFilter()];
glTextureParameteri(_id, GL_TEXTURE_MIN_FILTER, fm.minFilter);
glTextureParameteri(_id, GL_TEXTURE_MAG_FILTER, fm.magFilter);
if (sampler.doComparison()) {
glTextureParameteri(_id, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
glTextureParameteri(_id, GL_TEXTURE_COMPARE_FUNC, COMPARISON_TO_GL[sampler.getComparisonFunction()]);
@ -257,9 +337,149 @@ void GL45Backend::GL45Texture::syncSampler() const {
glTextureParameteri(_id, GL_TEXTURE_WRAP_T, WRAP_MODES[sampler.getWrapModeV()]);
glTextureParameteri(_id, GL_TEXTURE_WRAP_R, WRAP_MODES[sampler.getWrapModeW()]);
glTextureParameterfv(_id, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, (uint16)sampler.getMipOffset());
auto baseMip = std::max<uint16_t>(sampler.getMipOffset(), _minMip);
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, baseMip);
glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
glTextureParameterf(_id, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
}
// Called on the main thread after the transfer thread has finished
// uploading. Registers this texture in the global mip-count index so
// derezTextures() can later pick candidates to strip when the GPU
// memory budget is exceeded.
// Fix: removed the unused local alias `auto& textureMap = texturesByMipCounts;`.
void GL45Texture::postTransfer() {
    Parent::postTransfer();
    if (_transferrable) {
        auto mipLevels = usedMipLevels();
        // Only textures that still have strippable mips are useful derez candidates.
        // NOTE(review): gated on _transferrable while ~GL45Texture/stripToMip check
        // _sparse — confirm non-sparse transferrable textures belong in this map.
        if (mipLevels > 1 && _minMip < _sparseInfo._maxSparseLevel) {
            texturesByMipCounts[mipLevels].insert(this);
        }
    }
}
// Demote this sparse texture so `newMinMip` becomes its lowest resident
// mip: regenerates the mip chain, decommits the sparse pages of every
// mip below the new minimum, resyncs the sampler, updates the tracked
// size, and re-registers the texture in the mip-count index. Refuses to
// lower the min mip or to strip into the mip tail.
// Fix: removed the unused local alias `auto& textureMap = texturesByMipCounts;`.
void GL45Texture::stripToMip(uint16_t newMinMip) {
    if (!_sparse) {
        return;
    }
    if (newMinMip < _minMip) {
        qWarning() << "Cannot decrease the min mip";
        return;
    }
    if (newMinMip >= _sparseInfo._maxSparseLevel) {
        qWarning() << "Cannot increase the min mip into the mip tail";
        return;
    }
    // Drop the stale entry from the mip-count index; it is re-inserted
    // under the new mip count at the end of this function.
    auto mipLevels = usedMipLevels();
    assert(0 != texturesByMipCounts.count(mipLevels));
    assert(0 != texturesByMipCounts[mipLevels].count(this));
    texturesByMipCounts[mipLevels].erase(this);
    if (texturesByMipCounts[mipLevels].empty()) {
        texturesByMipCounts.erase(mipLevels);
    }

    // FIXME this shouldn't be necessary should it?
#if 1
    glGenerateTextureMipmap(_id);
#else
    // Alternative path: blit the current min mip down into the new min mip.
    static GLuint framebuffers[2] = { 0, 0 };
    static std::once_flag initFramebuffers;
    std::call_once(initFramebuffers, [&] {
        glCreateFramebuffers(2, framebuffers);
    });
    auto readSize = _gpuObject.evalMipDimensions(_minMip);
    auto drawSize = _gpuObject.evalMipDimensions(newMinMip);
    glNamedFramebufferTexture(framebuffers[0], GL_COLOR_ATTACHMENT0, _id, _minMip);
    glNamedFramebufferTexture(framebuffers[1], GL_COLOR_ATTACHMENT0, _id, newMinMip);
    glBlitNamedFramebuffer(framebuffers[0], framebuffers[1],
        0, 0, readSize.x, readSize.y,
        0, 0, drawSize.x, drawSize.y,
        GL_COLOR_BUFFER_BIT, GL_LINEAR);
#endif

    // Decommit the sparse pages of every mip below the new minimum.
    uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
    for (uint16_t mip = _minMip; mip < newMinMip; ++mip) {
        auto mipDimensions = _gpuObject.evalMipDimensions(mip);
        auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions);
        for (uint8_t face = 0; face < maxFace; ++face) {
            glTexturePageCommitmentEXT(_id, mip,
                0, 0, face,
                mipDimensions.x, mipDimensions.y, mipDimensions.z,
                GL_FALSE);
            assert(deallocatedPages < _allocatedPages);
            _allocatedPages -= deallocatedPages;
        }
    }
    _minMip = newMinMip;
    // Re-sync the sampler to force access to the new mip level
    syncSampler();

    // Stripping mips should never grow the tracked size; log if it does.
    size_t oldSize = _size;
    updateSize();
    size_t newSize = _size;
    if (newSize > oldSize) {
        qDebug() << "WTF";
        qDebug() << "\told size " << oldSize;
        qDebug() << "\tnew size " << newSize;
    }

    // Re-insert into the texture-by-mips map if appropriate
    mipLevels = usedMipLevels();
    if (mipLevels > 1 && _minMip < _sparseInfo._maxSparseLevel) {
        texturesByMipCounts[mipLevels].insert(this);
    }
}
// Bring the resident mip range in line with the min mip requested on the
// gpu::Texture, clamped so we never strip into the sparse mip tail.
// stripToMip only supports raising the min mip (releasing pages), so a
// requested decrease is silently ignored here.
void GL45Texture::updateMips() {
    if (!_sparse) {
        return;
    }
    // Removed an unused `bool modified` flag that was never read or written.
    auto newMinMip = std::min<uint16_t>(_gpuObject.minMip(), _sparseInfo._maxSparseLevel);
    if (_minMip < newMinMip) {
        stripToMip(newMinMip);
    }
}
// Shed exactly one mip level of resident sparse pages from this texture.
void GL45Texture::derez() {
    // Non-sparse textures cannot release page commitments; nothing to do.
    if (!_sparse) {
        return;
    }
    // Invariants: the texture must be transferrable, must have a mip left to
    // drop, and must remain outside the sparse mip tail.
    assert(_transferrable);
    assert(_minMip < _maxMip);
    assert(_minMip < _sparseInfo._maxSparseLevel);
    stripToMip(_minMip + 1);
}
// When GPU texture memory is overcommitted, pick one texture from the bucket
// with the most resident mip levels and drop one mip from it.  Called
// repeatedly by the backend until memory pressure falls below the threshold.
void GL45Backend::derezTextures() const {
    // Pressure >= 1.0 means we are over the allowed GPU memory budget.
    if (GLTexture::getMemoryPressure() < 1.0f) {
        return;
    }
    qDebug() << "Allowed texture memory " << Texture::getAllowedGPUMemoryUsage();
    qDebug() << "Used texture memory " << Context::getTextureGPUMemoryUsage();
    if (texturesByMipCounts.empty()) {
        qDebug() << "No available textures to derez";
        return;
    }
    // Target the bucket with the highest resident mip count; textures already
    // down to a single mip cannot be reduced further.
    // (Removed unused locals `textureMap` and `newMipLevel`.)
    auto mipLevel = texturesByMipCounts.rbegin()->first;
    if (mipLevel <= 1) {
        qDebug() << "Max mip levels " << mipLevel;
        return;
    }
    qDebug() << "Derez a texture";
    GL45Texture* targetTexture = nullptr;
    {
        auto& textures = texturesByMipCounts[mipLevel];
        assert(!textures.empty());
        targetTexture = *textures.begin();
    }
    // derez() updates the bucket map and the global usage counters.
    targetTexture->derez();
    qDebug() << "New Used texture memory " << Context::getTextureGPUMemoryUsage();
}

View file

@ -35,8 +35,7 @@
#include "ModelNetworkingLogging.h"
TextureCache::TextureCache() {
const qint64 TEXTURE_DEFAULT_UNUSED_MAX_SIZE = DEFAULT_UNUSED_MAX_SIZE;
setUnusedResourceCacheSize(TEXTURE_DEFAULT_UNUSED_MAX_SIZE);
setUnusedResourceCacheSize(0);
setObjectName("TextureCache");
// Expose enum Type to JS/QML via properties

View file

@ -666,6 +666,14 @@ protected:
toggleCulling();
return;
case Qt::Key_Home:
gpu::Texture::setAllowedGPUMemoryUsage(0);
return;
case Qt::Key_End:
gpu::Texture::setAllowedGPUMemoryUsage(MB_TO_BYTES(256));
return;
default:
break;
@ -776,10 +784,11 @@ private:
};
void updateText() {
QString title = QString("FPS %1 Culling %2 TextureMemory GPU %3 CPU %4")
QString title = QString("FPS %1 Culling %2 TextureMemory GPU %3 CPU %4 Max GPU %5")
.arg(_fps).arg(_cullingEnabled)
.arg(toHumanSize(gpu::Context::getTextureGPUMemoryUsage(), 2))
.arg(toHumanSize(gpu::Texture::getTextureCPUMemoryUsage(), 2));
.arg(toHumanSize(gpu::Texture::getTextureCPUMemoryUsage(), 2))
.arg(toHumanSize(gpu::Texture::getAllowedGPUMemoryUsage(), 2));
setTitle(title);
#if 0
{
@ -1111,7 +1120,6 @@ int main(int argc, char** argv) {
QCoreApplication::setApplicationName("RenderPerf");
QCoreApplication::setOrganizationName("High Fidelity");
QCoreApplication::setOrganizationDomain("highfidelity.com");
qInstallMessageHandler(messageHandler);
QLoggingCategory::setFilterRules(LOG_FILTER_RULES);
QTestWindow::setup();

View file

@ -297,8 +297,6 @@ public:
};
QTestWindow() {
_currentTexture = _textures.end();
{
QStringList stringList;
QFile textFile(DATA_DIR.path() + "/loads.txt");
@ -318,12 +316,13 @@ public:
QString timeStr = s.left(index);
auto time = timeStr.toUInt();
QString path = DATA_DIR.path() + "/" + s.right(s.length() - index).trimmed();
qDebug() << "Path " << path;
if (!QFileInfo(path).exists()) {
continue;
}
_textureLoads.push({ time, path, s });
qDebug() << "Path " << path;
_texturesFiles.push_back({ time, path, s });
}
_textures.resize(_texturesFiles.size());
}
installEventFilter(this);
@ -383,6 +382,33 @@ protected:
}
// Keyboard shortcuts for exercising the texture memory management paths in
// this test window.  Unhandled keys fall through to the base QWindow handler.
void keyPressEvent(QKeyEvent* event) override {
    switch (event->key()) {
        case Qt::Key_Left:
            // Select and load the previous texture in the list
            prevTexture();
            break;
        case Qt::Key_Right:
            // Select and load the next texture in the list
            nextTexture();
            break;
        case Qt::Key_Return:
            // Log current/delta GPU memory (NVX query)
            reportMemory();
            break;
        case Qt::Key_PageDown:
            // Raise the current texture's min mip by one
            derezTexture();
            break;
        case Qt::Key_Home:
            // Release every loaded texture
            unloadAll();
            break;
        case Qt::Key_End:
            // Load every texture file in the list
            loadAll();
            break;
        case Qt::Key_Down:
            // Load the currently selected texture
            loadTexture();
            break;
        case Qt::Key_Up:
            // Release the currently selected texture
            unloadTexture();
            break;
    }
    QWindow::keyPressEvent(event);
}
void keyReleaseEvent(QKeyEvent* event) override {
@ -395,10 +421,80 @@ protected:
resizeWindow(ev->size());
}
// Advance the selection to the next texture (wrapping at the end) and make
// sure the newly selected texture is loaded.
void nextTexture() {
    // Nothing to cycle through when no textures are configured.
    if (_textures.empty()) {
        return;
    }
    // Increment with explicit wraparound instead of modulo arithmetic.
    if (++_currentTextureIndex >= _textures.size()) {
        _currentTextureIndex = 0;
    }
    loadTexture();
}
// Move the selection to the previous texture (wrapping at the front) and
// make sure the newly selected texture is loaded.
void prevTexture() {
    // Nothing to cycle through when no textures are configured.
    if (_textures.empty()) {
        return;
    }
    // Decrement with explicit wraparound instead of modulo arithmetic.
    _currentTextureIndex = (_currentTextureIndex == 0)
        ? _textures.size() - 1
        : _currentTextureIndex - 1;
    loadTexture();
}
void reportMemory() {
static GLint lastMemory = 0;
GLint availableMem;
glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &availableMem);
qDebug() << "Memory available " << availableMem;
if (lastMemory != 0) {
qDebug() << "Delta " << availableMem - lastMemory;
}
lastMemory = availableMem;
}
// Raise the min mip of the currently selected texture by one level, dropping
// its highest-resolution resident mip.  No-op when nothing is selected.
void derezTexture() {
    // Guard the empty list too: indexing an empty vector is undefined
    // behavior (the sibling unloadTexture() already guards this way).
    if (_textures.empty() || !_textures[_currentTextureIndex]) {
        return;
    }
    auto texture = _textures[_currentTextureIndex];
    texture->setMinMip(texture->minMip() + 1);
}
// Ensure the currently selected texture is loaded, fetching it from the
// TextureCache on demand.  No-op when it is already resident.
void loadTexture() {
    // Guard the empty list: Key_Down invokes this directly, without the
    // empty check nextTexture()/prevTexture() perform first.
    if (_textures.empty() || _textures[_currentTextureIndex]) {
        return;
    }
    auto file = _texturesFiles[_currentTextureIndex].file;
    qDebug() << "Loading texture " << file;
    _textures[_currentTextureIndex] = DependencyManager::get<TextureCache>()->getImageTexture(file);
}
// Release the currently selected texture; the slot stays in the vector so
// indices remain stable.
void unloadTexture() {
    // No-op when the texture list is empty.
    if (_textures.empty()) {
        return;
    }
    // Dropping the shared pointer releases our reference to the texture.
    auto& slot = _textures[_currentTextureIndex];
    slot.reset();
}
// Load every texture file that is not already resident.
void loadAll() {
    // size_t index avoids the signed/unsigned comparison the original
    // `auto i = 0` (int) produced against _texturesFiles.size().
    for (size_t i = 0; i < _texturesFiles.size(); ++i) {
        if (_textures[i]) {
            continue;
        }
        auto file = _texturesFiles[i].file;
        qDebug() << "Loading texture " << file;
        _textures[i] = DependencyManager::get<TextureCache>()->getImageTexture(file);
    }
}
void unloadAll() {
for (auto& texture : _textures) {
texture.reset();
}
}
private:
std::queue<TextureLoad> _textureLoads;
std::list<gpu::TexturePointer> _textures;
std::list<gpu::TexturePointer>::iterator _currentTexture;
size_t _currentTextureIndex { 0 };
std::vector<TextureLoad> _texturesFiles;
std::vector<gpu::TexturePointer> _textures;
uint16_t _fps;
gpu::PipelinePointer _simplePipeline;
@ -438,7 +534,8 @@ private:
auto now = usecTimestampNow();
static auto last = now;
auto delta = (now - last) / USECS_PER_MSEC;
if (!_textureLoads.empty()) {
#if 0
if (!_textures.empty()) {
const auto& front = _textureLoads.front();
if (delta >= front.time) {
QFileInfo fileInfo(front.file);
@ -456,6 +553,7 @@ private:
}
}
}
#endif
}
void render() {
@ -474,14 +572,8 @@ private:
auto vpsize = framebuffer->getSize();
auto vppos = ivec2(0);
batch.setViewportTransform(ivec4(vppos, vpsize));
if (_currentTexture != _textures.end()) {
++_currentTexture;
}
if (_currentTexture == _textures.end()) {
_currentTexture = _textures.begin();
}
if (_currentTexture != _textures.end()) {
batch.setResourceTexture(0, *_currentTexture);
if (!_textures.empty()) {
batch.setResourceTexture(0, _textures[_currentTextureIndex]);
}
batch.setPipeline(_simplePipeline);
batch.draw(gpu::TRIANGLE_STRIP, 4);
@ -564,7 +656,6 @@ int main(int argc, char** argv) {
}).waitForDownload();
}
QTestWindow::setup();
QTestWindow window;
app.exec();