Mirror of https://github.com/JulianGro/overte.git
Prevent sparse texture on AMD gpus for now

Commit 6a39ad3b5a (parent b1582b474b)
6 changed files with 37 additions and 4 deletions

The commit adds a texture-management initialization stage to the GL backend and, in the GL 4.5 backend, checks the GL_VENDOR string so that sparse textures can be forced off on AMD (and Intel) GPUs for now.

@@ -56,6 +56,7 @@ BackendPointer GLBackend::createBackend() {
    }
    result->initInput();
    result->initTransform();
    result->initTextureManagementStage();

    INSTANCE = result.get();
    void* voidInstance = &(*result);

@@ -176,6 +176,9 @@ public:
    virtual void releaseQuery(GLuint id) const;
    virtual void queueLambda(const std::function<void()> lambda) const;

    bool isTextureManagementSparseEnabled() const { return (_textureManagement._sparseCapable && Texture::getEnableSparseTextures()); }
    bool isTextureManagementIncrementalTransferEnabled() const { return (_textureManagement._incrementalTransferCapable && Texture::getEnableIncrementalTextureTransfers()); }

protected:

    void recycle() const override;

@@ -364,6 +367,12 @@ protected:

    void resetStages();

    struct TextureManagementStageState {
        bool _sparseCapable { false };
        bool _incrementalTransferCapable { false };
    } _textureManagement;
    virtual void initTextureManagementStage() {}

    typedef void (GLBackend::*CommandCall)(const Batch&, size_t);
    static CommandCall _commandCalls[Batch::NUM_COMMANDS];
    friend class GLState;

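The two header hunks above give the base GLBackend a `_textureManagement` capability record and a virtual `initTextureManagementStage()` hook (called from `createBackend()` in the first hunk), and the inline `isTextureManagement*Enabled()` queries combine the detected capability with the user-facing `Texture::getEnable*()` toggles. A minimal sketch of that pattern, using placeholder names (`Backend`, `_caps`, `userEnableSparse`) that are not taken from the repository:

    #include <atomic>

    // Placeholder for the user-facing toggle (Texture::getEnableSparseTextures() in the real code).
    static std::atomic<bool> userEnableSparse { true };

    class Backend {
    public:
        virtual ~Backend() = default;
        // Effective setting = detected capability AND user toggle.
        bool isSparseEnabled() const { return _caps.sparseCapable && userEnableSparse.load(); }
    protected:
        virtual void initTextureManagementStage() {}    // base: leave capabilities off
        struct Capabilities {
            bool sparseCapable { false };
        } _caps;
    };

    class GL45LikeBackend : public Backend {
    protected:
        void initTextureManagementStage() override {
            _caps.sparseCapable = true;                 // detected at backend init time
        }
    };
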
@@ -32,6 +32,7 @@ public:
    static GLuint allocate(const Texture& texture);
    static const uint32_t DEFAULT_PAGE_DIMENSION = 128;
    static const uint32_t DEFAULT_MAX_SPARSE_LEVEL = 0xFFFF;

public:
    GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint externalId);
    GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable);

@@ -132,6 +133,9 @@ protected:

    // Output stage
    void do_blit(const Batch& batch, size_t paramOffset) override;

    // Texture Management Stage
    void initTextureManagementStage() override;
};

} }

@@ -148,6 +148,22 @@ uint32_t SparseInfo::getPageCount(const uvec3& dimensions) const {
    return pageCounts.x * pageCounts.y * pageCounts.z;
}



void GL45Backend::initTextureManagementStage() {

    // enable the Sparse Texture on gl45
    _textureManagement._sparseCapable = true;
    _textureManagement._incrementalTransferCapable = true;

    // But now let s refine the behavior based on vendor
    std::string vendor { (const char*)glGetString(GL_VENDOR) };
    if ((vendor.compare("AMD") <= 0) || (vendor.compare("INTEL") <= 0)) {
        qCDebug(gpugllogging, "GPU is sparse capable but force it off %s\n", vendor);
        _textureManagement._sparseCapable = false;
    }
}

using TransferState = GL45Backend::GL45Texture::TransferState;

TransferState::TransferState(GL45Texture& texture) : texture(texture) {

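A note on the vendor test above: `std::string::compare` performs a lexicographic comparison, so `vendor.compare("AMD") <= 0` tests string ordering rather than whether the GL_VENDOR string mentions AMD, and real vendor strings are typically longer (e.g. "ATI Technologies Inc." or "Intel Open Source Technology Center"). Also, the printf-style `qCDebug` expects a C string for `%s`, so one would normally pass `vendor.c_str()`. A substring-based version of the same gate, shown purely as an illustrative sketch and not as code from the commit:

    #include <string>

    // Illustrative helper (not part of the commit): force sparse textures off
    // when the GL_VENDOR string looks like an AMD/ATI or Intel GPU.
    static bool shouldDisableSparse(const std::string& vendor) {
        return vendor.find("AMD") != std::string::npos ||
               vendor.find("ATI") != std::string::npos ||
               vendor.find("Intel") != std::string::npos ||
               vendor.find("INTEL") != std::string::npos;
    }
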
@@ -250,7 +266,8 @@ GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture&
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable)
    : GLTexture(backend, texture, allocate(texture), transferrable), _sparseInfo(*this), _transferState(*this) {

    if (_transferrable && Texture::getEnableSparseTextures()) {
    auto theBackend = _backend.lock();
    if (_transferrable && theBackend && theBackend->isTextureManagementSparseEnabled()) {
        _sparseInfo.maybeMakeSparse();
        if (_sparseInfo.sparse) {
            Backend::incrementTextureGPUSparseCount();

@@ -362,7 +379,8 @@ void GL45Texture::startTransfer() {
}

bool GL45Texture::continueTransfer() {
    if (!Texture::getEnableIncrementalTextureTransfers()) {
    auto backend = _backend.lock();
    if (!backend || !backend->isTextureManagementIncrementalTransferEnabled()) {
        size_t maxFace = GL_TEXTURE_CUBE_MAP == _target ? CUBE_NUM_FACES : 1;
        for (uint8_t face = 0; face < maxFace; ++face) {
            for (uint16_t mipLevel = _minMip; mipLevel <= _maxMip; ++mipLevel) {

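In both call sites above, the old gate on the static `Texture::getEnable...()` toggle is replaced by locking the stored `std::weak_ptr<GLBackend>` and asking the backend, so the per-vendor capability is honoured and a destroyed backend simply reads as "feature off". A minimal sketch of that lock-then-query pattern, with placeholder names (`Owner`, `useFeature`) that are assumptions rather than identifiers from the repository:

    #include <memory>

    struct Owner {
        bool featureEnabled() const { return enabled; }
        bool enabled { false };
    };

    struct Resource {
        explicit Resource(const std::weak_ptr<Owner>& owner) : _owner(owner) {}

        bool useFeature() const {
            // Promote the weak reference; if the owner is gone, treat the feature as off.
            auto owner = _owner.lock();
            return owner && owner->featureEnabled();
        }

        std::weak_ptr<Owner> _owner;
    };
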
@@ -125,6 +125,7 @@ protected:
    friend class Context;
    ContextStats _stats;
    StereoState _stereo;

};

class Context {

@@ -214,7 +215,7 @@ public:
    static Size getTextureGPUFramebufferMemoryUsage();
    static Size getTextureGPUSparseMemoryUsage();
    static uint32_t getTextureGPUTransferCount();


protected:
    Context(const Context& context);

@@ -270,7 +271,6 @@ protected:
    static std::atomic<Size> _textureGPUFramebufferMemoryUsage;
    static std::atomic<uint32_t> _textureGPUTransferCount;


    friend class Backend;
};
typedef std::shared_ptr<Context> ContextPointer;

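The Context.h hunks above expose GPU texture statistics (`getTextureGPUSparseMemoryUsage()`, `getTextureGPUTransferCount()`, ...) backed by static `std::atomic` members, in the same spirit as the `Backend::incrementTextureGPUSparseCount()` call seen in the constructor hunk. A sketch of that static atomic counter pattern, with placeholder names rather than the real ones:

    #include <atomic>
    #include <cstdint>

    class Stats {
    public:
        static uint32_t getSparseCount() { return _sparseCount.load(); }
        static void incrementSparseCount() { ++_sparseCount; }
        static void decrementSparseCount() { --_sparseCount; }
    private:
        static std::atomic<uint32_t> _sparseCount;   // shared across threads, hence atomic
    };

    std::atomic<uint32_t> Stats::_sparseCount { 0 };
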
@@ -147,6 +147,7 @@ class Texture : public Resource {

    static std::atomic<bool> _enableSparseTextures;
    static std::atomic<bool> _enableIncrementalTextureTransfers;

public:
    static uint32_t getTextureCPUCount();
    static Size getTextureCPUMemoryUsage();