Remove incremental transfers

Author: Brad Davis
Date:   2016-11-09 12:22:57 -08:00
Parent: f5fd4bf8d1
Commit: db98a742b6

6 changed files with 51 additions and 64 deletions

Menu.cpp

@ -396,15 +396,6 @@ Menu::Menu() {
});
}
// Developer > Render > Enable Incremental Texture Transfer
{
auto action = addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::IncrementalTextureTransfer, 0, gpu::Texture::getEnableIncrementalTextureTransfers());
connect(action, &QAction::triggered, [&](bool checked) {
qDebug() << "[TEXTURE TRANSFER SUPPORT] --- Enable Incremental Texture Transfer menu option:" << checked;
gpu::Texture::setEnableIncrementalTextureTransfers(checked);
});
}
#else
qDebug() << "[TEXTURE TRANSFER SUPPORT] Incremental Texture Transfer and Dynamic Texture Management not supported on this platform.";
#endif
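
The block removed above is standard Qt wiring: a checkable menu action whose triggered(bool) signal forwards the new state to a static engine toggle. A minimal self-contained sketch of that pattern, using plain QMenu/QAction and a hypothetical Settings struct standing in for gpu::Texture (the application's addCheckableActionToQMenuAndActionHash helper is not reproduced here):

#include <QApplication>
#include <QMenu>
#include <QAction>
#include <QDebug>
#include <QPoint>

// Hypothetical stand-in for the engine-side toggle
// (gpu::Texture::setEnableIncrementalTextureTransfers in the hunk above).
struct Settings {
    static bool incrementalTransfers;
};
bool Settings::incrementalTransfers = false;

int main(int argc, char** argv) {
    QApplication app(argc, argv);

    QMenu renderOptionsMenu("Render");
    QAction* action = renderOptionsMenu.addAction("Enable Incremental Texture Transfer");
    action->setCheckable(true);
    action->setChecked(Settings::incrementalTransfers);   // start from the current engine state
    // triggered(bool) carries the new checked state; forward it to the engine toggle.
    QObject::connect(action, &QAction::triggered, [](bool checked) {
        qDebug() << "Incremental texture transfer:" << checked;
        Settings::incrementalTransfers = checked;
    });

    renderOptionsMenu.exec(QPoint(100, 100));   // pop the menu up once for demonstration
    return 0;
}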

Menu.h

@ -112,7 +112,6 @@ namespace MenuOption {
const QString FrameTimer = "Show Timer";
const QString FullscreenMirror = "Mirror";
const QString Help = "Help...";
const QString IncrementalTextureTransfer = "Enable Incremental Texture Transfer";
const QString IncreaseAvatarSize = "Increase Avatar Size";
const QString IndependentMode = "Independent Mode";
const QString ActionMotorControl = "Enable Default Motor Control";

GL45Backend.h

@ -14,6 +14,8 @@
#include "../gl/GLBackend.h"
#include "../gl/GLTexture.h"
#define INCREMENTAL_TRANSFER 0
namespace gpu { namespace gl45 {
using namespace gpu::gl;
@ -56,6 +58,7 @@ public:
GLint pageDimensionsIndex { 0 };
};
#if INCREMENTAL_TRANSFER
struct TransferState {
TransferState(GL45Texture& texture);
uvec3 currentPageSize() const;
@ -74,6 +77,10 @@ public:
uvec3 mipOffset;
const uint8_t* srcPointer { nullptr };
};
protected:
TransferState _transferState;
#endif
protected:
void updateMips() override;
void stripToMip(uint16_t newMinMip);
@ -91,7 +98,6 @@ public:
void derez();
SparseInfo _sparseInfo;
TransferState _transferState;
uint32_t _allocatedPages { 0 };
uint32_t _lastMipAllocatedPages { 0 };
uint16_t _mipOffset { 0 };
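
The header edits above, together with the constructor edits in the next file, apply one pattern: the TransferState member and its mem-initializer are both wrapped in #if INCREMENTAL_TRANSFER, and the macro is defined to 0, so the incremental machinery stays in the source but compiles out. A stripped-down illustration of that pattern, with hypothetical Widget/State classes rather than the actual GL45Texture/TransferState:

#define INCREMENTAL_TRANSFER 0    // feature is compiled out by default

class Widget;

// Hypothetical per-object state that only exists when the feature is enabled.
struct State {
    explicit State(Widget& owner) : owner(owner) {}
    Widget& owner;
};

class Widget {
public:
    Widget()
        : _id(0)
#if INCREMENTAL_TRANSFER
        , _state(*this)    // mem-initializer is guarded too, so it vanishes with the member
#endif
    {
    }

private:
    int _id;
#if INCREMENTAL_TRANSFER
    State _state;          // member only declared when the feature is compiled in
#endif
};

int main() {
    Widget w;              // builds identically with the feature on or off
    (void)w;
    return 0;
}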

GL45BackendTexture.cpp

@ -162,6 +162,8 @@ void GL45Backend::initTextureManagementStage() {
}
}
#if INCREMENTAL_TRANSFER
using TransferState = GL45Backend::GL45Texture::TransferState;
TransferState::TransferState(GL45Texture& texture) : texture(texture) {
@ -246,6 +248,7 @@ void TransferState::populatePage(std::vector<uint8_t>& buffer) {
uvec3 TransferState::currentPageSize() const {
return glm::clamp(mipDimensions - mipOffset, uvec3(1), texture._sparseInfo.pageDimensions);
}
#endif
GLuint GL45Texture::allocate(const Texture& texture) {
GLuint result;
@ -258,11 +261,19 @@ GLuint GL45Backend::getTextureID(const TexturePointer& texture, bool transfer) {
}
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint externalId)
: GLTexture(backend, texture, externalId), _sparseInfo(*this), _transferState(*this) {
: GLTexture(backend, texture, externalId), _sparseInfo(*this)
#if INCREMENTAL_TRANSFER
, _transferState(*this)
#endif
{
}
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable)
: GLTexture(backend, texture, allocate(texture), transferrable), _sparseInfo(*this), _transferState(*this) {
: GLTexture(backend, texture, allocate(texture), transferrable), _sparseInfo(*this)
#if INCREMENTAL_TRANSFER
, _transferState(*this)
#endif
{
auto theBackend = _backend.lock();
if (_transferrable && theBackend && theBackend->isTextureManagementSparseEnabled()) {
@ -375,39 +386,40 @@ void GL45Texture::updateSize() const {
void GL45Texture::startTransfer() {
Parent::startTransfer();
_sparseInfo.update();
#if INCREMENTAL_TRANSFER
_transferState.updateMip();
#endif
}
bool GL45Texture::continueTransfer() {
if (!Texture::getEnableIncrementalTextureTransfers()) {
size_t maxFace = GL_TEXTURE_CUBE_MAP == _target ? CUBE_NUM_FACES : 1;
for (uint8_t face = 0; face < maxFace; ++face) {
for (uint16_t mipLevel = _minMip; mipLevel <= _maxMip; ++mipLevel) {
auto size = _gpuObject.evalMipDimensions(mipLevel);
if (_sparseInfo.sparse && mipLevel <= _sparseInfo.maxSparseLevel) {
glTexturePageCommitmentEXT(_id, mipLevel, 0, 0, face, size.x, size.y, 1, GL_TRUE);
_allocatedPages += _sparseInfo.getPageCount(size);
}
if (_gpuObject.isStoredMipFaceAvailable(mipLevel, face)) {
auto mip = _gpuObject.accessStoredMipFace(mipLevel, face);
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), mip->getFormat());
if (GL_TEXTURE_2D == _target) {
glTextureSubImage2D(_id, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
} else if (GL_TEXTURE_CUBE_MAP == _target) {
// DSA ARB does not work on AMD, so use EXT
// glTextureSubImage3D(_id, mipLevel, 0, 0, face, size.x, size.y, 1, texelFormat.format, texelFormat.type, mip->readData());
auto target = CUBE_FACE_LAYOUT[face];
glTextureSubImage2DEXT(_id, target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
} else {
Q_ASSERT(false);
}
(void)CHECK_GL_ERROR();
#if !INCREMENTAL_TRANSFER
size_t maxFace = GL_TEXTURE_CUBE_MAP == _target ? CUBE_NUM_FACES : 1;
for (uint8_t face = 0; face < maxFace; ++face) {
for (uint16_t mipLevel = _minMip; mipLevel <= _maxMip; ++mipLevel) {
auto size = _gpuObject.evalMipDimensions(mipLevel);
if (_sparseInfo.sparse && mipLevel <= _sparseInfo.maxSparseLevel) {
glTexturePageCommitmentEXT(_id, mipLevel, 0, 0, face, size.x, size.y, 1, GL_TRUE);
_allocatedPages += _sparseInfo.getPageCount(size);
}
if (_gpuObject.isStoredMipFaceAvailable(mipLevel, face)) {
auto mip = _gpuObject.accessStoredMipFace(mipLevel, face);
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), mip->getFormat());
if (GL_TEXTURE_2D == _target) {
glTextureSubImage2D(_id, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
} else if (GL_TEXTURE_CUBE_MAP == _target) {
// DSA ARB does not work on AMD, so use EXT
// glTextureSubImage3D(_id, mipLevel, 0, 0, face, size.x, size.y, 1, texelFormat.format, texelFormat.type, mip->readData());
auto target = CUBE_FACE_LAYOUT[face];
glTextureSubImage2DEXT(_id, target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
} else {
Q_ASSERT(false);
}
(void)CHECK_GL_ERROR();
}
}
return false;
}
return false;
#else
static std::vector<uint8_t> buffer;
if (buffer.empty()) {
buffer.resize(DEFAULT_PAGE_BUFFER_SIZE);
@ -458,6 +470,7 @@ bool GL45Texture::continueTransfer() {
_lastMipAllocatedPages = _allocatedPages;
}
return result;
#endif
}
void GL45Texture::finishTransfer() {
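
With INCREMENTAL_TRANSFER at 0, continueTransfer() always takes the first branch: it commits and uploads every stored mip face in a single pass and returns false, i.e. no further transfer passes are queued; the page-by-page path survives only behind #else. A schematic of that control flow, with hypothetical uploadAllMipsOnce/uploadNextPage standing in for the loop bodies shown in the hunk above:

#include <cstdio>

#define INCREMENTAL_TRANSFER 0

// Hypothetical stand-ins for the two upload strategies in the hunk above.
void uploadAllMipsOnce() { std::puts("uploaded every stored mip in one pass"); }
bool uploadNextPage()    { std::puts("uploaded one page"); return false; }

// Returns true while more transfer work remains, false once the texture is done
// (the eager path is done after its single pass).
bool continueTransfer() {
#if !INCREMENTAL_TRANSFER
    // Eager path: do all the work now; nothing left to continue.
    uploadAllMipsOnce();
    return false;
#else
    // Incremental path: do one page per call; the caller keeps re-invoking until false.
    return uploadNextPage();
#endif
}

int main() {
    while (continueTransfer()) {
        // drained incrementally only when INCREMENTAL_TRANSFER is non-zero
    }
    return 0;
}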

Texture.cpp

@ -35,18 +35,15 @@ std::atomic<Texture::Size> Texture::_allowedCPUMemoryUsage { 0 };
#define MIN_CORES_FOR_INCREMENTAL_TEXTURES 5
bool recommendedIncrementalTransfers = (QThread::idealThreadCount() >= MIN_CORES_FOR_INCREMENTAL_TEXTURES);
bool recommendedSparseTextures = recommendedIncrementalTransfers;
bool recommendedSparseTextures = (QThread::idealThreadCount() >= MIN_CORES_FOR_INCREMENTAL_TEXTURES);
std::atomic<bool> Texture::_enableSparseTextures { recommendedIncrementalTransfers };
std::atomic<bool> Texture::_enableIncrementalTextureTransfers { recommendedSparseTextures };
std::atomic<bool> Texture::_enableSparseTextures { recommendedSparseTextures };
struct ReportTextureState {
ReportTextureState() {
qDebug() << "[TEXTURE TRANSFER SUPPORT]"
<< "\n\tidealThreadCount:" << QThread::idealThreadCount()
<< "\n\tRECOMMENDED enableSparseTextures:" << recommendedSparseTextures
<< "\n\tRECOMMENDED enableIncrementalTextures:" << recommendedIncrementalTransfers;
<< "\n\tRECOMMENDED enableSparseTextures:" << recommendedSparseTextures;
}
} report;
@ -59,16 +56,6 @@ void Texture::setEnableSparseTextures(bool enabled) {
#endif
}
void Texture::setEnableIncrementalTextureTransfers(bool enabled) {
#ifdef Q_OS_WIN
qDebug() << "[TEXTURE TRANSFER SUPPORT] SETTING - Enable Incremental Texture Transfer:" << enabled;
_enableIncrementalTextureTransfers = enabled;
#else
qDebug() << "[TEXTURE TRANSFER SUPPORT] Incremental Texture Transfer not supported on this platform.";
#endif
}
void Texture::updateTextureCPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {
return;
@ -84,10 +71,6 @@ bool Texture::getEnableSparseTextures() {
return _enableSparseTextures.load();
}
bool Texture::getEnableIncrementalTextureTransfers() {
return _enableIncrementalTextureTransfers.load();
}
uint32_t Texture::getTextureCPUCount() {
return _textureCPUCount.load();
}
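
On the gpu library side only the sparse-texture toggle remains: a process-wide std::atomic<bool> seeded from the idealThreadCount() heuristic, plus a setter. A free-standing sketch of that shape; the Windows-only guard in the setter body is an assumption by analogy with the removed incremental setter, since only the setter's signature and closing #endif are visible in the hunk above:

#include <atomic>
#include <QDebug>
#include <QThread>

#define MIN_CORES_FOR_INCREMENTAL_TEXTURES 5

// Recommended default: enable sparse textures only on machines with enough hardware threads.
static const bool recommendedSparseTextures =
    (QThread::idealThreadCount() >= MIN_CORES_FOR_INCREMENTAL_TEXTURES);

static std::atomic<bool> enableSparseTextures { recommendedSparseTextures };

void setEnableSparseTextures(bool enabled) {
#ifdef Q_OS_WIN
    // Assumed Windows-only, mirroring the removed incremental-transfer setter.
    qDebug() << "[TEXTURE TRANSFER SUPPORT] SETTING - Enable Sparse Textures:" << enabled;
    enableSparseTextures = enabled;
#else
    qDebug() << "[TEXTURE TRANSFER SUPPORT] Sparse Textures not supported on this platform.";
#endif
}

bool getEnableSparseTextures() {
    return enableSparseTextures.load();
}

int main() {
    qDebug() << "sparse textures enabled by default:" << getEnableSparseTextures();
    return 0;
}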

Texture.h

@ -143,10 +143,8 @@ class Texture : public Resource {
static std::atomic<uint32_t> _textureCPUCount;
static std::atomic<Size> _textureCPUMemoryUsage;
static std::atomic<Size> _allowedCPUMemoryUsage;
static void updateTextureCPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
static std::atomic<bool> _enableSparseTextures;
static std::atomic<bool> _enableIncrementalTextureTransfers;
static void updateTextureCPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
public:
static uint32_t getTextureCPUCount();
@ -162,10 +160,7 @@ public:
static void setAllowedGPUMemoryUsage(Size size);
static bool getEnableSparseTextures();
static bool getEnableIncrementalTextureTransfers();
static void setEnableSparseTextures(bool enabled);
static void setEnableIncrementalTextureTransfers(bool enabled);
using ExternalRecycler = std::function<void(uint32, void*)>;
using ExternalIdAndFence = std::pair<uint32, void*>;