Working on new texture management strategy

Bradley Austin Davis 2017-01-24 15:37:18 -08:00 committed by Brad Davis
parent 5bef6ea029
commit 7fb7aa87eb
40 changed files with 1508 additions and 1360 deletions

View file

@@ -99,7 +99,7 @@ void ApplicationOverlay::renderQmlUi(RenderArgs* renderArgs) {
PROFILE_RANGE(app, __FUNCTION__);
if (!_uiTexture) {
_uiTexture = gpu::TexturePointer(gpu::Texture::createExternal2D(OffscreenQmlSurface::getDiscardLambda()));
_uiTexture = gpu::TexturePointer(gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
_uiTexture->setSource(__FUNCTION__);
}
// Once we move UI rendering and screen rendering to different
@@ -272,13 +272,13 @@ void ApplicationOverlay::buildFramebufferObject() {
auto width = uiSize.x;
auto height = uiSize.y;
if (!_overlayFramebuffer->getDepthStencilBuffer()) {
auto overlayDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(DEPTH_FORMAT, width, height, DEFAULT_SAMPLER));
auto overlayDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(DEPTH_FORMAT, width, height, DEFAULT_SAMPLER));
_overlayFramebuffer->setDepthStencilBuffer(overlayDepthTexture, DEPTH_FORMAT);
}
if (!_overlayFramebuffer->getRenderBuffer(0)) {
const gpu::Sampler OVERLAY_SAMPLER(gpu::Sampler::FILTER_MIN_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP);
auto colorBuffer = gpu::TexturePointer(gpu::Texture::create2D(COLOR_FORMAT, width, height, OVERLAY_SAMPLER));
auto colorBuffer = gpu::TexturePointer(gpu::Texture::createRenderBuffer(COLOR_FORMAT, width, height, OVERLAY_SAMPLER));
_overlayFramebuffer->setRenderBuffer(0, colorBuffer);
}
}
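The pattern in the hunks above recurs throughout this commit: the generic gpu::Texture::create2D factory is replaced by usage-specific factories so the backend can pick a management policy at creation time. A sketch of the declarations these call sites imply (argument lists inferred from this diff; the authoritative declarations live in the gpu library's Texture.h and may differ in defaults and overloads):

    class Texture {
    public:
        // Framebuffer attachments: fixed allocation, never demoted at runtime.
        static Texture* createRenderBuffer(const Element& texelFormat, uint16 width, uint16 height,
                                           const Sampler& sampler);
        // Sampled textures that must stay resident at full resolution (UI, cursors).
        static Texture* createStrict(const Element& texelFormat, uint16 width, uint16 height,
                                     const Sampler& sampler);
        // Storage produced and recycled outside the gpu library (QML / web surfaces).
        static Texture* createExternal(const ExternalRecycler& recycler);
    };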

View file

@@ -251,7 +251,7 @@ void Web3DOverlay::render(RenderArgs* args) {
if (!_texture) {
auto webSurface = _webSurface;
_texture = gpu::TexturePointer(gpu::Texture::createExternal2D(OffscreenQmlSurface::getDiscardLambda()));
_texture = gpu::TexturePointer(gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
_texture->setSource(__FUNCTION__);
}
OffscreenQmlSurface::TextureAndFence newTextureAndFence;

View file

@@ -355,7 +355,7 @@ void OpenGLDisplayPlugin::customizeContext() {
if ((image.width() > 0) && (image.height() > 0)) {
cursorData.texture.reset(
gpu::Texture::create2D(
gpu::Texture::createStrict(
gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
image.width(), image.height(),
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
@@ -363,6 +363,7 @@ void OpenGLDisplayPlugin::customizeContext() {
auto usage = gpu::Texture::Usage::Builder().withColor().withAlpha();
cursorData.texture->setUsage(usage.build());
cursorData.texture->assignStoredMip(0, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA), image.byteCount(), image.constBits());
cursorData.texture->autoGenerateMips(-1);
}
}
}
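This hunk shows the upload pattern that goes with createStrict: create fixed storage, attach the pixels as stored mip 0, and request automatic generation of the rest of the chain. A minimal sketch, assuming an RGBA8888 QImage named image:

    auto texture = gpu::TexturePointer(gpu::Texture::createStrict(
        gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
        image.width(), image.height(),
        gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
    texture->setUsage(gpu::Texture::Usage::Builder().withColor().withAlpha().build());
    // Hand the pixels to the gpu::Texture; the backend uploads them when it first syncs.
    texture->assignStoredMip(0, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
                             image.byteCount(), image.constBits());
    texture->autoGenerateMips(-1); // -1 requests the full mip chain below mip 0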

View file

@@ -296,7 +296,7 @@ void HmdDisplayPlugin::internalPresent() {
image = image.convertToFormat(QImage::Format_RGBA8888);
if (!_previewTexture) {
_previewTexture.reset(
gpu::Texture::create2D(
gpu::Texture::createStrict(
gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
image.width(), image.height(),
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
@@ -306,23 +306,21 @@
_previewTexture->autoGenerateMips(-1);
}
if (getGLBackend()->isTextureReady(_previewTexture)) {
auto viewport = getViewportForSourceSize(uvec2(_previewTexture->getDimensions()));
auto viewport = getViewportForSourceSize(uvec2(_previewTexture->getDimensions()));
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.resetViewTransform();
batch.setFramebuffer(gpu::FramebufferPointer());
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
batch.setStateScissorRect(viewport);
batch.setViewportTransform(viewport);
batch.setResourceTexture(0, _previewTexture);
batch.setPipeline(_presentPipeline);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
_clearPreviewFlag = false;
swapBuffers();
}
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.resetViewTransform();
batch.setFramebuffer(gpu::FramebufferPointer());
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
batch.setStateScissorRect(viewport);
batch.setViewportTransform(viewport);
batch.setResourceTexture(0, _previewTexture);
batch.setPipeline(_presentPipeline);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
_clearPreviewFlag = false;
swapBuffers();
}
postPreview();

View file

@@ -214,7 +214,7 @@ void RenderableWebEntityItem::render(RenderArgs* args) {
if (!_texture) {
auto webSurface = _webSurface;
_texture = gpu::TexturePointer(gpu::Texture::createExternal2D(OffscreenQmlSurface::getDiscardLambda()));
_texture = gpu::TexturePointer(gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
_texture->setSource(__FUNCTION__);
}
OffscreenQmlSurface::TextureAndFence newTextureAndFence;

View file

@@ -62,8 +62,6 @@ BackendPointer GLBackend::createBackend() {
INSTANCE = result.get();
void* voidInstance = &(*result);
qApp->setProperty(hifi::properties::gl::BACKEND, QVariant::fromValue(voidInstance));
gl::GLTexture::initTextureTransferHelper();
return result;
}
@@ -623,6 +621,7 @@ void GLBackend::queueLambda(const std::function<void()> lambda) const {
}
void GLBackend::recycle() const {
PROFILE_RANGE(render_gpu_gl, __FUNCTION__)
{
std::list<std::function<void()>> lamdbasTrash;
{
@@ -745,10 +744,6 @@ void GLBackend::recycle() const {
glDeleteQueries((GLsizei)ids.size(), ids.data());
}
}
#ifndef THREADED_TEXTURE_TRANSFER
gl::GLTexture::_textureTransferHelper->process();
#endif
}
void GLBackend::setCameraCorrection(const Mat4& correction) {

View file

@@ -187,10 +187,15 @@ public:
virtual void do_setStateScissorRect(const Batch& batch, size_t paramOffset) final;
virtual GLuint getFramebufferID(const FramebufferPointer& framebuffer) = 0;
virtual GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) = 0;
virtual GLuint getTextureID(const TexturePointer& texture) final;
virtual GLuint getBufferID(const Buffer& buffer) = 0;
virtual GLuint getQueryID(const QueryPointer& query) = 0;
virtual bool isTextureReady(const TexturePointer& texture);
virtual GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) = 0;
virtual GLBuffer* syncGPUObject(const Buffer& buffer) = 0;
virtual GLTexture* syncGPUObject(const TexturePointer& texture);
virtual GLQuery* syncGPUObject(const Query& query) = 0;
//virtual bool isTextureReady(const TexturePointer& texture);
virtual void releaseBuffer(GLuint id, Size size) const;
virtual void releaseExternalTexture(GLuint id, const Texture::ExternalRecycler& recycler) const;
@@ -206,10 +211,6 @@ public:
protected:
void recycle() const override;
virtual GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) = 0;
virtual GLBuffer* syncGPUObject(const Buffer& buffer) = 0;
virtual GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) = 0;
virtual GLQuery* syncGPUObject(const Query& query) = 0;
static const size_t INVALID_OFFSET = (size_t)-1;
bool _inRenderTransferPass { false };

View file

@@ -14,12 +14,56 @@
using namespace gpu;
using namespace gpu::gl;
bool GLBackend::isTextureReady(const TexturePointer& texture) {
// Do not transfer the texture; this call is expected during rendering
GLTexture* object = syncGPUObject(texture, true);
return object && object->isReady();
GLuint GLBackend::getTextureID(const TexturePointer& texture) {
GLTexture* object = syncGPUObject(texture);
if (!object) {
return 0;
}
return object->_id;
}
GLTexture* GLBackend::syncGPUObject(const TexturePointer& texturePointer) {
const Texture& texture = *texturePointer;
// Special case external textures
if (TextureUsageType::EXTERNAL == texture.getUsageType()) {
Texture::ExternalUpdates updates = texture.getUpdates();
if (!updates.empty()) {
Texture::ExternalRecycler recycler = texture.getExternalRecycler();
Q_ASSERT(recycler);
// Discard any superfluous updates
while (updates.size() > 1) {
const auto& update = updates.front();
// Superfluous updates will never have been read, but we want to ensure the previous
// writes to them are complete before they're written again, so return them with the
// same fences they arrived with. This can happen on any thread because no GL context
// work is involved
recycler(update.first, update.second);
updates.pop_front();
}
// The last texture remaining is the one we'll use to create the GLTexture
const auto& update = updates.front();
// Check for a fence, and if it exists, inject a wait into the command stream, then destroy the fence
if (update.second) {
GLsync fence = static_cast<GLsync>(update.second);
glWaitSync(fence, 0, GL_TIMEOUT_IGNORED);
glDeleteSync(fence);
}
// Create the new texture object (replaces any previous texture object)
new GLExternalTexture(shared_from_this(), texture, update.first);
}
// Return the texture object (if any) associated with the texture, without extensive logic
// (external textures are handled entirely by the update/recycler logic above)
return Backend::getGPUObject<GLTexture>(texture);
}
return nullptr;
}
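This function is the consumer half of the external-texture handshake: a producer (OffscreenQmlSurface in this codebase) publishes (textureId, fence) pairs, the backend injects a glWaitSync before adopting the newest one, and every retired pair goes back through the recycler. A hypothetical producer-side sketch (publishFrame is illustrative; it assumes the gpu::Texture::setExternalTexture(id, fence) entry point that feeds getUpdates()):

    void publishFrame(const gpu::TexturePointer& texture, GLuint glTextureId) {
        // Fence our writes so the consuming context can wait before sampling.
        GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        glFlush(); // make the fence visible to the consuming context
        texture->setExternalTexture(glTextureId, fence); // queued; drained by getUpdates()
    }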
void GLBackend::do_generateTextureMips(const Batch& batch, size_t paramOffset) {
TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
@@ -28,7 +72,7 @@ void GLBackend::do_generateTextureMips(const Batch& batch, size_t paramOffset) {
}
// Do not transfer the texture; this call is expected during rendering
GLTexture* object = syncGPUObject(resourceTexture, false);
GLTexture* object = syncGPUObject(resourceTexture);
if (!object) {
return;
}

View file

@@ -21,13 +21,12 @@ GLFramebuffer::~GLFramebuffer() {
}
}
bool GLFramebuffer::checkStatus(GLenum target) const {
bool result = false;
bool GLFramebuffer::checkStatus() const {
switch (_status) {
case GL_FRAMEBUFFER_COMPLETE:
// Success !
result = true;
break;
return true;
case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.";
break;
@@ -44,5 +43,5 @@ bool GLFramebuffer::checkStatus(GLenum target) const {
qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_UNSUPPORTED.";
break;
}
return result;
return false;
}

View file

@@ -64,7 +64,7 @@ public:
protected:
GLenum _status { GL_FRAMEBUFFER_COMPLETE };
virtual void update() = 0;
bool checkStatus(GLenum target) const;
bool checkStatus() const;
GLFramebuffer(const std::weak_ptr<GLBackend>& backend, const Framebuffer& framebuffer, GLuint id) : GLObject(backend, framebuffer, id) {}
~GLFramebuffer();

View file

@@ -10,15 +10,13 @@
#include <NumericalConstants.h>
#include "GLTextureTransfer.h"
#include "GLBackend.h"
using namespace gpu;
using namespace gpu::gl;
std::shared_ptr<GLTextureTransferHelper> GLTexture::_textureTransferHelper;
const GLenum GLTexture::CUBE_FACE_LAYOUT[6] = {
const GLenum GLTexture::CUBE_FACE_LAYOUT[GLTexture::TEXTURE_CUBE_NUM_FACES] = {
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
@@ -67,6 +65,17 @@ GLenum GLTexture::getGLTextureType(const Texture& texture) {
}
uint8_t GLTexture::getFaceCount(GLenum target) {
switch (target) {
case GL_TEXTURE_2D:
return TEXTURE_2D_NUM_FACES;
case GL_TEXTURE_CUBE_MAP:
return TEXTURE_CUBE_NUM_FACES;
default:
Q_UNREACHABLE();
break;
}
}
const std::vector<GLenum>& GLTexture::getFaceTargets(GLenum target) {
static std::vector<GLenum> cubeFaceTargets {
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
@@ -89,216 +98,34 @@ const std::vector<GLenum>& GLTexture::getFaceTargets(GLenum target) {
return faceTargets;
}
// Default texture memory = GPU total memory - 2GB
#define GPU_MEMORY_RESERVE_BYTES MB_TO_BYTES(2048)
// Minimum texture memory = 1GB
#define TEXTURE_MEMORY_MIN_BYTES MB_TO_BYTES(1024)
float GLTexture::getMemoryPressure() {
// Check for an explicit memory limit
auto availableTextureMemory = Texture::getAllowedGPUMemoryUsage();
// If no memory limit has been set, use a percentage of the total dedicated memory
if (!availableTextureMemory) {
#if 0
auto totalMemory = getDedicatedMemory();
if ((GPU_MEMORY_RESERVE_BYTES + TEXTURE_MEMORY_MIN_BYTES) > totalMemory) {
availableTextureMemory = TEXTURE_MEMORY_MIN_BYTES;
} else {
availableTextureMemory = totalMemory - GPU_MEMORY_RESERVE_BYTES;
}
#else
// Hardcode texture limit for sparse textures at 1 GB for now
availableTextureMemory = TEXTURE_MEMORY_MIN_BYTES;
#endif
}
// Return the consumed texture memory divided by the available texture memory.
auto consumedGpuMemory = Context::getTextureGPUMemoryUsage() - Context::getTextureGPUFramebufferMemoryUsage();
float memoryPressure = (float)consumedGpuMemory / (float)availableTextureMemory;
static Context::Size lastConsumedGpuMemory = 0;
if (memoryPressure > 1.0f && lastConsumedGpuMemory != consumedGpuMemory) {
lastConsumedGpuMemory = consumedGpuMemory;
qCDebug(gpugllogging) << "Exceeded max allowed texture memory: " << consumedGpuMemory << " / " << availableTextureMemory;
}
return memoryPressure;
}
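In short, pressure is consumed resource-texture memory (framebuffer memory excluded) divided by the budget; anything above 1.0 means oversubscription. A worked example under the hardcoded 1 GB budget (numbers assumed for illustration):

    const auto budget   = MB_TO_BYTES(1024); // TEXTURE_MEMORY_MIN_BYTES
    const auto consumed = MB_TO_BYTES(1536); // e.g. 1.5 GB of non-framebuffer textures
    const float pressure = (float)consumed / (float)budget; // = 1.5, over budget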
// Create the texture and allocate storage
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id, bool transferrable) :
GLObject(backend, texture, id),
_external(false),
_source(texture.source()),
_storageStamp(texture.getStamp()),
_target(getGLTextureType(texture)),
_internalFormat(gl::GLTexelFormat::evalGLTexelFormatInternal(texture.getTexelFormat())),
_maxMip(texture.maxMip()),
_minMip(texture.minMip()),
_virtualSize(texture.evalTotalSize()),
_transferrable(transferrable)
{
auto strongBackend = _backend.lock();
strongBackend->recycle();
Backend::incrementTextureGPUCount();
Backend::updateTextureGPUVirtualMemoryUsage(0, _virtualSize);
Backend::setGPUObject(texture, this);
}
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id) :
GLObject(backend, texture, id),
_external(true),
_source(texture.source()),
_storageStamp(0),
_target(getGLTextureType(texture)),
_internalFormat(GL_RGBA8),
// FIXME force mips to 0?
_maxMip(texture.maxMip()),
_minMip(texture.minMip()),
_virtualSize(0),
_transferrable(false)
_target(getGLTextureType(texture))
{
Backend::setGPUObject(texture, this);
// FIXME Is this necessary?
//withPreservedTexture([this] {
// syncSampler();
// if (_gpuObject.isAutogenerateMips()) {
// generateMips();
// }
//});
}
GLTexture::~GLTexture() {
auto backend = _backend.lock();
if (backend && _id) {
backend->releaseTexture(_id, 0);
}
}
GLExternalTexture::GLExternalTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id)
: Parent(backend, texture, id) { }
GLExternalTexture::~GLExternalTexture() {
auto backend = _backend.lock();
if (backend) {
if (_external) {
auto recycler = _gpuObject.getExternalRecycler();
if (recycler) {
backend->releaseExternalTexture(_id, recycler);
} else {
qWarning() << "No recycler available for texture " << _id << " possible leak";
}
} else if (_id) {
// WARNING! Sparse textures do not use this code path. See the GL45Texture
// destructor in GL45BackendTexture for any required GPU stats tracking work
backend->releaseTexture(_id, _size);
auto recycler = _gpuObject.getExternalRecycler();
if (recycler) {
backend->releaseExternalTexture(_id, recycler);
} else {
qWarning() << "No recycler available for texture " << _id << " possible leak";
}
if (!_external && !_transferrable) {
Backend::updateTextureGPUFramebufferMemoryUsage(_size, 0);
}
}
Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, 0);
}
void GLTexture::createTexture() {
withPreservedTexture([&] {
allocateStorage();
(void)CHECK_GL_ERROR();
syncSampler();
(void)CHECK_GL_ERROR();
});
}
void GLTexture::withPreservedTexture(std::function<void()> f) const {
GLint boundTex = -1;
switch (_target) {
case GL_TEXTURE_2D:
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
break;
case GL_TEXTURE_CUBE_MAP:
glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
break;
default:
qFatal("Unsupported texture type");
}
(void)CHECK_GL_ERROR();
glBindTexture(_target, _texture);
f();
glBindTexture(_target, boundTex);
(void)CHECK_GL_ERROR();
}
void GLTexture::setSize(GLuint size) const {
if (!_external && !_transferrable) {
Backend::updateTextureGPUFramebufferMemoryUsage(_size, size);
}
Backend::updateTextureGPUMemoryUsage(_size, size);
const_cast<GLuint&>(_size) = size;
}
bool GLTexture::isInvalid() const {
return _storageStamp < _gpuObject.getStamp();
}
bool GLTexture::isOutdated() const {
return GLSyncState::Idle == _syncState && _contentStamp < _gpuObject.getDataStamp();
}
bool GLTexture::isReady() const {
// If we have an invalid texture, we're never ready
if (isInvalid()) {
return false;
}
auto syncState = _syncState.load();
if (isOutdated() || Idle != syncState) {
return false;
}
return true;
}
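These three predicates drive the (pre-existing) transfer lifecycle. A compact summary, with the sync states that appear in this diff (the real GLSyncState may carry more members):

    // isInvalid():  _storageStamp < texture stamp       -> GL storage must be recreated
    // isOutdated(): Idle but _contentStamp < data stamp -> content needs a transfer
    // isReady():    neither of the above, no transfer in flight
    enum class GLSyncStateSketch {
        Idle,        // nothing pending; usable if content is current
        Pending,     // queued on the transfer thread (set in transferTexture)
        Transferred, // transfer finished; postTransfer() returns it to Idle
    };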
// Do any post-transfer operations that might be required on the main context / rendering thread
void GLTexture::postTransfer() {
setSyncState(GLSyncState::Idle);
++_transferCount;
// At this point the mip pixels have been loaded, we can notify the gpu texture to abandon its memory
switch (_gpuObject.getType()) {
case Texture::TEX_2D:
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i)) {
_gpuObject.notifyMipFaceGPULoaded(i);
}
}
break;
case Texture::TEX_CUBE:
// transfer pixels from each face
for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
_gpuObject.notifyMipFaceGPULoaded(i, f);
}
}
}
break;
default:
qCWarning(gpugllogging) << __FUNCTION__ << " case for Texture Type " << _gpuObject.getType() << " not supported";
break;
const_cast<GLuint&>(_id) = 0;
}
}
void GLTexture::initTextureTransferHelper() {
_textureTransferHelper = std::make_shared<GLTextureTransferHelper>();
}
void GLTexture::startTransfer() {
createTexture();
}
void GLTexture::finishTransfer() {
if (_gpuObject.isAutogenerateMips()) {
generateMips();
}
}

View file

@@ -9,7 +9,6 @@
#define hifi_gpu_gl_GLTexture_h
#include "GLShared.h"
#include "GLTextureTransfer.h"
#include "GLBackend.h"
#include "GLTexelFormat.h"
@@ -20,210 +19,48 @@ struct GLFilterMode {
GLint magFilter;
};
class GLTexture : public GLObject<Texture> {
using Parent = GLObject<Texture>;
friend class GLBackend;
public:
static const uint16_t INVALID_MIP { (uint16_t)-1 };
static const uint8_t INVALID_FACE { (uint8_t)-1 };
static void initTextureTransferHelper();
static std::shared_ptr<GLTextureTransferHelper> _textureTransferHelper;
template <typename GLTextureType>
static GLTexture* sync(GLBackend& backend, const TexturePointer& texturePointer, bool needTransfer) {
const Texture& texture = *texturePointer;
// Special case external textures
if (texture.getUsage().isExternal()) {
Texture::ExternalUpdates updates = texture.getUpdates();
if (!updates.empty()) {
Texture::ExternalRecycler recycler = texture.getExternalRecycler();
Q_ASSERT(recycler);
// Discard any superfluous updates
while (updates.size() > 1) {
const auto& update = updates.front();
// Superfluous updates will never have been read, but we want to ensure the previous
// writes to them are complete before they're written again, so return them with the
// same fences they arrived with. This can happen on any thread because no GL context
// work is involved
recycler(update.first, update.second);
updates.pop_front();
}
// The last texture remaining is the one we'll use to create the GLTexture
const auto& update = updates.front();
// Check for a fence, and if it exists, inject a wait into the command stream, then destroy the fence
if (update.second) {
GLsync fence = static_cast<GLsync>(update.second);
glWaitSync(fence, 0, GL_TIMEOUT_IGNORED);
glDeleteSync(fence);
}
// Create the new texture object (replaces any previous texture object)
new GLTextureType(backend.shared_from_this(), texture, update.first);
}
// Return the texture object (if any) associated with the texture, without extensive logic
// (external textures are handled entirely by the update/recycler logic above)
return Backend::getGPUObject<GLTextureType>(texture);
}
if (!texture.isDefined()) {
// NO texture definition yet so let's avoid thinking
return nullptr;
}
// If the object hasn't been created, or the object definition is out of date, drop and re-create
GLTexture* object = Backend::getGPUObject<GLTextureType>(texture);
// Create the texture if need be (force re-creation if the storage stamp changes
// for easier use of immutable storage)
if (!object || object->isInvalid()) {
// This automatically replaces any previous texture object
object = new GLTextureType(backend.shared_from_this(), texture, needTransfer);
if (!object->_transferrable) {
object->createTexture();
object->_contentStamp = texture.getDataStamp();
object->updateSize();
object->postTransfer();
}
}
// The object may not need to be transferred after creation
if (!object->_transferrable) {
return object;
}
// If we just did a transfer, return the object after doing post-transfer work
if (GLSyncState::Transferred == object->getSyncState()) {
object->postTransfer();
}
if (object->isOutdated()) {
// Object might be outdated, if so, start the transfer
// (outdated objects that are already in transfer will have reported 'true' for ready())
_textureTransferHelper->transferTexture(texturePointer);
return nullptr;
}
if (!object->isReady()) {
return nullptr;
}
((GLTexture*)object)->updateMips();
return object;
}
template <typename GLTextureType>
static GLuint getId(GLBackend& backend, const TexturePointer& texture, bool shouldSync) {
if (!texture) {
return 0;
}
GLTexture* object { nullptr };
if (shouldSync) {
object = sync<GLTextureType>(backend, texture, shouldSync);
} else {
object = Backend::getGPUObject<GLTextureType>(*texture);
}
if (!object) {
return 0;
}
if (!shouldSync) {
return object->_id;
}
// Don't return textures that are in transfer state
if ((object->getSyncState() != GLSyncState::Idle) ||
// Don't return transferrable textures that have never completed transfer
(!object->_transferrable || 0 != object->_transferCount)) {
return 0;
}
return object->_id;
}
~GLTexture();
// Is this texture generated outside the GPU library?
const bool _external;
const GLuint& _texture { _id };
const std::string _source;
const Stamp _storageStamp;
const GLenum _target;
const GLenum _internalFormat;
const uint16 _maxMip;
uint16 _minMip;
const GLuint _virtualSize; // theoretical size as expected
Stamp _contentStamp { 0 };
const bool _transferrable;
Size _transferCount { 0 };
GLuint size() const { return _size; }
GLSyncState getSyncState() const { return _syncState; }
// Is the storage out of date relative to the gpu texture?
bool isInvalid() const;
static const std::vector<GLenum>& getFaceTargets(GLenum textureType);
static uint8_t getFaceCount(GLenum textureType);
static GLenum getGLTextureType(const Texture& texture);
// Is the content out of date relative to the gpu texture?
bool isOutdated() const;
// Is the texture in a state where it can be rendered with no work?
bool isReady() const;
// Execute any post-move operations that must occur only on the main thread
virtual void postTransfer();
uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }
static const size_t CUBE_NUM_FACES = 6;
static const GLenum CUBE_FACE_LAYOUT[6];
static const uint8_t TEXTURE_2D_NUM_FACES = 1;
static const uint8_t TEXTURE_CUBE_NUM_FACES = 6;
static const GLenum CUBE_FACE_LAYOUT[TEXTURE_CUBE_NUM_FACES];
static const GLFilterMode FILTER_MODES[Sampler::NUM_FILTERS];
static const GLenum WRAP_MODES[Sampler::NUM_WRAP_MODES];
// Return a floating point value indicating how much of the allowed
// texture memory we are currently consuming. A value of 0 indicates
// no texture memory usage, while a value of 1 indicates all available / allowed memory
// is consumed. A value above 1 indicates that there is a problem.
static float getMemoryPressure();
protected:
static const std::vector<GLenum>& getFaceTargets(GLenum textureType);
static GLenum getGLTextureType(const Texture& texture);
const GLuint _size { 0 }; // true size as reported by the gl api
std::atomic<GLSyncState> _syncState { GLSyncState::Idle };
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id, bool transferrable);
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id);
void setSyncState(GLSyncState syncState) { _syncState = syncState; }
void createTexture();
virtual void updateMips() {}
virtual void allocateStorage() const = 0;
virtual void updateSize() const = 0;
virtual void syncSampler() const = 0;
virtual uint32 size() const = 0;
virtual void generateMips() const = 0;
virtual void withPreservedTexture(std::function<void()> f) const;
protected:
void setSize(GLuint size) const;
virtual void startTransfer();
// Returns true if this is the last block required to complete transfer
virtual bool continueTransfer() { return false; }
virtual void finishTransfer();
private:
friend class GLTextureTransferHelper;
friend class GLBackend;
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id);
};
class GLExternalTexture : public GLTexture {
using Parent = GLTexture;
friend class GLBackend;
public:
~GLExternalTexture();
protected:
GLExternalTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id);
void generateMips() const override {}
uint32 size() const override { return 0; }
};
} }
#endif

View file

@@ -1,208 +0,0 @@
//
// Created by Bradley Austin Davis on 2016/04/03
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLTextureTransfer.h"
#include <gl/GLHelpers.h>
#include <gl/Context.h>
#include <gpu/GPULogging.h>
#include "GLShared.h"
#include "GLTexture.h"
#ifdef HAVE_NSIGHT
#include "nvToolsExt.h"
std::unordered_map<TexturePointer, nvtxRangeId_t> _map;
#endif
#ifdef TEXTURE_TRANSFER_PBOS
#define TEXTURE_TRANSFER_BLOCK_SIZE (64 * 1024)
#define TEXTURE_TRANSFER_PBO_COUNT 128
#endif
using namespace gpu;
using namespace gpu::gl;
GLTextureTransferHelper::GLTextureTransferHelper() {
#ifdef THREADED_TEXTURE_TRANSFER
setObjectName("TextureTransferThread");
_context.create();
initialize(true, QThread::LowPriority);
// Clean shutdown on UNIX, otherwise _canvas is freed early
connect(qApp, &QCoreApplication::aboutToQuit, [&] { terminate(); });
#else
initialize(false, QThread::LowPriority);
#endif
}
GLTextureTransferHelper::~GLTextureTransferHelper() {
#ifdef THREADED_TEXTURE_TRANSFER
if (isStillRunning()) {
terminate();
}
#else
terminate();
#endif
}
void GLTextureTransferHelper::transferTexture(const gpu::TexturePointer& texturePointer) {
GLTexture* object = Backend::getGPUObject<GLTexture>(*texturePointer);
Backend::incrementTextureGPUTransferCount();
object->setSyncState(GLSyncState::Pending);
Lock lock(_mutex);
_pendingTextures.push_back(texturePointer);
}
void GLTextureTransferHelper::setup() {
#ifdef THREADED_TEXTURE_TRANSFER
_context.makeCurrent();
#ifdef TEXTURE_TRANSFER_FORCE_DRAW
// FIXME don't use opengl 4.5 DSA functionality without verifying it's present
glCreateRenderbuffers(1, &_drawRenderbuffer);
glNamedRenderbufferStorage(_drawRenderbuffer, GL_RGBA8, 128, 128);
glCreateFramebuffers(1, &_drawFramebuffer);
glNamedFramebufferRenderbuffer(_drawFramebuffer, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _drawRenderbuffer);
glCreateFramebuffers(1, &_readFramebuffer);
#endif
#ifdef TEXTURE_TRANSFER_PBOS
std::array<GLuint, TEXTURE_TRANSFER_PBO_COUNT> pbos;
glCreateBuffers(TEXTURE_TRANSFER_PBO_COUNT, &pbos[0]);
for (uint32_t i = 0; i < TEXTURE_TRANSFER_PBO_COUNT; ++i) {
TextureTransferBlock newBlock;
newBlock._pbo = pbos[i];
glNamedBufferStorage(newBlock._pbo, TEXTURE_TRANSFER_BLOCK_SIZE, 0, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
newBlock._mapped = glMapNamedBufferRange(newBlock._pbo, 0, TEXTURE_TRANSFER_BLOCK_SIZE, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
_readyQueue.push(newBlock);
}
#endif
#endif
}
void GLTextureTransferHelper::shutdown() {
#ifdef THREADED_TEXTURE_TRANSFER
_context.makeCurrent();
#endif
#ifdef TEXTURE_TRANSFER_FORCE_DRAW
glNamedFramebufferRenderbuffer(_drawFramebuffer, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, 0);
glDeleteFramebuffers(1, &_drawFramebuffer);
_drawFramebuffer = 0;
glDeleteFramebuffers(1, &_readFramebuffer);
_readFramebuffer = 0;
glNamedFramebufferTexture(_readFramebuffer, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0);
glDeleteRenderbuffers(1, &_drawRenderbuffer);
_drawRenderbuffer = 0;
#endif
}
void GLTextureTransferHelper::queueExecution(VoidLambda lambda) {
Lock lock(_mutex);
_pendingCommands.push_back(lambda);
}
#define MAX_TRANSFERS_PER_PASS 2
bool GLTextureTransferHelper::process() {
// Take any new textures or commands off the queue
VoidLambdaList pendingCommands;
TextureList newTransferTextures;
{
Lock lock(_mutex);
newTransferTextures.swap(_pendingTextures);
pendingCommands.swap(_pendingCommands);
}
if (!pendingCommands.empty()) {
for (auto command : pendingCommands) {
command();
}
glFlush();
}
if (!newTransferTextures.empty()) {
for (auto& texturePointer : newTransferTextures) {
#ifdef HAVE_NSIGHT
_map[texturePointer] = nvtxRangeStart("TextureTransfer");
#endif
GLTexture* object = Backend::getGPUObject<GLTexture>(*texturePointer);
object->startTransfer();
_transferringTextures.push_back(texturePointer);
_textureIterator = _transferringTextures.begin();
}
_transferringTextures.sort([](const gpu::TexturePointer& a, const gpu::TexturePointer& b)->bool {
return a->getSize() < b->getSize();
});
}
// No transfers in progress, sleep
if (_transferringTextures.empty()) {
#ifdef THREADED_TEXTURE_TRANSFER
QThread::usleep(1);
#endif
return true;
}
PROFILE_COUNTER_IF_CHANGED(render_gpu_gl, "transferringTextures", int, (int) _transferringTextures.size())
static auto lastReport = usecTimestampNow();
auto now = usecTimestampNow();
auto lastReportInterval = now - lastReport;
if (lastReportInterval > USECS_PER_SECOND * 4) {
lastReport = now;
qCDebug(gpulogging) << "Texture list " << _transferringTextures.size();
}
size_t transferCount = 0;
for (_textureIterator = _transferringTextures.begin(); _textureIterator != _transferringTextures.end();) {
if (++transferCount > MAX_TRANSFERS_PER_PASS) {
break;
}
auto texture = *_textureIterator;
GLTexture* gltexture = Backend::getGPUObject<GLTexture>(*texture);
if (gltexture->continueTransfer()) {
++_textureIterator;
continue;
}
gltexture->finishTransfer();
#ifdef TEXTURE_TRANSFER_FORCE_DRAW
// FIXME force a draw on the texture transfer thread before passing the texture to the main thread for use
#endif
#ifdef THREADED_TEXTURE_TRANSFER
clientWait();
#endif
gltexture->_contentStamp = gltexture->_gpuObject.getDataStamp();
gltexture->updateSize();
gltexture->setSyncState(gpu::gl::GLSyncState::Transferred);
Backend::decrementTextureGPUTransferCount();
#ifdef HAVE_NSIGHT
// Mark the texture as transferred
nvtxRangeEnd(_map[texture]);
_map.erase(texture);
#endif
_textureIterator = _transferringTextures.erase(_textureIterator);
}
#ifdef THREADED_TEXTURE_TRANSFER
if (!_transferringTextures.empty()) {
// Don't saturate the GPU
clientWait();
} else {
// Don't saturate the CPU
QThread::msleep(1);
}
#endif
return true;
}
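The top of process() uses a drain idiom worth noting: hold the mutex only long enough to swap the shared queues into locals, then run the work lock-free. In isolation (a sketch, assuming Lock is a std::unique_lock over the same Mutex used above):

    std::list<std::function<void()>> commands;
    {
        Lock lock(_mutex);
        commands.swap(_pendingCommands); // O(1); leaves the shared list empty
    }
    for (auto& command : commands) {
        command(); // executed without holding the mutex
    }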

View file

@@ -1,78 +0,0 @@
//
// Created by Bradley Austin Davis on 2016/04/03
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLTextureTransfer_h
#define hifi_gpu_gl_GLTextureTransfer_h
#include <QtGlobal>
#include <QtCore/QSharedPointer>
#include <GenericQueueThread.h>
#include <gl/Context.h>
#include "GLShared.h"
#ifdef Q_OS_WIN
#define THREADED_TEXTURE_TRANSFER
#endif
#ifdef THREADED_TEXTURE_TRANSFER
// FIXME when sparse textures are enabled, it's harder to force a draw on the transfer thread
// also, the current draw code is implicitly using OpenGL 4.5 functionality
//#define TEXTURE_TRANSFER_FORCE_DRAW
// FIXME PBO's increase the complexity and don't seem to work reliably
//#define TEXTURE_TRANSFER_PBOS
#endif
namespace gpu { namespace gl {
using TextureList = std::list<TexturePointer>;
using TextureListIterator = TextureList::iterator;
class GLTextureTransferHelper : public GenericThread {
public:
using VoidLambda = std::function<void()>;
using VoidLambdaList = std::list<VoidLambda>;
using Pointer = std::shared_ptr<GLTextureTransferHelper>;
GLTextureTransferHelper();
~GLTextureTransferHelper();
void transferTexture(const gpu::TexturePointer& texturePointer);
void queueExecution(VoidLambda lambda);
void setup() override;
void shutdown() override;
bool process() override;
private:
#ifdef THREADED_TEXTURE_TRANSFER
::gl::OffscreenContext _context;
#endif
#ifdef TEXTURE_TRANSFER_FORCE_DRAW
// Framebuffers / renderbuffers for forcing access to the texture on the transfer thread
GLuint _drawRenderbuffer { 0 };
GLuint _drawFramebuffer { 0 };
GLuint _readFramebuffer { 0 };
#endif
// A mutex protecting items accessed on both the render and transfer threads
Mutex _mutex;
// Commands that have been submitted for execution on the texture transfer thread
VoidLambdaList _pendingCommands;
// Textures that have been submitted for transfer
TextureList _pendingTextures;
// Textures currently in the transfer process
// Only used on the transfer thread
TextureList _transferringTextures;
TextureListIterator _textureIterator;
};
} }
#endif

View file

@@ -40,18 +40,28 @@ public:
class GL41Texture : public GLTexture {
using Parent = GLTexture;
GLuint allocate();
public:
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer, GLuint externalId);
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer, bool transferrable);
static GLuint allocate();
public:
~GL41Texture();
private:
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer);
protected:
void transferMip(uint16_t mipLevel, uint8_t face) const;
void startTransfer() override;
void allocateStorage() const override;
void updateSize() const override;
void syncSampler() const override;
void generateMips() const override;
uint32 size() const override;
friend class GL41Backend;
const Stamp _storageStamp;
mutable Stamp _contentStamp { 0 };
mutable Stamp _samplerStamp { 0 };
const uint32 _size;
bool isOutdated() const;
void withPreservedTexture(std::function<void()> f) const;
void syncContent() const;
void syncSampler() const;
};
@@ -62,8 +72,7 @@ protected:
GLuint getBufferID(const Buffer& buffer) override;
GLBuffer* syncGPUObject(const Buffer& buffer) override;
GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) override;
GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) override;
GLTexture* syncGPUObject(const TexturePointer& texture) override;
GLuint getQueryID(const QueryPointer& query) override;
GLQuery* syncGPUObject(const Query& query) override;

View file

@@ -53,10 +53,12 @@ public:
GL_COLOR_ATTACHMENT15 };
int unit = 0;
auto backend = _backend.lock();
for (auto& b : _gpuObject.getRenderBuffers()) {
surface = b._texture;
if (surface) {
gltexture = gl::GLTexture::sync<GL41Backend::GL41Texture>(*_backend.lock().get(), surface, false); // Grab the gltexture and don't transfer
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
gltexture = backend->syncGPUObject(surface);
} else {
gltexture = nullptr;
}
@@ -81,9 +83,11 @@ public:
}
if (_gpuObject.getDepthStamp() != _depthStamp) {
auto backend = _backend.lock();
auto surface = _gpuObject.getDepthStencilBuffer();
if (_gpuObject.hasDepthStencil() && surface) {
gltexture = gl::GLTexture::sync<GL41Backend::GL41Texture>(*_backend.lock().get(), surface, false); // Grab the gltexture and don't transfer
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
gltexture = backend->syncGPUObject(surface);
}
if (gltexture) {
@@ -110,7 +114,7 @@ public:
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, currentFBO);
}
checkStatus(GL_DRAW_FRAMEBUFFER);
checkStatus();
}

View file

@@ -29,20 +29,97 @@ GLuint GL41Texture::allocate() {
return result;
}
GLuint GL41Backend::getTextureID(const TexturePointer& texture, bool transfer) {
return GL41Texture::getId<GL41Texture>(*this, texture, transfer);
GLTexture* GL41Backend::syncGPUObject(const TexturePointer& texturePointer) {
if (!texturePointer) {
return nullptr;
}
const Texture& texture = *texturePointer;
if (TextureUsageType::EXTERNAL == texture.getUsageType()) {
return Parent::syncGPUObject(texturePointer);
}
if (!texture.isDefined()) {
// NO texture definition yet so let's avoid thinking
return nullptr;
}
// If the object hasn't been created, or the object definition is out of date, drop and re-create
GL41Texture* object = Backend::getGPUObject<GL41Texture>(texture);
if (!object || object->_storageStamp < texture.getStamp()) {
// This automatically replaces any previous texture object
object = new GL41Texture(shared_from_this(), texture);
}
// FIXME internalize to GL41Texture 'sync' function
if (object->isOutdated()) {
object->withPreservedTexture([&] {
if (object->_contentStamp < texture.getDataStamp()) {
// FIXME implement synchronous texture transfer here
object->syncContent();
}
if (object->_samplerStamp < texture.getSamplerStamp()) {
object->syncSampler();
}
});
}
return object;
}
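The GL41 path now syncs textures synchronously on the render thread, with freshness tracked by three stamps rather than the old transfer-thread state machine. Schematically (field names from this diff; the struct itself is an illustration, not the real class):

    struct StampSketch {
        const Stamp _storageStamp;          // fixed at creation from texture.getStamp();
                                            // if the source stamp passes it, recreate the object
        mutable Stamp _contentStamp { 0 };  // lags getDataStamp() until syncContent() runs
        mutable Stamp _samplerStamp { 0 };  // lags getSamplerStamp() until syncSampler() runs
    };
    // After a sync the mirror stamp is set to source + 1, so the <= comparisons in
    // isOutdated() below stay false until the source stamp advances again.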
GLTexture* GL41Backend::syncGPUObject(const TexturePointer& texture, bool transfer) {
return GL41Texture::sync<GL41Texture>(*this, texture, transfer);
GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture)
: GLTexture(backend, texture, allocate()), _storageStamp { texture.getStamp() }, _size(texture.evalTotalSize()) {
withPreservedTexture([&] {
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat());
const Sampler& sampler = _gpuObject.getSampler();
auto minMip = sampler.getMinMip();
auto maxMip = sampler.getMaxMip();
for (uint16_t l = minMip; l <= maxMip; l++) {
// Get the mip level dimensions, accounting for the downgrade level
Vec3u dimensions = _gpuObject.evalMipDimensions(l);
for (GLenum target : getFaceTargets(_target)) {
glTexImage2D(target, l - minMip, texelFormat.internalFormat, dimensions.x, dimensions.y, 0, texelFormat.format, texelFormat.type, NULL);
(void)CHECK_GL_ERROR();
}
}
});
}
GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint externalId)
: GLTexture(backend, texture, externalId) {
GL41Texture::~GL41Texture() {
}
GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable)
: GLTexture(backend, texture, allocate(), transferrable) {
bool GL41Texture::isOutdated() const {
if (_samplerStamp <= _gpuObject.getSamplerStamp()) {
return true;
}
if (TextureUsageType::RESOURCE == _gpuObject.getUsageType() && _contentStamp <= _gpuObject.getDataStamp()) {
return true;
}
return false;
}
void GL41Texture::withPreservedTexture(std::function<void()> f) const {
GLint boundTex = -1;
switch (_target) {
case GL_TEXTURE_2D:
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
break;
case GL_TEXTURE_CUBE_MAP:
glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
break;
default:
qFatal("Unsupported texture type");
}
(void)CHECK_GL_ERROR();
glBindTexture(_target, _texture);
f();
glBindTexture(_target, boundTex);
(void)CHECK_GL_ERROR();
}
void GL41Texture::generateMips() const {
@@ -52,94 +129,12 @@ void GL41Texture::generateMips() const {
(void)CHECK_GL_ERROR();
}
void GL41Texture::allocateStorage() const {
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat());
glTexParameteri(_target, GL_TEXTURE_BASE_LEVEL, 0);
(void)CHECK_GL_ERROR();
glTexParameteri(_target, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
(void)CHECK_GL_ERROR();
if (GLEW_VERSION_4_2 && !_gpuObject.getTexelFormat().isCompressed()) {
// Get the dimensions, accounting for the downgrade level
Vec3u dimensions = _gpuObject.evalMipDimensions(_minMip);
glTexStorage2D(_target, usedMipLevels(), texelFormat.internalFormat, dimensions.x, dimensions.y);
(void)CHECK_GL_ERROR();
} else {
for (uint16_t l = _minMip; l <= _maxMip; l++) {
// Get the mip level dimensions, accounting for the downgrade level
Vec3u dimensions = _gpuObject.evalMipDimensions(l);
for (GLenum target : getFaceTargets(_target)) {
glTexImage2D(target, l - _minMip, texelFormat.internalFormat, dimensions.x, dimensions.y, 0, texelFormat.format, texelFormat.type, NULL);
(void)CHECK_GL_ERROR();
}
}
}
void GL41Texture::syncContent() const {
// FIXME actually copy the texture data
_contentStamp = _gpuObject.getDataStamp() + 1;
}
void GL41Texture::updateSize() const {
setSize(_virtualSize);
if (!_id) {
return;
}
if (_gpuObject.getTexelFormat().isCompressed()) {
GLenum proxyType = GL_TEXTURE_2D;
GLuint numFaces = 1;
if (_gpuObject.getType() == gpu::Texture::TEX_CUBE) {
proxyType = CUBE_FACE_LAYOUT[0];
numFaces = (GLuint)CUBE_NUM_FACES;
}
GLint gpuSize{ 0 };
glGetTexLevelParameteriv(proxyType, 0, GL_TEXTURE_COMPRESSED, &gpuSize);
(void)CHECK_GL_ERROR();
if (gpuSize) {
for (GLuint level = _minMip; level < _maxMip; level++) {
GLint levelSize{ 0 };
glGetTexLevelParameteriv(proxyType, level, GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &levelSize);
levelSize *= numFaces;
if (levelSize <= 0) {
break;
}
gpuSize += levelSize;
}
(void)CHECK_GL_ERROR();
setSize(gpuSize);
return;
}
}
}
// Move content bits from the CPU to the GPU for a given mip / face
void GL41Texture::transferMip(uint16_t mipLevel, uint8_t face) const {
auto mip = _gpuObject.accessStoredMipFace(mipLevel, face);
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), mip->getFormat());
//GLenum target = getFaceTargets()[face];
GLenum target = _target == GL_TEXTURE_2D ? GL_TEXTURE_2D : CUBE_FACE_LAYOUT[face];
auto size = _gpuObject.evalMipDimensions(mipLevel);
glTexSubImage2D(target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
(void)CHECK_GL_ERROR();
}
void GL41Texture::startTransfer() {
PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
Parent::startTransfer();
glBindTexture(_target, _id);
(void)CHECK_GL_ERROR();
// transfer pixels from each face
uint8_t numFaces = (Texture::TEX_CUBE == _gpuObject.getType()) ? CUBE_NUM_FACES : 1;
for (uint8_t f = 0; f < numFaces; f++) {
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
transferMip(i, f);
}
}
}
}
void GL41Backend::GL41Texture::syncSampler() const {
void GL41Texture::syncSampler() const {
const Sampler& sampler = _gpuObject.getSampler();
const auto& fm = FILTER_MODES[sampler.getFilter()];
glTexParameteri(_target, GL_TEXTURE_MIN_FILTER, fm.minFilter);
@@ -161,5 +156,9 @@ void GL41Backend::GL41Texture::syncSampler() const {
glTexParameterf(_target, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
glTexParameterf(_target, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
glTexParameterf(_target, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
_samplerStamp = _gpuObject.getSamplerStamp() + 1;
}
uint32 GL41Texture::size() const {
return _size;
}

View file

@@ -163,8 +163,3 @@ void GL45Backend::do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOf
_stats._DSNumAPIDrawcalls++;
(void)CHECK_GL_ERROR();
}
void GL45Backend::recycle() const {
Parent::recycle();
derezTextures();
}

View file

@@ -8,6 +8,7 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#ifndef hifi_gpu_45_GL45Backend_h
#define hifi_gpu_45_GL45Backend_h
@@ -19,6 +20,7 @@
namespace gpu { namespace gl45 {
using namespace gpu::gl;
using TextureWeakPointer = std::weak_ptr<Texture>;
class GL45Backend : public GLBackend {
using Parent = GLBackend;
@@ -31,60 +33,152 @@ public:
class GL45Texture : public GLTexture {
using Parent = GLTexture;
friend class GL45Backend;
static GLuint allocate(const Texture& texture);
static const uint32_t DEFAULT_PAGE_DIMENSION = 128;
static const uint32_t DEFAULT_MAX_SPARSE_LEVEL = 0xFFFF;
protected:
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
void generateMips() const override;
void copyMipFromTexture(uint16_t sourceMip, uint16_t targetMip) const;
virtual void syncSampler() const;
};
//
// Textures that have fixed allocation sizes and cannot be managed at runtime
//
class GL45FixedAllocationTexture : public GL45Texture {
using Parent = GL45Texture;
friend class GL45Backend;
public:
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint externalId);
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable);
~GL45Texture();
GL45FixedAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
~GL45FixedAllocationTexture();
void postTransfer() override;
protected:
uint32 size() const override { return _size; }
void allocateStorage() const;
void syncSampler() const override;
const uint32 _size { 0 };
};
struct SparseInfo {
SparseInfo(GL45Texture& texture);
void maybeMakeSparse();
void update();
uvec3 getPageCounts(const uvec3& dimensions) const;
uint32_t getPageCount(const uvec3& dimensions) const;
uint32_t getSize() const;
class GL45AttachmentTexture : public GL45FixedAllocationTexture {
using Parent = GL45FixedAllocationTexture;
friend class GL45Backend;
protected:
GL45AttachmentTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
~GL45AttachmentTexture();
};
GL45Texture& texture;
bool sparse { false };
uvec3 pageDimensions { DEFAULT_PAGE_DIMENSION };
GLuint maxSparseLevel { DEFAULT_MAX_SPARSE_LEVEL };
uint32_t allocatedPages { 0 };
uint32_t maxPages { 0 };
uint32_t pageBytes { 0 };
GLint pageDimensionsIndex { 0 };
class GL45StrictResourceTexture : public GL45FixedAllocationTexture {
using Parent = GL45FixedAllocationTexture;
friend class GL45Backend;
protected:
GL45StrictResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
};
//
// Textures that can be managed at runtime to increase or decrease their memory load
//
class GL45VariableAllocationTexture : public GL45Texture {
using Parent = GL45Texture;
friend class GL45Backend;
using PromoteLambda = std::function<void()>;
public:
enum class MemoryPressureState {
Idle,
Transfer,
Oversubscribed,
Undersubscribed,
};
protected:
void updateMips() override;
void stripToMip(uint16_t newMinMip);
void startTransfer() override;
bool continueTransfer() override;
void finishTransfer() override;
void incrementalTransfer(const uvec3& size, const gpu::Texture::PixelsPointer& mip, std::function<void(const ivec3& offset, const uvec3& size)> f) const;
void transferMip(uint16_t mipLevel, uint8_t face = 0) const;
void allocateMip(uint16_t mipLevel, uint8_t face = 0) const;
void allocateStorage() const override;
void updateSize() const override;
void syncSampler() const override;
void generateMips() const override;
void withPreservedTexture(std::function<void()> f) const override;
void derez();
static std::atomic<bool> _memoryPressureStateStale;
static MemoryPressureState _memoryPressureState;
static std::list<TextureWeakPointer> _memoryManagedTextures;
static const uvec3 INITIAL_MIP_TRANSFER_DIMENSIONS;
SparseInfo _sparseInfo;
uint16_t _mipOffset { 0 };
friend class GL45Backend;
static void updateMemoryPressure();
static void processWorkQueues();
static void addMemoryManagedTexture(const TexturePointer& texturePointer);
static void manageMemory();
protected:
GL45VariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
~GL45VariableAllocationTexture();
//bool canPromoteNoAllocate() const { return _allocatedMip < _populatedMip; }
bool canPromote() const { return _allocatedMip > 0; }
bool canDemote() const { return _allocatedMip < _maxAllocatedMip; }
bool hasPendingTransfers() const { return !_pendingTransfers.empty(); }
void executeNextTransfer();
uint32 size() const override { return _size; }
virtual void populateTransferQueue() = 0;
virtual void promote() = 0;
virtual void demote() = 0;
uint16 _populatedMip { 0 };
uint16 _allocatedMip { 0 };
uint16 _maxAllocatedMip { 0 };
uint32 _size { 0 };
std::queue<PromoteLambda> _pendingTransfers;
};
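This class is the heart of the new strategy: resource textures keep their full mip data CPU-side but hold only a window of mips allocated on the GPU, widened or narrowed as pressure changes. A schematic of the loop implied by the declaration above (a sketch; the helper names here are hypothetical stand-ins for updateMemoryPressure()/processWorkQueues()):

    void manageMemorySketch() {
        recomputePressureIfStale(); // cf. _memoryPressureStateStale / updateMemoryPressure()
        switch (_memoryPressureState) {
        case MemoryPressureState::Oversubscribed:
            // demote(): release the highest-resolution allocated mip of a texture
            // that canDemote(), freeing GPU memory without touching CPU data.
            break;
        case MemoryPressureState::Undersubscribed:
            // promote(): allocate a higher-resolution mip for a texture that
            // canPromote(), then refill it from the stored mips.
            break;
        case MemoryPressureState::Transfer:
            // executeNextTransfer(): drain _pendingTransfers so _populatedMip
            // catches up with _allocatedMip.
            break;
        case MemoryPressureState::Idle:
            break; // nothing to do
        }
    }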
class GL45ResourceTexture : public GL45VariableAllocationTexture {
using Parent = GL45VariableAllocationTexture;
friend class GL45Backend;
protected:
GL45ResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
void syncSampler() const override;
void promote() override;
void demote() override;
void populateTransferQueue() override;
void allocateStorage(uint16 mip);
void copyMipsFromTexture();
private:
};
#if 0
class GL45SparseResourceTexture : public GL45VariableAllocationTexture {
using Parent = GL45VariableAllocationTexture;
friend class GL45Backend;
using TextureTypeFormat = std::pair<GLenum, GLenum>;
using PageDimensions = std::vector<uvec3>;
using PageDimensionsMap = std::map<TextureTypeFormat, PageDimensions>;
static PageDimensionsMap pageDimensionsByFormat;
static Mutex pageDimensionsMutex;
static bool isSparseEligible(const Texture& texture);
static PageDimensions getPageDimensionsForFormat(const TextureTypeFormat& typeFormat);
static PageDimensions getPageDimensionsForFormat(GLenum type, GLenum format);
static const uint32_t DEFAULT_PAGE_DIMENSION = 128;
static const uint32_t DEFAULT_MAX_SPARSE_LEVEL = 0xFFFF;
protected:
GL45SparseResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
~GL45SparseResourceTexture();
uint32 size() const override { return _allocatedPages * _pageBytes; }
void promote() override;
void demote() override;
private:
uvec3 getPageCounts(const uvec3& dimensions) const;
uint32_t getPageCount(const uvec3& dimensions) const;
uint32_t _allocatedPages { 0 };
uint32_t _pageBytes { 0 };
uvec3 _pageDimensions { DEFAULT_PAGE_DIMENSION };
GLuint _maxSparseLevel { DEFAULT_MAX_SPARSE_LEVEL };
};
#endif
protected:
void recycle() const override;
void derezTextures() const;
GLuint getFramebufferID(const FramebufferPointer& framebuffer) override;
GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) override;
@@ -92,8 +186,7 @@ protected:
GLuint getBufferID(const Buffer& buffer) override;
GLBuffer* syncGPUObject(const Buffer& buffer) override;
GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) override;
GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) override;
GLTexture* syncGPUObject(const TexturePointer& texture) override;
GLuint getQueryID(const QueryPointer& query) override;
GLQuery* syncGPUObject(const Query& query) override;
@@ -126,5 +219,5 @@ protected:
Q_DECLARE_LOGGING_CATEGORY(gpugl45logging)
#endif
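For orientation, the texture-object hierarchy this header (together with GLTexture.h above) establishes:

    // GLTexture                               (common GL state and stamps)
    // +-- GLExternalTexture                   (storage owned outside the gpu library)
    // +-- GL45Texture                         (GL 4.5 / DSA path)
    //     +-- GL45FixedAllocationTexture      (storage sized once at creation)
    //     |   +-- GL45AttachmentTexture       (TextureUsageType::RENDERBUFFER)
    //     |   +-- GL45StrictResourceTexture   (TextureUsageType::STRICT_RESOURCE)
    //     +-- GL45VariableAllocationTexture   (mips promoted/demoted at runtime)
    //         +-- GL45ResourceTexture         (TextureUsageType::RESOURCE)
    //         +-- GL45SparseResourceTexture   (sparse variant, parked behind #if 0)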

View file

@@ -49,10 +49,12 @@ public:
GL_COLOR_ATTACHMENT15 };
int unit = 0;
auto backend = _backend.lock();
for (auto& b : _gpuObject.getRenderBuffers()) {
surface = b._texture;
if (surface) {
gltexture = gl::GLTexture::sync<GL45Backend::GL45Texture>(*_backend.lock().get(), surface, false); // Grab the gltexture and don't transfer
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
gltexture = backend->syncGPUObject(surface);
} else {
gltexture = nullptr;
}
@@ -78,8 +80,10 @@ public:
if (_gpuObject.getDepthStamp() != _depthStamp) {
auto surface = _gpuObject.getDepthStencilBuffer();
auto backend = _backend.lock();
if (_gpuObject.hasDepthStencil() && surface) {
gltexture = gl::GLTexture::sync<GL45Backend::GL45Texture>(*_backend.lock().get(), surface, false); // Grab the gltexture and don't transfer
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
gltexture = backend->syncGPUObject(surface);
}
if (gltexture) {
@@ -102,7 +106,7 @@ public:
_status = glCheckNamedFramebufferStatus(_id, GL_DRAW_FRAMEBUFFER);
// restore the current framebuffer
checkStatus(GL_DRAW_FRAMEBUFFER);
checkStatus();
}

View file

@@ -8,9 +8,10 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GL45Backend.h"
#include "GL45Backend.h"
#include <mutex>
#include <algorithm>
#include <condition_variable>
#include <unordered_set>
#include <unordered_map>
@@ -19,142 +20,73 @@
#include <QtCore/QDebug>
#include <QtCore/QThread>
#include <NumericalConstants.h>
#include "../gl/GLTexelFormat.h"
using namespace gpu;
using namespace gpu::gl;
using namespace gpu::gl45;
// Allocate 1 MB of buffer space for paged transfers
#define DEFAULT_PAGE_BUFFER_SIZE (1024*1024)
#define DEFAULT_GL_PIXEL_ALIGNMENT 4
using GL45Texture = GL45Backend::GL45Texture;
static std::map<uint16_t, std::unordered_set<GL45Texture*>> texturesByMipCounts;
static Mutex texturesByMipCountsMutex;
using TextureTypeFormat = std::pair<GLenum, GLenum>;
std::map<TextureTypeFormat, std::vector<uvec3>> sparsePageDimensionsByFormat;
Mutex sparsePageDimensionsByFormatMutex;
static std::vector<uvec3> getPageDimensionsForFormat(const TextureTypeFormat& typeFormat) {
{
Lock lock(sparsePageDimensionsByFormatMutex);
if (sparsePageDimensionsByFormat.count(typeFormat)) {
return sparsePageDimensionsByFormat[typeFormat];
}
}
GLint count = 0;
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_NUM_VIRTUAL_PAGE_SIZES_ARB, 1, &count);
std::vector<uvec3> result;
if (count > 0) {
std::vector<GLint> x, y, z;
x.resize(count);
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_X_ARB, 1, &x[0]);
y.resize(count);
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_Y_ARB, 1, &y[0]);
z.resize(count);
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_Z_ARB, 1, &z[0]);
result.resize(count);
for (GLint i = 0; i < count; ++i) {
result[i] = uvec3(x[i], y[i], z[i]);
}
}
{
Lock lock(sparsePageDimensionsByFormatMutex);
if (0 == sparsePageDimensionsByFormat.count(typeFormat)) {
sparsePageDimensionsByFormat[typeFormat] = result;
}
}
return result;
}
static std::vector<uvec3> getPageDimensionsForFormat(GLenum target, GLenum format) {
return getPageDimensionsForFormat({ target, format });
}
GLTexture* GL45Backend::syncGPUObject(const TexturePointer& texture, bool transfer) {
return GL45Texture::sync<GL45Texture>(*this, texture, transfer);
}
using SparseInfo = GL45Backend::GL45Texture::SparseInfo;
SparseInfo::SparseInfo(GL45Texture& texture)
: texture(texture) {
}
void SparseInfo::maybeMakeSparse() {
// Don't enable sparse for objects with explicitly managed mip levels
if (!texture._gpuObject.isAutogenerateMips()) {
return;
}
return;
const uvec3 dimensions = texture._gpuObject.getDimensions();
auto allowedPageDimensions = getPageDimensionsForFormat(texture._target, texture._internalFormat);
// In order to enable sparse the texture size must be an integer multiple of the page size
for (size_t i = 0; i < allowedPageDimensions.size(); ++i) {
pageDimensionsIndex = (uint32_t) i;
pageDimensions = allowedPageDimensions[i];
// Is this texture an integer multiple of page dimensions?
if (uvec3(0) == (dimensions % pageDimensions)) {
qCDebug(gpugl45logging) << "Enabling sparse for texture " << texture._source.c_str();
sparse = true;
break;
}
}
if (sparse) {
glTextureParameteri(texture._id, GL_TEXTURE_SPARSE_ARB, GL_TRUE);
glTextureParameteri(texture._id, GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, pageDimensionsIndex);
} else {
qCDebug(gpugl45logging) << "Size " << dimensions.x << " x " << dimensions.y <<
" is not supported by any sparse page size for texture" << texture._source.c_str();
}
}
#define SPARSE_PAGE_SIZE_OVERHEAD_ESTIMATE 1.3f
// This can only be called after we've established our storage size
void SparseInfo::update() {
if (!sparse) {
return;
GLTexture* GL45Backend::syncGPUObject(const TexturePointer& texturePointer) {
if (!texturePointer) {
return nullptr;
}
glGetTextureParameterIuiv(texture._id, GL_NUM_SPARSE_LEVELS_ARB, &maxSparseLevel);
pageBytes = texture._gpuObject.getTexelFormat().getSize();
pageBytes *= pageDimensions.x * pageDimensions.y * pageDimensions.z;
// Testing with a simple texture allocating app shows an estimated 20% GPU memory overhead for
// sparse textures as compared to non-sparse, so we account for that here.
pageBytes = (uint32_t)(pageBytes * SPARSE_PAGE_SIZE_OVERHEAD_ESTIMATE);
for (uint16_t mipLevel = 0; mipLevel <= maxSparseLevel; ++mipLevel) {
auto mipDimensions = texture._gpuObject.evalMipDimensions(mipLevel);
auto mipPageCount = getPageCount(mipDimensions);
maxPages += mipPageCount;
const Texture& texture = *texturePointer;
if (std::string("cursor texture") == texture.source()) {
qDebug() << "Loading cursor texture";
}
if (texture._target == GL_TEXTURE_CUBE_MAP) {
maxPages *= GLTexture::CUBE_NUM_FACES;
if (TextureUsageType::EXTERNAL == texture.getUsageType()) {
return Parent::syncGPUObject(texturePointer);
}
if (!texture.isDefined()) {
// NO texture definition yet so let's avoid thinking
return nullptr;
}
GL45Texture* object = Backend::getGPUObject<GL45Texture>(texture);
if (!object) {
switch (texture.getUsageType()) {
case TextureUsageType::RENDERBUFFER:
object = new GL45AttachmentTexture(shared_from_this(), texture);
break;
case TextureUsageType::STRICT_RESOURCE:
qCDebug(gpugllogging) << "Strict texture " << texture.source().c_str();
object = new GL45StrictResourceTexture(shared_from_this(), texture);
break;
case TextureUsageType::RESOURCE: {
GL45VariableAllocationTexture* varObject { nullptr };
#if 0
if (isTextureManagementSparseEnabled() && GL45Texture::isSparseEligible(texture)) {
varObject = new GL45SparseResourceTexture(shared_from_this(), texture);
} else {
varObject = new GL45ResourceTexture(shared_from_this(), texture);
}
#else
varObject = new GL45ResourceTexture(shared_from_this(), texture);
#endif
GL45VariableAllocationTexture::addMemoryManagedTexture(texturePointer);
object = varObject;
break;
}
default:
Q_UNREACHABLE();
}
}
return object;
}
uvec3 SparseInfo::getPageCounts(const uvec3& dimensions) const {
auto result = (dimensions / pageDimensions) +
glm::clamp(dimensions % pageDimensions, glm::uvec3(0), glm::uvec3(1));
return result;
}
uint32_t SparseInfo::getPageCount(const uvec3& dimensions) const {
auto pageCounts = getPageCounts(dimensions);
return pageCounts.x * pageCounts.y * pageCounts.z;
}
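Both helpers above amount to a component-wise ceiling division: a partially covered page still consumes a whole committed page. Worked numbers, again assuming a hypothetical 256x128x1 page size:

    // dimensions = 1000x1000x1, pageDimensions = 256x128x1 (illustrative):
    //   x: 1000 / 256 = 3 whole pages, remainder 232 -> clamp to 1 -> 4
    //   y: 1000 / 128 = 7 whole pages, remainder 104 -> clamp to 1 -> 8
    //   z:    1 /   1 = 1 whole page,  remainder   0 -> clamp to 0 -> 1
    // getPageCounts() -> uvec3(4, 8, 1), getPageCount() -> 4 * 8 * 1 = 32 pages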
uint32_t SparseInfo::getSize() const {
    return allocatedPages * pageBytes;
}
void GL45Backend::recycle() const {
Parent::recycle();
GL45VariableAllocationTexture::manageMemory();
}
void GL45Backend::initTextureManagementStage() {
@@ -171,6 +103,11 @@ void GL45Backend::initTextureManagementStage() {
}
}
using GL45Texture = GL45Backend::GL45Texture;
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture)
: GLTexture(backend, texture, allocate(texture)) {
}
GLuint GL45Texture::allocate(const Texture& texture) {
GLuint result;
@@ -178,162 +115,37 @@ GLuint GL45Texture::allocate(const Texture& texture) {
return result;
}
GLuint GL45Backend::getTextureID(const TexturePointer& texture, bool transfer) {
return GL45Texture::getId<GL45Texture>(*this, texture, transfer);
}
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint externalId)
: GLTexture(backend, texture, externalId), _sparseInfo(*this)
{
}
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable)
: GLTexture(backend, texture, allocate(texture), transferrable), _sparseInfo(*this)
{
auto theBackend = _backend.lock();
if (_transferrable && theBackend && theBackend->isTextureManagementSparseEnabled()) {
_sparseInfo.maybeMakeSparse();
if (_sparseInfo.sparse) {
Backend::incrementTextureGPUSparseCount();
}
}
}
GL45Texture::~GL45Texture() {
// Remove this texture from the candidate list of derezzable textures
if (_transferrable) {
auto mipLevels = usedMipLevels();
Lock lock(texturesByMipCountsMutex);
if (texturesByMipCounts.count(mipLevels)) {
auto& textures = texturesByMipCounts[mipLevels];
textures.erase(this);
if (textures.empty()) {
texturesByMipCounts.erase(mipLevels);
}
}
}
if (_sparseInfo.sparse) {
Backend::decrementTextureGPUSparseCount();
        // Experimentation suggests that allocating sparse textures on one context/thread and deallocating
        // them on another is buggy. So for sparse textures we need to queue a lambda with the deallocation
        // calls to the transfer thread
auto id = _id;
// Set the class _id to 0 so we don't try to double delete
const_cast<GLuint&>(_id) = 0;
std::list<std::function<void()>> destructionFunctions;
uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
auto maxSparseMip = std::min<uint16_t>(_maxMip, _sparseInfo.maxSparseLevel);
for (uint16_t mipLevel = _minMip; mipLevel <= maxSparseMip; ++mipLevel) {
auto mipDimensions = _gpuObject.evalMipDimensions(mipLevel);
destructionFunctions.push_back([id, maxFace, mipLevel, mipDimensions] {
glTexturePageCommitmentEXT(id, mipLevel, 0, 0, 0, mipDimensions.x, mipDimensions.y, maxFace, GL_FALSE);
});
auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions) * maxFace;
assert(deallocatedPages <= _sparseInfo.allocatedPages);
_sparseInfo.allocatedPages -= deallocatedPages;
}
if (0 != _sparseInfo.allocatedPages) {
qCWarning(gpugl45logging) << "Allocated pages remaining " << _id << " " << _sparseInfo.allocatedPages;
}
auto size = _size;
const_cast<GLuint&>(_size) = 0;
_textureTransferHelper->queueExecution([id, size, destructionFunctions] {
for (auto function : destructionFunctions) {
function();
}
glDeleteTextures(1, &id);
Backend::decrementTextureGPUCount();
Backend::updateTextureGPUMemoryUsage(size, 0);
Backend::updateTextureGPUSparseMemoryUsage(size, 0);
});
}
}
void GL45Texture::withPreservedTexture(std::function<void()> f) const {
f();
}
void GL45Texture::generateMips() const {
glGenerateTextureMipmap(_id);
(void)CHECK_GL_ERROR();
}
void GL45Texture::allocateStorage() const {
    if (_gpuObject.getTexelFormat().isCompressed()) {
        qFatal("Compressed textures not yet supported");
    }
    glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, 0);
    glTextureParameteri(_id, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
    // Get the dimensions, accounting for the downgrade level
    Vec3u dimensions = _gpuObject.evalMipDimensions(_minMip + _mipOffset);
    glTextureStorage2D(_id, usedMipLevels(), _internalFormat, dimensions.x, dimensions.y);
    (void)CHECK_GL_ERROR();
}
void GL45Texture::copyMipFromTexture(uint16_t sourceMip, uint16_t targetMip) const {
    const auto& texture = _gpuObject;
    if (!texture.isStoredMipFaceAvailable(sourceMip)) {
        return;
    }
    size_t maxFace = GLTexture::getFaceCount(_target);
    for (uint8_t face = 0; face < maxFace; ++face) {
        auto size = texture.evalMipDimensions(sourceMip);
        auto mipData = texture.accessStoredMipFace(sourceMip, face);
        GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat(), mipData->getFormat());
        if (GL_TEXTURE_2D == _target) {
            glTextureSubImage2D(_id, targetMip, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mipData->readData());
        } else if (GL_TEXTURE_CUBE_MAP == _target) {
            // DSA ARB does not work on AMD, so use EXT
            // unless EXT is not available on the driver
            if (glTextureSubImage2DEXT) {
                auto target = GLTexture::CUBE_FACE_LAYOUT[face];
                glTextureSubImage2DEXT(_id, target, targetMip, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mipData->readData());
            } else {
                glTextureSubImage3D(_id, targetMip, 0, 0, face, size.x, size.y, 1, texelFormat.format, texelFormat.type, mipData->readData());
            }
        } else {
            Q_ASSERT(false);
        }
        (void)CHECK_GL_ERROR();
    }
}
void GL45Texture::updateSize() const {
    if (_gpuObject.getTexelFormat().isCompressed()) {
        qFatal("Compressed textures not yet supported");
    }
    if (_transferrable && _sparseInfo.sparse) {
        auto size = _sparseInfo.getSize();
        Backend::updateTextureGPUSparseMemoryUsage(_size, size);
        setSize(size);
    } else {
        setSize(_gpuObject.evalTotalSize(_mipOffset));
    }
}
void GL45Texture::startTransfer() {
    Parent::startTransfer();
    _sparseInfo.update();
}
bool GL45Texture::continueTransfer() {
    PROFILE_RANGE(render_gpu_gl, "continueTransfer")
    size_t maxFace = GL_TEXTURE_CUBE_MAP == _target ? CUBE_NUM_FACES : 1;
    for (uint8_t face = 0; face < maxFace; ++face) {
        for (uint16_t mipLevel = _minMip; mipLevel <= _maxMip; ++mipLevel) {
            auto size = _gpuObject.evalMipDimensions(mipLevel);
            if (_sparseInfo.sparse && mipLevel <= _sparseInfo.maxSparseLevel) {
                glTexturePageCommitmentEXT(_id, mipLevel, 0, 0, face, size.x, size.y, 1, GL_TRUE);
                _sparseInfo.allocatedPages += _sparseInfo.getPageCount(size);
            }
            if (_gpuObject.isStoredMipFaceAvailable(mipLevel, face)) {
                PROFILE_RANGE_EX(render_gpu_gl, "texSubImage", 0x0000ffff, (size.x * size.y * maxFace / 1024));
                auto mip = _gpuObject.accessStoredMipFace(mipLevel, face);
                GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), mip->getFormat());
                if (GL_TEXTURE_2D == _target) {
                    glTextureSubImage2D(_id, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
                } else if (GL_TEXTURE_CUBE_MAP == _target) {
                    // DSA ARB does not work on AMD, so use EXT
                    // unless EXT is not available on the driver
                    if (glTextureSubImage2DEXT) {
                        auto target = CUBE_FACE_LAYOUT[face];
                        glTextureSubImage2DEXT(_id, target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
                    } else {
                        glTextureSubImage3D(_id, mipLevel, 0, 0, face, size.x, size.y, 1, texelFormat.format, texelFormat.type, mip->readData());
                    }
                } else {
                    Q_ASSERT(false);
                }
                (void)CHECK_GL_ERROR();
            }
        }
    }
    return false;
}
void GL45Texture::finishTransfer() {
Parent::finishTransfer();
}
void GL45Texture::syncSampler() const {
@@ -353,163 +165,66 @@ void GL45Texture::syncSampler() const {
glTextureParameteri(_id, GL_TEXTURE_WRAP_S, WRAP_MODES[sampler.getWrapModeU()]);
glTextureParameteri(_id, GL_TEXTURE_WRAP_T, WRAP_MODES[sampler.getWrapModeV()]);
glTextureParameteri(_id, GL_TEXTURE_WRAP_R, WRAP_MODES[sampler.getWrapModeW()]);
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
glTextureParameterfv(_id, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
#if 0
// FIXME account for mip offsets here
auto baseMip = std::max<uint16_t>(sampler.getMipOffset(), _minMip);
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, baseMip);
glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
glTextureParameterf(_id, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip() - _mipOffset));
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
#endif
}
void GL45Texture::postTransfer() {
Parent::postTransfer();
auto mipLevels = usedMipLevels();
if (_transferrable && mipLevels > 1 && _minMip < _sparseInfo.maxSparseLevel) {
Lock lock(texturesByMipCountsMutex);
texturesByMipCounts[mipLevels].insert(this);
}
}
using GL45FixedAllocationTexture = GL45Backend::GL45FixedAllocationTexture;
void GL45Texture::stripToMip(uint16_t newMinMip) {
if (newMinMip < _minMip) {
qCWarning(gpugl45logging) << "Cannot decrease the min mip";
return;
}
if (_sparseInfo.sparse && newMinMip > _sparseInfo.maxSparseLevel) {
qCWarning(gpugl45logging) << "Cannot increase the min mip into the mip tail";
return;
}
PROFILE_RANGE(render_gpu_gl, "GL45Texture::stripToMip");
auto mipLevels = usedMipLevels();
{
Lock lock(texturesByMipCountsMutex);
assert(0 != texturesByMipCounts.count(mipLevels));
assert(0 != texturesByMipCounts[mipLevels].count(this));
texturesByMipCounts[mipLevels].erase(this);
if (texturesByMipCounts[mipLevels].empty()) {
texturesByMipCounts.erase(mipLevels);
}
}
// If we weren't generating mips before, we need to now that we're stripping down mip levels.
if (!_gpuObject.isAutogenerateMips()) {
qCDebug(gpugl45logging) << "Force mip generation for texture";
glGenerateTextureMipmap(_id);
}
uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
if (_sparseInfo.sparse) {
for (uint16_t mip = _minMip; mip < newMinMip; ++mip) {
auto id = _id;
auto mipDimensions = _gpuObject.evalMipDimensions(mip);
_textureTransferHelper->queueExecution([id, mip, mipDimensions, maxFace] {
glTexturePageCommitmentEXT(id, mip, 0, 0, 0, mipDimensions.x, mipDimensions.y, maxFace, GL_FALSE);
});
auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions) * maxFace;
assert(deallocatedPages < _sparseInfo.allocatedPages);
_sparseInfo.allocatedPages -= deallocatedPages;
}
_minMip = newMinMip;
} else {
GLuint oldId = _id;
// Find the distance between the old min mip and the new one
uint16 mipDelta = newMinMip - _minMip;
_mipOffset += mipDelta;
const_cast<uint16&>(_maxMip) -= mipDelta;
auto newLevels = usedMipLevels();
// Create and setup the new texture (allocate)
{
Vec3u newDimensions = _gpuObject.evalMipDimensions(_mipOffset);
PROFILE_RANGE_EX(render_gpu_gl, "Re-Allocate", 0xff0000ff, (newDimensions.x * newDimensions.y));
glCreateTextures(_target, 1, &const_cast<GLuint&>(_id));
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, 0);
glTextureParameteri(_id, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
glTextureStorage2D(_id, newLevels, _internalFormat, newDimensions.x, newDimensions.y);
}
// Copy the contents of the old texture to the new
{
PROFILE_RANGE(render_gpu_gl, "Blit");
// Preferred path only available in 4.3
for (uint16 targetMip = _minMip; targetMip <= _maxMip; ++targetMip) {
uint16 sourceMip = targetMip + mipDelta;
Vec3u mipDimensions = _gpuObject.evalMipDimensions(targetMip + _mipOffset);
for (GLenum target : getFaceTargets(_target)) {
glCopyImageSubData(
oldId, target, sourceMip, 0, 0, 0,
_id, target, targetMip, 0, 0, 0,
mipDimensions.x, mipDimensions.y, 1
);
(void)CHECK_GL_ERROR();
}
}
glDeleteTextures(1, &oldId);
}
}
    // Re-sync the sampler to force access to the new mip level
    syncSampler();
    updateSize();
    // Re-insert into the texture-by-mips map if appropriate
    mipLevels = usedMipLevels();
    if (mipLevels > 1 && (!_sparseInfo.sparse || _minMip < _sparseInfo.maxSparseLevel)) {
        Lock lock(texturesByMipCountsMutex);
        texturesByMipCounts[mipLevels].insert(this);
    }
}
GL45FixedAllocationTexture::GL45FixedAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45Texture(backend, texture), _size(texture.evalTotalSize()) {
    allocateStorage();
    syncSampler();
}
GL45FixedAllocationTexture::~GL45FixedAllocationTexture() {
}
void GL45FixedAllocationTexture::allocateStorage() const {
const GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat());
const auto dimensions = _gpuObject.getDimensions();
const auto mips = _gpuObject.evalNumMips();
glTextureStorage2D(_id, mips, texelFormat.internalFormat, dimensions.x, dimensions.y);
}
void GL45FixedAllocationTexture::syncSampler() const {
Parent::syncSampler();
const Sampler& sampler = _gpuObject.getSampler();
auto baseMip = std::max<uint16_t>(sampler.getMipOffset(), sampler.getMinMip());
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, baseMip);
glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
glTextureParameterf(_id, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
}
// Renderbuffer attachment textures
using GL45AttachmentTexture = GL45Backend::GL45AttachmentTexture;
GL45AttachmentTexture::GL45AttachmentTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45FixedAllocationTexture(backend, texture) {
Backend::updateTextureGPUFramebufferMemoryUsage(0, size());
}
GL45AttachmentTexture::~GL45AttachmentTexture() {
Backend::updateTextureGPUFramebufferMemoryUsage(size(), 0);
}
// Strict resource textures
using GL45StrictResourceTexture = GL45Backend::GL45StrictResourceTexture;
GL45StrictResourceTexture::GL45StrictResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45FixedAllocationTexture(backend, texture) {
auto mipLevels = _gpuObject.evalNumMips();
for (uint16_t sourceMip = 0; sourceMip < mipLevels; ++sourceMip) {
uint16_t targetMip = sourceMip;
copyMipFromTexture(sourceMip, targetMip);
}
if (texture.isAutogenerateMips()) {
generateMips();
}
}
void GL45Texture::updateMips() {
if (!_sparseInfo.sparse) {
return;
}
auto newMinMip = std::min<uint16_t>(_gpuObject.minMip(), _sparseInfo.maxSparseLevel);
if (_minMip < newMinMip) {
stripToMip(newMinMip);
}
}
void GL45Texture::derez() {
if (_sparseInfo.sparse) {
assert(_minMip < _sparseInfo.maxSparseLevel);
}
assert(_minMip < _maxMip);
assert(_transferrable);
stripToMip(_minMip + 1);
}
void GL45Backend::derezTextures() const {
if (GLTexture::getMemoryPressure() < 1.0f) {
return;
}
Lock lock(texturesByMipCountsMutex);
if (texturesByMipCounts.empty()) {
// No available textures to derez
return;
}
auto mipLevel = texturesByMipCounts.rbegin()->first;
if (mipLevel <= 1) {
// No mips available to remove
return;
}
GL45Texture* targetTexture = nullptr;
{
auto& textures = texturesByMipCounts[mipLevel];
assert(!textures.empty());
targetTexture = *textures.begin();
}
lock.unlock();
targetTexture->derez();
}

View file

@@ -0,0 +1,888 @@
//
// GL45BackendTexture.cpp
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 1/19/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GL45Backend.h"
#include <mutex>
#include <algorithm>
#include <condition_variable>
#include <unordered_set>
#include <unordered_map>
#include <glm/gtx/component_wise.hpp>
#include <QtCore/QDebug>
#include <QtCore/QThread>
#include <NumericalConstants.h>
#include "../gl/GLTexelFormat.h"
using namespace gpu;
using namespace gpu::gl;
using namespace gpu::gl45;
// Variable sized textures
using GL45VariableAllocationTexture = GL45Backend::GL45VariableAllocationTexture;
using MemoryPressureState = GL45VariableAllocationTexture::MemoryPressureState;
std::list<TextureWeakPointer> GL45VariableAllocationTexture::_memoryManagedTextures;
MemoryPressureState GL45VariableAllocationTexture::_memoryPressureState = MemoryPressureState::Idle;
std::atomic<bool> GL45VariableAllocationTexture::_memoryPressureStateStale { false };
const uvec3 GL45VariableAllocationTexture::INITIAL_MIP_TRANSFER_DIMENSIONS { 64, 64, 1 };
#define OVERSUBSCRIBED_PRESSURE_VALUE 0.95f
#define UNDERSUBSCRIBED_PRESSURE_VALUE 0.85f
#define DEFAULT_ALLOWED_TEXTURE_MEMORY_MB ((size_t)1024)
static const size_t DEFAULT_ALLOWED_TEXTURE_MEMORY = MB_TO_BYTES(DEFAULT_ALLOWED_TEXTURE_MEMORY_MB);
using QueuePair = std::pair<TextureWeakPointer, uint32_t>;
class QueuePairLess {
public:
bool operator()(const QueuePair& a, const QueuePair& b) {
return a.second < b.second;
}
};
class QueuePairGreater {
public:
bool operator()(const QueuePair& a, const QueuePair& b) {
return a.second > b.second;
}
};
using DemoteQueue = std::priority_queue<QueuePair, std::vector<QueuePair>, QueuePairLess>;
using PromoteQueue = std::priority_queue<QueuePair, std::vector<QueuePair>, QueuePairGreater>;
using TransferQueue = std::queue<TextureWeakPointer>;
static DemoteQueue demoteQueue;
static PromoteQueue promoteQueue;
static TransferQueue transferQueue;
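The comparator orientation is easy to misread: std::priority_queue keeps the element that compares greatest on top, so DemoteQueue (QueuePairLess on byte size) surfaces the largest texture first, while PromoteQueue (QueuePairGreater) surfaces the smallest. A self-contained sketch of that ordering:

    #include <cassert>
    #include <cstdint>
    #include <queue>
    #include <vector>

    int main() {
        // Same shape as the queues above, minus the texture pointers.
        auto less = [](uint32_t a, uint32_t b) { return a < b; };
        auto greater = [](uint32_t a, uint32_t b) { return a > b; };
        std::priority_queue<uint32_t, std::vector<uint32_t>, decltype(less)> demote(less);
        std::priority_queue<uint32_t, std::vector<uint32_t>, decltype(greater)> promote(greater);
        for (uint32_t bytes : { 64u, 4096u, 512u }) {
            demote.push(bytes);
            promote.push(bytes);
        }
        assert(demote.top() == 4096u);  // biggest memory win is demoted first
        assert(promote.top() == 64u);   // cheapest upgrade is promoted first
    }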
void GL45VariableAllocationTexture::addMemoryManagedTexture(const TexturePointer& texturePointer) {
_memoryManagedTextures.push_back(texturePointer);
GL45VariableAllocationTexture* object = Backend::getGPUObject<GL45VariableAllocationTexture>(*texturePointer);
switch (_memoryPressureState) {
case MemoryPressureState::Oversubscribed:
if (object->canDemote()) {
demoteQueue.push({ texturePointer, object->size() });
}
break;
case MemoryPressureState::Undersubscribed:
if (object->canPromote()) {
promoteQueue.push({ texturePointer, object->size() });
}
break;
case MemoryPressureState::Transfer:
if (object->hasPendingTransfers()) {
transferQueue.push( texturePointer );
}
break;
case MemoryPressureState::Idle:
break;
default:
Q_UNREACHABLE();
}
}
void GL45VariableAllocationTexture::updateMemoryPressure() {
static size_t lastAllowedMemoryAllocation = gpu::Texture::getAllowedGPUMemoryUsage();
size_t allowedMemoryAllocation = gpu::Texture::getAllowedGPUMemoryUsage();
if (allowedMemoryAllocation != lastAllowedMemoryAllocation) {
_memoryPressureStateStale = true;
lastAllowedMemoryAllocation = allowedMemoryAllocation;
}
if (!_memoryPressureStateStale) {
return;
}
_memoryPressureStateStale = false;
// Clear any defunct textures
_memoryManagedTextures.remove_if([&](const TextureWeakPointer& weakPointer) {
return weakPointer.expired();
});
// Convert weak pointers to strong
std::list<TexturePointer> strongTextures; {
std::transform(
_memoryManagedTextures.begin(), _memoryManagedTextures.end(),
std::back_inserter(strongTextures),
[](const TextureWeakPointer& p) { return p.lock(); });
}
size_t totalVariableMemoryAllocation = 0;
size_t idealMemoryAllocation = 0;
bool canDemote = false;
bool canPromote = false;
bool hasTransfers = false;
for (const auto& texture : strongTextures) {
// Race conditions can still leave nulls in the list, so we need to check
if (!texture) {
continue;
}
idealMemoryAllocation += texture->evalTotalSize();
GL45VariableAllocationTexture* object = Backend::getGPUObject<GL45VariableAllocationTexture>(*texture);
totalVariableMemoryAllocation += object->size();
canDemote |= object->canDemote();
canPromote |= object->canPromote();
hasTransfers |= object->hasPendingTransfers();
}
size_t unallocated = idealMemoryAllocation - totalVariableMemoryAllocation;
if (0 == allowedMemoryAllocation) {
allowedMemoryAllocation = DEFAULT_ALLOWED_TEXTURE_MEMORY;
}
float pressure = (float)totalVariableMemoryAllocation / (float)allowedMemoryAllocation;
auto newState = MemoryPressureState::Idle;
if (pressure > OVERSUBSCRIBED_PRESSURE_VALUE && canDemote) {
newState = MemoryPressureState::Oversubscribed;
} else if (pressure < UNDERSUBSCRIBED_PRESSURE_VALUE && unallocated != 0 && canPromote) {
newState = MemoryPressureState::Undersubscribed;
} else if (hasTransfers) {
newState = MemoryPressureState::Transfer;
}
if (newState != _memoryPressureState) {
_memoryPressureState = newState;
demoteQueue = DemoteQueue();
promoteQueue = PromoteQueue();
transferQueue = TransferQueue();
switch (_memoryPressureState) {
case MemoryPressureState::Idle:
break;
case MemoryPressureState::Oversubscribed:
for (const auto& texture : strongTextures) {
if (!texture) {
continue;
}
GL45VariableAllocationTexture* object = Backend::getGPUObject<GL45VariableAllocationTexture>(*texture);
if (object->canDemote()) {
demoteQueue.push({ texture, object->size() });
}
}
break;
case MemoryPressureState::Undersubscribed:
for (const auto& texture : strongTextures) {
if (!texture) {
continue;
}
GL45VariableAllocationTexture* object = Backend::getGPUObject<GL45VariableAllocationTexture>(*texture);
if (object->canPromote()) {
promoteQueue.push({ texture, object->size() });
}
}
break;
case MemoryPressureState::Transfer:
for (const auto& texture : strongTextures) {
if (!texture) {
continue;
}
GL45VariableAllocationTexture* object = Backend::getGPUObject<GL45VariableAllocationTexture>(*texture);
if (object->hasPendingTransfers()) {
transferQueue.push(texture);
}
}
break;
default:
Q_UNREACHABLE();
break;
}
}
}
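The two thresholds form a hysteresis band around the budget, so the state machine settles rather than oscillating at a single cutoff. Worked against the defaults above:

    // allowed = 1024 MB (DEFAULT_ALLOWED_TEXTURE_MEMORY when no budget is configured):
    //   total = 990 MB -> pressure = 990 / 1024 ~ 0.97 > 0.95 -> Oversubscribed (if anything can demote)
    //   total = 900 MB -> pressure ~ 0.88 -> inside the band  -> Transfer if work is pending, else Idle
    //   total = 850 MB -> pressure ~ 0.83 < 0.85 -> Undersubscribed (if headroom remains and anything can promote)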
void GL45VariableAllocationTexture::processWorkQueues() {
switch (_memoryPressureState) {
case MemoryPressureState::Idle:
break;
case MemoryPressureState::Oversubscribed:
// Grab the first item off the demote queue
while (!demoteQueue.empty()) {
auto demoteTarget = demoteQueue.top();
demoteQueue.pop();
auto texture = demoteTarget.first.lock();
if (!texture) {
continue;
}
GL45VariableAllocationTexture* object = Backend::getGPUObject<GL45VariableAllocationTexture>(*texture);
if (!object->canDemote()) {
continue;
}
//qDebug() << "QQQ executing demote for " << texture->source().c_str();
object->demote();
// if the object can be further demoted, reinsert into the queue
if (object->canDemote()) {
demoteQueue.push({ demoteTarget.first, object->size() });
}
break;
}
if (demoteQueue.empty()) {
_memoryPressureState = MemoryPressureState::Idle;
}
break;
case MemoryPressureState::Undersubscribed:
while (!promoteQueue.empty()) {
auto promoteTarget = promoteQueue.top();
promoteQueue.pop();
auto texture = promoteTarget.first.lock();
if (!texture) {
continue;
}
GL45VariableAllocationTexture* object = Backend::getGPUObject<GL45VariableAllocationTexture>(*texture);
if (!object->canPromote()) {
continue;
}
//qDebug() << "QQQ executing promote for " << texture->source().c_str();
object->promote();
if (object->canPromote()) {
promoteQueue.push({ promoteTarget.first, object->size() });
}
break;
}
if (promoteQueue.empty()) {
_memoryPressureState = MemoryPressureState::Idle;
}
break;
case MemoryPressureState::Transfer:
while (!transferQueue.empty()) {
auto weakTexture = transferQueue.front();
transferQueue.pop();
auto texture = weakTexture.lock();
if (!texture) {
continue;
}
GL45VariableAllocationTexture* object = Backend::getGPUObject<GL45VariableAllocationTexture>(*texture);
if (!object->hasPendingTransfers()) {
continue;
}
//qDebug() << "QQQ executing transfer for " << texture->source().c_str();
object->executeNextTransfer();
if (object->hasPendingTransfers()) {
transferQueue.push(weakTexture);
}
break;
}
if (transferQueue.empty()) {
_memoryPressureState = MemoryPressureState::Idle;
}
break;
default:
Q_UNREACHABLE();
break;
}
}
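Every branch above breaks out of its loop after servicing a single texture, so one call does at most one demote, promote, or transfer. A sketch of that pacing pattern in isolation (names are illustrative, not from this commit):

    #include <functional>
    #include <queue>

    // One unit of work per tick keeps any individual frame cheap; a backlog of
    // N operations drains over N ticks instead of stalling the renderer.
    void serviceOnePerTick(std::queue<std::function<void()>>& work) {
        if (work.empty()) {
            return;
        }
        work.front()();
        work.pop();
    }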
void GL45VariableAllocationTexture::manageMemory() {
static auto lastProcessTime = usecTimestampNow();
auto now = usecTimestampNow();
auto interval = now - lastProcessTime;
if (interval > (USECS_PER_MSEC * 20)) {
lastProcessTime = now;
updateMemoryPressure();
processWorkQueues();
}
}
GL45VariableAllocationTexture::GL45VariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45Texture(backend, texture) {
}
GL45VariableAllocationTexture::~GL45VariableAllocationTexture() {
_memoryPressureStateStale = true;
Backend::updateTextureGPUMemoryUsage(_size, 0);
}
void GL45VariableAllocationTexture::executeNextTransfer() {
if (!_pendingTransfers.empty()) {
_pendingTransfers.front()();
_pendingTransfers.pop();
}
}
// Managed size resource textures
using GL45ResourceTexture = GL45Backend::GL45ResourceTexture;
GL45ResourceTexture::GL45ResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45VariableAllocationTexture(backend, texture) {
auto mipLevels = texture.evalNumMips();
_allocatedMip = mipLevels;
uvec3 mipDimensions;
for (uint16_t mip = 0; mip < mipLevels; ++mip) {
if (glm::all(glm::lessThanEqual(texture.evalMipDimensions(mip), INITIAL_MIP_TRANSFER_DIMENSIONS))) {
_maxAllocatedMip = _populatedMip = mip;
break;
}
}
uint16_t allocatedMip = _populatedMip - std::min<uint16_t>(_populatedMip, 2);
allocateStorage(allocatedMip);
_memoryPressureStateStale = true;
copyMipsFromTexture();
syncSampler();
}
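A worked example of the initial allocation: for a 1024x1024 resource texture, the first mip that fits inside INITIAL_MIP_TRANSFER_DIMENSIONS (64x64) is mip 4, so only the 64x64-and-smaller tail is populated up front, with two extra levels of storage headroom for later promotion:

    // 1024x1024 texture, INITIAL_MIP_TRANSFER_DIMENSIONS = 64x64x1:
    //   mip 0: 1024, mip 1: 512, mip 2: 256, mip 3: 128, mip 4: 64  <- first fit
    //   _populatedMip = _maxAllocatedMip = 4
    //   allocatedMip  = 4 - min(4, 2) = 2  -> storage covers mips 2..N, data exists from mip 4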
void GL45ResourceTexture::allocateStorage(uint16 allocatedMip) {
_allocatedMip = allocatedMip;
const GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat());
const auto dimensions = _gpuObject.evalMipDimensions(_allocatedMip);
const auto totalMips = _gpuObject.evalNumMips();
const auto mips = totalMips - _allocatedMip;
glTextureStorage2D(_id, mips, texelFormat.internalFormat, dimensions.x, dimensions.y);
auto mipLevels = _gpuObject.evalNumMips();
_size = 0;
for (uint16_t mip = _allocatedMip; mip < mipLevels; ++mip) {
_size += _gpuObject.evalMipSize(mip);
}
Backend::updateTextureGPUMemoryUsage(0, _size);
}
void GL45ResourceTexture::copyMipsFromTexture() {
auto mipLevels = _gpuObject.evalNumMips();
for (uint16_t sourceMip = _populatedMip; sourceMip < mipLevels; ++sourceMip) {
uint16_t targetMip = sourceMip - _allocatedMip;
copyMipFromTexture(sourceMip, targetMip);
}
}
void GL45ResourceTexture::syncSampler() const {
Parent::syncSampler();
const Sampler& sampler = _gpuObject.getSampler();
uint16_t maxMip = _gpuObject.evalNumMips() - _allocatedMip;
auto minMip = std::max<uint16_t>(sampler.getMipOffset(), sampler.getMinMip());
minMip = std::min<uint16_t>(minMip, maxMip);
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, _populatedMip - _allocatedMip);
glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, (float)minMip);
glTextureParameterf(_id, GL_TEXTURE_MAX_LOD, (float)maxMip);
}
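Because the GL storage only spans mips [_allocatedMip, N), GL level indices are shifted relative to the logical texture, and GL_TEXTURE_BASE_LEVEL must name the first level that actually holds data. Continuing the example above:

    // _allocatedMip = 2, _populatedMip = 4 -> GL_TEXTURE_BASE_LEVEL = 4 - 2 = 2
    // after one more transfer (_populatedMip = 3) -> GL_TEXTURE_BASE_LEVEL = 3 - 2 = 1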
void GL45ResourceTexture::promote() {
Q_ASSERT(_allocatedMip > 0);
GLuint oldId = _id;
uint32_t oldSize = _size;
// create new texture
const_cast<GLuint&>(_id) = allocate(_gpuObject);
uint16_t oldAllocatedMip = _allocatedMip;
// allocate storage for new level
allocateStorage(_allocatedMip - std::min<uint16_t>(_allocatedMip, 2));
uint16_t mips = _gpuObject.evalNumMips();
// copy pre-existing mips
for (uint16_t mip = _populatedMip; mip < mips; ++mip) {
auto mipDimensions = _gpuObject.evalMipDimensions(mip);
uint16_t targetMip = mip - _allocatedMip;
uint16_t sourceMip = mip - oldAllocatedMip;
auto faces = getFaceCount(_target);
for (uint8_t face = 0; face < faces; ++face) {
glCopyImageSubData(
oldId, _target, sourceMip, 0, 0, face,
_id, _target, targetMip, 0, 0, face,
mipDimensions.x, mipDimensions.y, 1
);
(void)CHECK_GL_ERROR();
}
}
// destroy the old texture
glDeleteTextures(1, &oldId);
// update the memory usage
Backend::updateTextureGPUMemoryUsage(oldSize, 0);
_memoryPressureStateStale = true;
syncSampler();
populateTransferQueue();
}
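The copy loop re-bases each logical mip into the two textures' storage-relative indices. With the old storage starting at mip 2 and the promoted storage at mip 0 (illustrative values), the math works out as:

    // oldAllocatedMip = 2, new _allocatedMip = 0, _populatedMip = 4:
    //   logical mip 4 -> sourceMip = 4 - 2 = 2, targetMip = 4 - 0 = 4
    //   logical mip 5 -> sourceMip = 3,         targetMip = 5
    // demote() below is the mirror image: sourceMip = targetMip + 1, because the
    // new allocation starts exactly one level coarser than the old one.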
void GL45ResourceTexture::demote() {
Q_ASSERT(_allocatedMip < _maxAllocatedMip);
auto oldId = _id;
auto oldSize = _size;
const_cast<GLuint&>(_id) = allocate(_gpuObject);
allocateStorage(_allocatedMip + 1);
_populatedMip = std::max(_populatedMip, _allocatedMip);
uint16_t mips = _gpuObject.evalNumMips();
// copy pre-existing mips
for (uint16_t mip = _populatedMip; mip < mips; ++mip) {
auto mipDimensions = _gpuObject.evalMipDimensions(mip);
uint16_t targetMip = mip - _allocatedMip;
uint16_t sourceMip = targetMip + 1;
auto faces = getFaceCount(_target);
for (uint8_t face = 0; face < faces; ++face) {
glCopyImageSubData(
oldId, _target, sourceMip, 0, 0, face,
_id, _target, targetMip, 0, 0, face,
mipDimensions.x, mipDimensions.y, 1
);
(void)CHECK_GL_ERROR();
}
}
// destroy the old texture
glDeleteTextures(1, &oldId);
// update the memory usage
Backend::updateTextureGPUMemoryUsage(oldSize, 0);
_memoryPressureStateStale = true;
syncSampler();
populateTransferQueue();
}
void GL45ResourceTexture::populateTransferQueue() {
_pendingTransfers = std::queue<PromoteLambda>();
if (_populatedMip <= _allocatedMip) {
return;
}
for (int16_t mip = _populatedMip - 1; mip >= _allocatedMip; --mip) {
// FIXME break down the transfers into chunks so that no single transfer is
// consuming more than X bandwidth
_pendingTransfers.push([mip, this] {
Q_ASSERT(mip >= _allocatedMip);
// FIXME modify the copy mechanism to be incremental
copyMipFromTexture(mip, mip - _allocatedMip);
_populatedMip = mip;
syncSampler();
});
}
}
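Transfers are queued coarse-to-fine: each lambda fills the mip just above the currently populated one, then lowers _populatedMip and re-syncs the sampler, so the texture sharpens incrementally instead of waiting for the full upload. For example:

    // _populatedMip = 4, _allocatedMip = 2 -> queued work, in order:
    //   copyMipFromTexture(3, 1); _populatedMip = 3; syncSampler();
    //   copyMipFromTexture(2, 0); _populatedMip = 2; syncSampler();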
// Sparsely allocated, managed size resource textures
#if 0
#define SPARSE_PAGE_SIZE_OVERHEAD_ESTIMATE 1.3f
using GL45SparseResourceTexture = GL45Backend::GL45SparseResourceTexture;
GL45Texture::PageDimensionsMap GL45Texture::pageDimensionsByFormat;
Mutex GL45Texture::pageDimensionsMutex;
GL45Texture::PageDimensions GL45Texture::getPageDimensionsForFormat(const TextureTypeFormat& typeFormat) {
{
Lock lock(pageDimensionsMutex);
if (pageDimensionsByFormat.count(typeFormat)) {
return pageDimensionsByFormat[typeFormat];
}
}
GLint count = 0;
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_NUM_VIRTUAL_PAGE_SIZES_ARB, 1, &count);
std::vector<uvec3> result;
if (count > 0) {
std::vector<GLint> x, y, z;
x.resize(count);
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_X_ARB, 1, &x[0]);
y.resize(count);
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_Y_ARB, 1, &y[0]);
z.resize(count);
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_Z_ARB, 1, &z[0]);
result.resize(count);
for (GLint i = 0; i < count; ++i) {
result[i] = uvec3(x[i], y[i], z[i]);
}
}
{
Lock lock(pageDimensionsMutex);
if (0 == pageDimensionsByFormat.count(typeFormat)) {
pageDimensionsByFormat[typeFormat] = result;
}
}
return result;
}
GL45Texture::PageDimensions GL45Texture::getPageDimensionsForFormat(GLenum target, GLenum format) {
return getPageDimensionsForFormat({ target, format });
}
bool GL45Texture::isSparseEligible(const Texture& texture) {
Q_ASSERT(TextureUsageType::RESOURCE == texture.getUsageType());
    // Disabling sparse for the moment
return false;
const auto allowedPageDimensions = getPageDimensionsForFormat(getGLTextureType(texture),
gl::GLTexelFormat::evalGLTexelFormatInternal(texture.getTexelFormat()));
const auto textureDimensions = texture.getDimensions();
for (const auto& pageDimensions : allowedPageDimensions) {
if (uvec3(0) == (textureDimensions % pageDimensions)) {
return true;
}
}
return false;
}
GL45SparseResourceTexture::GL45SparseResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45VariableAllocationTexture(backend, texture) {
const GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat());
const uvec3 dimensions = _gpuObject.getDimensions();
auto allowedPageDimensions = getPageDimensionsForFormat(_target, texelFormat.internalFormat);
uint32_t pageDimensionsIndex = 0;
// In order to enable sparse the texture size must be an integer multiple of the page size
for (size_t i = 0; i < allowedPageDimensions.size(); ++i) {
pageDimensionsIndex = (uint32_t)i;
_pageDimensions = allowedPageDimensions[i];
// Is this texture an integer multiple of page dimensions?
if (uvec3(0) == (dimensions % _pageDimensions)) {
qCDebug(gpugl45logging) << "Enabling sparse for texture " << _gpuObject.source().c_str();
break;
}
}
glTextureParameteri(_id, GL_TEXTURE_SPARSE_ARB, GL_TRUE);
glTextureParameteri(_id, GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, pageDimensionsIndex);
glGetTextureParameterIuiv(_id, GL_NUM_SPARSE_LEVELS_ARB, &_maxSparseLevel);
_pageBytes = _gpuObject.getTexelFormat().getSize();
_pageBytes *= _pageDimensions.x * _pageDimensions.y * _pageDimensions.z;
// Testing with a simple texture allocating app shows an estimated 20% GPU memory overhead for
    // sparse textures as compared to non-sparse, so we account for that here.
_pageBytes = (uint32_t)(_pageBytes * SPARSE_PAGE_SIZE_OVERHEAD_ESTIMATE);
//allocateStorage();
syncSampler();
}
GL45SparseResourceTexture::~GL45SparseResourceTexture() {
Backend::updateTextureGPUVirtualMemoryUsage(size(), 0);
}
uvec3 GL45SparseResourceTexture::getPageCounts(const uvec3& dimensions) const {
auto result = (dimensions / _pageDimensions) +
glm::clamp(dimensions % _pageDimensions, glm::uvec3(0), glm::uvec3(1));
return result;
}
uint32_t GL45SparseResourceTexture::getPageCount(const uvec3& dimensions) const {
auto pageCounts = getPageCounts(dimensions);
return pageCounts.x * pageCounts.y * pageCounts.z;
}
void GL45SparseResourceTexture::promote() {
}
void GL45SparseResourceTexture::demote() {
}
SparseInfo::SparseInfo(GL45Texture& texture)
: texture(texture) {
}
void SparseInfo::maybeMakeSparse() {
// Don't enable sparse for objects with explicitly managed mip levels
if (!texture._gpuObject.isAutogenerateMips()) {
return;
}
const uvec3 dimensions = texture._gpuObject.getDimensions();
auto allowedPageDimensions = getPageDimensionsForFormat(texture._target, texture._internalFormat);
// In order to enable sparse the texture size must be an integer multiple of the page size
for (size_t i = 0; i < allowedPageDimensions.size(); ++i) {
pageDimensionsIndex = (uint32_t)i;
pageDimensions = allowedPageDimensions[i];
// Is this texture an integer multiple of page dimensions?
if (uvec3(0) == (dimensions % pageDimensions)) {
qCDebug(gpugl45logging) << "Enabling sparse for texture " << texture._source.c_str();
sparse = true;
break;
}
}
if (sparse) {
glTextureParameteri(texture._id, GL_TEXTURE_SPARSE_ARB, GL_TRUE);
glTextureParameteri(texture._id, GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, pageDimensionsIndex);
} else {
qCDebug(gpugl45logging) << "Size " << dimensions.x << " x " << dimensions.y <<
" is not supported by any sparse page size for texture" << texture._source.c_str();
}
}
// This can only be called after we've established our storage size
void SparseInfo::update() {
if (!sparse) {
return;
}
glGetTextureParameterIuiv(texture._id, GL_NUM_SPARSE_LEVELS_ARB, &maxSparseLevel);
for (uint16_t mipLevel = 0; mipLevel <= maxSparseLevel; ++mipLevel) {
auto mipDimensions = texture._gpuObject.evalMipDimensions(mipLevel);
auto mipPageCount = getPageCount(mipDimensions);
maxPages += mipPageCount;
}
if (texture._target == GL_TEXTURE_CUBE_MAP) {
maxPages *= GLTexture::CUBE_NUM_FACES;
}
}
void SparseInfo::allocateToMip(uint16_t targetMip) {
// Not sparse, do nothing
if (!sparse) {
return;
}
if (allocatedMip == INVALID_MIP) {
allocatedMip = maxSparseLevel + 1;
}
// Don't try to allocate below the maximum sparse level
if (targetMip > maxSparseLevel) {
targetMip = maxSparseLevel;
}
// Already allocated this level
if (allocatedMip <= targetMip) {
return;
}
uint32_t maxFace = (uint32_t)(GL_TEXTURE_CUBE_MAP == texture._target ? CUBE_NUM_FACES : 1);
for (uint16_t mip = targetMip; mip < allocatedMip; ++mip) {
auto size = texture._gpuObject.evalMipDimensions(mip);
glTexturePageCommitmentEXT(texture._id, mip, 0, 0, 0, size.x, size.y, maxFace, GL_TRUE);
allocatedPages += getPageCount(size);
}
allocatedMip = targetMip;
}
uint32_t SparseInfo::getSize() const {
return allocatedPages * pageBytes;
}
using SparseInfo = GL45Backend::GL45Texture::SparseInfo;
void GL45Texture::updateSize() const {
if (_gpuObject.getTexelFormat().isCompressed()) {
qFatal("Compressed textures not yet supported");
}
if (_transferrable && _sparseInfo.sparse) {
auto size = _sparseInfo.getSize();
Backend::updateTextureGPUSparseMemoryUsage(_size, size);
setSize(size);
} else {
setSize(_gpuObject.evalTotalSize(_mipOffset));
}
}
void GL45Texture::startTransfer() {
Parent::startTransfer();
_sparseInfo.update();
_populatedMip = _maxMip + 1;
}
bool GL45Texture::continueTransfer() {
size_t maxFace = GL_TEXTURE_CUBE_MAP == _target ? CUBE_NUM_FACES : 1;
if (_populatedMip == _minMip) {
return false;
}
uint16_t targetMip = _populatedMip - 1;
while (targetMip > 0 && !_gpuObject.isStoredMipFaceAvailable(targetMip)) {
--targetMip;
}
_sparseInfo.allocateToMip(targetMip);
for (uint8_t face = 0; face < maxFace; ++face) {
auto size = _gpuObject.evalMipDimensions(targetMip);
if (_gpuObject.isStoredMipFaceAvailable(targetMip, face)) {
auto mip = _gpuObject.accessStoredMipFace(targetMip, face);
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), mip->getFormat());
if (GL_TEXTURE_2D == _target) {
glTextureSubImage2D(_id, targetMip, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
} else if (GL_TEXTURE_CUBE_MAP == _target) {
// DSA ARB does not work on AMD, so use EXT
// unless EXT is not available on the driver
if (glTextureSubImage2DEXT) {
auto target = CUBE_FACE_LAYOUT[face];
glTextureSubImage2DEXT(_id, target, targetMip, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
} else {
glTextureSubImage3D(_id, targetMip, 0, 0, face, size.x, size.y, 1, texelFormat.format, texelFormat.type, mip->readData());
}
} else {
Q_ASSERT(false);
}
(void)CHECK_GL_ERROR();
break;
}
}
_populatedMip = targetMip;
return _populatedMip != _minMip;
}
void GL45Texture::finishTransfer() {
Parent::finishTransfer();
}
void GL45Texture::postTransfer() {
Parent::postTransfer();
}
void GL45Texture::stripToMip(uint16_t newMinMip) {
if (newMinMip < _minMip) {
qCWarning(gpugl45logging) << "Cannot decrease the min mip";
return;
}
if (_sparseInfo.sparse && newMinMip > _sparseInfo.maxSparseLevel) {
qCWarning(gpugl45logging) << "Cannot increase the min mip into the mip tail";
return;
}
// If we weren't generating mips before, we need to now that we're stripping down mip levels.
if (!_gpuObject.isAutogenerateMips()) {
qCDebug(gpugl45logging) << "Force mip generation for texture";
glGenerateTextureMipmap(_id);
}
uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
if (_sparseInfo.sparse) {
for (uint16_t mip = _minMip; mip < newMinMip; ++mip) {
auto id = _id;
auto mipDimensions = _gpuObject.evalMipDimensions(mip);
glTexturePageCommitmentEXT(id, mip, 0, 0, 0, mipDimensions.x, mipDimensions.y, maxFace, GL_FALSE);
auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions) * maxFace;
assert(deallocatedPages < _sparseInfo.allocatedPages);
_sparseInfo.allocatedPages -= deallocatedPages;
}
_minMip = newMinMip;
} else {
GLuint oldId = _id;
// Find the distance between the old min mip and the new one
uint16 mipDelta = newMinMip - _minMip;
_mipOffset += mipDelta;
const_cast<uint16&>(_maxMip) -= mipDelta;
auto newLevels = usedMipLevels();
// Create and setup the new texture (allocate)
glCreateTextures(_target, 1, &const_cast<GLuint&>(_id));
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, 0);
glTextureParameteri(_id, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
Vec3u newDimensions = _gpuObject.evalMipDimensions(_mipOffset);
glTextureStorage2D(_id, newLevels, _internalFormat, newDimensions.x, newDimensions.y);
// Copy the contents of the old texture to the new
GLuint fbo { 0 };
glCreateFramebuffers(1, &fbo);
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
for (uint16 targetMip = _minMip; targetMip <= _maxMip; ++targetMip) {
uint16 sourceMip = targetMip + mipDelta;
Vec3u mipDimensions = _gpuObject.evalMipDimensions(targetMip + _mipOffset);
for (GLenum target : getFaceTargets(_target)) {
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, oldId, sourceMip);
(void)CHECK_GL_ERROR();
glCopyTextureSubImage2D(_id, targetMip, 0, 0, 0, 0, mipDimensions.x, mipDimensions.y);
(void)CHECK_GL_ERROR();
}
}
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);
glDeleteTextures(1, &oldId);
}
// Re-sync the sampler to force access to the new mip level
syncSampler();
updateSize();
}
bool GL45Texture::derezable() const {
if (_external) {
return false;
}
auto maxMinMip = _sparseInfo.sparse ? _sparseInfo.maxSparseLevel : _maxMip;
return _transferrable && (_targetMinMip < maxMinMip);
}
size_t GL45Texture::getMipByteCount(uint16_t mip) const {
if (!_sparseInfo.sparse) {
return Parent::getMipByteCount(mip);
}
auto dimensions = _gpuObject.evalMipDimensions(_targetMinMip);
return _sparseInfo.getPageCount(dimensions) * _sparseInfo.pageBytes;
}
std::pair<size_t, bool> GL45Texture::preDerez() {
assert(!_sparseInfo.sparse || _targetMinMip < _sparseInfo.maxSparseLevel);
size_t freedMemory = getMipByteCount(_targetMinMip);
bool liveMip = _populatedMip != INVALID_MIP && _populatedMip <= _targetMinMip;
++_targetMinMip;
return { freedMemory, liveMip };
}
void GL45Texture::derez() {
if (_sparseInfo.sparse) {
assert(_minMip < _sparseInfo.maxSparseLevel);
}
assert(_minMip < _maxMip);
assert(_transferrable);
stripToMip(_minMip + 1);
}
size_t GL45Texture::getCurrentGpuSize() const {
if (!_sparseInfo.sparse) {
return Parent::getCurrentGpuSize();
}
return _sparseInfo.getSize();
}
size_t GL45Texture::getTargetGpuSize() const {
if (!_sparseInfo.sparse) {
return Parent::getTargetGpuSize();
}
size_t result = 0;
for (auto mip = _targetMinMip; mip <= _sparseInfo.maxSparseLevel; ++mip) {
result += (_sparseInfo.pageBytes * _sparseInfo.getPageCount(_gpuObject.evalMipDimensions(mip)));
}
return result;
}
GL45Texture::~GL45Texture() {
if (_sparseInfo.sparse) {
uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
auto maxSparseMip = std::min<uint16_t>(_maxMip, _sparseInfo.maxSparseLevel);
for (uint16_t mipLevel = _minMip; mipLevel <= maxSparseMip; ++mipLevel) {
auto mipDimensions = _gpuObject.evalMipDimensions(mipLevel);
            glTexturePageCommitmentEXT(_id, mipLevel, 0, 0, 0, mipDimensions.x, mipDimensions.y, maxFace, GL_FALSE);
auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions) * maxFace;
assert(deallocatedPages <= _sparseInfo.allocatedPages);
_sparseInfo.allocatedPages -= deallocatedPages;
}
if (0 != _sparseInfo.allocatedPages) {
qCWarning(gpugl45logging) << "Allocated pages remaining " << _id << " " << _sparseInfo.allocatedPages;
}
Backend::decrementTextureGPUSparseCount();
}
}
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture)
: GLTexture(backend, texture, allocate(texture)), _sparseInfo(*this), _targetMinMip(_minMip)
{
auto theBackend = _backend.lock();
if (_transferrable && theBackend && theBackend->isTextureManagementSparseEnabled()) {
_sparseInfo.maybeMakeSparse();
if (_sparseInfo.sparse) {
Backend::incrementTextureGPUSparseCount();
}
}
}
#endif

View file

@@ -292,15 +292,8 @@ void Batch::setUniformBuffer(uint32 slot, const BufferView& view) {
setUniformBuffer(slot, view._buffer, view._offset, view._size);
}
void Batch::setResourceTexture(uint32 slot, const TexturePointer& texture) {
if (texture && texture->getUsage().isExternal()) {
auto recycler = texture->getExternalRecycler();
Q_ASSERT(recycler);
}
ADD_COMMAND(setResourceTexture);
_params.emplace_back(_textures.cache(texture));
_params.emplace_back(slot);
}

View file

@@ -32,7 +32,7 @@ Framebuffer* Framebuffer::create(const std::string& name) {
Framebuffer* Framebuffer::create(const std::string& name, const Format& colorBufferFormat, uint16 width, uint16 height) {
auto framebuffer = Framebuffer::create(name);
auto colorTexture = TexturePointer(Texture::create2D(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
auto colorTexture = TexturePointer(Texture::createRenderBuffer(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
colorTexture->setSource("Framebuffer::colorTexture");
framebuffer->setRenderBuffer(0, colorTexture);
@@ -43,8 +43,8 @@ Framebuffer* Framebuffer::create(const std::string& name, const Format& colorBuf
Framebuffer* Framebuffer::create(const std::string& name, const Format& colorBufferFormat, const Format& depthStencilBufferFormat, uint16 width, uint16 height) {
auto framebuffer = Framebuffer::create(name);
auto colorTexture = TexturePointer(Texture::create2D(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
auto depthTexture = TexturePointer(Texture::create2D(depthStencilBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
auto colorTexture = TexturePointer(Texture::createRenderBuffer(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
auto depthTexture = TexturePointer(Texture::createRenderBuffer(depthStencilBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
framebuffer->setRenderBuffer(0, colorTexture);
framebuffer->setDepthStencilBuffer(depthTexture, depthStencilBufferFormat);
@@ -55,7 +55,7 @@ Framebuffer* Framebuffer::createShadowmap(uint16 width) {
auto framebuffer = Framebuffer::create("Shadowmap");
auto depthFormat = Element(gpu::SCALAR, gpu::FLOAT, gpu::DEPTH); // Depth32 texel format
auto depthTexture = TexturePointer(Texture::create2D(depthFormat, width, width));
auto depthTexture = TexturePointer(Texture::createRenderBuffer(depthFormat, width, width));
Sampler::Desc samplerDesc;
samplerDesc._borderColor = glm::vec4(1.0f);
samplerDesc._wrapModeU = Sampler::WRAP_BORDER;
@@ -143,6 +143,8 @@ int Framebuffer::setRenderBuffer(uint32 slot, const TexturePointer& texture, uin
return -1;
}
Q_ASSERT(!texture || TextureUsageType::RENDERBUFFER == texture->getUsageType());
// Check for the slot
if (slot >= getMaxNumRenderBuffers()) {
return -1;
@@ -222,6 +224,8 @@ bool Framebuffer::setDepthStencilBuffer(const TexturePointer& texture, const For
return false;
}
Q_ASSERT(!texture || TextureUsageType::RENDERBUFFER == texture->getUsageType());
// Check for the compatibility of size
if (texture) {
if (!validateTargetCompatibility(*texture)) {

View file

@@ -253,35 +253,42 @@ bool Texture::Storage::assignMipFaceData(uint16 level, const Element& format, Si
return allocated == size;
}
Texture* Texture::createExternal2D(const ExternalRecycler& recycler, const Sampler& sampler) {
Texture* tex = new Texture();
Texture* Texture::createExternal(const ExternalRecycler& recycler, const Sampler& sampler) {
Texture* tex = new Texture(TextureUsageType::EXTERNAL);
tex->_type = TEX_2D;
tex->_maxMip = 0;
tex->_sampler = sampler;
tex->setUsage(Usage::Builder().withExternal().withColor());
tex->setExternalRecycler(recycler);
return tex;
}
Texture* Texture::createRenderBuffer(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler) {
return create(TextureUsageType::RENDERBUFFER, TEX_2D, texelFormat, width, height, 1, 1, 1, sampler);
}
Texture* Texture::create1D(const Element& texelFormat, uint16 width, const Sampler& sampler) {
return create(TEX_1D, texelFormat, width, 1, 1, 1, 1, sampler);
return create(TextureUsageType::RESOURCE, TEX_1D, texelFormat, width, 1, 1, 1, 1, sampler);
}
Texture* Texture::create2D(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler) {
return create(TEX_2D, texelFormat, width, height, 1, 1, 1, sampler);
return create(TextureUsageType::RESOURCE, TEX_2D, texelFormat, width, height, 1, 1, 1, sampler);
}
Texture* Texture::createStrict(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler) {
return create(TextureUsageType::STRICT_RESOURCE, TEX_2D, texelFormat, width, height, 1, 1, 1, sampler);
}
Texture* Texture::create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth, const Sampler& sampler) {
return create(TEX_3D, texelFormat, width, height, depth, 1, 1, sampler);
return create(TextureUsageType::RESOURCE, TEX_3D, texelFormat, width, height, depth, 1, 1, sampler);
}
Texture* Texture::createCube(const Element& texelFormat, uint16 width, const Sampler& sampler) {
return create(TEX_CUBE, texelFormat, width, width, 1, 1, 1, sampler);
return create(TextureUsageType::RESOURCE, TEX_CUBE, texelFormat, width, width, 1, 1, 1, sampler);
}
Texture* Texture::create(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, const Sampler& sampler)
Texture* Texture::create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, const Sampler& sampler)
{
Texture* tex = new Texture();
Texture* tex = new Texture(usageType);
tex->_storage.reset(new Storage());
tex->_type = type;
tex->_storage->assignTexture(tex);
@@ -293,16 +300,14 @@ Texture* Texture::create(Type type, const Element& texelFormat, uint16 width, ui
return tex;
}
Texture::Texture():
Resource()
{
Texture::Texture(TextureUsageType usageType) :
Resource(), _usageType(usageType) {
_textureCPUCount++;
}
Texture::~Texture()
{
Texture::~Texture() {
_textureCPUCount--;
if (getUsage().isExternal()) {
if (_usageType == TextureUsageType::EXTERNAL) {
Texture::ExternalUpdates externalUpdates;
{
Lock lock(_externalMutex);

View file

@@ -139,6 +139,13 @@ protected:
Desc _desc;
};
enum class TextureUsageType {
RENDERBUFFER, // Used as attachments to a framebuffer
RESOURCE, // Resource textures, like materials... subject to memory manipulation
STRICT_RESOURCE, // Resource textures not subject to manipulation, like the normal fitting texture
EXTERNAL,
};
class Texture : public Resource {
static std::atomic<uint32_t> _textureCPUCount;
static std::atomic<Size> _textureCPUMemoryUsage;
@@ -173,9 +180,9 @@ public:
NORMAL, // Texture is a normal map
ALPHA, // Texture has an alpha channel
ALPHA_MASK, // Texture alpha channel is a Mask 0/1
EXTERNAL,
NUM_FLAGS,
};
typedef std::bitset<NUM_FLAGS> Flags;
// The key is the Flags
@@ -199,7 +206,6 @@ public:
Builder& withNormal() { _flags.set(NORMAL); return (*this); }
Builder& withAlpha() { _flags.set(ALPHA); return (*this); }
Builder& withAlphaMask() { _flags.set(ALPHA_MASK); return (*this); }
Builder& withExternal() { _flags.set(EXTERNAL); return (*this); }
};
Usage(const Builder& builder) : Usage(builder._flags) {}
@@ -208,8 +214,6 @@ public:
bool isAlpha() const { return _flags[ALPHA]; }
bool isAlphaMask() const { return _flags[ALPHA_MASK]; }
bool isExternal() const { return _flags[EXTERNAL]; }
bool operator==(const Usage& usage) { return (_flags == usage._flags); }
bool operator!=(const Usage& usage) { return (_flags != usage._flags); }
@@ -298,9 +302,11 @@ public:
static Texture* create2D(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler = Sampler());
static Texture* create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth, const Sampler& sampler = Sampler());
static Texture* createCube(const Element& texelFormat, uint16 width, const Sampler& sampler = Sampler());
static Texture* createExternal2D(const ExternalRecycler& recycler, const Sampler& sampler = Sampler());
static Texture* createRenderBuffer(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler = Sampler());
static Texture* createStrict(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler = Sampler());
static Texture* createExternal(const ExternalRecycler& recycler, const Sampler& sampler = Sampler());
Texture();
Texture(TextureUsageType usageType);
Texture(const Texture& buf); // deep copy of the sysmem texture
Texture& operator=(const Texture& buf); // deep copy of the sysmem texture
~Texture();
@@ -325,6 +331,7 @@ public:
// Size and format
Type getType() const { return _type; }
TextureUsageType getUsageType() const { return _usageType; }
bool isColorRenderTarget() const;
bool isDepthStencilRenderTarget() const;
@@ -476,6 +483,8 @@ public:
ExternalUpdates getUpdates() const;
protected:
const TextureUsageType _usageType;
// Should only be accessed internally or by the backend sync function
mutable Mutex _externalMutex;
mutable std::list<ExternalIdAndFence> _externalUpdates;
@@ -513,7 +522,7 @@ protected:
bool _isIrradianceValid = false;
bool _defined = false;
static Texture* create(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, const Sampler& sampler);
static Texture* create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, const Sampler& sampler);
Size resize(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices);
};
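A short usage sketch of the reworked factory surface (dimensions, formats, and the recycler body are illustrative): fixing the usage type at construction is what lets the backend choose a fixed, variable, or external allocation strategy in syncGPUObject.

    // Framebuffer attachment: fixed allocation, never memory-managed.
    auto color = gpu::TexturePointer(gpu::Texture::createRenderBuffer(
        gpu::Element::COLOR_RGBA_32, 512, 512, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT)));

    // Strict resource: fully allocated up front, exempt from demotion.
    auto lut = gpu::TexturePointer(gpu::Texture::createStrict(
        gpu::Element::COLOR_RGBA_32, 256, 256, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR)));

    // Ordinary resource: subject to promote/demote under memory pressure.
    auto albedo = gpu::TexturePointer(gpu::Texture::create2D(
        gpu::Element::COLOR_RGBA_32, 1024, 1024, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));

    // External (e.g. offscreen UI): lifetime stays outside the engine; the recycler
    // (assumed signature: GL name plus fence pointer) hands the object back for disposal.
    gpu::Texture::ExternalRecycler recycler = [](uint32_t glName, void* fence) {
        // hypothetical cleanup: wait on the fence, then glDeleteTextures(1, &glName);
    };
    auto external = gpu::TexturePointer(gpu::Texture::createExternal(recycler));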

View file

@@ -120,7 +120,7 @@ const unsigned char OPAQUE_BLACK[] = { 0x00, 0x00, 0x00, 0xFF };
const gpu::TexturePointer& TextureCache::getWhiteTexture() {
if (!_whiteTexture) {
_whiteTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
_whiteTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
_whiteTexture->setSource("TextureCache::_whiteTexture");
_whiteTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_WHITE), OPAQUE_WHITE);
}
@@ -129,7 +129,7 @@ const gpu::TexturePointer& TextureCache::getWhiteTexture() {
const gpu::TexturePointer& TextureCache::getGrayTexture() {
if (!_grayTexture) {
_grayTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
_grayTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
_grayTexture->setSource("TextureCache::_grayTexture");
_grayTexture->assignStoredMip(0, _grayTexture->getTexelFormat(), sizeof(OPAQUE_GRAY), OPAQUE_GRAY);
}
@@ -138,7 +138,7 @@ const gpu::TexturePointer& TextureCache::getGrayTexture() {
const gpu::TexturePointer& TextureCache::getBlueTexture() {
if (!_blueTexture) {
_blueTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
_blueTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
_blueTexture->setSource("TextureCache::_blueTexture");
_blueTexture->assignStoredMip(0, _blueTexture->getTexelFormat(), sizeof(OPAQUE_BLUE), OPAQUE_BLUE);
}
@@ -147,7 +147,7 @@ const gpu::TexturePointer& TextureCache::getBlueTexture() {
const gpu::TexturePointer& TextureCache::getBlackTexture() {
if (!_blackTexture) {
_blackTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
_blackTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
_blackTexture->setSource("TextureCache::_blackTexture");
_blackTexture->assignStoredMip(0, _blackTexture->getTexelFormat(), sizeof(OPAQUE_BLACK), OPAQUE_BLACK);
}
@@ -157,7 +157,7 @@ const gpu::TexturePointer& TextureCache::getBlackTexture() {
const gpu::TexturePointer& TextureCache::getNormalFittingTexture() {
if (!_normalFittingTexture) {
_normalFittingTexture = getImageTexture(PathUtils::resourcesPath() + "images/normalFittingScale.dds");
_normalFittingTexture = getImageTexture(PathUtils::resourcesPath() + "images/normalFittingScale.dds", NetworkTexture::STRICT_TEXTURE);
}
return _normalFittingTexture;
}
@@ -227,11 +227,16 @@ NetworkTexture::TextureLoaderFunc getTextureLoaderForType(NetworkTexture::Type t
return model::TextureUsage::createMetallicTextureFromImage;
break;
}
case Type::STRICT_TEXTURE: {
return model::TextureUsage::createStrict2DTextureFromImage;
break;
}
case Type::CUSTOM_TEXTURE: {
Q_ASSERT(false);
return NetworkTexture::TextureLoaderFunc();
break;
}
case Type::DEFAULT_TEXTURE:
default: {
return model::TextureUsage::create2DTextureFromImage;

View file

@@ -43,6 +43,7 @@ class NetworkTexture : public Resource, public Texture {
public:
enum Type {
DEFAULT_TEXTURE,
STRICT_TEXTURE,
ALBEDO_TEXTURE,
NORMAL_TEXTURE,
BUMP_TEXTURE,

View file

@@ -235,7 +235,7 @@ void generateFaceMips(gpu::Texture* texture, QImage& image, gpu::Element formatM
#endif
}
gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips) {
gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool isStrict) {
PROFILE_RANGE(resource_parse, "process2DTextureColorFromImage");
bool validAlpha = false;
bool alphaAsMask = true;
@@ -248,7 +248,11 @@ gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImag
gpu::Element formatMip;
defineColorTexelFormats(formatGPU, formatMip, image, isLinear, doCompress);
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
if (isStrict) {
theTexture = (gpu::Texture::createStrict(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
} else {
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
}
theTexture->setSource(srcImageName);
auto usage = gpu::Texture::Usage::Builder().withColor();
if (validAlpha) {
@@ -269,11 +273,14 @@ gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImag
return theTexture;
}
gpu::Texture* TextureUsage::createStrict2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true, true);
}
gpu::Texture* TextureUsage::create2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true);
}
gpu::Texture* TextureUsage::createAlbedoTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}

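The strict entry point is a thin wrapper: process2DTextureColorFromImage now carries a trailing isStrict flag that defaults to false, so the existing wrappers compile unchanged. The bool tail reads (isLinear, doCompress, generateMips, isStrict), as in:

// Strict path (UI atlases, built-ins): uncompressed, mipped, strict.
return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true, true);
// Albedo path: compressed, mipped, non-strict (isStrict defaulted).
return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);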
View file

@@ -32,6 +32,7 @@ public:
int _environmentUsage = 0;
static gpu::Texture* create2DTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createStrict2DTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createAlbedoTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createEmissiveTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createNormalTextureFromNormalImage(const QImage& image, const std::string& srcImageName);
@@ -47,7 +48,7 @@ public:
static const QImage process2DImageColor(const QImage& srcImage, bool& validAlpha, bool& alphaAsMask);
static void defineColorTexelFormats(gpu::Element& formatGPU, gpu::Element& formatMip,
const QImage& srcImage, bool isLinear, bool doCompress);
static gpu::Texture* process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips);
static gpu::Texture* process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool isStrict = false);
static gpu::Texture* processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool generateIrradiance);
};

View file

@@ -52,7 +52,7 @@ const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
_antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("antialiasing"));
auto format = gpu::Element::COLOR_SRGBA_32; // DependencyManager::get<FramebufferCache>()->getLightingTexture()->getTexelFormat();
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
_antialiasingTexture = gpu::TexturePointer(gpu::Texture::create2D(format, width, height, defaultSampler));
_antialiasingTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(format, width, height, defaultSampler));
_antialiasingBuffer->setRenderBuffer(0, _antialiasingTexture);
}

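From here on, every texture that backs a framebuffer attachment switches from create2D to createRenderBuffer with an otherwise identical argument list. A minimal sketch of the pattern, assuming only the factory name changes:

auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
auto colorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_SRGBA_32, width, height, defaultSampler));
_antialiasingBuffer->setRenderBuffer(0, colorTexture); // attach as color target 0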
View file

@@ -53,9 +53,9 @@ void DeferredFramebuffer::allocate() {
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
_deferredColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
_deferredNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(linearFormat, width, height, defaultSampler));
_deferredSpecularTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
_deferredColorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(colorFormat, width, height, defaultSampler));
_deferredNormalTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(linearFormat, width, height, defaultSampler));
_deferredSpecularTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(colorFormat, width, height, defaultSampler));
_deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture);
_deferredFramebuffer->setRenderBuffer(1, _deferredNormalTexture);
@@ -65,7 +65,7 @@ void DeferredFramebuffer::allocate() {
auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
if (!_primaryDepthTexture) {
_primaryDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, width, height, defaultSampler));
_primaryDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(depthFormat, width, height, defaultSampler));
}
_deferredFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
@@ -75,7 +75,7 @@ void DeferredFramebuffer::allocate() {
auto smoothSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR);
_lightingTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10), width, height, defaultSampler));
_lightingTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10), width, height, defaultSampler));
_lightingFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("lighting"));
_lightingFramebuffer->setRenderBuffer(0, _lightingTexture);
_lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);

View file

@@ -496,14 +496,14 @@ void PreparePrimaryFramebuffer::run(const SceneContextPointer& sceneContext, con
auto colorFormat = gpu::Element::COLOR_SRGBA_32;
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
auto primaryColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, frameSize.x, frameSize.y, defaultSampler));
auto primaryColorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(colorFormat, frameSize.x, frameSize.y, defaultSampler));
_primaryFramebuffer->setRenderBuffer(0, primaryColorTexture);
auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
auto primaryDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, frameSize.x, frameSize.y, defaultSampler));
auto primaryDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(depthFormat, frameSize.x, frameSize.y, defaultSampler));
_primaryFramebuffer->setDepthStencilBuffer(primaryDepthTexture, depthFormat);
}

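Depth attachments follow the same rule, with the Depth24_Stencil8 format spelled out as a gpu::Element and passed both to the factory and to setDepthStencilBuffer:

auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8
auto depthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(depthFormat, frameSize.x, frameSize.y, defaultSampler));
_primaryFramebuffer->setDepthStencilBuffer(depthTexture, depthFormat);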
View file

@@ -190,7 +190,7 @@ RenderDeferredTask::RenderDeferredTask(RenderFetchCullSortTask::Output items) {
{
// Grab a texture map representing the different status icons and assign that to the drawStatusJob
auto iconMapPath = PathUtils::resourcesPath() + "icons/statusIconAtlas.svg";
auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath);
auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath, NetworkTexture::STRICT_TEXTURE);
addJob<DrawStatus>("DrawStatus", opaques, DrawStatus(statusIconMap));
}
}

View file

@@ -414,7 +414,7 @@ gpu::TexturePointer SubsurfaceScatteringResource::generateScatteringProfile(Rend
const int PROFILE_RESOLUTION = 512;
// const auto pixelFormat = gpu::Element::COLOR_SRGBA_32;
const auto pixelFormat = gpu::Element::COLOR_R11G11B10;
auto profileMap = gpu::TexturePointer(gpu::Texture::create2D(pixelFormat, PROFILE_RESOLUTION, 1, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
auto profileMap = gpu::TexturePointer(gpu::Texture::createRenderBuffer(pixelFormat, PROFILE_RESOLUTION, 1, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
profileMap->setSource("Generated Scattering Profile");
diffuseProfileGPU(profileMap, args);
return profileMap;
@@ -425,7 +425,7 @@ gpu::TexturePointer SubsurfaceScatteringResource::generatePreIntegratedScatterin
const int TABLE_RESOLUTION = 512;
// const auto pixelFormat = gpu::Element::COLOR_SRGBA_32;
const auto pixelFormat = gpu::Element::COLOR_R11G11B10;
auto scatteringLUT = gpu::TexturePointer(gpu::Texture::create2D(pixelFormat, TABLE_RESOLUTION, TABLE_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
auto scatteringLUT = gpu::TexturePointer(gpu::Texture::createRenderBuffer(pixelFormat, TABLE_RESOLUTION, TABLE_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
//diffuseScatter(scatteringLUT);
scatteringLUT->setSource("Generated pre-integrated scattering");
diffuseScatterGPU(profile, scatteringLUT, args);
@@ -434,7 +434,7 @@ gpu::TexturePointer SubsurfaceScatteringResource::generatePreIntegratedScatterin
gpu::TexturePointer SubsurfaceScatteringResource::generateScatteringSpecularBeckmann(RenderArgs* args) {
const int SPECULAR_RESOLUTION = 256;
auto beckmannMap = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32 /*gpu::Element(gpu::SCALAR, gpu::HALF, gpu::RGB)*/, SPECULAR_RESOLUTION, SPECULAR_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
auto beckmannMap = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32 /*gpu::Element(gpu::SCALAR, gpu::HALF, gpu::RGB)*/, SPECULAR_RESOLUTION, SPECULAR_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
beckmannMap->setSource("Generated beckmannMap");
computeSpecularBeckmannGPU(beckmannMap, args);
return beckmannMap;

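The scattering profile, pre-integrated LUT, and Beckmann map are not screen buffers, but each is the render target of its own generation pass (diffuseProfileGPU, diffuseScatterGPU, computeSpecularBeckmannGPU), which is presumably why they take the createRenderBuffer path as well:

// GPU-generated lookup table: allocated as a render buffer, then filled by a pass.
auto scatteringLUT = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_R11G11B10,
    TABLE_RESOLUTION, TABLE_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
scatteringLUT->setSource("Generated pre-integrated scattering");
diffuseScatterGPU(profile, scatteringLUT, args); // writes the table on the GPU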
View file

@@ -72,18 +72,18 @@ void LinearDepthFramebuffer::allocate() {
auto height = _frameSize.y;
// For Linear Depth:
_linearDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), width, height,
_linearDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), width, height,
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_linearDepthFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("linearDepth"));
_linearDepthFramebuffer->setRenderBuffer(0, _linearDepthTexture);
_linearDepthFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, _primaryDepthTexture->getTexelFormat());
// For Downsampling:
_halfLinearDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
_halfLinearDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_halfLinearDepthTexture->autoGenerateMips(5);
_halfNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
_halfNormalTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_downsampleFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("halfLinearDepth"));
@@ -304,15 +304,15 @@ void SurfaceGeometryFramebuffer::allocate() {
auto width = _frameSize.x;
auto height = _frameSize.y;
_curvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_curvatureTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_curvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::curvature"));
_curvatureFramebuffer->setRenderBuffer(0, _curvatureTexture);
_lowCurvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_lowCurvatureTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_lowCurvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::lowCurvature"));
_lowCurvatureFramebuffer->setRenderBuffer(0, _lowCurvatureTexture);
_blurringTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_blurringTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_blurringFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::blurring"));
_blurringFramebuffer->setRenderBuffer(0, _blurringTexture);
}

View file

@@ -255,7 +255,7 @@ void OculusLegacyDisplayPlugin::hmdPresent() {
memset(eyePoses, 0, sizeof(ovrPosef) * 2);
eyePoses[0].Orientation = eyePoses[1].Orientation = ovrRotation;
GLint texture = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0), false);
GLint texture = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0));
auto sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
if (_hmdWindow->makeCurrent()) {

View file

@@ -494,9 +494,9 @@ void OpenVrDisplayPlugin::customizeContext() {
_compositeInfos[0].texture = _compositeFramebuffer->getRenderBuffer(0);
for (size_t i = 0; i < COMPOSITING_BUFFER_SIZE; ++i) {
if (0 != i) {
_compositeInfos[i].texture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, _renderTargetSize.x, _renderTargetSize.y, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT)));
_compositeInfos[i].texture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, _renderTargetSize.x, _renderTargetSize.y, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT)));
}
_compositeInfos[i].textureID = getGLBackend()->getTextureID(_compositeInfos[i].texture, false);
_compositeInfos[i].textureID = getGLBackend()->getTextureID(_compositeInfos[i].texture);
}
_submitThread->_canvas = _submitCanvas;
_submitThread->start(QThread::HighPriority);
@@ -624,7 +624,7 @@ void OpenVrDisplayPlugin::compositeLayers() {
glFlush();
if (!newComposite.textureID) {
newComposite.textureID = getGLBackend()->getTextureID(newComposite.texture, false);
newComposite.textureID = getGLBackend()->getTextureID(newComposite.texture);
}
withPresentThreadLock([&] {
_submitThread->update(newComposite);
@@ -638,7 +638,7 @@ void OpenVrDisplayPlugin::hmdPresent() {
if (_threadedSubmit) {
_submitThread->waitForPresent();
} else {
GLuint glTexId = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0), false);
GLuint glTexId = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0));
vr::Texture_t vrTexture { (void*)glTexId, vr::API_OpenGL, vr::ColorSpace_Auto };
vr::VRCompositor()->Submit(vr::Eye_Left, &vrTexture, &OPENVR_TEXTURE_BOUNDS_LEFT);
vr::VRCompositor()->Submit(vr::Eye_Right, &vrTexture, &OPENVR_TEXTURE_BOUNDS_RIGHT);

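In both VR plugins the trailing bool to getGLBackend()->getTextureID() disappears. Every call site here passed false for it, so the one-argument form looks behavior-preserving at these sites; the sketch assumes the new signature takes only the texture pointer:

// Assumed new shape: GLuint getTextureID(const gpu::TexturePointer&);
GLuint glTexId = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0));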
View file

@@ -642,7 +642,6 @@ protected:
gpu::Texture::setAllowedGPUMemoryUsage(MB_TO_BYTES(64));
return;
default:
break;
}

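This hunk only trims a line from the tail of the switch that clamps GPU texture memory on constrained hardware; the 64 MB budget itself is unchanged. setAllowedGPUMemoryUsage is presumably the ceiling the new texture management strategy enforces:

// Cap GPU texture memory at 64 MB for this tier (constant as shown in the hunk).
gpu::Texture::setAllowedGPUMemoryUsage(MB_TO_BYTES(64));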
View file

@@ -48,6 +48,7 @@
#include <gpu/gl/GLTexture.h>
#include <gpu/StandardShaderLib.h>
#include <GenericThread.h>
#include <AddressManager.h>
#include <NodeList.h>
#include <TextureCache.h>