Merge branch 'master' of https://github.com/highfidelity/hifi into notification-circumstances
This commit is contained in: commit e3546c38fa
16 changed files with 245 additions and 89 deletions
@@ -137,9 +137,9 @@ public:
    enum Event {
        Present = DisplayPlugin::Present,
        Paint = Present + 1,
        Idle = Paint + 1,
        Lambda = Paint + 1
        Paint,
        Idle,
        Lambda
    };

    // FIXME? Empty methods, do we still need them?

@@ -18,6 +18,8 @@ Q_DECLARE_LOGGING_CATEGORY(gpugllogging)
Q_DECLARE_LOGGING_CATEGORY(trace_render_gpu_gl)
Q_DECLARE_LOGGING_CATEGORY(trace_render_gpu_gl_detail)

#define BUFFER_OFFSET(bytes) ((GLubyte*) nullptr + (bytes))

namespace gpu { namespace gl {

// Create a fence and inject a GPU wait on the fence

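The BUFFER_OFFSET macro added above exists because, once a pixel buffer object is bound, the final "data" argument of GL transfer calls is interpreted as a byte offset into that buffer rather than a client-memory pointer, so an integer offset has to be passed through a pointer-typed parameter. A minimal sketch of the idiom (not part of this commit; it assumes a GL function loader is already initialized and that the texture and PBO already exist):

static void uploadFromPbo(GLuint pbo, GLsizei width, GLsizei height, GLintptr byteOffset) {
    // With a buffer bound to GL_PIXEL_UNPACK_BUFFER, the last argument below is a
    // byte offset into that buffer, which is exactly what BUFFER_OFFSET produces.
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                    GL_RGBA, GL_UNSIGNED_BYTE, BUFFER_OFFSET(byteOffset));
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}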
@@ -11,6 +11,20 @@
using namespace gpu;
using namespace gpu::gl;

bool GLTexelFormat::isCompressed() const {
    switch (internalFormat) {
        case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
        case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
        case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
        case GL_COMPRESSED_RED_RGTC1:
        case GL_COMPRESSED_RG_RGTC2:
            return true;
            break;
        default:
            return false;
            break;
    }
}

GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
    GLenum result = GL_RGBA8;

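All of the internal formats matched by isCompressed() are 4x4 block-compressed families (S3TC/DXT and RGTC): DXT1 and RGTC1 store 8 bytes per block, DXT5 and RGTC2 store 16. The GL41 copy path further down queries GL_TEXTURE_COMPRESSED_IMAGE_SIZE per mip, but the same face size can be derived from the block math; a small sketch, not part of the commit:

static size_t compressedFaceSize(GLenum internalFormat, size_t width, size_t height) {
    // 8 bytes per 4x4 block for DXT1/RGTC1, 16 bytes per block for DXT5/RGTC2.
    const size_t blockBytes =
        (internalFormat == GL_COMPRESSED_SRGB_S3TC_DXT1_EXT ||
         internalFormat == GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT ||
         internalFormat == GL_COMPRESSED_RED_RGTC1) ? 8 : 16;
    const size_t blocksWide = (width + 3) / 4;   // dimensions round up to whole blocks
    const size_t blocksHigh = (height + 3) / 4;
    return blocksWide * blocksHigh * blockBytes;
}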
@@ -18,6 +18,11 @@ public:
    GLenum format;
    GLenum type;

    GLTexelFormat(GLenum glinternalFormat, GLenum glformat, GLenum gltype) : internalFormat(glinternalFormat), format(glformat), type(gltype) {}
    GLTexelFormat(GLenum glinternalFormat) : internalFormat(glinternalFormat) {}

    bool isCompressed() const;

    static GLTexelFormat evalGLTexelFormat(const Element& dstFormat) {
        return evalGLTexelFormat(dstFormat, dstFormat);
    }

@@ -102,7 +102,8 @@ const std::vector<GLenum>& GLTexture::getFaceTargets(GLenum target) {
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id) :
    GLObject(backend, texture, id),
    _source(texture.source()),
    _target(getGLTextureType(texture))
    _target(getGLTextureType(texture)),
    _texelFormat(GLTexelFormat::evalGLTexelFormatInternal(texture.getTexelFormat()))
{
    Backend::setGPUObject(texture, this);
}

@@ -150,6 +151,7 @@ GLExternalTexture::~GLExternalTexture() {
// Variable sized textures
using MemoryPressureState = GLVariableAllocationSupport::MemoryPressureState;
using WorkQueue = GLVariableAllocationSupport::WorkQueue;
using TransferJobPointer = GLVariableAllocationSupport::TransferJobPointer;

std::list<TextureWeakPointer> GLVariableAllocationSupport::_memoryManagedTextures;
MemoryPressureState GLVariableAllocationSupport::_memoryPressureState { MemoryPressureState::Idle };

@@ -159,6 +161,7 @@ WorkQueue GLVariableAllocationSupport::_transferQueue;
WorkQueue GLVariableAllocationSupport::_promoteQueue;
WorkQueue GLVariableAllocationSupport::_demoteQueue;
TexturePointer GLVariableAllocationSupport::_currentTransferTexture;
TransferJobPointer GLVariableAllocationSupport::_currentTransferJob;
size_t GLVariableAllocationSupport::_frameTexturesCreated { 0 };

#define OVERSUBSCRIBED_PRESSURE_VALUE 0.95f

@@ -553,9 +556,15 @@ void GLVariableAllocationSupport::executeNextTransfer(const TexturePointer& curr
    if (!_pendingTransfers.empty()) {
        // Keeping hold of a strong pointer during the transfer ensures that the transfer thread cannot try to access a destroyed texture
        _currentTransferTexture = currentTexture;
        if (_pendingTransfers.front()->tryTransfer()) {
        // Keeping hold of a strong pointer to the transfer job ensures that if the pending transfer queue is rebuilt, the transfer job
        // doesn't leave scope, causing a crash in the buffering thread
        _currentTransferJob = _pendingTransfers.front();
        // transfer jobs use asynchronous buffering of the texture data because it may involve disk IO, so we execute a try here to determine if the buffering
        // is complete
        if (_currentTransferJob->tryTransfer()) {
            _pendingTransfers.pop();
            _currentTransferTexture.reset();
            _currentTransferJob.reset();
        }
    }
}

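The new _currentTransferJob member applies the same keep-alive trick already used for _currentTransferTexture: because buffering runs asynchronously (it may touch disk), the front job is held through a strong pointer so that rebuilding the pending queue cannot destroy a job another thread is still filling. A stripped-down sketch of the pattern with a hypothetical Job type (not the engine's TransferJob):

#include <memory>
#include <queue>

struct Job {
    bool tryTransfer() { return true; }   // placeholder for the real asynchronous-completion check
};

std::queue<std::shared_ptr<Job>> pending;
std::shared_ptr<Job> current;             // strong reference keeps the in-flight job alive

void pump() {
    if (pending.empty()) {
        return;
    }
    current = pending.front();            // survives even if 'pending' is rebuilt elsewhere
    if (current->tryTransfer()) {         // only release once the buffering has completed
        pending.pop();
        current.reset();
    }
}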
@@ -86,7 +86,8 @@ public:
        void transfer();
    };

    using TransferQueue = std::queue<std::unique_ptr<TransferJob>>;
    using TransferJobPointer = std::shared_ptr<TransferJob>;
    using TransferQueue = std::queue<TransferJobPointer>;
    static MemoryPressureState _memoryPressureState;

public:

@@ -100,6 +101,7 @@ protected:
    static WorkQueue _promoteQueue;
    static WorkQueue _demoteQueue;
    static TexturePointer _currentTransferTexture;
    static TransferJobPointer _currentTransferJob;
    static const uvec3 INITIAL_MIP_TRANSFER_DIMENSIONS;
    static const uvec3 MAX_TRANSFER_DIMENSIONS;
    static const size_t MAX_TRANSFER_SIZE;

@@ -153,6 +155,7 @@ public:
    const GLuint& _texture { _id };
    const std::string _source;
    const GLenum _target;
    GLTexelFormat _texelFormat;

    static const std::vector<GLenum>& getFaceTargets(GLenum textureType);
    static uint8_t getFaceCount(GLenum textureType);

@@ -18,6 +18,8 @@ Q_LOGGING_CATEGORY(gpugl41logging, "hifi.gpu.gl41")
using namespace gpu;
using namespace gpu::gl41;

const std::string GL41Backend::GL41_VERSION { "GL41" };

void GL41Backend::do_draw(const Batch& batch, size_t paramOffset) {
    Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
    GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];

@@ -37,12 +37,16 @@ class GL41Backend : public GLBackend {
public:
    static const GLint TRANSFORM_OBJECT_SLOT { 31 };
    static const GLint RESOURCE_TRANSFER_TEX_UNIT { 32 };
    static const GLint RESOURCE_BUFFER_TEXBUF_TEX_UNIT { 33 };
    static const GLint RESOURCE_BUFFER_SLOT0_TEX_UNIT { 34 };
    static const GLint RESOURCE_TRANSFER_EXTRA_TEX_UNIT { 33 };
    static const GLint RESOURCE_BUFFER_TEXBUF_TEX_UNIT { 34 };
    static const GLint RESOURCE_BUFFER_SLOT0_TEX_UNIT { 35 };

    explicit GL41Backend(bool syncCache) : Parent(syncCache) {}
    GL41Backend() : Parent() {}

    static const std::string GL41_VERSION;
    const std::string& getVersion() const override { return GL41_VERSION; }

    class GL41Texture : public GLTexture {
        using Parent = GLTexture;
        friend class GL41Backend;

@@ -240,7 +240,9 @@ GL41StrictResourceTexture::GL41StrictResourceTexture(const std::weak_ptr<GLBacke
using GL41VariableAllocationTexture = GL41Backend::GL41VariableAllocationTexture;

GL41VariableAllocationTexture::GL41VariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL41Texture(backend, texture) {
GL41VariableAllocationTexture::GL41VariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) :
    GL41Texture(backend, texture)
{
    auto mipLevels = texture.getNumMips();
    _allocatedMip = mipLevels;
    _maxAllocatedMip = _populatedMip = mipLevels;

@@ -306,6 +308,129 @@ void GL41VariableAllocationTexture::syncSampler() const {
    });
}


void copyUncompressedTexGPUMem(const gpu::Texture& texture, GLenum texTarget, GLuint srcId, GLuint destId, uint16_t numMips, uint16_t srcMipOffset, uint16_t destMipOffset, uint16_t populatedMips) {
    // DestID must be bound to the GL41Backend::RESOURCE_TRANSFER_TEX_UNIT

    GLuint fbo { 0 };
    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);

    uint16_t mips = numMips;
    // copy pre-existing mips
    for (uint16_t mip = populatedMips; mip < mips; ++mip) {
        auto mipDimensions = texture.evalMipDimensions(mip);
        uint16_t targetMip = mip - destMipOffset;
        uint16_t sourceMip = mip - srcMipOffset;
        for (GLenum target : GLTexture::getFaceTargets(texTarget)) {
            glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, srcId, sourceMip);
            (void)CHECK_GL_ERROR();
            glCopyTexSubImage2D(target, targetMip, 0, 0, 0, 0, mipDimensions.x, mipDimensions.y);
            (void)CHECK_GL_ERROR();
        }
    }

    // destroy the transfer framebuffer
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
    glDeleteFramebuffers(1, &fbo);
}

void copyCompressedTexGPUMem(const gpu::Texture& texture, GLenum texTarget, GLuint srcId, GLuint destId, uint16_t numMips, uint16_t srcMipOffset, uint16_t destMipOffset, uint16_t populatedMips) {
    // DestID must be bound to the GL41Backend::RESOURCE_TRANSFER_TEX_UNIT

    struct MipDesc {
        GLint _faceSize;
        GLint _size;
        GLint _offset;
        GLint _width;
        GLint _height;
    };
    std::vector<MipDesc> sourceMips(numMips);

    std::vector<GLubyte> bytes;

    glActiveTexture(GL_TEXTURE0 + GL41Backend::RESOURCE_TRANSFER_EXTRA_TEX_UNIT);
    glBindTexture(texTarget, srcId);
    const auto& faceTargets = GLTexture::getFaceTargets(texTarget);
    GLint internalFormat { 0 };

    // Collect the mip description from the source texture
    GLint bufferOffset { 0 };
    for (uint16_t mip = populatedMips; mip < numMips; ++mip) {
        auto& sourceMip = sourceMips[mip];

        uint16_t sourceLevel = mip - srcMipOffset;

        // Grab internal format once
        if (internalFormat == 0) {
            glGetTexLevelParameteriv(faceTargets[0], sourceLevel, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);
        }

        // Collect the size of the first face, and then compute the total size offset needed for this mip level
        auto mipDimensions = texture.evalMipDimensions(mip);
        sourceMip._width = mipDimensions.x;
        sourceMip._height = mipDimensions.y;
#ifdef DEBUG_COPY
        glGetTexLevelParameteriv(faceTargets.front(), sourceLevel, GL_TEXTURE_WIDTH, &sourceMip._width);
        glGetTexLevelParameteriv(faceTargets.front(), sourceLevel, GL_TEXTURE_HEIGHT, &sourceMip._height);
#endif
        glGetTexLevelParameteriv(faceTargets.front(), sourceLevel, GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &sourceMip._faceSize);
        sourceMip._size = (GLint)faceTargets.size() * sourceMip._faceSize;
        sourceMip._offset = bufferOffset;
        bufferOffset += sourceMip._size;
        gpu::gl::checkGLError();
    }
    (void)CHECK_GL_ERROR();

    // Allocate the PBO to accomodate for all the mips to copy
    GLuint pbo { 0 };
    glGenBuffers(1, &pbo);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
    glBufferData(GL_PIXEL_PACK_BUFFER, bufferOffset, nullptr, GL_STATIC_COPY);
    (void)CHECK_GL_ERROR();

    // Transfer from source texture to pbo
    for (uint16_t mip = populatedMips; mip < numMips; ++mip) {
        auto& sourceMip = sourceMips[mip];

        uint16_t sourceLevel = mip - srcMipOffset;

        for (GLint f = 0; f < (GLint)faceTargets.size(); f++) {
            glGetCompressedTexImage(faceTargets[f], sourceLevel, BUFFER_OFFSET(sourceMip._offset + f * sourceMip._faceSize));
        }
        (void)CHECK_GL_ERROR();
    }

    // Now populate the new texture from the pbo
    glBindTexture(texTarget, 0);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);

    glActiveTexture(GL_TEXTURE0 + GL41Backend::RESOURCE_TRANSFER_TEX_UNIT);

    // Transfer from pbo to new texture
    for (uint16_t mip = populatedMips; mip < numMips; ++mip) {
        auto& sourceMip = sourceMips[mip];

        uint16_t destLevel = mip - destMipOffset;

        for (GLint f = 0; f < (GLint)faceTargets.size(); f++) {
#ifdef DEBUG_COPY
            GLint destWidth, destHeight, destSize;
            glGetTexLevelParameteriv(faceTargets.front(), destLevel, GL_TEXTURE_WIDTH, &destWidth);
            glGetTexLevelParameteriv(faceTargets.front(), destLevel, GL_TEXTURE_HEIGHT, &destHeight);
            glGetTexLevelParameteriv(faceTargets.front(), destLevel, GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &destSize);
#endif
            glCompressedTexSubImage2D(faceTargets[f], destLevel, 0, 0, sourceMip._width, sourceMip._height, internalFormat,
                sourceMip._faceSize, BUFFER_OFFSET(sourceMip._offset + f * sourceMip._faceSize));
            gpu::gl::checkGLError();
        }
    }

    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    glDeleteBuffers(1, &pbo);
}

void GL41VariableAllocationTexture::promote() {
    PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
    Q_ASSERT(_allocatedMip > 0);

@@ -315,36 +440,22 @@ void GL41VariableAllocationTexture::promote() {

    GLuint oldId = _id;
    auto oldSize = _size;
    uint16_t oldAllocatedMip = _allocatedMip;

    // create new texture
    const_cast<GLuint&>(_id) = allocate(_gpuObject);
    uint16_t oldAllocatedMip = _allocatedMip;

    // allocate storage for new level
    allocateStorage(targetAllocatedMip);

    // copy pre-existing mips
    uint16_t numMips = _gpuObject.getNumMips();
    withPreservedTexture([&] {
        GLuint fbo { 0 };
        glGenFramebuffers(1, &fbo);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);

        uint16_t mips = _gpuObject.getNumMips();
        // copy pre-existing mips
        for (uint16_t mip = _populatedMip; mip < mips; ++mip) {
            auto mipDimensions = _gpuObject.evalMipDimensions(mip);
            uint16_t targetMip = mip - _allocatedMip;
            uint16_t sourceMip = mip - oldAllocatedMip;
            for (GLenum target : getFaceTargets(_target)) {
                glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, oldId, sourceMip);
                (void)CHECK_GL_ERROR();
                glCopyTexSubImage2D(target, targetMip, 0, 0, 0, 0, mipDimensions.x, mipDimensions.y);
                (void)CHECK_GL_ERROR();
            }
        if (_texelFormat.isCompressed()) {
            copyCompressedTexGPUMem(_gpuObject, _target, oldId, _id, numMips, oldAllocatedMip, _allocatedMip, _populatedMip);
        } else {
            copyUncompressedTexGPUMem(_gpuObject, _target, oldId, _id, numMips, oldAllocatedMip, _allocatedMip, _populatedMip);
        }

        // destroy the transfer framebuffer
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
        glDeleteFramebuffers(1, &fbo);

        syncSampler();
    });

@@ -360,34 +471,21 @@ void GL41VariableAllocationTexture::demote() {
    Q_ASSERT(_allocatedMip < _maxAllocatedMip);
    auto oldId = _id;
    auto oldSize = _size;

    // allocate new texture
    const_cast<GLuint&>(_id) = allocate(_gpuObject);
    uint16_t oldAllocatedMip = _allocatedMip;
    allocateStorage(_allocatedMip + 1);
    _populatedMip = std::max(_populatedMip, _allocatedMip);

    // copy pre-existing mips
    uint16_t numMips = _gpuObject.getNumMips();
    withPreservedTexture([&] {
        GLuint fbo { 0 };
        glCreateFramebuffers(1, &fbo);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);

        uint16_t mips = _gpuObject.getNumMips();
        // copy pre-existing mips
        for (uint16_t mip = _populatedMip; mip < mips; ++mip) {
            auto mipDimensions = _gpuObject.evalMipDimensions(mip);
            uint16_t targetMip = mip - _allocatedMip;
            uint16_t sourceMip = mip - oldAllocatedMip;
            for (GLenum target : getFaceTargets(_target)) {
                glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, oldId, sourceMip);
                (void)CHECK_GL_ERROR();
                glCopyTexSubImage2D(target, targetMip, 0, 0, 0, 0, mipDimensions.x, mipDimensions.y);
                (void)CHECK_GL_ERROR();
            }
        if (_texelFormat.isCompressed()) {
            copyCompressedTexGPUMem(_gpuObject, _target, oldId, _id, numMips, oldAllocatedMip, _allocatedMip, _populatedMip);
        } else {
            copyUncompressedTexGPUMem(_gpuObject, _target, oldId, _id, numMips, oldAllocatedMip, _allocatedMip, _populatedMip);
        }

        // destroy the transfer framebuffer
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
        glDeleteFramebuffers(1, &fbo);

        syncSampler();
    });

@@ -460,4 +558,3 @@ GL41ResourceTexture::GL41ResourceTexture(const std::weak_ptr<GLBackend>& backend
GL41ResourceTexture::~GL41ResourceTexture() {
}


@@ -18,6 +18,8 @@ Q_LOGGING_CATEGORY(gpugl45logging, "hifi.gpu.gl45")
using namespace gpu;
using namespace gpu::gl45;

const std::string GL45Backend::GL45_VERSION { "GL45" };

void GL45Backend::recycle() const {
    Parent::recycle();
}

@@ -41,6 +41,9 @@ public:
    explicit GL45Backend(bool syncCache) : Parent(syncCache) {}
    GL45Backend() : Parent() {}

    static const std::string GL45_VERSION;
    const std::string& getVersion() const override { return GL45_VERSION; }

    class GL45Texture : public GLTexture {
        using Parent = GLTexture;
        friend class GL45Backend;

@@ -97,6 +97,24 @@ void GL45ResourceTexture::syncSampler() const {
    glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, _populatedMip - _allocatedMip);
}


void copyTexGPUMem(const gpu::Texture& texture, GLenum texTarget, GLuint srcId, GLuint destId, uint16_t numMips, uint16_t srcMipOffset, uint16_t destMipOffset, uint16_t populatedMips) {
    for (uint16_t mip = populatedMips; mip < numMips; ++mip) {
        auto mipDimensions = texture.evalMipDimensions(mip);
        uint16_t targetMip = mip - destMipOffset;
        uint16_t sourceMip = mip - srcMipOffset;
        auto faces = GLTexture::getFaceCount(texTarget);
        for (uint8_t face = 0; face < faces; ++face) {
            glCopyImageSubData(
                srcId, texTarget, sourceMip, 0, 0, face,
                destId, texTarget, targetMip, 0, 0, face,
                mipDimensions.x, mipDimensions.y, 1
            );
            (void)CHECK_GL_ERROR();
        }
    }
}

void GL45ResourceTexture::promote() {
    PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
    Q_ASSERT(_allocatedMip > 0);

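On the GL45 backend the mip copy collapses to direct GPU-to-GPU glCopyImageSubData calls, with none of the read-framebuffer or PBO plumbing the GL41 path needs. For cube maps the faces are addressed through the z coordinate, so a single call with a depth of 6 would copy every face of a level at once; the loop above simply does it face by face. A small illustrative sketch (hypothetical ids and sizes, not part of the commit):

static void copyCubeMip(GLuint srcId, GLuint dstId, GLint srcLevel, GLint dstLevel,
                        GLsizei width, GLsizei height) {
    // Cube-map faces behave like array layers here: z offset 0 with depth 6 covers all six faces.
    glCopyImageSubData(srcId, GL_TEXTURE_CUBE_MAP, srcLevel, 0, 0, 0,
                       dstId, GL_TEXTURE_CUBE_MAP, dstLevel, 0, 0, 0,
                       width, height, 6);
}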
@@ -106,27 +124,18 @@ void GL45ResourceTexture::promote() {

    GLuint oldId = _id;
    auto oldSize = _size;
    uint16_t oldAllocatedMip = _allocatedMip;

    // create new texture
    const_cast<GLuint&>(_id) = allocate(_gpuObject);
    uint16_t oldAllocatedMip = _allocatedMip;

    // allocate storage for new level
    allocateStorage(targetAllocatedMip);
    uint16_t mips = _gpuObject.getNumMips();

    // copy pre-existing mips
    for (uint16_t mip = _populatedMip; mip < mips; ++mip) {
        auto mipDimensions = _gpuObject.evalMipDimensions(mip);
        uint16_t targetMip = mip - _allocatedMip;
        uint16_t sourceMip = mip - oldAllocatedMip;
        auto faces = getFaceCount(_target);
        for (uint8_t face = 0; face < faces; ++face) {
            glCopyImageSubData(
                oldId, _target, sourceMip, 0, 0, face,
                _id, _target, targetMip, 0, 0, face,
                mipDimensions.x, mipDimensions.y, 1
            );
            (void)CHECK_GL_ERROR();
        }
    }
    uint16_t numMips = _gpuObject.getNumMips();
    copyTexGPUMem(_gpuObject, _target, oldId, _id, numMips, oldAllocatedMip, _allocatedMip, _populatedMip);

    // destroy the old texture
    glDeleteTextures(1, &oldId);
    // update the memory usage

@@ -140,25 +149,17 @@ void GL45ResourceTexture::demote() {
    Q_ASSERT(_allocatedMip < _maxAllocatedMip);
    auto oldId = _id;
    auto oldSize = _size;

    // allocate new texture
    const_cast<GLuint&>(_id) = allocate(_gpuObject);
    uint16_t oldAllocatedMip = _allocatedMip;
    allocateStorage(_allocatedMip + 1);
    _populatedMip = std::max(_populatedMip, _allocatedMip);
    uint16_t mips = _gpuObject.getNumMips();

    // copy pre-existing mips
    for (uint16_t mip = _populatedMip; mip < mips; ++mip) {
        auto mipDimensions = _gpuObject.evalMipDimensions(mip);
        uint16_t targetMip = mip - _allocatedMip;
        uint16_t sourceMip = targetMip + 1;
        auto faces = getFaceCount(_target);
        for (uint8_t face = 0; face < faces; ++face) {
            glCopyImageSubData(
                oldId, _target, sourceMip, 0, 0, face,
                _id, _target, targetMip, 0, 0, face,
                mipDimensions.x, mipDimensions.y, 1
            );
            (void)CHECK_GL_ERROR();
        }
    }
    uint16_t numMips = _gpuObject.getNumMips();
    copyTexGPUMem(_gpuObject, _target, oldId, _id, numMips, oldAllocatedMip, _allocatedMip, _populatedMip);

    // destroy the old texture
    glDeleteTextures(1, &oldId);
    // update the memory usage

@@ -50,6 +50,10 @@ Context::Context(const Context& context) {
Context::~Context() {
}

const std::string& Context::getBackendVersion() const {
    return _backend->getVersion();
}

void Context::beginFrame(const glm::mat4& renderPose) {
    assert(!_frameActive);
    _frameActive = true;

@@ -54,6 +54,9 @@ class Backend {
public:
    virtual~ Backend() {};

    virtual const std::string& getVersion() const = 0;

    void setStereoState(const StereoState& stereo) { _stereo = stereo; }

    virtual void render(const Batch& batch) = 0;

@@ -153,6 +156,8 @@ public:
    Context();
    ~Context();

    const std::string& getBackendVersion() const;

    void beginFrame(const glm::mat4& renderPose = glm::mat4());
    void appendFrameBatch(Batch& batch);
    FramePointer endFrame();

@@ -216,6 +216,7 @@ void Texture::MemoryStorage::assignMipFaceData(uint16 level, uint8 face, const s
TexturePointer Texture::createExternal(const ExternalRecycler& recycler, const Sampler& sampler) {
    TexturePointer tex = std::make_shared<Texture>(TextureUsageType::EXTERNAL);
    tex->_type = TEX_2D;
    tex->_texelFormat = Element::COLOR_RGBA_32;
    tex->_maxMipLevel = 0;
    tex->_sampler = sampler;
    tex->setExternalRecycler(recycler);

@@ -407,8 +408,12 @@ void Texture::setStoredMipFormat(const Element& format) {
    _storage->setFormat(format);
}

const Element& Texture::getStoredMipFormat() const {
    return _storage->getFormat();
Element Texture::getStoredMipFormat() const {
    if (_storage) {
        return _storage->getFormat();
    } else {
        return Element();
    }
}

void Texture::assignStoredMip(uint16 level, Size size, const Byte* bytes) {

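Returning Element by value is what makes the null-storage guard above safe: the old const-reference signature has nothing valid to bind a fallback to, so it would either dereference a null _storage or hand back a dangling reference to a temporary. A minimal sketch of the hazard with hypothetical stand-in types (not the actual gpu::Element/Texture classes):

#include <memory>

struct Fmt { int value { 0 }; };

struct Store { Fmt _format; };

struct Holder {
    std::unique_ptr<Store> _storage;

    // Unsafe: when _storage is null the conditional yields a temporary Fmt,
    // and returning a reference to that temporary is undefined behaviour.
    // const Fmt& format() const { return _storage ? _storage->_format : Fmt(); }

    // Safe: the caller always receives its own copy.
    Fmt format() const { return _storage ? _storage->_format : Fmt(); }
};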
@@ -285,7 +285,7 @@ public:
    Stamp bumpStamp() { return ++_stamp; }

    void setFormat(const Element& format) { _format = format; }
    const Element& getFormat() const { return _format; }
    Element getFormat() const { return _format; }

private:
    Stamp _stamp { 0 };

@@ -372,7 +372,7 @@ public:
    bool isColorRenderTarget() const;
    bool isDepthStencilRenderTarget() const;

    const Element& getTexelFormat() const { return _texelFormat; }
    Element getTexelFormat() const { return _texelFormat; }

    Vec3u getDimensions() const { return Vec3u(_width, _height, _depth); }
    uint16 getWidth() const { return _width; }

@@ -468,7 +468,7 @@ public:

    // Mip storage format is constant across all mips
    void setStoredMipFormat(const Element& format);
    const Element& getStoredMipFormat() const;
    Element getStoredMipFormat() const;

    // Manually allocate the mips down until the specified maxMip
    // this is just allocating the sysmem version of it