commit c2509e9492 (parent 09ddad0fe0)

    Working on draw crash bug, adding render thread trash handling

36 changed files with 535 additions and 342 deletions
@@ -438,14 +438,16 @@ void OpenGLDisplayPlugin::updateFrameData() {
withPresentThreadLock([&] {
gpu::FramePointer oldFrame = _currentFrame;
uint32_t skippedCount = 0;
if (!_newFrameQueue.empty()) {
// We're changing frames, so we can cleanup any GL resources that might have been used by the old frame
getGLBackend()->cleanupTrash();
}
while (!_newFrameQueue.empty()) {
_currentFrame = _newFrameQueue.front();
_currentFrame->preRender();
_newFrameQueue.pop();

_newFrameQueue = std::queue<gpu::FramePointer>();
if (_currentFrame && oldFrame) {
skippedCount = (_currentFrame->frameIndex - oldFrame->frameIndex) - 1;
skippedCount += (_currentFrame->frameIndex - oldFrame->frameIndex) - 1;
}
}
_droppedFrameRate.increment(skippedCount);

@@ -532,9 +534,9 @@ void OpenGLDisplayPlugin::internalPresent() {

void OpenGLDisplayPlugin::present() {
PROFILE_RANGE_EX(__FUNCTION__, 0xffffff00, (uint64_t)presentCount())
incrementPresentCount();

updateFrameData();

incrementPresentCount();
if (_currentFrame) {
_backend->syncCache();
_backend->setStereoState(_currentFrame->stereoState);
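For context, the updateFrameData() change drains the queue to the most recent frame, recycles the old frame's GL resources before switching, and accumulates a dropped-frame count from the frame-index gap. A minimal sketch of that drain pattern (standalone names, not the plugin's actual members):

```cpp
#include <cstdint>
#include <memory>
#include <mutex>
#include <queue>

struct Frame { uint32_t frameIndex { 0 }; };
using FramePointer = std::shared_ptr<Frame>;

std::mutex presentMutex;
std::queue<FramePointer> newFrameQueue;
FramePointer currentFrame;

// Drain the queue to the newest frame and report how many frames were
// never presented (the index gap between the old frame and the new one).
uint32_t takeLatestFrame() {
    std::lock_guard<std::mutex> lock(presentMutex);
    FramePointer oldFrame = currentFrame;
    bool tookNewFrame = false;
    while (!newFrameQueue.empty()) {
        currentFrame = newFrameQueue.front();
        newFrameQueue.pop();
        tookNewFrame = true;
    }
    uint32_t skipped = 0;
    if (tookNewFrame && currentFrame && oldFrame) {
        skipped = (currentFrame->frameIndex - oldFrame->frameIndex) - 1;
    }
    return skipped;
}
```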
@@ -33,7 +33,10 @@ using namespace gpu;
using namespace gpu::gl;

static const QString DEBUG_FLAG("HIFI_ENABLE_OPENGL_45");
static bool enableOpenGL45 = QProcessEnvironment::systemEnvironment().contains(DEBUG_FLAG);
static bool enableOpenGL45 = true || QProcessEnvironment::systemEnvironment().contains(DEBUG_FLAG);

static GLBackend* INSTANCE{ nullptr };
static const char* GL_BACKEND_PROPERTY_NAME = "com.highfidelity.gl.backend";

Backend* GLBackend::createBackend() {
// FIXME provide a mechanism to override the backend for testing

@@ -49,13 +52,24 @@ Backend* GLBackend::createBackend() {
}
result->initInput();
result->initTransform();

INSTANCE = result;
void* voidInstance = &(*result);
qApp->setProperty(GL_BACKEND_PROPERTY_NAME, QVariant::fromValue(voidInstance));

gl::GLTexture::initTextureTransferHelper();
return result;
}

GLBackend& getBackend() {
if (!INSTANCE) {
INSTANCE = static_cast<GLBackend*>(qApp->property(GL_BACKEND_PROPERTY_NAME).value<void*>());
}
return *INSTANCE;
}

bool GLBackend::makeProgram(Shader& shader, const Shader::BindingSet& slotBindings) {
return GLShader::makeProgram(shader, slotBindings);
return GLShader::makeProgram(getBackend(), shader, slotBindings);
}

GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =

@@ -304,6 +318,7 @@ void GLBackend::render(Batch& batch) {


void GLBackend::syncCache() {
cleanupTrash();
syncTransformStateCache();
syncPipelineStateCache();
syncInputStateCache();

@@ -352,13 +367,15 @@ void GLBackend::resetStages() {


void GLBackend::do_pushProfileRange(Batch& batch, size_t paramOffset) {
#if defined(NSIGHT_FOUND)
auto name = batch._profileRanges.get(batch._params[paramOffset]._uint);
profileRanges.push_back(name);
#if defined(NSIGHT_FOUND)
nvtxRangePush(name.c_str());
#endif
}

void GLBackend::do_popProfileRange(Batch& batch, size_t paramOffset) {
profileRanges.pop_back();
#if defined(NSIGHT_FOUND)
nvtxRangePop();
#endif

@@ -545,3 +562,117 @@ void GLBackend::do_glColor4f(Batch& batch, size_t paramOffset) {
}
(void)CHECK_GL_ERROR();
}

void GLBackend::releaseBuffer(GLuint id, Size size) const {
Lock lock(_trashMutex);
_buffersTrash.push_back({ id, size });
}

void GLBackend::releaseTexture(GLuint id, Size size) const {
Lock lock(_trashMutex);
_texturesTrash.push_back({ id, size });
}

void GLBackend::releaseFramebuffer(GLuint id) const {
Lock lock(_trashMutex);
_framebuffersTrash.push_back(id);
}

void GLBackend::releaseShader(GLuint id) const {
Lock lock(_trashMutex);
_shadersTrash.push_back(id);
}

void GLBackend::releaseProgram(GLuint id) const {
Lock lock(_trashMutex);
_programsTrash.push_back(id);
}

void GLBackend::releaseQuery(GLuint id) const {
Lock lock(_trashMutex);
_queriesTrash.push_back(id);
}

void GLBackend::cleanupTrash() const {
{
std::vector<GLuint> ids;
std::list<std::pair<GLuint, Size>> buffersTrash;
{
Lock lock(_trashMutex);
std::swap(_buffersTrash, buffersTrash);
}
ids.reserve(buffersTrash.size());
for (auto pair : buffersTrash) {
ids.push_back(pair.first);
decrementBufferGPUCount();
updateBufferGPUMemoryUsage(pair.second, 0);
}
glDeleteBuffers((GLsizei)ids.size(), ids.data());
}

{
std::vector<GLuint> ids;
std::list<GLuint> framebuffersTrash;
{
Lock lock(_trashMutex);
std::swap(_framebuffersTrash, framebuffersTrash);
}
ids.reserve(framebuffersTrash.size());
for (auto id : framebuffersTrash) {
ids.push_back(id);
}
glDeleteFramebuffers((GLsizei)ids.size(), ids.data());
}

{
std::vector<GLuint> ids;
std::list<std::pair<GLuint, Size>> texturesTrash;
{
Lock lock(_trashMutex);
std::swap(_texturesTrash, texturesTrash);
}
ids.reserve(texturesTrash.size());
for (auto pair : texturesTrash) {
ids.push_back(pair.first);
decrementTextureGPUCount();
updateTextureGPUMemoryUsage(pair.second, 0);
}
glDeleteTextures((GLsizei)ids.size(), ids.data());
}

{
std::list<GLuint> programsTrash;
{
Lock lock(_trashMutex);
std::swap(_programsTrash, programsTrash);
}
for (auto id : programsTrash) {
glDeleteProgram(id);
}
}

{
std::list<GLuint> shadersTrash;
{
Lock lock(_trashMutex);
std::swap(_shadersTrash, shadersTrash);
}
for (auto id : shadersTrash) {
glDeleteShader(id);
}
}

{
std::vector<GLuint> ids;
std::list<GLuint> queriesTrash;
{
Lock lock(_trashMutex);
std::swap(_queriesTrash, queriesTrash);
}
ids.reserve(queriesTrash.size());
for (auto id : queriesTrash) {
ids.push_back(id);
}
glDeleteQueries((GLsizei)ids.size(), ids.data());
}
}
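The recycling scheme above is a standard deferred-deletion pattern: any thread may mark a GL object as garbage, but the GL calls that actually free it run only on the render thread with the context current. The key move is swapping the pending list out under the lock so the GL work happens unlocked, then issuing one batched delete. A condensed, self-contained sketch (simplified types, the real backend's `Lock`/`Size` aliases replaced by standard ones):

```cpp
#include <list>
#include <mutex>
#include <utility>
#include <vector>

using GLuint = unsigned int;
using Size = size_t;

std::mutex trashMutex;
std::list<std::pair<GLuint, Size>> buffersTrash;  // filled from any thread

// Called on the render thread, with the GL context current.
void drainBufferTrash() {
    // Move the pending entries out under the lock; GL work happens unlocked.
    std::list<std::pair<GLuint, Size>> pending;
    {
        std::lock_guard<std::mutex> lock(trashMutex);
        std::swap(buffersTrash, pending);
    }
    std::vector<GLuint> ids;
    ids.reserve(pending.size());
    for (const auto& entry : pending) {
        ids.push_back(entry.first);
    }
    // One batched delete instead of one GL call per object; requires a GL
    // loader header and a current context, so shown here as a comment:
    // glDeleteBuffers((GLsizei)ids.size(), ids.data());
}
```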
@@ -164,6 +164,14 @@ public:
virtual GLuint getFramebufferID(const FramebufferPointer& framebuffer) const = 0;
virtual GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) const = 0;

virtual void releaseBuffer(GLuint id, Size size) const;
virtual void releaseTexture(GLuint id, Size size) const;
virtual void releaseFramebuffer(GLuint id) const;
virtual void releaseShader(GLuint id) const;
virtual void releaseProgram(GLuint id) const;
virtual void releaseQuery(GLuint id) const;
void cleanupTrash() const;

protected:

virtual GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) const = 0;

@@ -173,14 +181,24 @@ protected:

virtual GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) const = 0;

virtual GLuint getQueryID(const QueryPointer& query) = 0;
virtual GLQuery* syncGPUObject(const Query& query) = 0;
virtual GLuint getQueryID(const QueryPointer& query) const = 0;
virtual GLQuery* syncGPUObject(const Query& query) const = 0;

static const size_t INVALID_OFFSET = (size_t)-1;
bool _inRenderTransferPass { false };
int32_t _uboAlignment { 0 };
int _currentDraw { -1 };

std::list<std::string> profileRanges;
mutable Mutex _trashMutex;
mutable std::list<std::pair<GLuint, Size>> _buffersTrash;
mutable std::list<std::pair<GLuint, Size>> _texturesTrash;
mutable std::list<GLuint> _framebuffersTrash;
mutable std::list<GLuint> _shadersTrash;
mutable std::list<GLuint> _programsTrash;
mutable std::list<GLuint> _queriesTrash;


void renderPassTransfer(Batch& batch);
void renderPassDraw(Batch& batch);
void setupStereoSide(int side);
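One detail worth noting in this header: the trash lists and their mutex are `mutable`, which is what lets `releaseBuffer()` and friends stay `const` and be callable from destructors that only hold a `const GLBackend&`. A minimal illustration of the idiom (hypothetical class, not the real backend):

```cpp
#include <list>
#include <mutex>

class Recycler {
public:
    // Logically read-only to callers, so it can be invoked through const refs.
    void release(unsigned int id) const {
        std::lock_guard<std::mutex> lock(_mutex);
        _trash.push_back(id);
    }
private:
    // mutable: internal bookkeeping that may change even on const objects.
    mutable std::mutex _mutex;
    mutable std::list<unsigned int> _trash;
};
```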
@@ -40,7 +40,7 @@ void GLBackend::do_setPipeline(Batch& batch, size_t paramOffset) {
_pipeline._state = nullptr;
_pipeline._invalidState = true;
} else {
auto pipelineObject = GLPipeline::sync(*pipeline);
auto pipelineObject = GLPipeline::sync(*this, *pipeline);
if (!pipelineObject) {
return;
}
@@ -7,18 +7,17 @@
//

#include "GLBuffer.h"
#include "GLBackend.h"

using namespace gpu;
using namespace gpu::gl;

GLBuffer::~GLBuffer() {
glDeleteBuffers(1, &_id);
Backend::decrementBufferGPUCount();
Backend::updateBufferGPUMemoryUsage(_size, 0);
_backend.releaseBuffer(_id, _size);
}

GLBuffer::GLBuffer(const Buffer& buffer, GLuint id) :
GLObject(buffer, id),
GLBuffer::GLBuffer(const GLBackend& backend, const Buffer& buffer, GLuint id) :
GLObject(backend, buffer, id),
_size((GLuint)buffer._renderSysmem.getSize()),
_stamp(buffer._renderSysmem.getStamp())
{
@@ -15,12 +15,20 @@ namespace gpu { namespace gl {
class GLBuffer : public GLObject<Buffer> {
public:
template <typename GLBufferType>
static GLBufferType* sync(const Buffer& buffer) {
static GLBufferType* sync(const GLBackend& backend, const Buffer& buffer) {
if (buffer.getSysmem().getSize() != 0) {
if (buffer._getUpdateCount == 0) {
qDebug() << "QQQ Unsynced buffer";
}
if (buffer._getUpdateCount < buffer._applyUpdateCount) {
qDebug() << "QQQ Unsynced buffer " << buffer._getUpdateCount << " " << buffer._applyUpdateCount;
}
}
GLBufferType* object = Backend::getGPUObject<GLBufferType>(buffer);

// Has the storage size changed?
if (!object || object->_stamp != buffer._renderSysmem.getStamp()) {
object = new GLBufferType(buffer, object);
object = new GLBufferType(backend, buffer, object);
}

if (0 != (buffer._renderPages._flags & PageManager::DIRTY)) {

@@ -31,8 +39,8 @@ public:
}

template <typename GLBufferType>
static GLuint getId(const Buffer& buffer) {
GLBuffer* bo = sync<GLBufferType>(buffer);
static GLuint getId(const GLBackend& backend, const Buffer& buffer) {
GLBuffer* bo = sync<GLBufferType>(backend, buffer);
if (bo) {
return bo->_buffer;
} else {

@@ -49,7 +57,7 @@ public:
virtual void transfer() = 0;

protected:
GLBuffer(const Buffer& buffer, GLuint id);
GLBuffer(const GLBackend& backend, const Buffer& buffer, GLuint id);
};

} }
@@ -7,10 +7,12 @@
//

#include "GLFramebuffer.h"
#include "GLBackend.h"

using namespace gpu;
using namespace gpu::gl;

GLFramebuffer::~GLFramebuffer() { if (_id) { _backend.releaseFramebuffer(_id); } };

bool GLFramebuffer::checkStatus(GLenum target) const {
bool result = false;
@@ -15,7 +15,7 @@ namespace gpu { namespace gl {
class GLFramebuffer : public GLObject<Framebuffer> {
public:
template <typename GLFramebufferType>
static GLFramebufferType* sync(const Framebuffer& framebuffer) {
static GLFramebufferType* sync(const GLBackend& backend, const Framebuffer& framebuffer) {
GLFramebufferType* object = Backend::getGPUObject<GLFramebufferType>(framebuffer);

bool needsUpate { false };

@@ -36,7 +36,7 @@ public:
// need to have a gpu object?
if (!object) {
// All is green, assign the gpuobject to the Framebuffer
object = new GLFramebufferType(framebuffer);
object = new GLFramebufferType(backend, framebuffer);
Backend::setGPUObject(framebuffer, object);
(void)CHECK_GL_ERROR();
}

@@ -46,8 +46,8 @@ public:
}

template <typename GLFramebufferType>
static GLuint getId(const Framebuffer& framebuffer) {
GLFramebufferType* fbo = sync<GLFramebufferType>(framebuffer);
static GLuint getId(const GLBackend& backend, const Framebuffer& framebuffer) {
GLFramebufferType* fbo = sync<GLFramebufferType>(backend, framebuffer);
if (fbo) {
return fbo->_id;
} else {

@@ -65,8 +65,8 @@ protected:
virtual void update() = 0;
bool checkStatus(GLenum target) const;

GLFramebuffer(const Framebuffer& framebuffer, GLuint id) : GLObject(framebuffer, id) {}
~GLFramebuffer() { if (_id) { glDeleteFramebuffers(1, &_id); } };
GLFramebuffer(const GLBackend& backend, const Framebuffer& framebuffer, GLuint id) : GLObject(backend, framebuffer, id) {}
~GLFramebuffer();

};
@@ -14,7 +14,7 @@
using namespace gpu;
using namespace gpu::gl;

GLPipeline* GLPipeline::sync(const Pipeline& pipeline) {
GLPipeline* GLPipeline::sync(const GLBackend& backend, const Pipeline& pipeline) {
GLPipeline* object = Backend::getGPUObject<GLPipeline>(pipeline);

// If GPU object already created then good

@@ -30,7 +30,7 @@ GLPipeline* GLPipeline::sync(const Pipeline& pipeline) {
return nullptr;
}

GLShader* programObject = GLShader::sync(*shader);
GLShader* programObject = GLShader::sync(backend, *shader);
if (programObject == nullptr) {
shader->setCompilationHasFailed(true);
return nullptr;
@@ -14,7 +14,7 @@ namespace gpu { namespace gl {

class GLPipeline : public GPUObject {
public:
static GLPipeline* sync(const Pipeline& pipeline);
static GLPipeline* sync(const GLBackend& backend, const Pipeline& pipeline);

GLShader* _program { nullptr };
GLState* _state { nullptr };
@@ -16,13 +16,13 @@ class GLQuery : public GLObject<Query> {
using Parent = gpu::gl::GLObject<Query>;
public:
template <typename GLQueryType>
static GLQueryType* sync(const Query& query) {
static GLQueryType* sync(const GLBackend& backend, const Query& query) {
GLQueryType* object = Backend::getGPUObject<GLQueryType>(query);

// need to have a gpu object?
if (!object) {
// All is green, assign the gpuobject to the Query
object = new GLQueryType(query);
object = new GLQueryType(backend, query);
(void)CHECK_GL_ERROR();
Backend::setGPUObject(query, object);
}

@@ -31,12 +31,12 @@ public:
}

template <typename GLQueryType>
static GLuint getId(const QueryPointer& query) {
static GLuint getId(const GLBackend& backend, const QueryPointer& query) {
if (!query) {
return 0;
}

GLQuery* object = sync<GLQueryType>(*query);
GLQuery* object = sync<GLQueryType>(backend, *query);
if (!object) {
return 0;
}

@@ -49,7 +49,7 @@ public:
GLuint64 _result { (GLuint64)-1 };

protected:
GLQuery(const Query& query, GLuint endId, GLuint beginId) : Parent(query, endId), _beginqo(beginId){}
GLQuery(const GLBackend& backend, const Query& query, GLuint endId, GLuint beginId) : Parent(backend, query, endId), _beginqo(beginId) {}
~GLQuery() {
if (_id) {
GLuint ids[2] = { _endqo, _beginqo };
@@ -11,16 +11,16 @@
using namespace gpu;
using namespace gpu::gl;

GLShader::GLShader() {
GLShader::GLShader(const GLBackend& backend) : _backend(backend) {
}

GLShader::~GLShader() {
for (auto& so : _shaderObjects) {
if (so.glshader != 0) {
glDeleteShader(so.glshader);
_backend.releaseShader(so.glshader);
}
if (so.glprogram != 0) {
glDeleteProgram(so.glprogram);
_backend.releaseProgram(so.glprogram);
}
}
}

@@ -54,7 +54,7 @@ static const std::array<std::string, GLShader::NumVersions> VERSION_DEFINES { {
""
} };

GLShader* compileBackendShader(const Shader& shader) {
GLShader* compileBackendShader(const GLBackend& backend, const Shader& shader) {
// Any GLSLprogram ? normally yes...
const std::string& shaderSource = shader.getSource().getCode();
GLenum shaderDomain = SHADER_DOMAINS[shader.getType()];

@@ -72,13 +72,13 @@ GLShader* compileBackendShader(const Shader& shader) {
}

// So far so good, the shader is created successfully
GLShader* object = new GLShader();
GLShader* object = new GLShader(backend);
object->_shaderObjects = shaderObjects;

return object;
}

GLShader* compileBackendProgram(const Shader& program) {
GLShader* compileBackendProgram(const GLBackend& backend, const Shader& program) {
if (!program.isProgram()) {
return nullptr;
}

@@ -91,7 +91,7 @@ GLShader* compileBackendProgram(const Shader& program) {
// Let's go through every shaders and make sure they are ready to go
std::vector< GLuint > shaderGLObjects;
for (auto subShader : program.getShaders()) {
auto object = GLShader::sync(*subShader);
auto object = GLShader::sync(backend, *subShader);
if (object) {
shaderGLObjects.push_back(object->_shaderObjects[version].glshader);
} else {

@@ -111,13 +111,13 @@ GLShader* compileBackendProgram(const Shader& program) {
}

// So far so good, the program versions have all been created successfully
GLShader* object = new GLShader();
GLShader* object = new GLShader(backend);
object->_shaderObjects = programObjects;

return object;
}

GLShader* GLShader::sync(const Shader& shader) {
GLShader* GLShader::sync(const GLBackend& backend, const Shader& shader) {
GLShader* object = Backend::getGPUObject<GLShader>(shader);

// If GPU object already created then good

@@ -126,26 +126,27 @@ GLShader* GLShader::sync(const Shader& shader) {
}
// need to have a gpu object?
if (shader.isProgram()) {
GLShader* tempObject = compileBackendProgram(shader);
GLShader* tempObject = compileBackendProgram(backend, shader);
if (tempObject) {
object = tempObject;
Backend::setGPUObject(shader, object);
}
} else if (shader.isDomain()) {
GLShader* tempObject = compileBackendShader(shader);
GLShader* tempObject = compileBackendShader(backend, shader);
if (tempObject) {
object = tempObject;
Backend::setGPUObject(shader, object);
}
}

glFinish();
return object;
}

bool GLShader::makeProgram(Shader& shader, const Shader::BindingSet& slotBindings) {
bool GLShader::makeProgram(const GLBackend& backend, Shader& shader, const Shader::BindingSet& slotBindings) {

// First make sure the Shader has been compiled
GLShader* object = sync(shader);
GLShader* object = sync(backend, shader);
if (!object) {
return false;
}

@@ -181,7 +182,6 @@ bool GLShader::makeProgram(Shader& shader, const Shader::BindingSet& slotBinding
}
}


return true;
}
@@ -14,8 +14,8 @@ namespace gpu { namespace gl {

class GLShader : public GPUObject {
public:
static GLShader* sync(const Shader& shader);
static bool makeProgram(Shader& shader, const Shader::BindingSet& slotBindings);
static GLShader* sync(const GLBackend& backend, const Shader& shader);
static bool makeProgram(const GLBackend& backend, Shader& shader, const Shader::BindingSet& slotBindings);

enum Version {
Mono = 0,

@@ -28,7 +28,7 @@ public:
using UniformMapping = std::map<GLint, GLint>;
using UniformMappingVersions = std::vector<UniformMapping>;

GLShader();
GLShader(const GLBackend& backend);
~GLShader();

ShaderObjects _shaderObjects;

@@ -44,6 +44,7 @@ public:
return srcLoc;
}

const GLBackend& _backend;
};

} }
@@ -121,15 +121,19 @@ static const GLenum ELEMENT_TYPE_TO_GL[gpu::NUM_TYPES] = {
bool checkGLError(const char* name = nullptr);
bool checkGLErrorDebug(const char* name = nullptr);

class GLBackend;

template <typename GPUType>
struct GLObject : public GPUObject {
public:
GLObject(const GPUType& gpuObject, GLuint id) : _gpuObject(gpuObject), _id(id) {}
GLObject(const GLBackend& backend, const GPUType& gpuObject, GLuint id) : _gpuObject(gpuObject), _id(id), _backend(backend) {}

virtual ~GLObject() { }

const GPUType& _gpuObject;
const GLuint _id;
protected:
const GLBackend& _backend;
};

class GlBuffer;
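The base GLObject now carries a reference to the backend that created it, which is what lets the destructors of GLBuffer, GLTexture, GLFramebuffer, and GLShader hand their GL ids back to the backend's trash lists instead of issuing glDelete* calls on whatever thread the destructor happens to run. Roughly, with placeholder types standing in for the real backend:

```cpp
#include <functional>

// Hypothetical stand-in for the backend's release interface.
struct BackendRef {
    std::function<void(unsigned int)> releaseBuffer;
};

struct BufferHandle {
    BufferHandle(const BackendRef& backend, unsigned int id)
        : _backend(backend), _id(id) {}
    // The destructor no longer deletes the GL object directly; it enqueues
    // the id so the render thread can delete it with the context current.
    ~BufferHandle() { _backend.releaseBuffer(_id); }
private:
    const BackendRef& _backend;
    unsigned int _id;
};
```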
@@ -11,6 +11,7 @@
#include <NumericalConstants.h>

#include "GLTextureTransfer.h"
#include "GLBackend.h"

using namespace gpu;
using namespace gpu::gl;

@@ -117,7 +118,9 @@ float GLTexture::getMemoryPressure() {
return (float)consumedGpuMemory / (float)availableTextureMemory;
}

GLTexture::DownsampleSource::DownsampleSource(GLTexture* oldTexture) :
GLTexture::DownsampleSource::DownsampleSource(const GLBackend& backend, GLTexture* oldTexture) :
_backend(backend),
_size(oldTexture ? oldTexture->_size : 0),
_texture(oldTexture ? oldTexture->takeOwnership() : 0),
_minMip(oldTexture ? oldTexture->_minMip : 0),
_maxMip(oldTexture ? oldTexture->_maxMip : 0)

@@ -126,20 +129,19 @@ GLTexture::DownsampleSource::DownsampleSource(GLTexture* oldTexture) :

GLTexture::DownsampleSource::~DownsampleSource() {
if (_texture) {
glDeleteTextures(1, &_texture);
Backend::decrementTextureGPUCount();
_backend.releaseTexture(_texture, _size);
}
}

GLTexture::GLTexture(const gpu::Texture& texture, GLuint id, GLTexture* originalTexture, bool transferrable) :
GLObject(texture, id),
GLTexture::GLTexture(const GLBackend& backend, const gpu::Texture& texture, GLuint id, GLTexture* originalTexture, bool transferrable) :
GLObject(backend, texture, id),
_storageStamp(texture.getStamp()),
_target(getGLTextureType(texture)),
_maxMip(texture.maxMip()),
_minMip(texture.minMip()),
_virtualSize(texture.evalTotalSize()),
_transferrable(transferrable),
_downsampleSource(originalTexture)
_downsampleSource(backend, originalTexture)
{
if (_transferrable) {
uint16 mipCount = usedMipLevels();

@@ -156,8 +158,8 @@ GLTexture::GLTexture(const gpu::Texture& texture, GLuint id, GLTexture* original


// Create the texture and allocate storage
GLTexture::GLTexture(const Texture& texture, GLuint id, bool transferrable) :
GLTexture(texture, id, nullptr, transferrable)
GLTexture::GLTexture(const GLBackend& backend, const Texture& texture, GLuint id, bool transferrable) :
GLTexture(backend, texture, id, nullptr, transferrable)
{
// FIXME, do during allocation
//Backend::updateTextureGPUMemoryUsage(0, _size);

@@ -165,8 +167,8 @@ GLTexture::GLTexture(const Texture& texture, GLuint id, bool transferrable) :
}

// Create the texture and copy from the original higher resolution version
GLTexture::GLTexture(const gpu::Texture& texture, GLuint id, GLTexture* originalTexture) :
GLTexture(texture, id, originalTexture, originalTexture->_transferrable)
GLTexture::GLTexture(const GLBackend& backend, const gpu::Texture& texture, GLuint id, GLTexture* originalTexture) :
GLTexture(backend, texture, id, originalTexture, originalTexture->_transferrable)
{
Q_ASSERT(_minMip >= originalTexture->_minMip);
// Set the GPU object last because that implicitly destroys the originalTexture object

@@ -187,12 +189,7 @@ GLTexture::~GLTexture() {
}
}

if (_id) {
glDeleteTextures(1, &_id);
const_cast<GLuint&>(_id) = 0;
Backend::decrementTextureGPUCount();
}
Backend::updateTextureGPUMemoryUsage(_size, 0);
_backend.releaseTexture(_id, _size);
Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, 0);
}
@@ -24,7 +24,7 @@ public:
static std::shared_ptr<GLTextureTransferHelper> _textureTransferHelper;

template <typename GLTextureType>
static GLTextureType* sync(const TexturePointer& texturePointer, bool needTransfer) {
static GLTextureType* sync(const GLBackend& backend, const TexturePointer& texturePointer, bool needTransfer) {
const Texture& texture = *texturePointer;
if (!texture.isDefined()) {
// NO texture definition yet so let's avoid thinking

@@ -38,7 +38,7 @@ public:
// for easier use of immutable storage)
if (!object || object->isInvalid()) {
// This automatically any previous texture
object = new GLTextureType(texture, needTransfer);
object = new GLTextureType(backend, texture, needTransfer);
if (!object->_transferrable) {
object->createTexture();
object->_contentStamp = texture.getDataStamp();

@@ -62,7 +62,7 @@ public:
if (object->isOverMaxMemory() && texturePointer->incremementMinMip()) {
// WARNING, this code path will essentially `delete this`,
// so no dereferencing of this instance should be done past this point
object = new GLTextureType(texture, object);
object = new GLTextureType(backend, texture, object);
_textureTransferHelper->transferTexture(texturePointer);
}
} else if (object->isOutdated()) {

@@ -75,13 +75,13 @@ public:
}

template <typename GLTextureType>
static GLuint getId(const TexturePointer& texture, bool shouldSync) {
static GLuint getId(const GLBackend& backend, const TexturePointer& texture, bool shouldSync) {
if (!texture) {
return 0;
}
GLTextureType* object { nullptr };
if (shouldSync) {
object = sync<GLTextureType>(texture, shouldSync);
object = sync<GLTextureType>(backend, texture, shouldSync);
} else {
object = Backend::getGPUObject<GLTextureType>(*texture);
}

@@ -125,10 +125,12 @@ public:

struct DownsampleSource {
using Pointer = std::shared_ptr<DownsampleSource>;
DownsampleSource() : _texture(0), _minMip(0), _maxMip(0) {}
DownsampleSource(GLTexture* originalTexture);
DownsampleSource(const GLBackend& backend) : _backend(backend), _size(0), _texture(0), _minMip(0), _maxMip(0) {}
DownsampleSource(const GLBackend& backend, GLTexture* originalTexture);
~DownsampleSource();
void reset() const { const_cast<GLuint&>(_texture) = 0; }
const GLBackend& _backend;
const GLuint _size { 0 };
const GLuint _texture { 0 };
const uint16 _minMip { 0 };
const uint16 _maxMip { 0 };

@@ -170,8 +172,8 @@ protected:
const GLuint _size { 0 }; // true size as reported by the gl api
std::atomic<GLSyncState> _syncState { GLSyncState::Idle };

GLTexture(const Texture& texture, GLuint id, bool transferrable);
GLTexture(const Texture& texture, GLuint id, GLTexture* originalTexture);
GLTexture(const GLBackend& backend, const Texture& texture, GLuint id, bool transferrable);
GLTexture(const GLBackend& backend, const Texture& texture, GLuint id, GLTexture* originalTexture);

void setSyncState(GLSyncState syncState) { _syncState = syncState; }
uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }

@@ -190,7 +192,7 @@ protected:

private:

GLTexture(const gpu::Texture& gpuTexture, GLuint id, GLTexture* originalTexture, bool transferrable);
GLTexture(const GLBackend& backend, const gpu::Texture& gpuTexture, GLuint id, GLTexture* originalTexture, bool transferrable);

friend class GLTextureTransferHelper;
friend class GLBackend;
@@ -40,8 +40,8 @@ public:
using Parent = gpu::gl::GLTexture;
GLuint allocate();
public:
GL41Texture(const Texture& buffer, bool transferrable);
GL41Texture(const Texture& buffer, GL41Texture* original);
GL41Texture(const gl::GLBackend& backend, const Texture& buffer, bool transferrable);
GL41Texture(const gl::GLBackend& backend, const Texture& buffer, GL41Texture* original);

protected:
void transferMip(uint16_t mipLevel, uint8_t face = 0) const;

@@ -64,8 +64,8 @@ protected:
GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) const override;
gl::GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) const override;

GLuint getQueryID(const QueryPointer& query) override;
gl::GLQuery* syncGPUObject(const Query& query) override;
GLuint getQueryID(const QueryPointer& query) const override;
gl::GLQuery* syncGPUObject(const Query& query) const override;

// Draw Stage
void do_draw(Batch& batch, size_t paramOffset) override;
@@ -20,7 +20,7 @@ class GL41Buffer : public gl::GLBuffer {
}

public:
GL41Buffer(const Buffer& buffer, GL41Buffer* original) : Parent(buffer, allocate()) {
GL41Buffer(const gl::GLBackend& backend, const Buffer& buffer, GL41Buffer* original) : Parent(backend, buffer, allocate()) {
glBindBuffer(GL_ARRAY_BUFFER, _buffer);
glBufferData(GL_ARRAY_BUFFER, _size, nullptr, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);

@@ -54,9 +54,9 @@ public:
};

GLuint GL41Backend::getBufferID(const Buffer& buffer) const {
return GL41Buffer::getId<GL41Buffer>(buffer);
return GL41Buffer::getId<GL41Buffer>(*this, buffer);
}

gl::GLBuffer* GL41Backend::syncGPUObject(const Buffer& buffer) const {
return GL41Buffer::sync<GL41Buffer>(buffer);
return GL41Buffer::sync<GL41Buffer>(*this, buffer);
}
@@ -56,7 +56,7 @@ public:
for (auto& b : _gpuObject.getRenderBuffers()) {
surface = b._texture;
if (surface) {
gltexture = gl::GLTexture::sync<GL41Backend::GL41Texture>(surface, false); // Grab the gltexture and don't transfer
gltexture = gl::GLTexture::sync<GL41Backend::GL41Texture>(_backend, surface, false); // Grab the gltexture and don't transfer
} else {
gltexture = nullptr;
}

@@ -83,7 +83,7 @@ public:
if (_gpuObject.getDepthStamp() != _depthStamp) {
auto surface = _gpuObject.getDepthStencilBuffer();
if (_gpuObject.hasDepthStencil() && surface) {
gltexture = gl::GLTexture::sync<GL41Backend::GL41Texture>(surface, false); // Grab the gltexture and don't transfer
gltexture = gl::GLTexture::sync<GL41Backend::GL41Texture>(_backend, surface, false); // Grab the gltexture and don't transfer
}

if (gltexture) {

@@ -115,16 +115,16 @@ public:


public:
GL41Framebuffer(const gpu::Framebuffer& framebuffer)
: Parent(framebuffer, allocate()) { }
GL41Framebuffer(const gl::GLBackend& backend, const gpu::Framebuffer& framebuffer)
: Parent(backend, framebuffer, allocate()) { }
};

gl::GLFramebuffer* GL41Backend::syncGPUObject(const Framebuffer& framebuffer) const {
return GL41Framebuffer::sync<GL41Framebuffer>(framebuffer);
return GL41Framebuffer::sync<GL41Framebuffer>(*this, framebuffer);
}

GLuint GL41Backend::getFramebufferID(const FramebufferPointer& framebuffer) const {
return framebuffer ? GL41Framebuffer::getId<GL41Framebuffer>(*framebuffer) : 0;
return framebuffer ? GL41Framebuffer::getId<GL41Framebuffer>(*this, *framebuffer) : 0;
}

void GL41Backend::do_blit(Batch& batch, size_t paramOffset) {
@@ -24,14 +24,14 @@ public:
return result;
}

GL41Query(const Query& query)
: Parent(query, allocateQuery(), allocateQuery()) { }
GL41Query(const gl::GLBackend& backend, const Query& query)
: Parent(backend, query, allocateQuery(), allocateQuery()) { }
};

gl::GLQuery* GL41Backend::syncGPUObject(const Query& query) {
return GL41Query::sync<GL41Query>(query);
gl::GLQuery* GL41Backend::syncGPUObject(const Query& query) const {
return GL41Query::sync<GL41Query>(*this, query);
}

GLuint GL41Backend::getQueryID(const QueryPointer& query) {
return GL41Query::getId<GL41Query>(query);
GLuint GL41Backend::getQueryID(const QueryPointer& query) const {
return GL41Query::getId<GL41Query>(*this, query);
}
@@ -30,16 +30,16 @@ GLuint GL41Texture::allocate() {
}

GLuint GL41Backend::getTextureID(const TexturePointer& texture, bool transfer) const {
return GL41Texture::getId<GL41Texture>(texture, transfer);
return GL41Texture::getId<GL41Texture>(*this, texture, transfer);
}

gl::GLTexture* GL41Backend::syncGPUObject(const TexturePointer& texture, bool transfer) const {
return GL41Texture::sync<GL41Texture>(texture, transfer);
return GL41Texture::sync<GL41Texture>(*this, texture, transfer);
}

GL41Texture::GL41Texture(const Texture& texture, bool transferrable) : gl::GLTexture(texture, allocate(), transferrable) {}
GL41Texture::GL41Texture(const gl::GLBackend& backend, const Texture& texture, bool transferrable) : gl::GLTexture(backend, texture, allocate(), transferrable) {}

GL41Texture::GL41Texture(const Texture& texture, GL41Texture* original) : gl::GLTexture(texture, allocate(), original) {}
GL41Texture::GL41Texture(const gl::GLBackend& backend, const Texture& texture, GL41Texture* original) : gl::GLTexture(backend, texture, allocate(), original) {}

void GL41Backend::GL41Texture::withPreservedTexture(std::function<void()> f) const {
GLint boundTex = -1;
@@ -33,19 +33,18 @@ void GL41Backend::transferTransformState(const Batch& batch) const {
memcpy(bufferData.data() + (_transform._cameraUboSize * i), &_transform._cameras[i], sizeof(TransformStageState::CameraBufferElement));
}
glBindBuffer(GL_UNIFORM_BUFFER, _transform._cameraBuffer);
glBufferData(GL_UNIFORM_BUFFER, bufferData.size(), bufferData.data(), GL_STREAM_DRAW);
glBufferData(GL_UNIFORM_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
}

if (batch._objectsBuffer) {
const auto& sysmem = batch._objectsBuffer->_renderSysmem;
if (!batch._objects.empty()) {
#ifdef GPU_SSBO_DRAW_CALL_INFO
glBindBuffer(GL_SHADER_STORAGE_BUFFER, _transform._objectBuffer);
glBufferData(GL_SHADER_STORAGE_BUFFER, sysmem.getSize(), sysmem.readData(), GL_STREAM_DRAW);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
#else
glBindBuffer(GL_TEXTURE_BUFFER, _transform._objectBuffer);
glBufferData(GL_TEXTURE_BUFFER, sysmem.getSize(), sysmem.readData(), GL_STREAM_DRAW);
glBufferData(GL_TEXTURE_BUFFER, batch._objects.size() * sizeof(Batch::TransformObject), batch._objects.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_TEXTURE_BUFFER, 0);
#endif
}

@@ -61,7 +60,7 @@ void GL41Backend::transferTransformState(const Batch& batch) const {
}

glBindBuffer(GL_ARRAY_BUFFER, _transform._drawCallInfoBuffer);
glBufferData(GL_ARRAY_BUFFER, bufferData.size(), bufferData.data(), GL_STREAM_DRAW);
glBufferData(GL_ARRAY_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
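Two things change in this hunk: the staging buffers switch their usage hint from GL_STREAM_DRAW to GL_DYNAMIC_DRAW (data rewritten repeatedly and used for many draws), and the object transforms now upload from the batch's CPU-side `_objects` vector instead of the buffer's `_renderSysmem`. The upload step in isolation, assuming a GL loader header (e.g. GLEW), a current context, and a valid buffer id (a sketch, not the backend's exact code):

```cpp
#include <vector>
// Assumes #include <GL/glew.h> (or equivalent) and a current GL context.

struct TransformObject { float model[16]; float modelInverse[16]; };

// Re-specify the object buffer each frame with a DYNAMIC hint. Handing
// glBufferData a fresh data pointer orphans the previous storage, so draws
// still in flight against the old contents do not stall the upload.
void uploadObjects(GLuint bufferId, const std::vector<TransformObject>& objects) {
    glBindBuffer(GL_TEXTURE_BUFFER, bufferId);
    glBufferData(GL_TEXTURE_BUFFER, objects.size() * sizeof(TransformObject),
                 objects.data(), GL_DYNAMIC_DRAW);
    glBindBuffer(GL_TEXTURE_BUFFER, 0);
}
```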
@@ -29,8 +29,8 @@ public:
using Parent = gpu::gl::GLTexture;
GLuint allocate(const Texture& texture);
public:
GL45Texture(const Texture& texture, bool transferrable);
GL45Texture(const Texture& texture, GLTexture* original);
GL45Texture(const gl::GLBackend& backend, const Texture& texture, bool transferrable);
GL45Texture(const gl::GLBackend& backend, const Texture& texture, GLTexture* original);

protected:
void transferMip(uint16_t mipLevel, uint8_t face = 0) const;

@@ -53,8 +53,8 @@ protected:
GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) const override;
gl::GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) const override;

GLuint getQueryID(const QueryPointer& query) override;
gl::GLQuery* syncGPUObject(const Query& query) override;
GLuint getQueryID(const QueryPointer& query) const override;
gl::GLQuery* syncGPUObject(const Query& query) const override;

// Draw Stage
void do_draw(Batch& batch, size_t paramOffset) override;
@@ -20,7 +20,7 @@ class GL45Buffer : public gl::GLBuffer {
}

public:
GL45Buffer(const Buffer& buffer, GLBuffer* original) : Parent(buffer, allocate()) {
GL45Buffer(const gl::GLBackend& backend, const Buffer& buffer, GLBuffer* original) : Parent(backend, buffer, allocate()) {
glNamedBufferStorage(_buffer, _size, nullptr, GL_DYNAMIC_STORAGE_BIT);
if (original && original->_size) {
glCopyNamedBufferSubData(original->_buffer, _buffer, 0, 0, std::min(original->_size, _size));

@@ -42,9 +42,9 @@ public:
};

GLuint GL45Backend::getBufferID(const Buffer& buffer) const {
return GL45Buffer::getId<GL45Buffer>(buffer);
return GL45Buffer::getId<GL45Buffer>(*this, buffer);
}

gl::GLBuffer* GL45Backend::syncGPUObject(const Buffer& buffer) const {
return GL45Buffer::sync<GL45Buffer>(buffer);
return GL45Buffer::sync<GL45Buffer>(*this, buffer);
}
@@ -52,7 +52,7 @@ public:
for (auto& b : _gpuObject.getRenderBuffers()) {
surface = b._texture;
if (surface) {
gltexture = gl::GLTexture::sync<GL45Backend::GL45Texture>(surface, false); // Grab the gltexture and don't transfer
gltexture = gl::GLTexture::sync<GL45Backend::GL45Texture>(_backend, surface, false); // Grab the gltexture and don't transfer
} else {
gltexture = nullptr;
}

@@ -79,7 +79,7 @@ public:
if (_gpuObject.getDepthStamp() != _depthStamp) {
auto surface = _gpuObject.getDepthStencilBuffer();
if (_gpuObject.hasDepthStencil() && surface) {
gltexture = gl::GLTexture::sync<GL45Backend::GL45Texture>(surface, false); // Grab the gltexture and don't transfer
gltexture = gl::GLTexture::sync<GL45Backend::GL45Texture>(_backend, surface, false); // Grab the gltexture and don't transfer
}

if (gltexture) {

@@ -107,16 +107,16 @@ public:


public:
GL45Framebuffer(const gpu::Framebuffer& framebuffer)
: Parent(framebuffer, allocate()) { }
GL45Framebuffer(const gl::GLBackend& backend, const gpu::Framebuffer& framebuffer)
: Parent(backend, framebuffer, allocate()) { }
};

gl::GLFramebuffer* GL45Backend::syncGPUObject(const Framebuffer& framebuffer) const {
return gl::GLFramebuffer::sync<GL45Framebuffer>(framebuffer);
return gl::GLFramebuffer::sync<GL45Framebuffer>(*this, framebuffer);
}

GLuint GL45Backend::getFramebufferID(const FramebufferPointer& framebuffer) const {
return framebuffer ? gl::GLFramebuffer::getId<GL45Framebuffer>(*framebuffer) : 0;
return framebuffer ? gl::GLFramebuffer::getId<GL45Framebuffer>(*this, *framebuffer) : 0;
}

void GL45Backend::do_blit(Batch& batch, size_t paramOffset) {
@@ -23,16 +23,17 @@ public:
return result;
}

GL45Query(const Query& query)
: Parent(query, allocateQuery(), allocateQuery()){}
GL45Query(const gl::GLBackend& backend, const Query& query)
: Parent(backend, query, allocateQuery(), allocateQuery()) {
}
};

gl::GLQuery* GL45Backend::syncGPUObject(const Query& query) {
return GL45Query::sync<GL45Query>(query);
gl::GLQuery* GL45Backend::syncGPUObject(const Query& query) const {
return GL45Query::sync<GL45Query>(*this, query);
}

GLuint GL45Backend::getQueryID(const QueryPointer& query) {
return GL45Query::getId<GL45Query>(query);
GLuint GL45Backend::getQueryID(const QueryPointer& query) const {
return GL45Query::getId<GL45Query>(*this, query);
}

} }
@@ -30,18 +30,18 @@ GLuint GL45Texture::allocate(const Texture& texture) {
}

GLuint GL45Backend::getTextureID(const TexturePointer& texture, bool transfer) const {
return GL45Texture::getId<GL45Texture>(texture, transfer);
return GL45Texture::getId<GL45Texture>(*this, texture, transfer);
}

gl::GLTexture* GL45Backend::syncGPUObject(const TexturePointer& texture, bool transfer) const {
return GL45Texture::sync<GL45Texture>(texture, transfer);
return GL45Texture::sync<GL45Texture>(*this, texture, transfer);
}

GL45Backend::GL45Texture::GL45Texture(const Texture& texture, bool transferrable)
: gl::GLTexture(texture, allocate(texture), transferrable) {}
GL45Backend::GL45Texture::GL45Texture(const gl::GLBackend& backend, const Texture& texture, bool transferrable)
: gl::GLTexture(backend, texture, allocate(texture), transferrable) {}

GL45Backend::GL45Texture::GL45Texture(const Texture& texture, GLTexture* original)
: gl::GLTexture(texture, allocate(texture), original) {}
GL45Backend::GL45Texture::GL45Texture(const gl::GLBackend& backend, const Texture& texture, GLTexture* original)
: gl::GLTexture(backend, texture, allocate(texture), original) {}

void GL45Backend::GL45Texture::withPreservedTexture(std::function<void()> f) const {
f();
@@ -37,9 +37,8 @@ void GL45Backend::transferTransformState(const Batch& batch) const {
glNamedBufferData(_transform._cameraBuffer, bufferData.size(), bufferData.data(), GL_STREAM_DRAW);
}

if (batch._objectsBuffer) {
const auto& sysmem = batch._objectsBuffer->_renderSysmem;
glNamedBufferData(_transform._objectBuffer, sysmem.getSize(), sysmem.readData(), GL_STREAM_DRAW);
if (!batch._objects.empty()) {
glNamedBufferData(_transform._objectBuffer, batch._objects.size() * sizeof(Batch::TransformObject), batch._objects.data(), GL_DYNAMIC_DRAW);
}

if (!batch._namedData.empty()) {

@@ -57,9 +56,9 @@ void GL45Backend::transferTransformState(const Batch& batch) const {
#ifdef GPU_SSBO_DRAW_CALL_INFO
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, TRANSFORM_OBJECT_SLOT, _transform._objectBuffer);
#else
glTextureBuffer(_transform._objectBufferTexture, GL_RGBA32F, _transform._objectBuffer);
glActiveTexture(GL_TEXTURE0 + TRANSFORM_OBJECT_SLOT);
glBindTexture(GL_TEXTURE_BUFFER, _transform._objectBufferTexture);
glTextureBuffer(_transform._objectBufferTexture, GL_RGBA32F, _transform._objectBuffer);
#endif

CHECK_GL_ERROR();
@@ -34,7 +34,7 @@ size_t Batch::_commandsMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_commandOffsetsMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_paramsMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_dataMax { BATCH_PREALLOCATE_MIN };
//size_t Batch::_objectsMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_objectsMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_drawCallInfosMax { BATCH_PREALLOCATE_MIN };

Batch::Batch() {

@@ -42,6 +42,7 @@ Batch::Batch() {
_commandOffsets.reserve(_commandOffsetsMax);
_params.reserve(_paramsMax);
_data.reserve(_dataMax);
_objects.reserve(_objectsMax);
_drawCallInfos.reserve(_drawCallInfosMax);
}


@@ -53,7 +54,7 @@ Batch::Batch(const Batch& batch_) {
_data.swap(batch._data);
_invalidModel = batch._invalidModel;
_currentModel = batch._currentModel;
_objectsBuffer.swap(batch._objectsBuffer);
_objects.swap(batch._objects);
_currentNamedCall = batch._currentNamedCall;

_buffers._items.swap(batch._buffers._items);

@@ -77,7 +78,7 @@ Batch::~Batch() {
_commandOffsetsMax = std::max(_commandOffsets.size(), _commandOffsetsMax);
_paramsMax = std::max(_params.size(), _paramsMax);
_dataMax = std::max(_data.size(), _dataMax);
//_objectsMax = std::max(_objectsBuffer->getSize(), _objectsMax);
_objectsMax = std::max(_objects.size(), _objectsMax);
_drawCallInfosMax = std::max(_drawCallInfos.size(), _drawCallInfosMax);
}


@@ -86,7 +87,7 @@ void Batch::clear() {
_commandOffsetsMax = std::max(_commandOffsets.size(), _commandOffsetsMax);
_paramsMax = std::max(_params.size(), _paramsMax);
_dataMax = std::max(_data.size(), _dataMax);
//_objectsMax = std::max(_objects.size(), _objectsMax);
_objectsMax = std::max(_objects.size(), _objectsMax);
_drawCallInfosMax = std::max(_drawCallInfos.size(), _drawCallInfosMax);

_commands.clear();

@@ -99,7 +100,7 @@ void Batch::clear() {
_transforms.clear();
_pipelines.clear();
_framebuffers.clear();
_objectsBuffer.reset();
_objects.clear();
_drawCallInfos.clear();
}


@@ -466,18 +467,14 @@ void Batch::captureDrawCallInfoImpl() {
//_model.getInverseMatrix(_object._modelInverse);
object._modelInverse = glm::inverse(object._model);

if (!_objectsBuffer) {
_objectsBuffer = std::make_shared<Buffer>();
}

_objectsBuffer->append(object);
_objects.emplace_back(object);

// Flag is clean
_invalidModel = false;
}

auto& drawCallInfos = getDrawCallInfoBuffer();
drawCallInfos.emplace_back((uint16)(_objectsBuffer->getTypedSize<TransformObject>() - 1));
drawCallInfos.emplace_back((uint16)_objects.size() - 1);
}

void Batch::captureDrawCallInfo() {

@@ -634,16 +631,9 @@ void Batch::_glColor4f(float red, float green, float blue, float alpha) {
}

void Batch::finish(BufferUpdates& updates) {
if (_objectsBuffer && _objectsBuffer->isDirty()) {
updates.push_back({ _objectsBuffer, _objectsBuffer->getUpdate() });
}

for (auto& namedCallData : _namedData) {
for (auto& buffer : namedCallData.second.buffers) {
if (!buffer) {
continue;
}
if (!buffer->isDirty()) {
if (!buffer || !buffer->isDirty()) {
continue;
}
updates.push_back({ buffer, buffer->getUpdate() });

@@ -652,10 +642,7 @@ void Batch::finish(BufferUpdates& updates) {

for (auto& bufferCacheItem : _buffers._items) {
const BufferPointer& buffer = bufferCacheItem._data;
if (!buffer) {
continue;
}
if (!buffer->isDirty()) {
if (!buffer || !buffer->isDirty()) {
continue;
}
updates.push_back({ buffer, buffer->getUpdate() });

@@ -663,10 +650,6 @@ void Batch::finish(BufferUpdates& updates) {
}

void Batch::flush() {
if (_objectsBuffer && _objectsBuffer->isDirty()) {
_objectsBuffer->flush();
}

for (auto& namedCallData : _namedData) {
for (auto& buffer : namedCallData.second.buffers) {
if (!buffer) {
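This is the batch-side half of the transform change: captureDrawCallInfoImpl() now appends each per-draw transform to a plain `std::vector` (`_objects`) instead of appending into a shared gpu::Buffer, and the draw-call info simply records the index of the last captured element. A reduced sketch of the capture step (types simplified, not the real Batch members):

```cpp
#include <cstdint>
#include <vector>

struct TransformObject { /* model and inverse-model matrices */ };

std::vector<TransformObject> objects;  // one entry per captured model change
std::vector<uint16_t> drawCallInfos;   // each draw references a transform by index

void captureDrawCall(const TransformObject& object, bool modelChanged) {
    if (modelChanged) {
        objects.emplace_back(object);  // snapshot the current model transform
    }
    // The draw call points at the most recently captured transform.
    drawCallInfos.emplace_back(static_cast<uint16_t>(objects.size() - 1));
}
```

Keeping this data CPU-side until transferTransformState() uploads it removes the shared Buffer (and its cross-thread dirty tracking) from the per-draw hot path.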
@@ -455,9 +455,10 @@ public:
Mat4 _modelInverse;
};

using TransformObjects = std::vector<TransformObject>;
bool _invalidModel { true };
Transform _currentModel;
BufferPointer _objectsBuffer;
TransformObjects _objects;
static size_t _objectsMax;

BufferCaches _buffers;
@@ -21,6 +21,10 @@ Frame::~Frame() {
overlayRecycler(overlay);
overlay.reset();
}
assert(bufferUpdates.empty());
if (!bufferUpdates.empty()) {
qFatal("Buffer sync error... frame destroyed without buffer updates being applied");
}
}

void Frame::finish() {
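The destructor change keeps the debug `assert` but adds an unconditional `qFatal`: destroying a frame whose buffer updates were never applied means the render thread's copy of some buffer is stale, which is exactly the class of draw crash this commit is chasing, so it is better to fail fast in release builds too. The guard pattern in generic form (standard library instead of Qt):

```cpp
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <vector>

struct BufferUpdate {};

// Fail fast even when NDEBUG disables assert().
void checkUpdatesConsumed(const std::vector<BufferUpdate>& bufferUpdates) {
    assert(bufferUpdates.empty());
    if (!bufferUpdates.empty()) {
        std::fprintf(stderr, "Buffer sync error... frame destroyed without buffer updates being applied\n");
        std::abort();
    }
}
```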
@ -210,6 +210,134 @@ Size Sysmem::append(Size size, const Byte* bytes) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
PageManager::PageManager(Size pageSize) : _pageSize(pageSize) {}
|
||||
|
||||
PageManager& PageManager::operator=(const PageManager& other) {
|
||||
assert(other._pageSize == _pageSize);
|
||||
_pages = other._pages;
|
||||
_flags = other._flags;
|
||||
return *this;
|
||||
}
|
||||
|
||||
PageManager::operator bool() const {
|
||||
return (*this)(DIRTY);
|
||||
}
|
||||
|
||||
bool PageManager::operator()(uint8 desiredFlags) const {
|
||||
return (desiredFlags == (_flags & desiredFlags));
|
||||
}
|
||||
|
||||
void PageManager::markPage(Size index, uint8 markFlags) {
|
||||
assert(_pages.size() > index);
|
||||
_pages[index] |= markFlags;
|
||||
_flags |= markFlags;
|
||||
}
|
||||
|
||||
void PageManager::markRegion(Size offset, Size bytes, uint8 markFlags) {
|
||||
if (!bytes) {
|
||||
return;
|
||||
}
|
||||
_flags |= markFlags;
|
||||
// Find the starting page
|
||||
Size startPage = (offset / _pageSize);
|
||||
// Non-zero byte count, so at least one page is dirty
|
||||
Size pageCount = 1;
|
||||
// How much of the page is after the offset?
|
||||
Size remainder = _pageSize - (offset % _pageSize);
|
||||
// If there are more bytes than page space remaining, we need to increase the page count
|
||||
if (bytes > remainder) {
|
||||
// Get rid of the amount that will fit in the current page
|
||||
bytes -= remainder;
|
||||
|
||||
pageCount += (bytes / _pageSize);
|
||||
if (bytes % _pageSize) {
|
||||
++pageCount;
|
||||
}
|
||||
}
|
||||
|
||||
// Mark the pages dirty
|
||||
for (Size i = 0; i < pageCount; ++i) {
|
||||
_pages[i + startPage] |= markFlags;
|
||||
}
|
||||
}
|
||||
|
||||
Size PageManager::getPageCount(uint8_t desiredFlags) const {
|
||||
Size result = 0;
|
||||
for (auto pageFlags : _pages) {
|
||||
if (desiredFlags == (pageFlags & desiredFlags)) {
|
||||
++result;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
Size PageManager::getSize(uint8_t desiredFlags) const {
|
||||
return getPageCount(desiredFlags) * _pageSize;
|
||||
}
|
||||
|
||||
void PageManager::setPageCount(Size count) {
|
||||
_pages.resize(count);
|
||||
}
|
||||
|
||||
Size PageManager::getRequiredPageCount(Size size) const {
|
||||
Size result = size / _pageSize;
|
||||
if (size % _pageSize) {
|
||||
++result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
Size PageManager::getRequiredSize(Size size) const {
|
||||
return getRequiredPageCount(size) * _pageSize;
|
||||
}
|
||||
|
||||
Size PageManager::accommodate(Size size) {
|
||||
Size newPageCount = getRequiredPageCount(size);
|
||||
Size newSize = newPageCount * _pageSize;
|
||||
_pages.resize(newPageCount, 0);
|
||||
return newSize;
|
||||
}
|
||||
|
||||
// Get pages with the specified flags, optionally clearing the flags as we go
|
||||
PageManager::Pages PageManager::getMarkedPages(uint8_t desiredFlags, bool clear) {
|
||||
Pages result;
|
||||
if (desiredFlags == (_flags & desiredFlags)) {
|
||||
_flags &= ~desiredFlags;
|
||||
result.reserve(_pages.size());
|
||||
for (Size i = 0; i < _pages.size(); ++i) {
|
||||
if (desiredFlags == (_pages[i] & desiredFlags)) {
|
||||
result.push_back(i);
|
||||
if (clear) {
|
||||
_pages[i] &= ~desiredFlags;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
bool PageManager::getNextTransferBlock(Size& outOffset, Size& outSize, Size& currentPage) {
|
||||
Size pageCount = _pages.size();
|
||||
// Advance to the first dirty page
|
||||
while (currentPage < pageCount && (0 == (DIRTY & _pages[currentPage]))) {
|
||||
++currentPage;
|
||||
}
|
||||
|
||||
// If we got to the end, we're done
|
||||
if (currentPage >= pageCount) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Advance to the next clean page
|
||||
outOffset = static_cast<Size>(currentPage * _pageSize);
|
||||
while (currentPage < pageCount && (0 != (DIRTY & _pages[currentPage]))) {
|
||||
_pages[currentPage] &= ~DIRTY;
|
||||
++currentPage;
|
||||
}
|
||||
outSize = static_cast<Size>((currentPage * _pageSize) - outOffset);
|
||||
return true;
|
||||
}
|
||||
|
||||
std::atomic<uint32_t> Buffer::_bufferCPUCount{ 0 };
|
||||
std::atomic<Buffer::Size> Buffer::_bufferCPUMemoryUsage{ 0 };
|
||||
|
||||
|
@@ -282,43 +410,49 @@ void Buffer::markDirty(Size offset, Size bytes) {
     _pages.markRegion(offset, bytes);
 }
 
-Buffer::Update Buffer::getUpdate() const {
-    ++_getUpdateCount;
-    static Update EMPTY_UPDATE;
-    if (!_pages) {
-        return EMPTY_UPDATE;
-    }
-
-    Update result;
-    result.pages = _pages;
-    Size bufferSize = _sysmem.getSize();
-    Size pageSize = _pages._pageSize;
-    PageManager::Pages dirtyPages = _pages.getMarkedPages();
-    std::vector<uint8> dirtyPageData;
-    dirtyPageData.resize(dirtyPages.size() * pageSize);
+Buffer::Update::Update(const Buffer& parent) : buffer(parent) {
+    const auto pageSize = buffer._pages._pageSize;
+    updateNumber = ++buffer._getUpdateCount;
+    size = buffer._sysmem.getSize();
+    dirtyPages = buffer._pages.getMarkedPages();
+    dirtyData.resize(dirtyPages.size() * pageSize, 0);
     for (Size i = 0; i < dirtyPages.size(); ++i) {
         Size page = dirtyPages[i];
         Size sourceOffset = page * pageSize;
         Size destOffset = i * pageSize;
-        memcpy(dirtyPageData.data() + destOffset, _sysmem.readData() + sourceOffset, pageSize);
+        assert(dirtyData.size() >= (destOffset + pageSize));
+        assert(buffer._sysmem.getSize() >= (sourceOffset + pageSize));
+        memcpy(dirtyData.data() + destOffset, buffer._sysmem.readData() + sourceOffset, pageSize);
     }
+}
 
-    result.updateOperator = [bufferSize, pageSize, dirtyPages, dirtyPageData](Sysmem& dest) {
-        dest.resize(bufferSize);
-        for (Size i = 0; i < dirtyPages.size(); ++i) {
-            Size page = dirtyPages[i];
-            Size sourceOffset = i * pageSize;
-            Size destOffset = page * pageSize;
-            memcpy(dest.editData() + destOffset, dirtyPageData.data() + sourceOffset, pageSize);
-        }
-    };
-    return result;
+extern bool isRenderThread();
+
+void Buffer::Update::apply() const {
+    // Make sure we're loaded in order
+    ++buffer._applyUpdateCount;
+    assert(isRenderThread());
+    assert(buffer._applyUpdateCount.load() == updateNumber);
+    const auto pageSize = buffer._pages._pageSize;
+    buffer._renderSysmem.resize(size);
+    buffer._renderPages.accommodate(size);
+    for (Size i = 0; i < dirtyPages.size(); ++i) {
+        Size page = dirtyPages[i];
+        Size sourceOffset = i * pageSize;
+        assert(dirtyData.size() >= (sourceOffset + pageSize));
+        Size destOffset = page * pageSize;
+        assert(buffer._renderSysmem.getSize() >= (destOffset + pageSize));
+        memcpy(buffer._renderSysmem.editData() + destOffset, dirtyData.data() + sourceOffset, pageSize);
+        buffer._renderPages.markPage(page);
+    }
+}
+
+Buffer::Update Buffer::getUpdate() const {
+    return Update(*this);
 }
 
 void Buffer::applyUpdate(const Update& update) {
-    ++_applyUpdateCount;
-    _renderPages = update.pages;
-    update.updateOperator(_renderSysmem);
+    update.apply();
 }
 
 void Buffer::flush() {
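
The shape of the handoff above: getUpdate() runs on the application thread and snapshots the dirty pages out of _sysmem, while apply() runs on the render thread and replays them into _renderSysmem; the updateNumber / _applyUpdateCount assertions enforce in-order, no-skip application. A schematic caller sketch (the container and function names are illustrative, not from this commit):

    #include <vector>

    // App thread: snapshot the buffer's dirty pages for this frame.
    void captureForFrame(const gpu::Buffer& buffer, std::vector<gpu::Buffer::Update>& pendingUpdates) {
        pendingUpdates.push_back(buffer.getUpdate());
    }

    // Render thread: replay snapshots in capture order; apply() asserts
    // (via updateNumber) that none were skipped or reordered.
    void consumeFrame(std::vector<gpu::Buffer::Update>& pendingUpdates) {
        for (const auto& update : pendingUpdates) {
            update.apply();
        }
        pendingUpdates.clear();
    }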
@@ -118,139 +118,33 @@ struct PageManager {
         DIRTY = 0x01,
     };
 
-    PageManager(Size pageSize = DEFAULT_PAGE_SIZE) : _pageSize(pageSize) {}
-    PageManager& operator=(const PageManager& other) {
-        assert(other._pageSize == _pageSize);
-        _pages = other._pages;
-        _flags = other._flags;
-        return *this;
-    }
+    using FlagType = uint8_t;
 
-    using Vector = std::vector<uint8_t>;
-
-    Vector _pages;
+    // A list of flags
+    using Vector = std::vector<FlagType>;
+    // A list of pages
+    using Pages = std::vector<Size>;
 
+    Vector _pages;
     uint8 _flags{ 0 };
     const Size _pageSize;
 
-    operator bool() const {
-        return (*this)(DIRTY);
-    }
-
-    bool operator()(uint8 desiredFlags) const {
-        return (desiredFlags == (_flags & desiredFlags));
-    }
-
-    void markPage(Size index, uint8 markFlags = DIRTY) {
-        assert(_pages.size() > index);
-        _pages[index] |= markFlags;
-        _flags |= markFlags;
-    }
-
-    void markRegion(Size offset, Size bytes, uint8 markFlags = DIRTY) {
-        if (!bytes) {
-            return;
-        }
-        _flags |= markFlags;
-        // Find the starting page
-        Size startPage = (offset / _pageSize);
-        // Non-zero byte count, so at least one page is dirty
-        Size pageCount = 1;
-        // How much of the page is after the offset?
-        Size remainder = _pageSize - (offset % _pageSize);
-        // If there are more bytes than page space remaining, we need to increase the page count
-        if (bytes > remainder) {
-            // Get rid of the amount that will fit in the current page
-            bytes -= remainder;
-
-            pageCount += (bytes / _pageSize);
-            if (bytes % _pageSize) {
-                ++pageCount;
-            }
-        }
-
-        // Mark the pages dirty
-        for (Size i = 0; i < pageCount; ++i) {
-            _pages[i + startPage] |= DIRTY;
-        }
-    }
-
-    Size getPageCount(uint8_t desiredFlags = DIRTY) const {
-        Size result = 0;
-        for (auto pageFlags : _pages) {
-            if (desiredFlags == (pageFlags & desiredFlags)) {
-                ++result;
-            }
-        }
-        return result;
-    }
-
-    Size getSize(uint8_t desiredFlags = DIRTY) const {
-        return getPageCount(desiredFlags) * _pageSize;
-    }
-
-    void setPageCount(Size count) {
-        _pages.resize(count);
-    }
-
-    Size getRequiredPageCount(Size size) const {
-        Size result = size / _pageSize;
-        if (size % _pageSize) {
-            ++result;
-        }
-        return result;
-    }
-
-    Size getRequiredSize(Size size) const {
-        return getRequiredPageCount(size) * _pageSize;
-    }
-
-    Size accommodate(Size size) {
-        Size newPageCount = getRequiredPageCount(size);
-        Size newSize = newPageCount * _pageSize;
-        _pages.resize(newPageCount, 0);
-        return newSize;
-    }
+    PageManager(Size pageSize = DEFAULT_PAGE_SIZE);
+    PageManager& operator=(const PageManager& other);
+
+    operator bool() const;
+    bool operator()(uint8 desiredFlags) const;
+    void markPage(Size index, uint8 markFlags = DIRTY);
+    void markRegion(Size offset, Size bytes, uint8 markFlags = DIRTY);
+    Size getPageCount(uint8_t desiredFlags = DIRTY) const;
+    Size getSize(uint8_t desiredFlags = DIRTY) const;
+    void setPageCount(Size count);
+    Size getRequiredPageCount(Size size) const;
+    Size getRequiredSize(Size size) const;
+    Size accommodate(Size size);
     // Get pages with the specified flags, optionally clearing the flags as we go
-    Pages getMarkedPages(uint8_t desiredFlags = DIRTY, bool clear = true) {
-        Pages result;
-        if (desiredFlags == (_flags & desiredFlags)) {
-            _flags &= ~desiredFlags;
-            result.reserve(_pages.size());
-            for (Size i = 0; i < _pages.size(); ++i) {
-                if (desiredFlags == (_pages[i] & desiredFlags)) {
-                    result.push_back(i);
-                    if (clear) {
-                        _pages[i] &= ~desiredFlags;
-                    }
-                }
-            }
-        }
-        return result;
-    }
-
-    bool getNextTransferBlock(Size& outOffset, Size& outSize, Size& currentPage) {
-        Size pageCount = _pages.size();
-        // Advance to the first dirty page
-        while (currentPage < pageCount && (0 == (DIRTY & _pages[currentPage]))) {
-            ++currentPage;
-        }
-
-        // If we got to the end, we're done
-        if (currentPage >= pageCount) {
-            return false;
-        }
-
-        // Advance to the next clean page
-        outOffset = static_cast<Size>(currentPage * _pageSize);
-        while (currentPage < pageCount && (0 != (DIRTY & _pages[currentPage]))) {
-            _pages[currentPage] &= ~DIRTY;
-            ++currentPage;
-        }
-        outSize = static_cast<Size>((currentPage * _pageSize) - outOffset);
-        return true;
-    }
+    Pages getMarkedPages(uint8_t desiredFlags = DIRTY, bool clear = true);
+    bool getNextTransferBlock(Size& outOffset, Size& outSize, Size& currentPage);
 };
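
A worked example of the page math this struct encapsulates, as an illustrative sketch (the DEFAULT_PAGE_SIZE value of 4096 is an assumption here, not confirmed by this diff):

    #include <cassert>

    void pageMathExample() {
        PageManager pages;             // default page size, assumed 4096
        pages.accommodate(3 * 4096);   // allocate flags for three clean pages
        pages.markRegion(4090, 100);   // 6 bytes fall in page 0, the other 94 spill into page 1
        assert(pages.getPageCount(PageManager::DIRTY) == 2);   // pages 0 and 1 are now dirty
        assert(pages.getSize(PageManager::DIRTY) == 2 * 4096); // one transfer covers 8192 bytes
    }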
@@ -261,9 +155,19 @@ class Buffer : public Resource {
 
 public:
     using Flag = PageManager::Flag;
-    struct Update {
-        PageManager pages;
-        Sysmem::Operator updateOperator;
-    };
+
+    class Update {
+    public:
+        Update(const Buffer& buffer);
+        void apply() const;
+
+    private:
+        const Buffer& buffer;
+        size_t updateNumber;
+        //PageManager pages;
+        Size size;
+        PageManager::Pages dirtyPages;
+        std::vector<uint8> dirtyData;
+    };
 
     // Currently only one flag... 'dirty'
@@ -353,11 +257,12 @@ public:
     // FIXME don't maintain a second buffer continuously. We should be able to apply updates
     // directly to the GL object and discard _renderSysmem and _renderPages
     mutable PageManager _renderPages;
-    Sysmem _renderSysmem;
+    mutable Sysmem _renderSysmem;
 
     mutable std::atomic<size_t> _getUpdateCount;
    mutable std::atomic<size_t> _applyUpdateCount;
 
-protected:
+    //protected:
+public:
     void markDirty(Size offset, Size bytes);
 
     template <typename T>
@@ -55,16 +55,6 @@ public:
         }
     }
 
-protected:
-    virtual void queueItemInternal(const T& t) {
-        _items.push_back(t);
-    }
-
-    virtual uint32_t getMaxWait() {
-        return MSECS_PER_SECOND;
-    }
-
-
     virtual bool process() {
         lock();
         if (!_items.size()) {

@@ -88,6 +78,17 @@ protected:
         return processQueueItems(processItems);
     }
 
+protected:
+    virtual void queueItemInternal(const T& t) {
+        _items.push_back(t);
+    }
+
+    virtual uint32_t getMaxWait() {
+        return MSECS_PER_SECOND;
+    }
+
+
+
     virtual bool processQueueItems(const Queue& items) = 0;
 
     Queue _items;
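
The two hunks above only move queueItemInternal() and getMaxWait() below process() in the class body; behavior is unchanged. For orientation, a minimal consumer of this template might look like the following sketch (class and member names are illustrative):

    class FrameQueueThread : public GenericQueueThread<gpu::FramePointer> {
    protected:
        // Called on the worker thread with the batch of items drained by process().
        bool processQueueItems(const Queue& items) override {
            for (const auto& frame : items) {
                // ... render or discard each queued frame ...
            }
            return true;  // keep the thread alive
        }
    };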
@@ -37,6 +37,11 @@ public:
 
     bool isThreaded() const { return _isThreaded; }
 
+    /// Override this function to do whatever your class actually does, return false to exit thread early.
+    virtual bool process() = 0;
+    virtual void setup() {};
+    virtual void shutdown() {};
+
 public slots:
     /// If you're running in non-threaded mode, you must call this regularly
     void threadRoutine();

@@ -45,10 +50,6 @@ signals:
     void finished();
 
 protected:
-    /// Override this function to do whatever your class actually does, return false to exit thread early.
-    virtual bool process() = 0;
-    virtual void setup() {};
-    virtual void shutdown() {};
-
     /// Locks all the resources of the thread.
     void lock() { _mutex.lock(); }
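
Moving process(), setup() and shutdown() from protected to public lets an owner pump a non-threaded GenericThread by hand instead of relying on threadRoutine(). Roughly, as a hypothetical caller sketch (MyWorker stands in for any GenericThread subclass; whether initialization takes this exact form is an assumption):

    #include <atomic>

    void pumpWorker(MyWorker& worker, const std::atomic<bool>& running) {
        // With the worker configured non-threaded, the owner drives it manually.
        while (running) {
            if (!worker.process()) {  // now callable from outside the class
                break;                // false means the worker wants to stop
            }
        }
    }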
@@ -231,8 +231,9 @@ public:
     void renderFrame(gpu::FramePointer& frame) {
         ++_presentCount;
         _displayContext->makeCurrent(_displaySurface);
+        ((gpu::gl::GLBackend&)(*_backend)).cleanupTrash();
         if (frame && !frame->batches.empty()) {
             frame->preRender();
             _backend->syncCache();
             _backend->setStereoState(frame->stereoState);
             for (auto& batch : frame->batches) {
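
The new cleanupTrash() call is the render-thread half of the trash handling this commit adds: GL objects released on other threads are queued, then destroyed here, where a GL context is current. The general pattern, as an illustrative sketch (not the engine's actual implementation; requires a GL loader header):

    #include <mutex>
    #include <vector>

    class GLTrash {
    public:
        // Deletions requested off-thread are parked here...
        void queueTexture(GLuint id) {
            std::lock_guard<std::mutex> guard(_mutex);
            _deadTextures.push_back(id);
        }

        // ...and destroyed on the render thread, where the context is current.
        void cleanup() {
            std::vector<GLuint> dead;
            {
                std::lock_guard<std::mutex> guard(_mutex);
                dead.swap(_deadTextures);
            }
            if (!dead.empty()) {
                glDeleteTextures((GLsizei)dead.size(), dead.data());
            }
        }

    private:
        std::mutex _mutex;
        std::vector<GLuint> _deadTextures;
    };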
@@ -468,7 +469,7 @@ private:
         if (!isVisible()) {
             return;
         }
-        if (_renderCount.load() >= _renderThread._presentCount.load()) {
+        if (_renderCount.load() != 0 && _renderCount.load() >= _renderThread._presentCount.load()) {
             return;
         }
         _renderCount = _renderThread._presentCount.load();
@@ -528,9 +529,6 @@ private:
         const qint64& now;
     };
 
-
-
-
     void updateText() {
         setTitle(QString("FPS %1 Culling %2 TextureMemory GPU %3 CPU %4")
             .arg(_fps).arg(_cullingEnabled)
@@ -623,6 +621,9 @@ private:
             DependencyManager::get<FramebufferCache>()->releaseFramebuffer(framebuffer);
         };
         _renderThread.queueItem(frame);
+        if (!_renderThread.isThreaded()) {
+            _renderThread.process();
+        }
     }
@@ -805,7 +806,7 @@ private:
     QSharedPointer<EntityTreeRenderer> _octree;
 };
 
-bool QTestWindow::_cullingEnabled = false;
+bool QTestWindow::_cullingEnabled = true;
 
 void messageHandler(QtMsgType type, const QMessageLogContext& context, const QString& message) {
     if (!message.isEmpty()) {