mirror of https://github.com/overte-org/overte.git
synced 2025-04-20 14:03:55 +02:00

Reduce API surface area, make render batches const correct

This commit is contained in:
parent cac529a1b1
commit 58c7df115f

24 changed files with 259 additions and 172 deletions
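The pattern that repeats through every hunk below: the backend's command table and all of its do_* handlers now take the batch by const reference, and read-only access to recorded parameter data goes through a new readData() accessor instead of the mutating editData(). A minimal, compilable sketch of the dispatch style this enforces (Batch and Backend here are toy stand-ins for illustration, not the engine's real gpu types):

#include <cstddef>
#include <cstdio>
#include <vector>

struct Batch {
    std::vector<unsigned> commands;          // opcodes, indices into the call table
    std::vector<std::size_t> commandOffsets; // where each command's params start
    std::vector<float> params;               // flat recorded parameter stream

    // Const counterpart of an editData()-style accessor: replay only
    // ever needs to read the recorded stream, never mutate it.
    const float* readData(std::size_t offset) const {
        return offset < params.size() ? params.data() + offset : nullptr;
    }
};

class Backend {
public:
    // Taking const Batch& means the compiler rejects any accidental
    // mutation of a recorded batch during replay.
    typedef void (Backend::*CommandCall)(const Batch&, std::size_t);

    void render(const Batch& batch) {
        for (std::size_t i = 0; i < batch.commands.size(); ++i) {
            (this->*_calls[batch.commands[i]])(batch, batch.commandOffsets[i]);
        }
    }

private:
    void do_setUniform(const Batch& batch, std::size_t paramOffset) {
        const float* v = batch.readData(paramOffset);
        std::printf("setUniform(%f)\n", v ? *v : 0.0f);
    }

    CommandCall _calls[1] = { &Backend::do_setUniform };
};

int main() {
    Batch batch;
    batch.commands = { 0 };
    batch.commandOffsets = { 0 };
    batch.params = { 1.5f };
    Backend().render(batch);
    return 0;
}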
@@ -188,7 +188,7 @@ GLBackend::~GLBackend() {
     killTransform();
 }
 
-void GLBackend::renderPassTransfer(Batch& batch) {
+void GLBackend::renderPassTransfer(const Batch& batch) {
     const size_t numCommands = batch.getCommands().size();
     const Batch::Commands::value_type* command = batch.getCommands().data();
     const Batch::CommandOffsets::value_type* offset = batch.getCommandOffsets().data();
@@ -244,7 +244,7 @@ void GLBackend::renderPassTransfer(Batch& batch) {
     _inRenderTransferPass = false;
 }
 
-void GLBackend::renderPassDraw(Batch& batch) {
+void GLBackend::renderPassDraw(const Batch& batch) {
     _currentDraw = -1;
     _transform._camerasItr = _transform._cameraOffsets.begin();
     const size_t numCommands = batch.getCommands().size();
@@ -289,10 +289,7 @@ void GLBackend::renderPassDraw(Batch& batch) {
     }
 }
 
-void GLBackend::render(Batch& batch) {
-    // Finalize the batch by moving all the instanced rendering into the command buffer
-    batch.preExecute();
-
+void GLBackend::render(const Batch& batch) {
     _transform._skybox = _stereo._skybox = batch.isSkyboxEnabled();
     // Allow the batch to override the rendering stereo settings
     // for things like full framebuffer copy operations (deferred lighting passes)
@@ -317,7 +314,7 @@ void GLBackend::render(Batch& batch) {
 
 void GLBackend::syncCache() {
-    cleanupTrash();
+    recycle();
     syncTransformStateCache();
     syncPipelineStateCache();
     syncInputStateCache();
@@ -334,21 +331,21 @@ void GLBackend::setupStereoSide(int side) {
     _transform.bindCurrentCamera(side);
 }
 
-void GLBackend::do_resetStages(Batch& batch, size_t paramOffset) {
+void GLBackend::do_resetStages(const Batch& batch, size_t paramOffset) {
     resetStages();
 }
 
-void GLBackend::do_runLambda(Batch& batch, size_t paramOffset) {
+void GLBackend::do_runLambda(const Batch& batch, size_t paramOffset) {
     std::function<void()> f = batch._lambdas.get(batch._params[paramOffset]._uint);
     f();
 }
 
-void GLBackend::do_startNamedCall(Batch& batch, size_t paramOffset) {
+void GLBackend::do_startNamedCall(const Batch& batch, size_t paramOffset) {
     batch._currentNamedCall = batch._names.get(batch._params[paramOffset]._uint);
     _currentDraw = -1;
 }
 
-void GLBackend::do_stopNamedCall(Batch& batch, size_t paramOffset) {
+void GLBackend::do_stopNamedCall(const Batch& batch, size_t paramOffset) {
     batch._currentNamedCall.clear();
 }
 
@@ -365,7 +362,7 @@ void GLBackend::resetStages() {
 }
 
-void GLBackend::do_pushProfileRange(Batch& batch, size_t paramOffset) {
+void GLBackend::do_pushProfileRange(const Batch& batch, size_t paramOffset) {
     auto name = batch._profileRanges.get(batch._params[paramOffset]._uint);
     profileRanges.push_back(name);
 #if defined(NSIGHT_FOUND)
@@ -373,7 +370,7 @@ void GLBackend::do_pushProfileRange(Batch& batch, size_t paramOffset) {
 #endif
 }
 
-void GLBackend::do_popProfileRange(Batch& batch, size_t paramOffset) {
+void GLBackend::do_popProfileRange(const Batch& batch, size_t paramOffset) {
     profileRanges.pop_back();
 #if defined(NSIGHT_FOUND)
     nvtxRangePop();
@@ -387,7 +384,7 @@ void GLBackend::do_popProfileRange(Batch& batch, size_t paramOffset) {
 // As long as we don't use several versions of shaders we can avoid this more complex code path
 // #define GET_UNIFORM_LOCATION(shaderUniformLoc) _pipeline._programShader->getUniformLocation(shaderUniformLoc, isStereo());
 #define GET_UNIFORM_LOCATION(shaderUniformLoc) shaderUniformLoc
-void GLBackend::do_glActiveBindTexture(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glActiveBindTexture(const Batch& batch, size_t paramOffset) {
     glActiveTexture(batch._params[paramOffset + 2]._uint);
     glBindTexture(
         GET_UNIFORM_LOCATION(batch._params[paramOffset + 1]._uint),
@@ -396,7 +393,7 @@ void GLBackend::do_glActiveBindTexture(Batch& batch, size_t paramOffset) {
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniform1i(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniform1i(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -410,7 +407,7 @@ void GLBackend::do_glUniform1i(Batch& batch, size_t paramOffset) {
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniform1f(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniform1f(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -424,7 +421,7 @@ void GLBackend::do_glUniform1f(Batch& batch, size_t paramOffset) {
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniform2f(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniform2f(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -438,7 +435,7 @@ void GLBackend::do_glUniform2f(Batch& batch, size_t paramOffset) {
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniform3f(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniform3f(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -453,7 +450,7 @@ void GLBackend::do_glUniform3f(Batch& batch, size_t paramOffset) {
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniform4f(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniform4f(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -469,7 +466,7 @@ void GLBackend::do_glUniform4f(Batch& batch, size_t paramOffset) {
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniform3fv(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniform3fv(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -479,12 +476,12 @@ void GLBackend::do_glUniform3fv(Batch& batch, size_t paramOffset) {
     glUniform3fv(
         GET_UNIFORM_LOCATION(batch._params[paramOffset + 2]._int),
         batch._params[paramOffset + 1]._uint,
-        (const GLfloat*)batch.editData(batch._params[paramOffset + 0]._uint));
+        (const GLfloat*)batch.readData(batch._params[paramOffset + 0]._uint));
 
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniform4fv(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniform4fv(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -494,13 +491,13 @@ void GLBackend::do_glUniform4fv(Batch& batch, size_t paramOffset) {
 
     GLint location = GET_UNIFORM_LOCATION(batch._params[paramOffset + 2]._int);
     GLsizei count = batch._params[paramOffset + 1]._uint;
-    const GLfloat* value = (const GLfloat*)batch.editData(batch._params[paramOffset + 0]._uint);
+    const GLfloat* value = (const GLfloat*)batch.readData(batch._params[paramOffset + 0]._uint);
     glUniform4fv(location, count, value);
 
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniform4iv(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniform4iv(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -510,12 +507,12 @@ void GLBackend::do_glUniform4iv(Batch& batch, size_t paramOffset) {
     glUniform4iv(
         GET_UNIFORM_LOCATION(batch._params[paramOffset + 2]._int),
         batch._params[paramOffset + 1]._uint,
-        (const GLint*)batch.editData(batch._params[paramOffset + 0]._uint));
+        (const GLint*)batch.readData(batch._params[paramOffset + 0]._uint));
 
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniformMatrix3fv(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniformMatrix3fv(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -527,11 +524,11 @@ void GLBackend::do_glUniformMatrix3fv(Batch& batch, size_t paramOffset) {
         GET_UNIFORM_LOCATION(batch._params[paramOffset + 3]._int),
         batch._params[paramOffset + 2]._uint,
         batch._params[paramOffset + 1]._uint,
-        (const GLfloat*)batch.editData(batch._params[paramOffset + 0]._uint));
+        (const GLfloat*)batch.readData(batch._params[paramOffset + 0]._uint));
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glUniformMatrix4fv(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glUniformMatrix4fv(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {
         // We should call updatePipeline() to bind the program but we are not doing that
         // because these uniform setters are deprecated and we don't want to create side effects
@@ -543,11 +540,11 @@ void GLBackend::do_glUniformMatrix4fv(Batch& batch, size_t paramOffset) {
         GET_UNIFORM_LOCATION(batch._params[paramOffset + 3]._int),
         batch._params[paramOffset + 2]._uint,
         batch._params[paramOffset + 1]._uint,
-        (const GLfloat*)batch.editData(batch._params[paramOffset + 0]._uint));
+        (const GLfloat*)batch.readData(batch._params[paramOffset + 0]._uint));
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_glColor4f(Batch& batch, size_t paramOffset) {
+void GLBackend::do_glColor4f(const Batch& batch, size_t paramOffset) {
 
     glm::vec4 newColor(
         batch._params[paramOffset + 3]._float,
@@ -592,7 +589,7 @@ void GLBackend::releaseQuery(GLuint id) const {
     _queriesTrash.push_back(id);
 }
 
-void GLBackend::cleanupTrash() const {
+void GLBackend::recycle() const {
     {
         std::vector<GLuint> ids;
         std::list<std::pair<GLuint, Size>> buffersTrash;
@@ -606,7 +603,9 @@ void GLBackend::cleanupTrash() const {
             decrementBufferGPUCount();
            updateBufferGPUMemoryUsage(pair.second, 0);
        }
-        glDeleteBuffers((GLsizei)ids.size(), ids.data());
+        if (!ids.empty()) {
+            glDeleteBuffers((GLsizei)ids.size(), ids.data());
+        }
    }
 
    {
@@ -620,7 +619,9 @@ void GLBackend::cleanupTrash() const {
        for (auto id : framebuffersTrash) {
            ids.push_back(id);
        }
-        glDeleteFramebuffers((GLsizei)ids.size(), ids.data());
+        if (!ids.empty()) {
+            glDeleteFramebuffers((GLsizei)ids.size(), ids.data());
+        }
    }
 
    {
@@ -636,7 +637,9 @@ void GLBackend::cleanupTrash() const {
            decrementTextureGPUCount();
            updateTextureGPUMemoryUsage(pair.second, 0);
        }
-        glDeleteTextures((GLsizei)ids.size(), ids.data());
+        if (!ids.empty()) {
+            glDeleteTextures((GLsizei)ids.size(), ids.data());
+        }
    }
 
    {
@@ -672,7 +675,9 @@ void GLBackend::cleanupTrash() const {
        for (auto id : queriesTrash) {
            ids.push_back(id);
        }
-        glDeleteQueries((GLsizei)ids.size(), ids.data());
+        if (!ids.empty()) {
+            glDeleteQueries((GLsizei)ids.size(), ids.data());
+        }
    }
 }
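The hunks above also rename cleanupTrash() to recycle() and guard each glDelete* call so it is skipped when a trash list drained empty. A self-contained sketch of that deferred-deletion pattern; the mutex and the single queue are illustrative assumptions, while the real backend keeps one mutable trash list per object type and updates GPU memory counters as it drains them:

#include <cstdio>
#include <list>
#include <mutex>
#include <vector>

class TrashCan {
public:
    // Called from wherever an object is released; just queues the ID.
    void release(unsigned id) {
        std::lock_guard<std::mutex> lock(_mutex);
        _trash.push_back(id);
    }

    // Called periodically on the thread that owns the GL context.
    void recycle() {
        std::vector<unsigned> ids;
        {
            std::lock_guard<std::mutex> lock(_mutex);
            ids.assign(_trash.begin(), _trash.end());
            _trash.clear();
        }
        // Mirrors the diff's new `if (!ids.empty())` guard: don't issue
        // a delete call at all when nothing was queued.
        if (!ids.empty()) {
            std::printf("deleting %zu objects\n", ids.size());
            // glDeleteBuffers((GLsizei)ids.size(), ids.data());
        }
    }

private:
    std::mutex _mutex;
    std::list<unsigned> _trash;
};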
@@ -46,7 +46,7 @@ public:
     ~GLBackend();
 
     void setCameraCorrection(const Mat4& correction);
-    void render(Batch& batch) final;
+    void render(const Batch& batch) final;
 
     // This call synchronizes the full Backend cache with the current GLState
     // This is only intended to be used when mixing raw gl calls with the gpu api usage in order to sync
@@ -75,74 +75,74 @@ public:
     size_t getMaxNumResourceTextures() const { return MAX_NUM_RESOURCE_TEXTURES; }
 
     // Draw Stage
-    virtual void do_draw(Batch& batch, size_t paramOffset) = 0;
-    virtual void do_drawIndexed(Batch& batch, size_t paramOffset) = 0;
-    virtual void do_drawInstanced(Batch& batch, size_t paramOffset) = 0;
-    virtual void do_drawIndexedInstanced(Batch& batch, size_t paramOffset) = 0;
-    virtual void do_multiDrawIndirect(Batch& batch, size_t paramOffset) = 0;
-    virtual void do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) = 0;
+    virtual void do_draw(const Batch& batch, size_t paramOffset) = 0;
+    virtual void do_drawIndexed(const Batch& batch, size_t paramOffset) = 0;
+    virtual void do_drawInstanced(const Batch& batch, size_t paramOffset) = 0;
+    virtual void do_drawIndexedInstanced(const Batch& batch, size_t paramOffset) = 0;
+    virtual void do_multiDrawIndirect(const Batch& batch, size_t paramOffset) = 0;
+    virtual void do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOffset) = 0;
 
     // Input Stage
-    virtual void do_setInputFormat(Batch& batch, size_t paramOffset) final;
-    virtual void do_setInputBuffer(Batch& batch, size_t paramOffset) final;
-    virtual void do_setIndexBuffer(Batch& batch, size_t paramOffset) final;
-    virtual void do_setIndirectBuffer(Batch& batch, size_t paramOffset) final;
-    virtual void do_generateTextureMips(Batch& batch, size_t paramOffset) final;
+    virtual void do_setInputFormat(const Batch& batch, size_t paramOffset) final;
+    virtual void do_setInputBuffer(const Batch& batch, size_t paramOffset) final;
+    virtual void do_setIndexBuffer(const Batch& batch, size_t paramOffset) final;
+    virtual void do_setIndirectBuffer(const Batch& batch, size_t paramOffset) final;
+    virtual void do_generateTextureMips(const Batch& batch, size_t paramOffset) final;
 
     // Transform Stage
-    virtual void do_setModelTransform(Batch& batch, size_t paramOffset) final;
-    virtual void do_setViewTransform(Batch& batch, size_t paramOffset) final;
-    virtual void do_setProjectionTransform(Batch& batch, size_t paramOffset) final;
-    virtual void do_setViewportTransform(Batch& batch, size_t paramOffset) final;
-    virtual void do_setDepthRangeTransform(Batch& batch, size_t paramOffset) final;
+    virtual void do_setModelTransform(const Batch& batch, size_t paramOffset) final;
+    virtual void do_setViewTransform(const Batch& batch, size_t paramOffset) final;
+    virtual void do_setProjectionTransform(const Batch& batch, size_t paramOffset) final;
+    virtual void do_setViewportTransform(const Batch& batch, size_t paramOffset) final;
+    virtual void do_setDepthRangeTransform(const Batch& batch, size_t paramOffset) final;
 
     // Uniform Stage
-    virtual void do_setUniformBuffer(Batch& batch, size_t paramOffset) final;
+    virtual void do_setUniformBuffer(const Batch& batch, size_t paramOffset) final;
 
     // Resource Stage
-    virtual void do_setResourceTexture(Batch& batch, size_t paramOffset) final;
+    virtual void do_setResourceTexture(const Batch& batch, size_t paramOffset) final;
 
     // Pipeline Stage
-    virtual void do_setPipeline(Batch& batch, size_t paramOffset) final;
+    virtual void do_setPipeline(const Batch& batch, size_t paramOffset) final;
 
     // Output stage
-    virtual void do_setFramebuffer(Batch& batch, size_t paramOffset) final;
-    virtual void do_clearFramebuffer(Batch& batch, size_t paramOffset) final;
-    virtual void do_blit(Batch& batch, size_t paramOffset) = 0;
+    virtual void do_setFramebuffer(const Batch& batch, size_t paramOffset) final;
+    virtual void do_clearFramebuffer(const Batch& batch, size_t paramOffset) final;
+    virtual void do_blit(const Batch& batch, size_t paramOffset) = 0;
 
     // Query section
-    virtual void do_beginQuery(Batch& batch, size_t paramOffset) final;
-    virtual void do_endQuery(Batch& batch, size_t paramOffset) final;
-    virtual void do_getQuery(Batch& batch, size_t paramOffset) final;
+    virtual void do_beginQuery(const Batch& batch, size_t paramOffset) final;
+    virtual void do_endQuery(const Batch& batch, size_t paramOffset) final;
+    virtual void do_getQuery(const Batch& batch, size_t paramOffset) final;
 
     // Reset stages
-    virtual void do_resetStages(Batch& batch, size_t paramOffset) final;
+    virtual void do_resetStages(const Batch& batch, size_t paramOffset) final;
 
-    virtual void do_runLambda(Batch& batch, size_t paramOffset) final;
+    virtual void do_runLambda(const Batch& batch, size_t paramOffset) final;
 
-    virtual void do_startNamedCall(Batch& batch, size_t paramOffset) final;
-    virtual void do_stopNamedCall(Batch& batch, size_t paramOffset) final;
+    virtual void do_startNamedCall(const Batch& batch, size_t paramOffset) final;
+    virtual void do_stopNamedCall(const Batch& batch, size_t paramOffset) final;
 
-    virtual void do_pushProfileRange(Batch& batch, size_t paramOffset) final;
-    virtual void do_popProfileRange(Batch& batch, size_t paramOffset) final;
+    virtual void do_pushProfileRange(const Batch& batch, size_t paramOffset) final;
+    virtual void do_popProfileRange(const Batch& batch, size_t paramOffset) final;
 
     // TODO: As long as we have gl calls explicitly issued from interface
     // code, we need to be able to record and batch these calls. The long
     // term strategy is to get rid of any GL calls in favor of the HIFI GPU API
-    virtual void do_glActiveBindTexture(Batch& batch, size_t paramOffset) final;
+    virtual void do_glActiveBindTexture(const Batch& batch, size_t paramOffset) final;
 
-    virtual void do_glUniform1i(Batch& batch, size_t paramOffset) final;
-    virtual void do_glUniform1f(Batch& batch, size_t paramOffset) final;
-    virtual void do_glUniform2f(Batch& batch, size_t paramOffset) final;
-    virtual void do_glUniform3f(Batch& batch, size_t paramOffset) final;
-    virtual void do_glUniform4f(Batch& batch, size_t paramOffset) final;
-    virtual void do_glUniform3fv(Batch& batch, size_t paramOffset) final;
-    virtual void do_glUniform4fv(Batch& batch, size_t paramOffset) final;
-    virtual void do_glUniform4iv(Batch& batch, size_t paramOffset) final;
-    virtual void do_glUniformMatrix3fv(Batch& batch, size_t paramOffset) final;
-    virtual void do_glUniformMatrix4fv(Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniform1i(const Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniform1f(const Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniform2f(const Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniform3f(const Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniform4f(const Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniform3fv(const Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniform4fv(const Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniform4iv(const Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniformMatrix3fv(const Batch& batch, size_t paramOffset) final;
+    virtual void do_glUniformMatrix4fv(const Batch& batch, size_t paramOffset) final;
 
-    virtual void do_glColor4f(Batch& batch, size_t paramOffset) final;
+    virtual void do_glColor4f(const Batch& batch, size_t paramOffset) final;
 
     // The State setters called by the GLState::Commands when a new state is assigned
     virtual void do_setStateFillMode(int32 mode) final;
@@ -159,8 +159,8 @@ public:
     virtual void do_setStateSampleMask(uint32 mask) final;
     virtual void do_setStateBlend(State::BlendFunction blendFunction) final;
     virtual void do_setStateColorWriteMask(uint32 mask) final;
-    virtual void do_setStateBlendFactor(Batch& batch, size_t paramOffset) final;
-    virtual void do_setStateScissorRect(Batch& batch, size_t paramOffset) final;
+    virtual void do_setStateBlendFactor(const Batch& batch, size_t paramOffset) final;
+    virtual void do_setStateScissorRect(const Batch& batch, size_t paramOffset) final;
 
     virtual GLuint getFramebufferID(const FramebufferPointer& framebuffer) = 0;
     virtual GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) = 0;
@@ -174,10 +174,10 @@ public:
     virtual void releaseShader(GLuint id) const;
     virtual void releaseProgram(GLuint id) const;
     virtual void releaseQuery(GLuint id) const;
-    virtual void cleanupTrash() const;
 
 protected:
 
+    void recycle() const override;
     virtual GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) = 0;
     virtual GLBuffer* syncGPUObject(const Buffer& buffer) = 0;
     virtual GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) = 0;
@@ -197,8 +197,8 @@ protected:
     mutable std::list<GLuint> _programsTrash;
     mutable std::list<GLuint> _queriesTrash;
 
-    void renderPassTransfer(Batch& batch);
-    void renderPassDraw(Batch& batch);
+    void renderPassTransfer(const Batch& batch);
+    void renderPassDraw(const Batch& batch);
     void setupStereoSide(int side);
 
     virtual void initInput() final;
@@ -362,7 +362,7 @@ protected:
 
     void resetStages();
 
-    typedef void (GLBackend::*CommandCall)(Batch&, size_t);
+    typedef void (GLBackend::*CommandCall)(const Batch&, size_t);
     static CommandCall _commandCalls[Batch::NUM_COMMANDS];
     friend class GLState;
 };
@@ -14,7 +14,7 @@
 using namespace gpu;
 using namespace gpu::gl;
 
-void GLBackend::do_setInputFormat(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setInputFormat(const Batch& batch, size_t paramOffset) {
     Stream::FormatPointer format = batch._streamFormats.get(batch._params[paramOffset]._uint);
 
     if (format != _input._format) {
@@ -23,7 +23,7 @@ void GLBackend::do_setInputFormat(Batch& batch, size_t paramOffset) {
     }
 }
 
-void GLBackend::do_setInputBuffer(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setInputBuffer(const Batch& batch, size_t paramOffset) {
     Offset stride = batch._params[paramOffset + 0]._uint;
     Offset offset = batch._params[paramOffset + 1]._uint;
     BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
@@ -116,7 +116,7 @@ void GLBackend::resetInputStage() {
 
 }
 
-void GLBackend::do_setIndexBuffer(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setIndexBuffer(const Batch& batch, size_t paramOffset) {
     _input._indexBufferType = (Type)batch._params[paramOffset + 2]._uint;
     _input._indexBufferOffset = batch._params[paramOffset + 0]._uint;
 
@@ -133,7 +133,7 @@ void GLBackend::do_setIndexBuffer(Batch& batch, size_t paramOffset) {
     (void) CHECK_GL_ERROR();
 }
 
-void GLBackend::do_setIndirectBuffer(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setIndirectBuffer(const Batch& batch, size_t paramOffset) {
     _input._indirectBufferOffset = batch._params[paramOffset + 1]._uint;
     _input._indirectBufferStride = batch._params[paramOffset + 2]._uint;
 
@@ -35,7 +35,7 @@ void GLBackend::resetOutputStage() {
     glEnable(GL_FRAMEBUFFER_SRGB);
 }
 
-void GLBackend::do_setFramebuffer(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setFramebuffer(const Batch& batch, size_t paramOffset) {
     auto framebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint);
     if (_output._framebuffer != framebuffer) {
         auto newFBO = getFramebufferID(framebuffer);
@@ -47,7 +47,7 @@ void GLBackend::do_setFramebuffer(Batch& batch, size_t paramOffset) {
     }
 }
 
-void GLBackend::do_clearFramebuffer(Batch& batch, size_t paramOffset) {
+void GLBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
     if (_stereo._enable && !_pipeline._stateCache.scissorEnable) {
         qWarning("Clear without scissor in stereo mode");
     }
@@ -19,7 +19,7 @@
 using namespace gpu;
 using namespace gpu::gl;
 
-void GLBackend::do_setPipeline(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) {
     PipelinePointer pipeline = batch._pipelines.get(batch._params[paramOffset + 0]._uint);
 
     if (_pipeline._pipeline == pipeline) {
@@ -141,7 +141,7 @@ void GLBackend::resetUniformStage() {
     }
 }
 
-void GLBackend::do_setUniformBuffer(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setUniformBuffer(const Batch& batch, size_t paramOffset) {
     GLuint slot = batch._params[paramOffset + 3]._uint;
     BufferPointer uniformBuffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
     GLintptr rangeStart = batch._params[paramOffset + 1]._uint;
@@ -190,7 +190,7 @@ void GLBackend::resetResourceStage() {
     }
 }
 
-void GLBackend::do_setResourceTexture(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setResourceTexture(const Batch& batch, size_t paramOffset) {
     GLuint slot = batch._params[paramOffset + 1]._uint;
     TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
 
@@ -21,7 +21,7 @@ static bool timeElapsed = true;
 static bool timeElapsed = false;
 #endif
 
-void GLBackend::do_beginQuery(Batch& batch, size_t paramOffset) {
+void GLBackend::do_beginQuery(const Batch& batch, size_t paramOffset) {
     auto query = batch._queries.get(batch._params[paramOffset]._uint);
     GLQuery* glquery = syncGPUObject(*query);
     if (glquery) {
@@ -34,7 +34,7 @@ void GLBackend::do_beginQuery(Batch& batch, size_t paramOffset) {
     }
 }
 
-void GLBackend::do_endQuery(Batch& batch, size_t paramOffset) {
+void GLBackend::do_endQuery(const Batch& batch, size_t paramOffset) {
     auto query = batch._queries.get(batch._params[paramOffset]._uint);
     GLQuery* glquery = syncGPUObject(*query);
     if (glquery) {
@@ -47,7 +47,7 @@ void GLBackend::do_endQuery(Batch& batch, size_t paramOffset) {
     }
 }
 
-void GLBackend::do_getQuery(Batch& batch, size_t paramOffset) {
+void GLBackend::do_getQuery(const Batch& batch, size_t paramOffset) {
     auto query = batch._queries.get(batch._params[paramOffset]._uint);
     GLQuery* glquery = syncGPUObject(*query);
     if (glquery) {
@@ -290,7 +290,7 @@ void GLBackend::do_setStateColorWriteMask(uint32 mask) {
 }
 
 
-void GLBackend::do_setStateBlendFactor(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setStateBlendFactor(const Batch& batch, size_t paramOffset) {
     Vec4 factor(batch._params[paramOffset + 0]._float,
                 batch._params[paramOffset + 1]._float,
                 batch._params[paramOffset + 2]._float,
@@ -300,9 +300,9 @@ void GLBackend::do_setStateBlendFactor(Batch& batch, size_t paramOffset) {
     (void)CHECK_GL_ERROR();
 }
 
-void GLBackend::do_setStateScissorRect(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setStateScissorRect(const Batch& batch, size_t paramOffset) {
     Vec4i rect;
-    memcpy(&rect, batch.editData(batch._params[paramOffset]._uint), sizeof(Vec4i));
+    memcpy(&rect, batch.readData(batch._params[paramOffset]._uint), sizeof(Vec4i));
 
     if (_stereo._enable) {
         rect.z /= 2;
@@ -21,7 +21,7 @@ bool GLBackend::isTextureReady(const TexturePointer& texture) {
 }
 
 
-void GLBackend::do_generateTextureMips(Batch& batch, size_t paramOffset) {
+void GLBackend::do_generateTextureMips(const Batch& batch, size_t paramOffset) {
     TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
     if (!resourceTexture) {
         return;
@@ -14,22 +14,22 @@ using namespace gpu;
 using namespace gpu::gl;
 
 // Transform Stage
-void GLBackend::do_setModelTransform(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setModelTransform(const Batch& batch, size_t paramOffset) {
 }
 
-void GLBackend::do_setViewTransform(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setViewTransform(const Batch& batch, size_t paramOffset) {
     _transform._view = batch._transforms.get(batch._params[paramOffset]._uint);
     _transform._viewIsCamera = batch._params[paramOffset + 1]._uint != 0;
     _transform._invalidView = true;
 }
 
-void GLBackend::do_setProjectionTransform(Batch& batch, size_t paramOffset) {
-    memcpy(&_transform._projection, batch.editData(batch._params[paramOffset]._uint), sizeof(Mat4));
+void GLBackend::do_setProjectionTransform(const Batch& batch, size_t paramOffset) {
+    memcpy(&_transform._projection, batch.readData(batch._params[paramOffset]._uint), sizeof(Mat4));
     _transform._invalidProj = true;
 }
 
-void GLBackend::do_setViewportTransform(Batch& batch, size_t paramOffset) {
-    memcpy(&_transform._viewport, batch.editData(batch._params[paramOffset]._uint), sizeof(Vec4i));
+void GLBackend::do_setViewportTransform(const Batch& batch, size_t paramOffset) {
+    memcpy(&_transform._viewport, batch.readData(batch._params[paramOffset]._uint), sizeof(Vec4i));
 
     if (!_inRenderTransferPass && !isStereo()) {
         ivec4& vp = _transform._viewport;
@@ -40,7 +40,7 @@ void GLBackend::do_setViewportTransform(Batch& batch, size_t paramOffset) {
     _transform._invalidViewport = true;
 }
 
-void GLBackend::do_setDepthRangeTransform(Batch& batch, size_t paramOffset) {
+void GLBackend::do_setDepthRangeTransform(const Batch& batch, size_t paramOffset) {
 
     Vec2 depthRange(batch._params[paramOffset + 1]._float, batch._params[paramOffset + 0]._float);
 
@@ -18,7 +18,7 @@ Q_LOGGING_CATEGORY(gpugl41logging, "hifi.gpu.gl41")
 using namespace gpu;
 using namespace gpu::gl41;
 
-void GL41Backend::do_draw(Batch& batch, size_t paramOffset) {
+void GL41Backend::do_draw(const Batch& batch, size_t paramOffset) {
     Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];
     uint32 numVertices = batch._params[paramOffset + 1]._uint;
@@ -43,7 +43,7 @@ void GL41Backend::do_draw(Batch& batch, size_t paramOffset) {
     (void) CHECK_GL_ERROR();
 }
 
-void GL41Backend::do_drawIndexed(Batch& batch, size_t paramOffset) {
+void GL41Backend::do_drawIndexed(const Batch& batch, size_t paramOffset) {
     Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];
     uint32 numIndices = batch._params[paramOffset + 1]._uint;
@@ -72,7 +72,7 @@ void GL41Backend::do_drawIndexed(Batch& batch, size_t paramOffset) {
     (void) CHECK_GL_ERROR();
 }
 
-void GL41Backend::do_drawInstanced(Batch& batch, size_t paramOffset) {
+void GL41Backend::do_drawInstanced(const Batch& batch, size_t paramOffset) {
     GLint numInstances = batch._params[paramOffset + 4]._uint;
     Primitive primitiveType = (Primitive)batch._params[paramOffset + 3]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];
@@ -108,7 +108,7 @@ void glbackend_glDrawElementsInstancedBaseVertexBaseInstance(GLenum mode, GLsize
 #endif
 }
 
-void GL41Backend::do_drawIndexedInstanced(Batch& batch, size_t paramOffset) {
+void GL41Backend::do_drawIndexedInstanced(const Batch& batch, size_t paramOffset) {
     GLint numInstances = batch._params[paramOffset + 4]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[(Primitive)batch._params[paramOffset + 3]._uint];
     uint32 numIndices = batch._params[paramOffset + 2]._uint;
@@ -143,7 +143,7 @@ void GL41Backend::do_drawIndexedInstanced(Batch& batch, size_t paramOffset) {
 }
 
 
-void GL41Backend::do_multiDrawIndirect(Batch& batch, size_t paramOffset) {
+void GL41Backend::do_multiDrawIndirect(const Batch& batch, size_t paramOffset) {
 #if (GPU_INPUT_PROFILE == GPU_CORE_43)
     uint commandCount = batch._params[paramOffset + 0]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[(Primitive)batch._params[paramOffset + 1]._uint];
@@ -159,7 +159,7 @@ void GL41Backend::do_multiDrawIndirect(Batch& batch, size_t paramOffset) {
 
 }
 
-void GL41Backend::do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) {
+void GL41Backend::do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOffset) {
 #if (GPU_INPUT_PROFILE == GPU_CORE_43)
     uint commandCount = batch._params[paramOffset + 0]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[(Primitive)batch._params[paramOffset + 1]._uint];
@@ -68,12 +68,12 @@ protected:
     gl::GLQuery* syncGPUObject(const Query& query) override;
 
     // Draw Stage
-    void do_draw(Batch& batch, size_t paramOffset) override;
-    void do_drawIndexed(Batch& batch, size_t paramOffset) override;
-    void do_drawInstanced(Batch& batch, size_t paramOffset) override;
-    void do_drawIndexedInstanced(Batch& batch, size_t paramOffset) override;
-    void do_multiDrawIndirect(Batch& batch, size_t paramOffset) override;
-    void do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) override;
+    void do_draw(const Batch& batch, size_t paramOffset) override;
+    void do_drawIndexed(const Batch& batch, size_t paramOffset) override;
+    void do_drawInstanced(const Batch& batch, size_t paramOffset) override;
+    void do_drawIndexedInstanced(const Batch& batch, size_t paramOffset) override;
+    void do_multiDrawIndirect(const Batch& batch, size_t paramOffset) override;
+    void do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOffset) override;
 
     // Input Stage
     void updateInput() override;
@@ -85,7 +85,7 @@ protected:
     void resetTransformStage();
 
     // Output stage
-    void do_blit(Batch& batch, size_t paramOffset) override;
+    void do_blit(const Batch& batch, size_t paramOffset) override;
 };
 
 } }
@@ -127,7 +127,7 @@ GLuint GL41Backend::getFramebufferID(const FramebufferPointer& framebuffer) {
     return framebuffer ? GL41Framebuffer::getId<GL41Framebuffer>(*this, *framebuffer) : 0;
 }
 
-void GL41Backend::do_blit(Batch& batch, size_t paramOffset) {
+void GL41Backend::do_blit(const Batch& batch, size_t paramOffset) {
     auto srcframebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint);
     Vec4i srcvp;
     for (auto i = 0; i < 4; ++i) {
@@ -18,7 +18,7 @@ Q_LOGGING_CATEGORY(gpugl45logging, "hifi.gpu.gl45")
 using namespace gpu;
 using namespace gpu::gl45;
 
-void GL45Backend::do_draw(Batch& batch, size_t paramOffset) {
+void GL45Backend::do_draw(const Batch& batch, size_t paramOffset) {
     Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];
     uint32 numVertices = batch._params[paramOffset + 1]._uint;
@@ -43,7 +43,7 @@ void GL45Backend::do_draw(Batch& batch, size_t paramOffset) {
     (void) CHECK_GL_ERROR();
 }
 
-void GL45Backend::do_drawIndexed(Batch& batch, size_t paramOffset) {
+void GL45Backend::do_drawIndexed(const Batch& batch, size_t paramOffset) {
     Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];
     uint32 numIndices = batch._params[paramOffset + 1]._uint;
@@ -72,7 +72,7 @@ void GL45Backend::do_drawIndexed(Batch& batch, size_t paramOffset) {
     (void) CHECK_GL_ERROR();
 }
 
-void GL45Backend::do_drawInstanced(Batch& batch, size_t paramOffset) {
+void GL45Backend::do_drawInstanced(const Batch& batch, size_t paramOffset) {
     GLint numInstances = batch._params[paramOffset + 4]._uint;
     Primitive primitiveType = (Primitive)batch._params[paramOffset + 3]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];
@@ -100,7 +100,7 @@ void GL45Backend::do_drawInstanced(Batch& batch, size_t paramOffset) {
     (void) CHECK_GL_ERROR();
 }
 
-void GL45Backend::do_drawIndexedInstanced(Batch& batch, size_t paramOffset) {
+void GL45Backend::do_drawIndexedInstanced(const Batch& batch, size_t paramOffset) {
     GLint numInstances = batch._params[paramOffset + 4]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[(Primitive)batch._params[paramOffset + 3]._uint];
     uint32 numIndices = batch._params[paramOffset + 2]._uint;
@@ -129,7 +129,7 @@ void GL45Backend::do_drawIndexedInstanced(Batch& batch, size_t paramOffset) {
     (void)CHECK_GL_ERROR();
 }
 
-void GL45Backend::do_multiDrawIndirect(Batch& batch, size_t paramOffset) {
+void GL45Backend::do_multiDrawIndirect(const Batch& batch, size_t paramOffset) {
     uint commandCount = batch._params[paramOffset + 0]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[(Primitive)batch._params[paramOffset + 1]._uint];
     glMultiDrawArraysIndirect(mode, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, (GLsizei)_input._indirectBufferStride);
@@ -138,7 +138,7 @@ void GL45Backend::do_multiDrawIndirect(Batch& batch, size_t paramOffset) {
     (void)CHECK_GL_ERROR();
 }
 
-void GL45Backend::do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) {
+void GL45Backend::do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOffset) {
     uint commandCount = batch._params[paramOffset + 0]._uint;
     GLenum mode = gl::PRIMITIVE_TO_GL[(Primitive)batch._params[paramOffset + 1]._uint];
     GLenum indexType = gl::ELEMENT_TYPE_TO_GL[_input._indexBufferType];
@@ -57,12 +57,12 @@ protected:
     gl::GLQuery* syncGPUObject(const Query& query) override;
 
     // Draw Stage
-    void do_draw(Batch& batch, size_t paramOffset) override;
-    void do_drawIndexed(Batch& batch, size_t paramOffset) override;
-    void do_drawInstanced(Batch& batch, size_t paramOffset) override;
-    void do_drawIndexedInstanced(Batch& batch, size_t paramOffset) override;
-    void do_multiDrawIndirect(Batch& batch, size_t paramOffset) override;
-    void do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) override;
+    void do_draw(const Batch& batch, size_t paramOffset) override;
+    void do_drawIndexed(const Batch& batch, size_t paramOffset) override;
+    void do_drawInstanced(const Batch& batch, size_t paramOffset) override;
+    void do_drawIndexedInstanced(const Batch& batch, size_t paramOffset) override;
+    void do_multiDrawIndirect(const Batch& batch, size_t paramOffset) override;
+    void do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOffset) override;
 
     // Input Stage
     void updateInput() override;
@@ -74,7 +74,7 @@ protected:
     void resetTransformStage();
 
     // Output stage
-    void do_blit(Batch& batch, size_t paramOffset) override;
+    void do_blit(const Batch& batch, size_t paramOffset) override;
 };
 
 } }
@@ -119,7 +119,7 @@ GLuint GL45Backend::getFramebufferID(const FramebufferPointer& framebuffer) {
     return framebuffer ? gl::GLFramebuffer::getId<GL45Framebuffer>(*this, *framebuffer) : 0;
 }
 
-void GL45Backend::do_blit(Batch& batch, size_t paramOffset) {
+void GL45Backend::do_blit(const Batch& batch, size_t paramOffset) {
     auto srcframebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint);
     Vec4i srcvp;
     for (auto i = 0; i < 4; ++i) {
@@ -492,17 +492,6 @@ void Batch::captureNamedDrawCallInfo(std::string name) {
     std::swap(_currentNamedCall, name); // Restore _currentNamedCall
 }
 
-void Batch::preExecute() {
-    for (auto& mapItem : _namedData) {
-        auto& name = mapItem.first;
-        auto& instance = mapItem.second;
-
-        startNamedCall(name);
-        instance.process(*this);
-        stopNamedCall();
-    }
-}
-
 // Debugging
 void Batch::pushProfileRange(const char* name) {
 #if defined(NSIGHT_FOUND)
@@ -630,7 +619,16 @@ void Batch::_glColor4f(float red, float green, float blue, float alpha) {
     _params.emplace_back(red);
 }
 
-void Batch::finish(BufferUpdates& updates) {
+void Batch::finishFrame(BufferUpdates& updates) {
+    for (auto& mapItem : _namedData) {
+        auto& name = mapItem.first;
+        auto& instance = mapItem.second;
+
+        startNamedCall(name);
+        instance.process(*this);
+        stopNamedCall();
+    }
+
     for (auto& namedCallData : _namedData) {
         for (auto& buffer : namedCallData.second.buffers) {
             if (!buffer || !buffer->isDirty()) {
@@ -650,6 +648,16 @@ void Batch::finish(BufferUpdates& updates) {
 }
 
+void Batch::flush() {
+    for (auto& mapItem : _namedData) {
+        auto& name = mapItem.first;
+        auto& instance = mapItem.second;
+
+        auto& self = const_cast<Batch&>(*this);
+        self.startNamedCall(name);
+        instance.process(self);
+        self.stopNamedCall();
+    }
+
     for (auto& namedCallData : _namedData) {
         for (auto& buffer : namedCallData.second.buffers) {
             if (!buffer) {
@@ -89,7 +89,7 @@ public:
     DrawCallInfoBuffer _drawCallInfos;
     static size_t _drawCallInfosMax;
 
-    std::string _currentNamedCall;
+    mutable std::string _currentNamedCall;
 
     const DrawCallInfoBuffer& getDrawCallInfoBuffer() const;
     DrawCallInfoBuffer& getDrawCallInfoBuffer();
@@ -103,14 +103,6 @@ public:
 
     void clear();
 
-    // Call on the main thread to prepare for passing to the render thread
-    void finish(BufferUpdates& updates);
-
-    // Call on the rendering thread for batches that only exist there
-    void flush();
-
-    void preExecute();
-
     // Batches may need to override the context level stereo settings
     // if they're performing framebuffer copy operations, like the
     // deferred lighting resolution mechanism
@@ -401,7 +393,7 @@ public:
         return offset;
     }
 
-    Data get(uint32 offset) {
+    Data get(uint32 offset) const {
         if (offset >= _items.size()) {
             return Data();
         }
@@ -436,6 +428,13 @@ public:
         return (_data.data() + offset);
     }
 
+    const Byte* readData(size_t offset) const {
+        if (offset >= _data.size()) {
+            return 0;
+        }
+        return (_data.data() + offset);
+    }
+
     Commands _commands;
     static size_t _commandsMax;
 
@@ -478,6 +477,18 @@ public:
     bool _enableSkybox{ false };
 
 protected:
+    friend class Context;
+    friend class Frame;
+
+    // Apply all the named calls to the end of the batch
+    // and prepare updates for the render shadow copies of the buffers
+    void finishFrame(BufferUpdates& updates);
+
+    // Directly copy from the main data to the render thread shadow copy
+    // MUST only be called on the render thread
+    // MUST only be called on batches created on the render thread
+    void flush();
+
     void startNamedCall(const std::string& name);
     void stopNamedCall();
 
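The readData() added above is the const counterpart of the existing editData(): recording writes through the mutable accessor, replay reads through the const one. A compilable sketch of that byte-cache pattern with toy types (the real batch stores its bytes alongside command and parameter buffers, which this sketch omits):

#include <cstddef>
#include <cstdint>
#include <vector>

using Byte = std::uint8_t;

class DataCache {
public:
    std::size_t cacheData(std::size_t size, const Byte* bytes) {
        std::size_t offset = _data.size();
        _data.insert(_data.end(), bytes, bytes + size);
        return offset; // recorded commands store this offset
    }

    // Mutable access, used while the batch is being recorded.
    Byte* editData(std::size_t offset) {
        return offset < _data.size() ? _data.data() + offset : nullptr;
    }

    // Read-only access, all that const-correct replay needs.
    const Byte* readData(std::size_t offset) const {
        return offset < _data.size() ? _data.data() + offset : nullptr;
    }

private:
    std::vector<Byte> _data;
};

int main() {
    DataCache cache;
    const Byte bytes[4] = { 1, 2, 3, 4 };
    std::size_t offset = cache.cacheData(sizeof(bytes), bytes);
    const DataCache& frozen = cache; // the view a const Batch& gives the backend
    return frozen.readData(offset) ? 0 : 1;
}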
@@ -141,7 +141,7 @@ void Buffer::applyUpdate(const Update& update) {
     update.apply();
 }
 
-void Buffer::flush() {
+void Buffer::flush() const {
     ++_getUpdateCount;
     ++_applyUpdateCount;
     _renderPages = _pages;
@@ -127,7 +127,7 @@ public:
     Update getUpdate() const;
 
     // For use by the render thread to avoid the intermediate step of getUpdate/applyUpdate
-    void flush();
+    void flush() const;
 
     // FIXME don't maintain a second buffer continuously. We should be able to apply updates
     // directly to the GL object and discard _renderSysmem and _renderPages
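Buffer::flush() becoming const fits the same model: it refreshes the render-thread shadow state (_renderPages, and per the FIXME above _renderSysmem) without touching the recorded contents, which presumably means those members are mutable. A rough sketch of the idea with invented names, reduced to a dirty flag instead of the real page tracking:

#include <vector>

struct ShadowedBuffer {
    std::vector<char> sysmem;               // written by the recording thread
    mutable std::vector<char> renderSysmem; // shadow copy read by the render thread
    mutable bool dirty = true;

    // const because it only refreshes the render-side mirror,
    // never the recorded data itself.
    void flush() const {
        if (dirty) {
            renderSysmem = sysmem;
            dirty = false;
        }
    }
};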
@@ -35,7 +35,7 @@ void Context::beginFrame(const glm::mat4& renderPose) {
     _currentFrame->pose = renderPose;
 }
 
-void Context::append(Batch& batch) {
+void Context::appendFrameBatch(Batch& batch) {
     if (!_frameActive) {
         qWarning() << "Batch executed outside of frame boundaries";
         return;
@@ -54,6 +54,30 @@ FramePointer Context::endFrame() {
     return result;
 }
 
+void Context::executeBatch(Batch& batch) const {
+    batch.flush();
+    _backend->render(batch);
+}
+
+void Context::recycle() const {
+    _backend->recycle();
+}
+
+void Context::consumeFrameUpdates(const FramePointer& frame) const {
+    frame->preRender();
+}
+
+void Context::executeFrame(const FramePointer& frame) const {
+    // FIXME? probably not necessary, but safe
+    consumeFrameUpdates(frame);
+    _backend->setStereoState(frame->stereoState);
+    {
+        // Execute the frame rendering commands
+        for (auto& batch : frame->batches) {
+            _backend->render(batch);
+        }
+    }
+}
+
 bool Context::makeProgram(Shader& shader, const Shader::BindingSet& bindings) {
     if (shader.isProgram() && _makeProgramCallback) {
@@ -53,9 +53,9 @@ public:
 
     void setStereoState(const StereoState& stereo) { _stereo = stereo; }
 
-    virtual void render(Batch& batch) = 0;
+    virtual void render(const Batch& batch) = 0;
     virtual void syncCache() = 0;
-    virtual void cleanupTrash() const = 0;
+    virtual void recycle() const = 0;
     virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) = 0;
 
     // UBO class... layout MUST match the layout in Transform.slh
@@ -142,9 +142,44 @@ public:
     ~Context();
 
     void beginFrame(const glm::mat4& renderPose = glm::mat4());
-    void append(Batch& batch);
+    void appendFrameBatch(Batch& batch);
     FramePointer endFrame();
 
+    // MUST only be called on the rendering thread
+    //
+    // Handle any pending operations to clean up (recycle / deallocate) resources no longer in use
+    void recycle() const;
+
+    // MUST only be called on the rendering thread
+    //
+    // Execute a batch immediately, rather than as part of a frame
+    void executeBatch(Batch& batch) const;
+
+    // MUST only be called on the rendering thread
+    //
+    // Executes a frame, applying any updates contained in the frame batches to the rendering
+    // thread shadow copies. Either executeFrame or consumeFrameUpdates MUST be called on every frame
+    // generated, IN THE ORDER they were generated.
+    void executeFrame(const FramePointer& frame) const;
+
+    // MUST only be called on the rendering thread.
+    //
+    // Consuming a frame takes any updates queued from the recording thread and applies them to the
+    // shadow copy used by the rendering thread.
+    //
+    // EVERY frame generated MUST be consumed, regardless of whether the frame is actually executed,
+    // or the buffer shadow copies can become unsynced from the recording thread copies.
+    //
+    // Consuming a frame is idempotent: the frame encapsulates the updates and clears them out as
+    // it applies them, so calling it more than once on a given frame has no effect after the
+    // first time.
+    //
+    // This is automatically called by executeFrame, so you only need to call it if you
+    // have frames you aren't going to otherwise execute, for instance when a display plugin is
+    // being disabled, or in the null display plugin where no rendering actually occurs
+    void consumeFrameUpdates(const FramePointer& frame) const;
+
     const BackendPointer& getBackend() const { return _backend; }
 
     void enableStereo(bool enable = true);
@@ -220,7 +255,7 @@ template<typename F>
 void doInBatch(std::shared_ptr<gpu::Context> context, F f) {
     gpu::Batch batch;
     f(batch);
-    context->append(batch);
+    context->appendFrameBatch(batch);
 }
 
 };
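Taken together, appendFrameBatch()/endFrame() on the recording side and executeFrame()/consumeFrameUpdates()/recycle() on the rendering side define the new frame hand-off. A hypothetical wiring of those calls; only the gpu:: names come from the header above, while the thread split and the queue between them are assumed context:

// Recording thread: build a frame out of batches.
void recordFrame(gpu::Context& context) {
    context.beginFrame();
    gpu::Batch batch;
    // ... record commands into the batch ...
    context.appendFrameBatch(batch);
    gpu::FramePointer frame = context.endFrame();
    // hand `frame` off to the render thread (queue not shown)
}

// Render thread: either execute the frame, which consumes its updates
// first, or consume the updates of frames that will never be drawn so
// the buffer shadow copies stay in sync with the recording thread.
void renderFrame(gpu::Context& context, const gpu::FramePointer& frame, bool draw) {
    if (draw) {
        context.executeFrame(frame);
    } else {
        context.consumeFrameUpdates(frame);
    }
    context.recycle(); // release any GPU objects queued for deletion
}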
@@ -25,7 +25,7 @@ Frame::~Frame() {
 
 void Frame::finish() {
     for (Batch& batch : batches) {
-        batch.finish(bufferUpdates);
+        batch.finishFrame(bufferUpdates);
     }
 }
 
@@ -17,15 +17,15 @@
 namespace gpu {
 
 class Frame {
+    friend class Context;
+
 public:
-    virtual ~Frame();
-
     using Batches = std::vector<Batch>;
     using FramebufferRecycler = std::function<void(const FramebufferPointer&)>;
     using OverlayRecycler = std::function<void(const TexturePointer&)>;
 
-    void finish();
-    void preRender();
+    virtual ~Frame();
 
     StereoState stereoState;
     uint32_t frameIndex{ 0 };
     /// The sensor pose used for rendering the frame, only applicable for HMDs
@@ -38,9 +38,13 @@ namespace gpu {
     FramebufferPointer framebuffer;
     /// The destination texture containing the 2D overlay
     TexturePointer overlay;
 
     /// How to process the framebuffer when the frame dies. MUST BE THREAD SAFE
     FramebufferRecycler framebufferRecycler;
 
+protected:
+    // Should be called once per frame, on the recording thread
+    void finish();
+    void preRender();
 };
 
 };
@@ -36,7 +36,7 @@ protected:
 public:
     ~Backend() { }
 
-    void render(Batch& batch) final { }
+    void render(const Batch& batch) final { }
 
     // This call synchronizes the full Backend cache with the current GLState
     // This is only intended to be used when mixing raw gl calls with the gpu api usage in order to sync