mirror of https://github.com/overte-org/overte.git
First take on the query for the gpu
This commit is contained in:
parent 3842e748b5
commit 7e59be6196

11 changed files with 250 additions and 2 deletions
@@ -213,3 +213,20 @@ void Batch::setFramebuffer(const FramebufferPointer& framebuffer) {
}

void Batch::beginQuery(const QueryPointer& query) {
    ADD_COMMAND(beginQuery);

    _params.push_back(_queries.cache(query));
}

void Batch::endQuery(const QueryPointer& query) {
    ADD_COMMAND(endQuery);

    _params.push_back(_queries.cache(query));
}

void Batch::getQuery(const QueryPointer& query) {
    ADD_COMMAND(getQuery);

    _params.push_back(_queries.cache(query));
}

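Taken together, the three Batch methods above are the recording side of the new query API: each one pushes a command id plus a cached QueryPointer into the batch. A minimal caller-side sketch (illustrative only, not part of this commit) of wrapping GPU work in a timer query:

// Sketch: record a timer query around a stretch of batched GPU work.
gpu::QueryPointer timerQuery(new gpu::Query());

gpu::Batch batch;
batch.beginQuery(timerQuery);   // records COMMAND_beginQuery and caches the query pointer
// ... record draw calls and state changes here ...
batch.endQuery(timerQuery);     // records COMMAND_endQuery
batch.getQuery(timerQuery);     // records COMMAND_getQuery so the backend reads the result back
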
@@ -18,6 +18,7 @@

#include <vector>

#include "Query.h"
#include "Stream.h"
#include "Texture.h"

@@ -121,6 +122,11 @@ public:
    // Framebuffer Stage
    void setFramebuffer(const FramebufferPointer& framebuffer);

    // Query Section
    void beginQuery(const QueryPointer& query);
    void endQuery(const QueryPointer& query);
    void getQuery(const QueryPointer& query);

    // TODO: As long as we have gl calls explicitely issued from interface
    // code, we need to be able to record and batch these calls. THe long
    // term strategy is to get rid of any GL calls in favor of the HIFI GPU API

@@ -189,6 +195,10 @@ public:

        COMMAND_setFramebuffer,

        COMMAND_beginQuery,
        COMMAND_endQuery,
        COMMAND_getQuery,

        // TODO: As long as we have gl calls explicitely issued from interface
        // code, we need to be able to record and batch these calls. THe long
        // term strategy is to get rid of any GL calls in favor of the HIFI GPU API

@@ -292,6 +302,7 @@ public:
    typedef Cache<Transform>::Vector TransformCaches;
    typedef Cache<PipelinePointer>::Vector PipelineCaches;
    typedef Cache<FramebufferPointer>::Vector FramebufferCaches;
    typedef Cache<QueryPointer>::Vector QueryCaches;

    // Cache Data in a byte array if too big to fit in Param
    // FOr example Mat4s are going there

@@ -316,6 +327,7 @@ public:
    TransformCaches _transforms;
    PipelineCaches _pipelines;
    FramebufferCaches _framebuffers;
    QueryCaches _queries;

protected:
};

@@ -99,6 +99,15 @@ public:
        return reinterpret_cast<T*>(framebuffer.getGPUObject());
    }

    template< typename T >
    static void setGPUObject(const Query& query, T* object) {
        query.setGPUObject(object);
    }
    template< typename T >
    static T* getGPUObject(const Query& query) {
        return reinterpret_cast<T*>(query.getGPUObject());
    }

protected:

};

@@ -39,6 +39,9 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =

    (&::gpu::GLBackend::do_setFramebuffer),

    (&::gpu::GLBackend::do_beginQuery),
    (&::gpu::GLBackend::do_endQuery),
    (&::gpu::GLBackend::do_getQuery),

    (&::gpu::GLBackend::do_glEnable),
    (&::gpu::GLBackend::do_glDisable),

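These three table entries are what connect the recorded command ids to their GL implementations: when the backend replays a batch it indexes _commandCalls by each recorded command and calls the member function pointer with that command's parameter offset, roughly as in the sketch below. The accessor names here are assumptions about the surrounding Batch API, not something this hunk shows.

// Rough sketch of the replay loop that consumes these table entries.
for (uint32 i = 0; i < batch.getNumCommands(); i++) {
    Batch::Command command = batch.getCommands()[i];      // e.g. COMMAND_beginQuery
    uint32 offset = batch.getCommandOffsets()[i];         // where that command's params start
    (this->*(_commandCalls[command]))(batch, offset);     // dispatches to do_beginQuery, etc.
}
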
@@ -243,7 +246,6 @@ void GLBackend::do_clearFramebuffer(Batch& batch, uint32 paramOffset) {
    (void) CHECK_GL_ERROR();
}


// TODO: As long as we have gl calls explicitely issued from interface
// code, we need to be able to record and batch these calls. THe long
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API

@@ -188,6 +188,18 @@ public:
    static GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer);
    static GLuint getFramebufferID(const FramebufferPointer& framebuffer);

    class GLQuery : public GPUObject {
    public:
        GLuint _qo = 0;
        GLuint64 _result = 0;

        GLQuery();
        ~GLQuery();
    };
    static GLQuery* syncGPUObject(const Query& query);
    static GLuint getQueryID(const QueryPointer& query);


    static const int MAX_NUM_ATTRIBUTES = Stream::NUM_INPUT_SLOTS;
    static const int MAX_NUM_INPUT_BUFFERS = 16;

@@ -367,6 +379,11 @@ protected:
        OutputStageState() {}
    } _output;

    // Query section
    void do_beginQuery(Batch& batch, uint32 paramOffset);
    void do_endQuery(Batch& batch, uint32 paramOffset);
    void do_getQuery(Batch& batch, uint32 paramOffset);

    // TODO: As long as we have gl calls explicitely issued from interface
    // code, we need to be able to record and batch these calls. THe long
    // term strategy is to get rid of any GL calls in favor of the HIFI GPU API

@@ -167,4 +167,3 @@ void GLBackend::do_setFramebuffer(Batch& batch, uint32 paramOffset) {
        _output._framebuffer = framebuffer;
    }
}

libraries/gpu/src/gpu/GLBackendQuery.cpp (new file, 93 lines)
@@ -0,0 +1,93 @@
//
//  GLBackendQuery.cpp
//  libraries/gpu/src/gpu
//
//  Created by Sam Gateau on 7/7/2015.
//  Copyright 2015 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GPULogging.h"
#include "GLBackendShared.h"

using namespace gpu;

GLBackend::GLQuery::GLQuery() {}

GLBackend::GLQuery::~GLQuery() {
    if (_qo != 0) {
        glDeleteQueries(1, &_qo);
    }
}

GLBackend::GLQuery* GLBackend::syncGPUObject(const Query& query) {
    GLQuery* object = Backend::getGPUObject<GLBackend::GLQuery>(query);

    // If GPU object already created and in sync
    if (object) {
        return object;
    }

    // need to have a gpu object?
    if (!object) {
        GLuint qo;
        glGenQueries(1, &qo);
        (void) CHECK_GL_ERROR();
        GLuint64 result = -1;

        // All is green, assign the gpuobject to the Query
        object = new GLQuery();
        object->_qo = qo;
        object->_result = result;
        Backend::setGPUObject(query, object);
    }

    return object;
}



GLuint GLBackend::getQueryID(const QueryPointer& query) {
    if (!query) {
        return 0;
    }
    GLQuery* object = GLBackend::syncGPUObject(*query);
    if (object) {
        return object->_qo;
    } else {
        return 0;
    }
}

void GLBackend::do_beginQuery(Batch& batch, uint32 paramOffset) {
    auto& query = batch._queries.get(batch._params[paramOffset]._uint);
    GLQuery* glquery = syncGPUObject(*query);
    if (glquery) {
        glBeginQuery(GL_TIME_ELAPSED, glquery->_qo);
        (void)CHECK_GL_ERROR();
    }
}

void GLBackend::do_endQuery(Batch& batch, uint32 paramOffset) {
    auto& query = batch._queries.get(batch._params[paramOffset]._uint);
    GLQuery* glquery = syncGPUObject(*query);
    if (glquery) {
        glEndQuery(GL_TIME_ELAPSED);
        (void)CHECK_GL_ERROR();
    }
}

void GLBackend::do_getQuery(Batch& batch, uint32 paramOffset) {
    auto& query = batch._queries.get(batch._params[paramOffset]._uint);
    GLQuery* glquery = syncGPUObject(*query);
    if (glquery) {
        GLint available = 0;
        while (!available) {
            glGetQueryObjectiv(glquery->_qo, GL_QUERY_RESULT_AVAILABLE, &available);
        }

        glGetQueryObjectui64v(glquery->_qo, GL_QUERY_RESULT, &glquery->_result);
        (void)CHECK_GL_ERROR();
    }
}

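Note that do_getQuery spins on GL_QUERY_RESULT_AVAILABLE, which can stall the CPU until the GPU has finished the timed work. A non-blocking variant (a sketch only, not part of this commit) would poll availability once per call and only read the result when it is ready:

// Sketch: poll the query once instead of spinning; keep the previous result otherwise.
void pollQueryResult(GLBackend::GLQuery* glquery) {
    GLint available = 0;
    glGetQueryObjectiv(glquery->_qo, GL_QUERY_RESULT_AVAILABLE, &available);
    if (available) {
        // GL_TIME_ELAPSED results are reported in nanoseconds.
        glGetQueryObjectui64v(glquery->_qo, GL_QUERY_RESULT, &glquery->_result);
    }
    // If not available, try again on a later frame rather than blocking here.
}
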
libraries/gpu/src/gpu/Query.cpp (new file, 27 lines)
@@ -0,0 +1,27 @@
//
//  Query.cpp
//  interface/src/gpu
//
//  Created by Niraj Venkat on 7/7/2015.
//  Copyright 2015 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Query.h"

#include <QDebug>

using namespace gpu;

Query::Query()
{
}

Query::~Query()
{
}

double Query::getElapsedTime() {
    return 0.0;
}

libraries/gpu/src/gpu/Query.h (new file, 45 lines)
@@ -0,0 +1,45 @@
//
//  Query.h
//  interface/src/gpu
//
//  Created by Niraj Venkat on 7/7/2015.
//  Copyright 2015 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_Query_h
#define hifi_gpu_Query_h

#include <assert.h>
#include <memory>
#include <vector>
#include "GPUConfig.h"

#include "Format.h"

namespace gpu {

class Query {
public:
    Query();
    ~Query();

    uint32 queryResult;

    double getElapsedTime();

protected:

    // This shouldn't be used by anything else than the Backend class with the proper casting.
    mutable GPUObject* _gpuObject = NULL;
    void setGPUObject(GPUObject* gpuObject) const { _gpuObject = gpuObject; }
    GPUObject* getGPUObject() const { return _gpuObject; }
    friend class Backend;
};

typedef std::shared_ptr<Query> QueryPointer;
typedef std::vector< QueryPointer > Queries;
};

#endif

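Query::getElapsedTime() is still a stub here (Query.cpp above returns 0.0). Assuming queryResult is eventually filled with the GL_TIME_ELAPSED value, which OpenGL reports in nanoseconds, the accessor could later convert that to milliseconds along these lines; this is a hedged sketch, not what the commit implements:

double Query::getElapsedTime() {
    // Assumes queryResult holds a nanosecond duration copied back by the backend.
    return double(queryResult) / 1000000.0;
}
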
@@ -75,6 +75,12 @@ RenderDeferredTask::RenderDeferredTask() : Task() {

    _jobs.push_back(Job(new DrawOverlay3D::JobModel("DrawOverlay3D")));
    _jobs.push_back(Job(new ResetGLState::JobModel()));

    // Give ourselves 3 frmaes of timer queries
    _timerQueries.push_back(gpu::QueryPointer(new gpu::Query()));
    _timerQueries.push_back(gpu::QueryPointer(new gpu::Query()));
    _timerQueries.push_back(gpu::QueryPointer(new gpu::Query()));
    _currentTimerQueryIndex = 0;
}

RenderDeferredTask::~RenderDeferredTask() {

@@ -98,9 +104,27 @@ void RenderDeferredTask::run(const SceneContextPointer& sceneContext, const Rend

    renderContext->args->_context->syncCache();

    // start the current timer query
    auto& currentQuery = _timerQueries[_currentTimerQueryIndex];
    {
        gpu::Batch batch;
        batch.beginQuery(currentQuery);
        renderContext->args->_context->render(batch);
    }

    for (auto job : _jobs) {
        job.run(sceneContext, renderContext);
    }

    // End the current timer query
    {
        gpu::Batch batch;
        batch.endQuery(currentQuery);
        batch.getQuery(currentQuery);
        renderContext->args->_context->render(batch);
        (_currentTimerQueryIndex++);
        _currentTimerQueryIndex = _currentTimerQueryIndex % _timerQueries.size();
    }
};

void DrawOpaqueDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const ItemIDsBounds& inItems) {

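The constructor allocates three timer queries and run() advances _currentTimerQueryIndex every frame, yet in this commit the same query is still ended and read back within the frame, so do_getQuery's wait can stall the CPU. The usual next step for a ring like this is to read back the query issued a couple of frames earlier, whose result should already be available; a hedged sketch of that end-of-frame block, using only names introduced above:

// Sketch: end this frame's query, but read back the oldest one in the ring.
auto& currentQuery = _timerQueries[_currentTimerQueryIndex];
auto& oldestQuery = _timerQueries[(_currentTimerQueryIndex + 1) % _timerQueries.size()];

gpu::Batch batch;
batch.endQuery(currentQuery);    // stop timing the frame just rendered
batch.getQuery(oldestQuery);     // this result has had two frames to become available
renderContext->args->_context->render(batch);

_currentTimerQueryIndex = (_currentTimerQueryIndex + 1) % _timerQueries.size();
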
@@ -77,6 +77,9 @@ public:

    virtual void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);


    gpu::Queries _timerQueries;
    int _currentTimerQueryIndex = 0;
};