Mirror of https://github.com/JulianGro/overte.git (synced 2025-04-25 17:14:59 +02:00)

Merge pull request #11499 from samcake/blue
Moving Camera update and render frame to Game Loop (Main thread)

Commit: 25dae2f24e
10 changed files with 524 additions and 428 deletions
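Note: the heart of this change is a handoff between the two loops. The game loop (Application::update, main thread) now builds the camera, view frustum, and RenderArgs and publishes them into an AppRenderArgs snapshot guarded by _renderArgsMutex via editRenderArgs(); the render loop (Application::paintGL, moved to the new Application_render.cpp) copies that snapshot under the same mutex before recording the frame. A minimal sketch of that pattern, using a trimmed stand-in struct rather than the full AppRenderArgs from the diff:

    // Sketch only: trimmed stand-in for the AppRenderArgs handoff, assuming Qt's QMutex.
    #include <QMutex>
    #include <QMutexLocker>
    #include <functional>
    #include <glm/glm.hpp>

    struct AppRenderArgsSketch {
        glm::mat4 _headPose;      // published by the game loop each tick
        bool _isStereo{ false };
    };

    class HandoffSketch {
    public:
        using RenderArgsEditor = std::function<void(AppRenderArgsSketch&)>;

        // Game loop (main thread): publish this frame's render state.
        void editRenderArgs(RenderArgsEditor editor) {
            QMutexLocker locker(&_renderArgsMutex);
            editor(_appRenderArgs);
        }

        // Render thread: copy the snapshot, then render without holding the lock.
        AppRenderArgsSketch snapshotRenderArgs() {
            QMutexLocker locker(&_renderArgsMutex);
            return _appRenderArgs;
        }

    private:
        mutable QMutex _renderArgsMutex{ QMutex::Recursive };
        AppRenderArgsSketch _appRenderArgs;
    };

The real paintGL below performs the same copy-out by hand (renderArgs, head pose, eye offsets and projections) rather than returning a struct.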
@@ -189,7 +189,6 @@
#include "InterfaceParentFinder.h"
#include "ui/OctreeStatsProvider.h"
#include "FrameTimingsScriptingInterface.h"
#include <GPUIdent.h>
#include <gl/GLHelpers.h>
#include <src/scripting/LimitlessVoiceRecognitionScriptingInterface.h>
@@ -2023,7 +2022,8 @@ void Application::cleanupBeforeQuit() {
// The cleanup process enqueues the transactions but does not process them. Calling this here will force the actual
// removal of the items.
// See https://highfidelity.fogbugz.com/f/cases/5328
_main3DScene->processTransactionQueue();
_main3DScene->enqueueFrame(); // flush all the transactions
_main3DScene->processTransactionQueue(); // process and apply deletions

// first stop all timers directly or by invokeMethod
// depending on what thread they run in
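Note: after this change Scene::processTransactionQueue() only applies transactions that have already been packaged into a frame by Scene::enqueueFrame() (see the Scene.cpp hunks further down), which is why the shutdown path above enqueues a frame before processing. A minimal sketch of that ordering; the render::ScenePointer alias is assumed from the engine and is not shown in this diff:

    // Sketch of the shutdown-time flush performed in cleanupBeforeQuit() above.
    void flushSceneOnQuit(const render::ScenePointer& scene) {
        scene->enqueueFrame();             // package any queued transactions into a frame
        scene->processTransactionQueue();  // apply the pending frames, including deletions
    }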
@@ -2221,8 +2221,6 @@ void Application::initializeGL() {
update(0);
}

FrameTimingsScriptingInterface _frameTimingsScriptingInterface;

extern void setupPreferences();

void Application::initializeUi() {
@@ -2383,301 +2381,105 @@ void Application::initializeUi() {
offscreenSurfaceCache->reserve(Web3DOverlay::QML, 2);
}

void Application::paintGL() {
// Some plugins process message events, allowing paintGL to be called reentrantly.
if (_aboutToQuit || _window->isMinimized()) {
return;
}

_renderFrameCount++;
_lastTimeRendered.start();

auto lastPaintBegin = usecTimestampNow();
PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
PerformanceTimer perfTimer("paintGL");

if (nullptr == _displayPlugin) {
return;
}

DisplayPluginPointer displayPlugin;
{
PROFILE_RANGE(render, "/getActiveDisplayPlugin");
displayPlugin = getActiveDisplayPlugin();
}

{
PROFILE_RANGE(render, "/pluginBeginFrameRender");
// If a display plugin loses its underlying support, it
// needs to be able to signal us to not use it
if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
updateDisplayMode();
return;
}
}

// update the avatar with a fresh HMD pose
{
PROFILE_RANGE(render, "/updateAvatar");
getMyAvatar()->updateFromHMDSensorMatrix(getHMDSensorPose());
}

auto lodManager = DependencyManager::get<LODManager>();

RenderArgs renderArgs;

float sensorToWorldScale = getMyAvatar()->getSensorToWorldScale();
{
PROFILE_RANGE(render, "/buildFrustrumAndArgs");
{
QMutexLocker viewLocker(&_viewMutex);
// adjust near clip plane to account for sensor scaling.
auto adjustedProjection = glm::perspective(_viewFrustum.getFieldOfView(),
_viewFrustum.getAspectRatio(),
DEFAULT_NEAR_CLIP * sensorToWorldScale,
_viewFrustum.getFarClip());
_viewFrustum.setProjection(adjustedProjection);
_viewFrustum.calculate();
}
renderArgs = RenderArgs(_gpuContext, lodManager->getOctreeSizeScale(),
lodManager->getBoundaryLevelAdjust(), RenderArgs::DEFAULT_RENDER_MODE,
RenderArgs::MONO, RenderArgs::RENDER_DEBUG_NONE);
{
QMutexLocker viewLocker(&_viewMutex);
renderArgs.setViewFrustum(_viewFrustum);
}
}

{
PROFILE_RANGE(render, "/resizeGL");
PerformanceWarning::setSuppressShortTimings(Menu::getInstance()->isOptionChecked(MenuOption::SuppressShortTimings));
bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
PerformanceWarning warn(showWarnings, "Application::paintGL()");
resizeGL();
}

{
PROFILE_RANGE(render, "/gpuContextReset");
_gpuContext->beginFrame(getHMDSensorPose());
// Reset the gpu::Context Stages
// Back to the default framebuffer;
gpu::doInBatch(_gpuContext, [&](gpu::Batch& batch) {
batch.resetStages();
});
}

{
PROFILE_RANGE(render, "/renderOverlay");
PerformanceTimer perfTimer("renderOverlay");
// NOTE: There is no batch associated with this renderArgs
// the ApplicationOverlay class assumes its viewport is set up to be the device size
QSize size = getDeviceSize();
renderArgs._viewport = glm::ivec4(0, 0, size.width(), size.height());
_applicationOverlay.renderOverlay(&renderArgs);
}
void Application::updateCamera(RenderArgs& renderArgs) {
PROFILE_RANGE(render, "/updateCamera");
PerformanceTimer perfTimer("CameraUpdates");

glm::vec3 boomOffset;
{
PROFILE_RANGE(render, "/updateCamera");
{
PerformanceTimer perfTimer("CameraUpdates");
auto myAvatar = getMyAvatar();
boomOffset = myAvatar->getModelScale() * myAvatar->getBoomLength() * -IDENTITY_FORWARD;

auto myAvatar = getMyAvatar();
boomOffset = myAvatar->getModelScale() * myAvatar->getBoomLength() * -IDENTITY_FORWARD;
// The render mode is default or mirror if the camera is in mirror mode, assigned further below
renderArgs._renderMode = RenderArgs::DEFAULT_RENDER_MODE;

// The render mode is default or mirror if the camera is in mirror mode, assigned further below
renderArgs._renderMode = RenderArgs::DEFAULT_RENDER_MODE;

// Always use the default eye position, not the actual head eye position.
// Using the latter will cause the camera to wobble with idle animations,
// or with changes from the face tracker
if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
if (isHMDMode()) {
mat4 camMat = myAvatar->getSensorToWorldMatrix() * myAvatar->getHMDSensorMatrix();
_myCamera.setPosition(extractTranslation(camMat));
_myCamera.setOrientation(glmExtractRotation(camMat));
} else {
_myCamera.setPosition(myAvatar->getDefaultEyePosition());
_myCamera.setOrientation(myAvatar->getMyHead()->getHeadOrientation());
}
} else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
if (isHMDMode()) {
auto hmdWorldMat = myAvatar->getSensorToWorldMatrix() * myAvatar->getHMDSensorMatrix();
_myCamera.setOrientation(glm::normalize(glmExtractRotation(hmdWorldMat)));
_myCamera.setPosition(extractTranslation(hmdWorldMat) +
myAvatar->getOrientation() * boomOffset);
} else {
_myCamera.setOrientation(myAvatar->getHead()->getOrientation());
if (Menu::getInstance()->isOptionChecked(MenuOption::CenterPlayerInView)) {
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ _myCamera.getOrientation() * boomOffset);
} else {
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ myAvatar->getOrientation() * boomOffset);
}
}
} else if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
if (isHMDMode()) {
auto mirrorBodyOrientation = myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f));

glm::quat hmdRotation = extractRotation(myAvatar->getHMDSensorMatrix());
// Mirror HMD yaw and roll
glm::vec3 mirrorHmdEulers = glm::eulerAngles(hmdRotation);
mirrorHmdEulers.y = -mirrorHmdEulers.y;
mirrorHmdEulers.z = -mirrorHmdEulers.z;
glm::quat mirrorHmdRotation = glm::quat(mirrorHmdEulers);

glm::quat worldMirrorRotation = mirrorBodyOrientation * mirrorHmdRotation;

_myCamera.setOrientation(worldMirrorRotation);

glm::vec3 hmdOffset = extractTranslation(myAvatar->getHMDSensorMatrix());
// Mirror HMD lateral offsets
hmdOffset.x = -hmdOffset.x;

_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ glm::vec3(0, _raiseMirror * myAvatar->getModelScale(), 0)
+ mirrorBodyOrientation * glm::vec3(0.0f, 0.0f, 1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror
+ mirrorBodyOrientation * hmdOffset);
} else {
_myCamera.setOrientation(myAvatar->getOrientation()
* glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ glm::vec3(0, _raiseMirror * myAvatar->getModelScale(), 0)
+ (myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, _rotateMirror, 0.0f))) *
glm::vec3(0.0f, 0.0f, -1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
}
renderArgs._renderMode = RenderArgs::MIRROR_RENDER_MODE;
} else if (_myCamera.getMode() == CAMERA_MODE_ENTITY) {
EntityItemPointer cameraEntity = _myCamera.getCameraEntityPointer();
if (cameraEntity != nullptr) {
if (isHMDMode()) {
glm::quat hmdRotation = extractRotation(myAvatar->getHMDSensorMatrix());
_myCamera.setOrientation(cameraEntity->getRotation() * hmdRotation);
glm::vec3 hmdOffset = extractTranslation(myAvatar->getHMDSensorMatrix());
_myCamera.setPosition(cameraEntity->getPosition() + (hmdRotation * hmdOffset));
} else {
_myCamera.setOrientation(cameraEntity->getRotation());
_myCamera.setPosition(cameraEntity->getPosition());
}
}
// Always use the default eye position, not the actual head eye position.
// Using the latter will cause the camera to wobble with idle animations,
// or with changes from the face tracker
if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
if (isHMDMode()) {
mat4 camMat = myAvatar->getSensorToWorldMatrix() * myAvatar->getHMDSensorMatrix();
_myCamera.setPosition(extractTranslation(camMat));
_myCamera.setOrientation(glmExtractRotation(camMat));
}
else {
_myCamera.setPosition(myAvatar->getDefaultEyePosition());
_myCamera.setOrientation(myAvatar->getMyHead()->getHeadOrientation());
}
}
else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
if (isHMDMode()) {
auto hmdWorldMat = myAvatar->getSensorToWorldMatrix() * myAvatar->getHMDSensorMatrix();
_myCamera.setOrientation(glm::normalize(glmExtractRotation(hmdWorldMat)));
_myCamera.setPosition(extractTranslation(hmdWorldMat) +
myAvatar->getOrientation() * boomOffset);
}
else {
_myCamera.setOrientation(myAvatar->getHead()->getOrientation());
if (Menu::getInstance()->isOptionChecked(MenuOption::CenterPlayerInView)) {
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ _myCamera.getOrientation() * boomOffset);
}
// Update camera position
if (!isHMDMode()) {
_myCamera.update();
else {
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ myAvatar->getOrientation() * boomOffset);
}
}
}
else if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
if (isHMDMode()) {
auto mirrorBodyOrientation = myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f));

{
PROFILE_RANGE(render, "/updateCompositor");
getApplicationCompositor().setFrameInfo(_renderFrameCount, _myCamera.getTransform(), getMyAvatar()->getSensorToWorldMatrix());
}
glm::quat hmdRotation = extractRotation(myAvatar->getHMDSensorMatrix());
// Mirror HMD yaw and roll
glm::vec3 mirrorHmdEulers = glm::eulerAngles(hmdRotation);
mirrorHmdEulers.y = -mirrorHmdEulers.y;
mirrorHmdEulers.z = -mirrorHmdEulers.z;
glm::quat mirrorHmdRotation = glm::quat(mirrorHmdEulers);

gpu::FramebufferPointer finalFramebuffer;
QSize finalFramebufferSize;
{
PROFILE_RANGE(render, "/getOutputFramebuffer");
// Primary rendering pass
auto framebufferCache = DependencyManager::get<FramebufferCache>();
finalFramebufferSize = framebufferCache->getFrameBufferSize();
// Final framebuffer that will be handed to the display-plugin
finalFramebuffer = framebufferCache->getFramebuffer();
}
glm::quat worldMirrorRotation = mirrorBodyOrientation * mirrorHmdRotation;

auto hmdInterface = DependencyManager::get<HMDScriptingInterface>();
float ipdScale = hmdInterface->getIPDScale();
_myCamera.setOrientation(worldMirrorRotation);

// scale IPD by sensorToWorldScale, to make the world seem larger or smaller accordingly.
ipdScale *= sensorToWorldScale;
glm::vec3 hmdOffset = extractTranslation(myAvatar->getHMDSensorMatrix());
// Mirror HMD lateral offsets
hmdOffset.x = -hmdOffset.x;

{
PROFILE_RANGE(render, "/mainRender");
PerformanceTimer perfTimer("mainRender");
renderArgs._boomOffset = boomOffset;
// FIXME is this ever going to be different from the size previously set in the render args
// in the overlay render?
// Viewport is assigned to the size of the framebuffer
renderArgs._viewport = ivec4(0, 0, finalFramebufferSize.width(), finalFramebufferSize.height());
auto baseProjection = renderArgs.getViewFrustum().getProjection();
if (displayPlugin->isStereo()) {
// Stereo modes will typically have a larger projection matrix overall,
// so we ask for the 'mono' projection matrix, which for stereo and HMD
// plugins will imply the combined projection for both eyes.
//
// This is properly implemented for the Oculus plugins, but for OpenVR
// and Stereo displays I'm not sure how to get / calculate it, so we're
// just relying on the left FOV in each case and hoping that the
// overall culling margin of error doesn't cause popping in the
// right eye. There are FIXMEs in the relevant plugins
_myCamera.setProjection(displayPlugin->getCullingProjection(baseProjection));
renderArgs._context->enableStereo(true);
mat4 eyeOffsets[2];
mat4 eyeProjections[2];

// FIXME we probably don't need to set the projection matrix every frame,
// only when the display plugin changes (or in non-HMD modes when the user
// changes the FOV manually, which right now I don't think they can.
for_each_eye([&](Eye eye) {
// For providing the stereo eye views, the HMD head pose has already been
// applied to the avatar, so we need to get the difference between the head
// pose applied to the avatar and the per eye pose, and use THAT as
// the per-eye stereo matrix adjustment.
mat4 eyeToHead = displayPlugin->getEyeToHeadTransform(eye);
// Grab the translation
vec3 eyeOffset = glm::vec3(eyeToHead[3]);
// Apply IPD scaling
mat4 eyeOffsetTransform = glm::translate(mat4(), eyeOffset * -1.0f * ipdScale);
eyeOffsets[eye] = eyeOffsetTransform;
eyeProjections[eye] = displayPlugin->getEyeProjection(eye, baseProjection);
});
renderArgs._context->setStereoProjections(eyeProjections);
renderArgs._context->setStereoViews(eyeOffsets);

// Configure the type of display / stereo
renderArgs._displayMode = (isHMDMode() ? RenderArgs::STEREO_HMD : RenderArgs::STEREO_MONITOR);
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ glm::vec3(0, _raiseMirror * myAvatar->getModelScale(), 0)
+ mirrorBodyOrientation * glm::vec3(0.0f, 0.0f, 1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror
+ mirrorBodyOrientation * hmdOffset);
}
renderArgs._blitFramebuffer = finalFramebuffer;
displaySide(&renderArgs, _myCamera);
else {
_myCamera.setOrientation(myAvatar->getOrientation()
* glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ glm::vec3(0, _raiseMirror * myAvatar->getModelScale(), 0)
+ (myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, _rotateMirror, 0.0f))) *
glm::vec3(0.0f, 0.0f, -1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
}
renderArgs._renderMode = RenderArgs::MIRROR_RENDER_MODE;
}
else if (_myCamera.getMode() == CAMERA_MODE_ENTITY) {
EntityItemPointer cameraEntity = _myCamera.getCameraEntityPointer();
if (cameraEntity != nullptr) {
if (isHMDMode()) {
glm::quat hmdRotation = extractRotation(myAvatar->getHMDSensorMatrix());
_myCamera.setOrientation(cameraEntity->getRotation() * hmdRotation);
glm::vec3 hmdOffset = extractTranslation(myAvatar->getHMDSensorMatrix());
_myCamera.setPosition(cameraEntity->getPosition() + (hmdRotation * hmdOffset));
}
else {
_myCamera.setOrientation(cameraEntity->getRotation());
_myCamera.setPosition(cameraEntity->getPosition());
}
}
}
// Update camera position
if (!isHMDMode()) {
_myCamera.update();
}

gpu::Batch postCompositeBatch;
{
PROFILE_RANGE(render, "/postComposite");
PerformanceTimer perfTimer("postComposite");
renderArgs._batch = &postCompositeBatch;
renderArgs._batch->setViewportTransform(ivec4(0, 0, finalFramebufferSize.width(), finalFramebufferSize.height()));
renderArgs._batch->setViewTransform(renderArgs.getViewFrustum().getView());
_overlays.render3DHUDOverlays(&renderArgs);
}

auto frame = _gpuContext->endFrame();
frame->frameIndex = _renderFrameCount;
frame->framebuffer = finalFramebuffer;
frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer){
DependencyManager::get<FramebufferCache>()->releaseFramebuffer(framebuffer);
};
frame->overlay = _applicationOverlay.getOverlayTexture();
frame->postCompositeBatch = postCompositeBatch;
// deliver final scene rendering commands to the display plugin
{
PROFILE_RANGE(render, "/pluginOutput");
PerformanceTimer perfTimer("pluginOutput");
_renderLoopCounter.increment();
displayPlugin->submitFrame(frame);
}

// Reset the framebuffer and stereo state
renderArgs._blitFramebuffer.reset();
renderArgs._context->enableStereo(false);

{
Stats::getInstance()->setRenderDetails(renderArgs._details);
}

uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
_frameTimingsScriptingInterface.addValue(lastPaintDuration);
renderArgs._cameraMode = (int8_t)_myCamera.getMode();
}

void Application::runTests() {
@@ -5225,6 +5027,7 @@ void Application::update(float deltaTime) {
avatarManager->postUpdate(deltaTime, getMain3DScene());

{
PROFILE_RANGE_EX(app, "PreRenderLambdas", 0xffff0000, (uint64_t)0);
@@ -5235,9 +5038,123 @@ void Application::update(float deltaTime) {
_postUpdateLambdas.clear();
}

editRenderArgs([this](AppRenderArgs& appRenderArgs) {
appRenderArgs._headPose = getHMDSensorPose();

auto myAvatar = getMyAvatar();

// update the avatar with a fresh HMD pose
{
PROFILE_RANGE(render, "/updateAvatar");
myAvatar->updateFromHMDSensorMatrix(appRenderArgs._headPose);
}

auto lodManager = DependencyManager::get<LODManager>();

float sensorToWorldScale = getMyAvatar()->getSensorToWorldScale();
appRenderArgs._sensorToWorldScale = sensorToWorldScale;
{
PROFILE_RANGE(render, "/buildFrustrumAndArgs");
{
QMutexLocker viewLocker(&_viewMutex);
// adjust near clip plane to account for sensor scaling.
auto adjustedProjection = glm::perspective(_viewFrustum.getFieldOfView(),
_viewFrustum.getAspectRatio(),
DEFAULT_NEAR_CLIP * sensorToWorldScale,
_viewFrustum.getFarClip());
_viewFrustum.setProjection(adjustedProjection);
_viewFrustum.calculate();
}
appRenderArgs._renderArgs = RenderArgs(_gpuContext, lodManager->getOctreeSizeScale(),
lodManager->getBoundaryLevelAdjust(), RenderArgs::DEFAULT_RENDER_MODE,
RenderArgs::MONO, RenderArgs::RENDER_DEBUG_NONE);
appRenderArgs._renderArgs._scene = getMain3DScene();

{
QMutexLocker viewLocker(&_viewMutex);
appRenderArgs._renderArgs.setViewFrustum(_viewFrustum);
}
}
{
PROFILE_RANGE(render, "/resizeGL");
PerformanceWarning::setSuppressShortTimings(Menu::getInstance()->isOptionChecked(MenuOption::SuppressShortTimings));
bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
PerformanceWarning warn(showWarnings, "Application::paintGL()");
resizeGL();
}

this->updateCamera(appRenderArgs._renderArgs);
appRenderArgs._isStereo = false;

{
auto hmdInterface = DependencyManager::get<HMDScriptingInterface>();
float ipdScale = hmdInterface->getIPDScale();

// scale IPD by sensorToWorldScale, to make the world seem larger or smaller accordingly.
ipdScale *= sensorToWorldScale;

auto baseProjection = appRenderArgs._renderArgs.getViewFrustum().getProjection();
if (getActiveDisplayPlugin()->isStereo()) {
// Stereo modes will typically have a larger projection matrix overall,
// so we ask for the 'mono' projection matrix, which for stereo and HMD
// plugins will imply the combined projection for both eyes.
//
// This is properly implemented for the Oculus plugins, but for OpenVR
// and Stereo displays I'm not sure how to get / calculate it, so we're
// just relying on the left FOV in each case and hoping that the
// overall culling margin of error doesn't cause popping in the
// right eye. There are FIXMEs in the relevant plugins
_myCamera.setProjection(getActiveDisplayPlugin()->getCullingProjection(baseProjection));
appRenderArgs._isStereo = true;

auto& eyeOffsets = appRenderArgs._eyeOffsets;
auto& eyeProjections = appRenderArgs._eyeProjections;

// FIXME we probably don't need to set the projection matrix every frame,
// only when the display plugin changes (or in non-HMD modes when the user
// changes the FOV manually, which right now I don't think they can.
for_each_eye([&](Eye eye) {
// For providing the stereo eye views, the HMD head pose has already been
// applied to the avatar, so we need to get the difference between the head
// pose applied to the avatar and the per eye pose, and use THAT as
// the per-eye stereo matrix adjustment.
mat4 eyeToHead = getActiveDisplayPlugin()->getEyeToHeadTransform(eye);
// Grab the translation
vec3 eyeOffset = glm::vec3(eyeToHead[3]);
// Apply IPD scaling
mat4 eyeOffsetTransform = glm::translate(mat4(), eyeOffset * -1.0f * ipdScale);
eyeOffsets[eye] = eyeOffsetTransform;
eyeProjections[eye] = getActiveDisplayPlugin()->getEyeProjection(eye, baseProjection);
});

// Configure the type of display / stereo
appRenderArgs._renderArgs._displayMode = (isHMDMode() ? RenderArgs::STEREO_HMD : RenderArgs::STEREO_MONITOR);
}
}

// HACK
// load the view frustum
// FIXME: This preDisplayRender call is temporary until we create a separate render::scene for the mirror rendering.
// Then we can move this logic into the Avatar::simulate call.
myAvatar->preDisplaySide(&appRenderArgs._renderArgs);

{
QMutexLocker viewLocker(&_viewMutex);
_myCamera.loadViewFrustum(_displayViewFrustum);
}

{
QMutexLocker viewLocker(&_viewMutex);
appRenderArgs._renderArgs.setViewFrustum(_displayViewFrustum);
}
});

AnimDebugDraw::getInstance().update();

DependencyManager::get<LimitlessVoiceRecognitionScriptingInterface>()->update();

// Game loop is done, mark the end of the frame for the scene transactions and the render loop to take over
getMain3DScene()->enqueueFrame();
}

void Application::sendAvatarViewFrustum() {
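Note: the per-eye data cached into AppRenderArgs above reduces to one small piece of math: the eye offset is the translation column of the plugin's eye-to-head transform, negated and scaled by ipdScale (already multiplied by sensorToWorldScale). A self-contained sketch of just that computation, with illustrative inputs:

    // Sketch of the per-eye offset math cached above (inputs are illustrative).
    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_transform.hpp>

    glm::mat4 computeEyeOffset(const glm::mat4& eyeToHead, float ipdScale) {
        glm::vec3 eyeOffset = glm::vec3(eyeToHead[3]);                     // translation column of eye-to-head
        return glm::translate(glm::mat4(), eyeOffset * -1.0f * ipdScale);  // move the view opposite the eye
    }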
@@ -5296,7 +5213,6 @@ int Application::sendNackPackets() {
}
});

return packetsSent;
}
@@ -5527,116 +5443,6 @@ void Application::copyDisplayViewFrustum(ViewFrustum& viewOut) const {
viewOut = _displayViewFrustum;
}

void Application::copyShadowViewFrustum(ViewFrustum& viewOut) const {
QMutexLocker viewLocker(&_viewMutex);
viewOut = _shadowViewFrustum;
}

// WorldBox Render Data & rendering functions

class WorldBoxRenderData {
public:
typedef render::Payload<WorldBoxRenderData> Payload;
typedef Payload::DataPointer Pointer;

int _val = 0;
static render::ItemID _item; // unique WorldBoxRenderData
};

render::ItemID WorldBoxRenderData::_item { render::Item::INVALID_ITEM_ID };

namespace render {
template <> const ItemKey payloadGetKey(const WorldBoxRenderData::Pointer& stuff) { return ItemKey::Builder::opaqueShape(); }
template <> const Item::Bound payloadGetBound(const WorldBoxRenderData::Pointer& stuff) { return Item::Bound(); }
template <> void payloadRender(const WorldBoxRenderData::Pointer& stuff, RenderArgs* args) {
if (Menu::getInstance()->isOptionChecked(MenuOption::WorldAxes)) {
PerformanceTimer perfTimer("worldBox");

auto& batch = *args->_batch;
DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch);
renderWorldBox(args, batch);
}
}
}

void Application::displaySide(RenderArgs* renderArgs, Camera& theCamera, bool selfAvatarOnly) {

// FIXME: This preDisplayRender call is temporary until we create a separate render::scene for the mirror rendering.
// Then we can move this logic into the Avatar::simulate call.
auto myAvatar = getMyAvatar();
myAvatar->preDisplaySide(renderArgs);

PROFILE_RANGE(render, __FUNCTION__);
PerformanceTimer perfTimer("display");
PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings), "Application::displaySide()");

// load the view frustum
{
QMutexLocker viewLocker(&_viewMutex);
theCamera.loadViewFrustum(_displayViewFrustum);
}

// TODO fix shadows and make them use the GPU library

// The pending changes collecting the changes here
render::Transaction transaction;

// Assuming nothing gets rendered through that
if (!selfAvatarOnly) {
if (DependencyManager::get<SceneScriptingInterface>()->shouldRenderEntities()) {
// render models...
PerformanceTimer perfTimer("entities");
PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings),
"Application::displaySide() ... entities...");

RenderArgs::DebugFlags renderDebugFlags = RenderArgs::RENDER_DEBUG_NONE;

if (Menu::getInstance()->isOptionChecked(MenuOption::PhysicsShowHulls)) {
renderDebugFlags = static_cast<RenderArgs::DebugFlags>(renderDebugFlags |
static_cast<int>(RenderArgs::RENDER_DEBUG_HULLS));
}
renderArgs->_debugFlags = renderDebugFlags;
//ViveControllerManager::getInstance().updateRendering(renderArgs, _main3DScene, transaction);
}
}

// FIXME: Move this out of here!, WorldBox should be driven by the entity content just like the other entities
// Make sure the WorldBox is in the scene
if (!render::Item::isValidID(WorldBoxRenderData::_item)) {
auto worldBoxRenderData = make_shared<WorldBoxRenderData>();
auto worldBoxRenderPayload = make_shared<WorldBoxRenderData::Payload>(worldBoxRenderData);

WorldBoxRenderData::_item = _main3DScene->allocateID();

transaction.resetItem(WorldBoxRenderData::_item, worldBoxRenderPayload);
} else {
transaction.updateItem<WorldBoxRenderData>(WorldBoxRenderData::_item,
[](WorldBoxRenderData& payload) {
payload._val++;
});
}

{
_main3DScene->enqueueTransaction(transaction);
}

// For now every frame pass the renderContext
{
PerformanceTimer perfTimer("EngineRun");

{
QMutexLocker viewLocker(&_viewMutex);
renderArgs->setViewFrustum(_displayViewFrustum);
}
renderArgs->_cameraMode = (int8_t)theCamera.getMode(); // HACK
renderArgs->_scene = getMain3DScene();
_renderEngine->getRenderContext()->args = renderArgs;

// Before the deferred pass, let's try to use the render engine
_renderEngine->run();
}
}

void Application::resetSensors(bool andReload) {
DependencyManager::get<DdeFaceTracker>()->reset();
DependencyManager::get<EyeTracker>()->reset();
@@ -7677,5 +7483,4 @@ void Application::setAvatarOverrideUrl(const QUrl& url, bool save) {
_avatarOverrideUrl = url;
_saveAvatarOverrideUrl = save;
}

#include "Application.moc"
@@ -76,6 +76,7 @@
#include <procedural/ProceduralSkybox.h>
#include <model/Skybox.h>
#include <ModelScriptingInterface.h>
#include "FrameTimingsScriptingInterface.h"

#include "Sound.h"
@@ -147,6 +148,8 @@ public:
void initializeGL();
void initializeUi();

void updateCamera(RenderArgs& renderArgs);
void paintGL();
void resizeGL();
@@ -173,7 +176,6 @@ public:
// which might be different from the viewFrustum, i.e. shadowmap
// passes, mirror window passes, etc
void copyDisplayViewFrustum(ViewFrustum& viewOut) const;
void copyShadowViewFrustum(ViewFrustum& viewOut) const override;
const OctreePacketProcessor& getOctreePacketProcessor() const { return _octreeProcessor; }
QSharedPointer<EntityTreeRenderer> getEntities() const { return DependencyManager::get<EntityTreeRenderer>(); }
QUndoStack* getUndoStack() { return &_undoStack; }
@@ -467,8 +469,6 @@ private:
void queryOctree(NodeType_t serverType, PacketType packetType, NodeToJurisdictionMap& jurisdictions);

void renderRearViewMirror(RenderArgs* renderArgs, const QRect& region, bool isZoomed);

int sendNackPackets();
void sendAvatarViewFrustum();
@@ -478,7 +478,7 @@ private:
void initializeAcceptedFiles();

void displaySide(RenderArgs* renderArgs, Camera& whichCamera, bool selfAvatarOnly = false);
void runRenderFrame(RenderArgs* renderArgs/*, Camera& whichCamera, bool selfAvatarOnly = false*/);

bool importJSONFromURL(const QString& urlString);
bool importSVOFromURL(const QString& urlString);
@@ -535,6 +535,8 @@ private:
RateCounter<500> _renderLoopCounter;
RateCounter<500> _gameLoopCounter;

FrameTimingsScriptingInterface _frameTimingsScriptingInterface;

QTimer _minimizedWindowTimer;
QElapsedTimer _timerStart;
QElapsedTimer _lastTimeUpdated;
@@ -550,7 +552,6 @@ private:
ViewFrustum _viewFrustum; // current state of view frustum, perspective, orientation, etc.
ViewFrustum _lastQueriedViewFrustum; /// last view frustum used to query octree servers (voxels)
ViewFrustum _displayViewFrustum;
ViewFrustum _shadowViewFrustum;
quint64 _lastQueriedTime;

OctreeQuery _octreeQuery; // NodeData derived class for querying octree cells from octree servers
@@ -621,6 +622,24 @@ private:
render::EnginePointer _renderEngine{ new render::Engine() };
gpu::ContextPointer _gpuContext; // initialized during window creation

mutable QMutex _renderArgsMutex{ QMutex::Recursive };
struct AppRenderArgs {
render::Args _renderArgs;
glm::mat4 _eyeToWorld;
glm::mat4 _eyeOffsets[2];
glm::mat4 _eyeProjections[2];
glm::mat4 _headPose;
glm::mat4 _sensorToWorld;
float _sensorToWorldScale { 1.0f };
bool _isStereo{ false };
};
AppRenderArgs _appRenderArgs;

using RenderArgsEditor = std::function <void (AppRenderArgs&)>;
void editRenderArgs(RenderArgsEditor editor);

Overlays _overlays;
ApplicationOverlay _applicationOverlay;
OverlayConductor _overlayConductor;
interface/src/Application_render.cpp (new file, 240 lines)
@@ -0,0 +1,240 @@
//
//  Application_render.cpp
//  interface/src
//
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "Application.h"
#include <MainWindow.h>

#include <display-plugins/CompositorHelper.h>
#include <FramebufferCache.h>
#include "ui/Stats.h"
#include <SceneScriptingInterface.h>
#include "Util.h"

// Statically provided display and input plugins
extern DisplayPluginList getDisplayPlugins();

void Application::editRenderArgs(RenderArgsEditor editor) {
QMutexLocker renderLocker(&_renderArgsMutex);
editor(_appRenderArgs);
}

void Application::paintGL() {
// Some plugins process message events, allowing paintGL to be called reentrantly.
if (_aboutToQuit || _window->isMinimized()) {
return;
}

_renderFrameCount++;
_lastTimeRendered.start();

auto lastPaintBegin = usecTimestampNow();
PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
PerformanceTimer perfTimer("paintGL");

if (nullptr == _displayPlugin) {
return;
}

DisplayPluginPointer displayPlugin;
{
PROFILE_RANGE(render, "/getActiveDisplayPlugin");
displayPlugin = getActiveDisplayPlugin();
}

{
PROFILE_RANGE(render, "/pluginBeginFrameRender");
// If a display plugin loses its underlying support, it
// needs to be able to signal us to not use it
if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
updateDisplayMode();
return;
}
}

RenderArgs renderArgs;
glm::mat4 HMDSensorPose;
glm::mat4 eyeToWorld;
glm::mat4 sensorToWorld;

bool isStereo;
glm::mat4 stereoEyeOffsets[2];
glm::mat4 stereoEyeProjections[2];

{
QMutexLocker viewLocker(&_renderArgsMutex);
renderArgs = _appRenderArgs._renderArgs;
HMDSensorPose = _appRenderArgs._headPose;
eyeToWorld = _appRenderArgs._eyeToWorld;
sensorToWorld = _appRenderArgs._sensorToWorld;
isStereo = _appRenderArgs._isStereo;
for_each_eye([&](Eye eye) {
stereoEyeOffsets[eye] = _appRenderArgs._eyeOffsets[eye];
stereoEyeProjections[eye] = _appRenderArgs._eyeProjections[eye];
});
}

{
PROFILE_RANGE(render, "/gpuContextReset");
_gpuContext->beginFrame(HMDSensorPose);
// Reset the gpu::Context Stages
// Back to the default framebuffer;
gpu::doInBatch(_gpuContext, [&](gpu::Batch& batch) {
batch.resetStages();
});
}

{
PROFILE_RANGE(render, "/renderOverlay");
PerformanceTimer perfTimer("renderOverlay");
// NOTE: There is no batch associated with this renderArgs
// the ApplicationOverlay class assumes its viewport is set up to be the device size
QSize size = getDeviceSize();
renderArgs._viewport = glm::ivec4(0, 0, size.width(), size.height());
_applicationOverlay.renderOverlay(&renderArgs);
}

{
PROFILE_RANGE(render, "/updateCompositor");
getApplicationCompositor().setFrameInfo(_renderFrameCount, eyeToWorld, sensorToWorld);
}

gpu::FramebufferPointer finalFramebuffer;
QSize finalFramebufferSize;
{
PROFILE_RANGE(render, "/getOutputFramebuffer");
// Primary rendering pass
auto framebufferCache = DependencyManager::get<FramebufferCache>();
finalFramebufferSize = framebufferCache->getFrameBufferSize();
// Final framebuffer that will be handed to the display-plugin
finalFramebuffer = framebufferCache->getFramebuffer();
}

{
if (isStereo) {
renderArgs._context->enableStereo(true);
renderArgs._context->setStereoProjections(stereoEyeProjections);
renderArgs._context->setStereoViews(stereoEyeOffsets);
}

renderArgs._blitFramebuffer = finalFramebuffer;
runRenderFrame(&renderArgs);
}

gpu::Batch postCompositeBatch;
{
PROFILE_RANGE(render, "/postComposite");
PerformanceTimer perfTimer("postComposite");
renderArgs._batch = &postCompositeBatch;
renderArgs._batch->setViewportTransform(ivec4(0, 0, finalFramebufferSize.width(), finalFramebufferSize.height()));
renderArgs._batch->setViewTransform(renderArgs.getViewFrustum().getView());
_overlays.render3DHUDOverlays(&renderArgs);
}

auto frame = _gpuContext->endFrame();
frame->frameIndex = _renderFrameCount;
frame->framebuffer = finalFramebuffer;
frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer) {
DependencyManager::get<FramebufferCache>()->releaseFramebuffer(framebuffer);
};
frame->overlay = _applicationOverlay.getOverlayTexture();
frame->postCompositeBatch = postCompositeBatch;
// deliver final scene rendering commands to the display plugin
{
PROFILE_RANGE(render, "/pluginOutput");
PerformanceTimer perfTimer("pluginOutput");
_renderLoopCounter.increment();
displayPlugin->submitFrame(frame);
}

// Reset the framebuffer and stereo state
renderArgs._blitFramebuffer.reset();
renderArgs._context->enableStereo(false);

{
Stats::getInstance()->setRenderDetails(renderArgs._details);
}

uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
_frameTimingsScriptingInterface.addValue(lastPaintDuration);
}

// WorldBox Render Data & rendering functions

class WorldBoxRenderData {
public:
typedef render::Payload<WorldBoxRenderData> Payload;
typedef Payload::DataPointer Pointer;

int _val = 0;
static render::ItemID _item; // unique WorldBoxRenderData
};

render::ItemID WorldBoxRenderData::_item{ render::Item::INVALID_ITEM_ID };

namespace render {
template <> const ItemKey payloadGetKey(const WorldBoxRenderData::Pointer& stuff) { return ItemKey::Builder::opaqueShape(); }
template <> const Item::Bound payloadGetBound(const WorldBoxRenderData::Pointer& stuff) { return Item::Bound(); }
template <> void payloadRender(const WorldBoxRenderData::Pointer& stuff, RenderArgs* args) {
if (Menu::getInstance()->isOptionChecked(MenuOption::WorldAxes)) {
PerformanceTimer perfTimer("worldBox");

auto& batch = *args->_batch;
DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch);
renderWorldBox(args, batch);
}
}
}

void Application::runRenderFrame(RenderArgs* renderArgs) {
PROFILE_RANGE(render, __FUNCTION__);
PerformanceTimer perfTimer("display");
PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings), "Application::runRenderFrame()");

// The pending changes collecting the changes here
render::Transaction transaction;

if (DependencyManager::get<SceneScriptingInterface>()->shouldRenderEntities()) {
// render models...
PerformanceTimer perfTimer("entities");
PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings),
"Application::runRenderFrame() ... entities...");

RenderArgs::DebugFlags renderDebugFlags = RenderArgs::RENDER_DEBUG_NONE;

if (Menu::getInstance()->isOptionChecked(MenuOption::PhysicsShowHulls)) {
renderDebugFlags = static_cast<RenderArgs::DebugFlags>(renderDebugFlags |
static_cast<int>(RenderArgs::RENDER_DEBUG_HULLS));
}
renderArgs->_debugFlags = renderDebugFlags;
}

// Make sure the WorldBox is in the scene
// For the record, this one RenderItem is the first one we created and added to the scene.
// We could move that code elsewhere but you know...
if (!render::Item::isValidID(WorldBoxRenderData::_item)) {
auto worldBoxRenderData = std::make_shared<WorldBoxRenderData>();
auto worldBoxRenderPayload = std::make_shared<WorldBoxRenderData::Payload>(worldBoxRenderData);

WorldBoxRenderData::_item = _main3DScene->allocateID();

transaction.resetItem(WorldBoxRenderData::_item, worldBoxRenderPayload);
_main3DScene->enqueueTransaction(transaction);
}

{
PerformanceTimer perfTimer("EngineRun");
_renderEngine->getRenderContext()->args = renderArgs;
_renderEngine->run();
}
}
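Note: the frame handed to the display plugin above carries a framebufferRecycler so that whoever finishes with the frame can return the blit framebuffer to the FramebufferCache. A hedged sketch of the consumer side; the gpu::FramePointer name is assumed from the auto returned by _gpuContext->endFrame() and is not spelled out in this diff:

    // Hypothetical consumer: once the display plugin has presented the frame,
    // hand the framebuffer back through the recycler installed in paintGL().
    void releaseFrame(const gpu::FramePointer& frame) {
        if (frame && frame->framebufferRecycler) {
            frame->framebufferRecycler(frame->framebuffer);  // returns it to the FramebufferCache
        }
    }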
@@ -265,7 +265,6 @@ void Base3DOverlay::parentDeleted() {
}

void Base3DOverlay::update(float duration) {

// In Base3DOverlay, if its location or bound changed, the renderTransformDirty flag is true.
// Then the correct transform used for rendering is computed in the update transaction and assigned.
if (_renderTransformDirty) {
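Note: the comment above describes the pattern this PR leans on elsewhere too: simulation only marks the transform dirty, and the transform actually used for rendering is recomputed inside a scene update transaction. An illustrative sketch of that shape; OverlayRenderItem and setRenderTransform are hypothetical stand-ins, not the actual Base3DOverlay payload:

    // Illustrative shape of "recompute the render transform in the update transaction".
    void pushRenderTransform(const render::ScenePointer& scene, render::ItemID itemID, const Transform& newTransform) {
        render::Transaction transaction;
        transaction.updateItem<OverlayRenderItem>(itemID, [newTransform](OverlayRenderItem& item) {
            item.setRenderTransform(newTransform);  // applied when the scene processes this frame
        });
        scene->enqueueTransaction(transaction);
    }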
@@ -31,9 +31,6 @@ public:
/// copies the current view frustum for rendering the view state
virtual void copyCurrentViewFrustum(ViewFrustum& viewOut) const = 0;

/// copies the shadow view frustum for rendering the view state
virtual void copyShadowViewFrustum(ViewFrustum& viewOut) const = 0;

virtual QThread* getMainThread() = 0;

virtual PickRay computePickRay(float x, float y) const = 0;
@@ -144,6 +144,7 @@ void AnimDebugDraw::shutdown() {
if (scene && _itemID) {
render::Transaction transaction;
transaction.removeItem(_itemID);
render::Item::clearID(_itemID);
scene->enqueueTransaction(transaction);
}
}
@@ -316,7 +317,9 @@ void AnimDebugDraw::update() {
if (!scene) {
return;
}

if (!render::Item::isValidID(_itemID)) {
return;
}
render::Transaction transaction;
transaction.updateItem<AnimDebugDrawData>(_itemID, [&](AnimDebugDrawData& data) {
@@ -241,7 +241,7 @@ void Model::updateRenderItems() {
if (model && model->isLoaded()) {
// Ensure the model geometry was not reset between frames
if (deleteGeometryCounter == model->_deleteGeometryCounter) {

const Model::MeshState& state = model->getMeshState(data._meshIndex);
Transform renderTransform = modelTransform;
if (state.clusterMatrices.size() == 1) {
@@ -15,9 +15,6 @@
#include "Logging.h"
#include "TransitionStage.h"

// Comment this to disable transitions (fades)
#define SCENE_ENABLE_TRANSITIONS

using namespace render;

void Transaction::resetItem(ItemID id, const PayloadPointer& payload) {
@@ -101,16 +98,46 @@ void consolidateTransaction(TransactionQueue& queue, Transaction& singleBatch) {
queue.pop();
};
}

void Scene::processTransactionQueue() {

uint32_t Scene::enqueueFrame() {
PROFILE_RANGE(render, __FUNCTION__);
Transaction consolidatedTransaction;

{
std::unique_lock<std::mutex> lock(_transactionQueueMutex);
consolidateTransaction(_transactionQueue, consolidatedTransaction);
}

uint32_t frameNumber = 0;
{
std::unique_lock<std::mutex> lock(_transactionFramesMutex);
_transactionFrames.push_back(consolidatedTransaction);
_transactionFrameNumber++;
frameNumber = _transactionFrameNumber;
}

return frameNumber;
}

void Scene::processTransactionQueue() {
PROFILE_RANGE(render, __FUNCTION__);

TransactionFrames queuedFrames;
{
// capture the queued frames and clear the queue
std::unique_lock<std::mutex> lock(_transactionFramesMutex);
queuedFrames = _transactionFrames;
_transactionFrames.clear();
}

// go through the queue of frames and process them
for (auto& frame : queuedFrames) {
processTransactionFrame(frame);
}
}

void Scene::processTransactionFrame(const Transaction& transaction) {
PROFILE_RANGE(render, __FUNCTION__);
{
std::unique_lock<std::mutex> lock(_itemsMutex);
// Here we should be able to check the value of last ItemID allocated
@@ -123,32 +150,31 @@ void Scene::processTransactionQueue() {
// capture anything coming from the transaction

// resets and potential NEW items
resetItems(consolidatedTransaction._resetItems);
resetItems(transaction._resetItems);

// Update the numItemsAtomic counter AFTER the reset changes went through
_numAllocatedItems.exchange(maxID);

// updates
updateItems(consolidatedTransaction._updatedItems);
updateItems(transaction._updatedItems);

// removes
removeItems(consolidatedTransaction._removedItems);
removeItems(transaction._removedItems);

#ifdef SCENE_ENABLE_TRANSITIONS
// add transitions
transitionItems(consolidatedTransaction._addedTransitions);
reApplyTransitions(consolidatedTransaction._reAppliedTransitions);
queryTransitionItems(consolidatedTransaction._queriedTransitions);
#endif
transitionItems(transaction._addedTransitions);
reApplyTransitions(transaction._reAppliedTransitions);
queryTransitionItems(transaction._queriedTransitions);

// Update the numItemsAtomic counter AFTER the pending changes went through
_numAllocatedItems.exchange(maxID);
}

if (consolidatedTransaction.touchTransactions()) {
if (transaction.touchTransactions()) {
std::unique_lock<std::mutex> lock(_selectionsMutex);

// resets and potential NEW items
resetSelections(consolidatedTransaction._resetSelections);
resetSelections(transaction._resetSelections);
}
}
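Note: the Scene now double-buffers edits per game-loop frame: producers keep calling enqueueTransaction() at any time, the game loop closes the batch with enqueueFrame(), and the consumer drains whole frames with processTransactionQueue(), which applies each one through processTransactionFrame(). A sketch of the calling pattern implied by the hunks above, with an illustrative scene pointer:

    // Producer side (game loop / main thread).
    void gameLoopTick(const render::ScenePointer& scene, const render::Transaction& edits) {
        scene->enqueueTransaction(edits);              // may happen many times per tick
        uint32_t frameNumber = scene->enqueueFrame();  // close this frame's batch of edits
        (void)frameNumber;                             // returned for tracking/debugging
    }

    // Consumer side (wherever the engine processes the scene).
    void consumeSceneFrames(const render::ScenePointer& scene) {
        scene->processTransactionQueue();              // apply every complete frame enqueued so far
    }

This matches Application::update() above, which now ends its game-loop frame with getMain3DScene()->enqueueFrame().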
@@ -117,6 +117,9 @@ public:
// Enqueue transaction to the scene
void enqueueTransaction(const Transaction& transaction);

// Enqueue end of frame transactions boundary
uint32_t enqueueFrame();

// Process the pending transactions queued
void processTransactionQueue();
@@ -162,6 +165,15 @@ protected:
std::mutex _transactionQueueMutex;
TransactionQueue _transactionQueue;

std::mutex _transactionFramesMutex;
using TransactionFrames = std::list<Transaction>;
TransactionFrames _transactionFrames;
uint32_t _transactionFrameNumber{ 0 };

// Process one transaction frame
void processTransactionFrame(const Transaction& transaction);

// The actual database
// database of items is protected for editing by a mutex
std::mutex _itemsMutex;
@@ -443,10 +443,6 @@ protected:
viewOut = _viewFrustum;
}

void copyShadowViewFrustum(ViewFrustum& viewOut) const override {
viewOut = _shadowViewFrustum;
}

QThread* getMainThread() override {
return QThread::currentThread();
}
@@ -1118,7 +1114,6 @@ private:
RenderThread _renderThread;
QWindowCamera _camera;
ViewFrustum _viewFrustum; // current state of view frustum, perspective, orientation, etc.
ViewFrustum _shadowViewFrustum; // current state of view frustum, perspective, orientation, etc.
model::SunSkyStage _sunSkyStage;
model::LightPointer _globalLight { std::make_shared<model::Light>() };
bool _ready { false };