Merge pull request #10739 from zfox23/spectatorCamera_mergeable_20170619
Spectator Camera - Mergeable Rendering Components

Commit ddce665613
27 changed files with 384 additions and 48 deletions
interface/src/Application.cpp

@@ -115,6 +115,7 @@
 #include <RenderDeferredTask.h>
 #include <RenderForwardTask.h>
 #include <RenderViewTask.h>
+#include <SecondaryCamera.h>
 #include <ResourceCache.h>
 #include <ResourceRequest.h>
 #include <SandboxUtils.h>

@@ -1873,6 +1874,7 @@ void Application::initializeGL() {
     render::CullFunctor cullFunctor = LODManager::shouldRender;
     static const QString RENDER_FORWARD = "HIFI_RENDER_FORWARD";
     bool isDeferred = !QProcessEnvironment::systemEnvironment().contains(RENDER_FORWARD);
+    _renderEngine->addJob<SecondaryCameraRenderTask>("SecondaryCameraFrame", cullFunctor);
     _renderEngine->addJob<RenderViewTask>("RenderMainView", cullFunctor, isDeferred);
     _renderEngine->load();
     _renderEngine->registerScene(_main3DScene);
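The forward/deferred choice in the hunk above is driven entirely by an environment variable. As a standalone illustration (a sketch assembled from the two lines shown, not new API), the same check looks like this:

#include <QProcessEnvironment>
#include <QString>

// Returns true when the default deferred path should be used; setting the
// HIFI_RENDER_FORWARD environment variable (to any value) before launch
// switches the main view to the forward renderer.
static bool useDeferredRenderer() {
    static const QString RENDER_FORWARD = "HIFI_RENDER_FORWARD";
    return !QProcessEnvironment::systemEnvironment().contains(RENDER_FORWARD);
}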
interface/src/SecondaryCamera.cpp (new file, 125 lines)
@@ -0,0 +1,125 @@
//
//  SecondaryCamera.cpp
//  interface/src
//
//  Created by Samuel Gateau, Howard Stearns, and Zach Fox on 2017-06-08.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "SecondaryCamera.h"
#include <TextureCache.h>
#include <gpu/Context.h>

using RenderArgsPointer = std::shared_ptr<RenderArgs>;

void MainRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, bool isDeferred) {

    task.addJob<RenderShadowTask>("RenderShadowTask", cullFunctor);
    const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor);
    assert(items.canCast<RenderFetchCullSortTask::Output>());
    if (!isDeferred) {
        task.addJob<RenderForwardTask>("Forward", items);
    } else {
        task.addJob<RenderDeferredTask>("RenderDeferredTask", items);
    }
}

void SecondaryCameraRenderTaskConfig::resetSize(int width, int height) { // FIXME: Add an arg here for "destinationFramebuffer"
    bool wasEnabled = isEnabled();
    setEnabled(false);
    auto textureCache = DependencyManager::get<TextureCache>();
    textureCache->resetSpectatorCameraFramebuffer(width, height); // FIXME: Call the correct reset function based on the "destinationFramebuffer" arg
    setEnabled(wasEnabled);
}

void SecondaryCameraRenderTaskConfig::resetSizeSpectatorCamera(int width, int height) { // Carefully adjust the framebuffer / texture.
    resetSize(width, height);
}

class BeginSecondaryCameraFrame { // Changes renderContext for our framebuffer and view.
    glm::vec3 _position{};
    glm::quat _orientation{};
    float _vFoV{};
    float _nearClipPlaneDistance{};
    float _farClipPlaneDistance{};
public:
    using Config = BeginSecondaryCameraFrameConfig;
    using JobModel = render::Job::ModelO<BeginSecondaryCameraFrame, RenderArgsPointer, Config>;
    BeginSecondaryCameraFrame() {
        _cachedArgsPointer = std::make_shared<RenderArgs>(_cachedArgs);
    }

    void configure(const Config& config) {
        if (config.enabled || config.alwaysEnabled) {
            _position = config.position;
            _orientation = config.orientation;
            _vFoV = config.vFoV;
            _nearClipPlaneDistance = config.nearClipPlaneDistance;
            _farClipPlaneDistance = config.farClipPlaneDistance;
        }
    }

    void run(const render::RenderContextPointer& renderContext, RenderArgsPointer& cachedArgs) {
        auto args = renderContext->args;
        auto textureCache = DependencyManager::get<TextureCache>();
        gpu::FramebufferPointer destFramebuffer;
        destFramebuffer = textureCache->getSpectatorCameraFramebuffer(); // FIXME: Change the destination based on some unimplemented config var
        if (destFramebuffer) {
            _cachedArgsPointer->_blitFramebuffer = args->_blitFramebuffer;
            _cachedArgsPointer->_viewport = args->_viewport;
            _cachedArgsPointer->_displayMode = args->_displayMode;
            _cachedArgsPointer->_renderMode = args->_renderMode;
            args->_blitFramebuffer = destFramebuffer;
            args->_viewport = glm::ivec4(0, 0, destFramebuffer->getWidth(), destFramebuffer->getHeight());
            args->_displayMode = RenderArgs::MONO;
            args->_renderMode = RenderArgs::RenderMode::SECONDARY_CAMERA_RENDER_MODE;

            gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
                batch.disableContextStereo();
            });

            auto srcViewFrustum = args->getViewFrustum();
            srcViewFrustum.setPosition(_position);
            srcViewFrustum.setOrientation(_orientation);
            srcViewFrustum.setProjection(glm::perspective(glm::radians(_vFoV), ((float)args->_viewport.z / (float)args->_viewport.w), _nearClipPlaneDistance, _farClipPlaneDistance));
            // Without calculating the bound planes, the secondary camera will use the same culling frustum as the main camera,
            // which is not what we want here.
            srcViewFrustum.calculate();
            args->pushViewFrustum(srcViewFrustum);
            cachedArgs = _cachedArgsPointer;
        }
    }

protected:
    RenderArgs _cachedArgs;
    RenderArgsPointer _cachedArgsPointer;
};

class EndSecondaryCameraFrame { // Restores renderContext.
public:
    using JobModel = render::Job::ModelI<EndSecondaryCameraFrame, RenderArgsPointer>;

    void run(const render::RenderContextPointer& renderContext, const RenderArgsPointer& cachedArgs) {
        auto args = renderContext->args;
        args->_blitFramebuffer = cachedArgs->_blitFramebuffer;
        args->_viewport = cachedArgs->_viewport;
        args->popViewFrustum();
        args->_displayMode = cachedArgs->_displayMode;
        args->_renderMode = cachedArgs->_renderMode;

        gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
            batch.restoreContextStereo();
        });
    }
};

void SecondaryCameraRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor) {
    const auto cachedArg = task.addJob<BeginSecondaryCameraFrame>("BeginSecondaryCamera");
    const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor);
    assert(items.canCast<RenderFetchCullSortTask::Output>());
    task.addJob<RenderDeferredTask>("RenderDeferredTask", items);
    task.addJob<EndSecondaryCameraFrame>("EndSecondaryCamera", cachedArg);
}
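The projection that BeginSecondaryCameraFrame::run pushes is an ordinary glm perspective matrix built from the config values and the destination framebuffer's aspect ratio. Below is a self-contained sketch using the defaults introduced in this PR (vFoV 45 degrees, near 0.1 m, far 100 m, and the 2048x1024 spectator framebuffer); it is an illustration, not code from the diff:

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

int main() {
    // Defaults from BeginSecondaryCameraFrameConfig and resetSpectatorCameraFramebuffer(2048, 1024).
    const float vFoV = 45.0f;        // vertical field of view, degrees
    const float nearClip = 0.1f;     // meters
    const float farClip = 100.0f;    // meters
    const glm::ivec4 viewport(0, 0, 2048, 1024);

    // Same expression the job uses: the aspect ratio comes from the framebuffer-sized viewport.
    const float aspect = (float)viewport.z / (float)viewport.w;   // 2.0 here
    const glm::mat4 projection =
        glm::perspective(glm::radians(vFoV), aspect, nearClip, farClip);

    (void)projection;   // the job then applies this to a copy of the view frustum and recalculates its planes
    return 0;
}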
interface/src/SecondaryCamera.h (new file, 70 lines)
@@ -0,0 +1,70 @@
//
//  SecondaryCamera.h
//  interface/src
//
//  Created by Samuel Gateau, Howard Stearns, and Zach Fox on 2017-06-08.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#pragma once
#ifndef hifi_SecondaryCamera_h
#define hifi_SecondaryCamera_h

#include <RenderShadowTask.h>
#include <render/RenderFetchCullSortTask.h>
#include <RenderDeferredTask.h>
#include <RenderForwardTask.h>


class MainRenderTask {
public:
    using JobModel = render::Task::Model<MainRenderTask>;

    MainRenderTask() {}

    void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, bool isDeferred = true);
};

class BeginSecondaryCameraFrameConfig : public render::Task::Config { // Exposes secondary camera parameters to JavaScript.
    Q_OBJECT
    Q_PROPERTY(glm::vec3 position MEMBER position NOTIFY dirty) // of viewpoint to render from
    Q_PROPERTY(glm::quat orientation MEMBER orientation NOTIFY dirty) // of viewpoint to render from
    Q_PROPERTY(float vFoV MEMBER vFoV NOTIFY dirty) // Secondary camera's vertical field of view. In degrees.
    Q_PROPERTY(float nearClipPlaneDistance MEMBER nearClipPlaneDistance NOTIFY dirty) // Secondary camera's near clip plane distance. In meters.
    Q_PROPERTY(float farClipPlaneDistance MEMBER farClipPlaneDistance NOTIFY dirty) // Secondary camera's far clip plane distance. In meters.
public:
    glm::vec3 position{};
    glm::quat orientation{};
    float vFoV{ 45.0f };
    float nearClipPlaneDistance{ 0.1f };
    float farClipPlaneDistance{ 100.0f };
    BeginSecondaryCameraFrameConfig() : render::Task::Config(false) {}
signals:
    void dirty();
};

class SecondaryCameraRenderTaskConfig : public render::Task::Config {
    Q_OBJECT
public:
    SecondaryCameraRenderTaskConfig() : render::Task::Config(false) {}
private:
    void resetSize(int width, int height);
signals:
    void dirty();
public slots:
    void resetSizeSpectatorCamera(int width, int height);
};

class SecondaryCameraRenderTask {
public:
    using Config = SecondaryCameraRenderTaskConfig;
    using JobModel = render::Task::Model<SecondaryCameraRenderTask, Config>;
    SecondaryCameraRenderTask() {}
    void configure(const Config& config) {}
    void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor);
};

#endif
MyAvatar.cpp

@@ -1881,15 +1881,14 @@ void MyAvatar::preDisplaySide(RenderArgs* renderArgs) {

 const float RENDER_HEAD_CUTOFF_DISTANCE = 0.3f;

-bool MyAvatar::cameraInsideHead() const {
-    const glm::vec3 cameraPosition = qApp->getCamera().getPosition();
+bool MyAvatar::cameraInsideHead(const glm::vec3& cameraPosition) const {
     return glm::length(cameraPosition - getHeadPosition()) < (RENDER_HEAD_CUTOFF_DISTANCE * getUniformScale());
 }

 bool MyAvatar::shouldRenderHead(const RenderArgs* renderArgs) const {
     bool defaultMode = renderArgs->_renderMode == RenderArgs::DEFAULT_RENDER_MODE;
     bool firstPerson = qApp->getCamera().getMode() == CAMERA_MODE_FIRST_PERSON;
-    bool insideHead = cameraInsideHead();
+    bool insideHead = cameraInsideHead(renderArgs->getViewFrustum().getPosition());
     return !defaultMode || !firstPerson || !insideHead;
 }
MyAvatar.h

@@ -615,7 +615,7 @@
                          float scale = 1.0f, bool isSoft = false,
                          bool allowDuplicates = false, bool useSaved = true) override;

-    bool cameraInsideHead() const;
+    bool cameraInsideHead(const glm::vec3& cameraPosition) const;

     void updateEyeContactTarget(float deltaTime);
WindowScriptingInterface.cpp

@@ -284,6 +284,11 @@ void WindowScriptingInterface::copyToClipboard(const QString& text) {
     QApplication::clipboard()->setText(text);
 }

+bool WindowScriptingInterface::setDisplayTexture(const QString& name) {
+    return qApp->getActiveDisplayPlugin()->setDisplayTexture(name); // Plugins that don't know how, answer false.
+}
+
 void WindowScriptingInterface::takeSnapshot(bool notify, bool includeAnimated, float aspectRatio) {
     qApp->takeSnapshot(notify, includeAnimated, aspectRatio);
 }
WindowScriptingInterface.h

@@ -62,6 +62,7 @@ public slots:
     void displayAnnouncement(const QString& message);
     void shareSnapshot(const QString& path, const QUrl& href = QUrl(""));
     bool isPhysicsEnabled();
+    bool setDisplayTexture(const QString& name);

     int openMessageBox(QString title, QString text, int buttons, int defaultButton);
     void updateMessageBox(int id, QString title, QString text, int buttons, int defaultButton);
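The new slot above is a thin pass-through to the active display plugin. Below is a hedged sketch of how a caller could wire it to the spectator camera frame texture defined later in TextureCache.cpp; the helper function names are invented for illustration, while the calls inside them are the ones this PR adds:

// Hypothetical helpers, not part of the diff. Script code would reach the same
// path through the Window scripting interface's setDisplayTexture() slot.
bool mirrorSpectatorCameraOnMonitor() {
    // OpenGLDisplayPlugin resolves this URL via TextureCache; plugins that don't
    // support display textures keep the base-class default and return false.
    return qApp->getActiveDisplayPlugin()->setDisplayTexture("resource://spectatorCameraFrame");
}

bool stopMirroring() {
    // An empty name resets the plugin to its normal composited output.
    return qApp->getActiveDisplayPlugin()->setDisplayTexture("");
}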
Avatar.cpp

@@ -288,6 +288,13 @@ void Avatar::updateAvatarEntities() {
                 properties.setScript(noScript);
             }

+            // When grabbing avatar entities, they are parented to the joint moving them, then when un-grabbed
+            // they go back to the default parent (null uuid). When un-gripped, others saw the entity disappear.
+            // The thinking here is the local position was noticed as changing, but not the parentID (since it is now
+            // back to the default), and the entity flew off somewhere. Marking all changed definitely fixes this,
+            // and seems safe (per Seth).
+            properties.markAllChanged();
+
             // try to build the entity
             EntityItemPointer entity = entityTree->findEntityByEntityItemID(EntityItemID(entityID));
             bool success = true;

@@ -1067,15 +1074,15 @@ void Avatar::setModelURLFinished(bool success) {
         const int MAX_SKELETON_DOWNLOAD_ATTEMPTS = 4; // NOTE: we don't want to be as generous as ResourceCache is, we only want 4 attempts
         if (_skeletonModel->getResourceDownloadAttemptsRemaining() <= 0 ||
             _skeletonModel->getResourceDownloadAttempts() > MAX_SKELETON_DOWNLOAD_ATTEMPTS) {
             qCWarning(avatars_renderer) << "Using default after failing to load Avatar model: " << _skeletonModelURL
                 << "after" << _skeletonModel->getResourceDownloadAttempts() << "attempts.";
             // call _skeletonModel.setURL, but leave our copy of _skeletonModelURL alone. This is so that
             // we don't redo this every time we receive an identity packet from the avatar with the bad url.
             QMetaObject::invokeMethod(_skeletonModel.get(), "setURL",
                 Qt::QueuedConnection, Q_ARG(QUrl, AvatarData::defaultFullAvatarModelUrl()));
         } else {
             qCWarning(avatars_renderer) << "Avatar model: " << _skeletonModelURL
                 << "failed to load... attempts:" << _skeletonModel->getResourceDownloadAttempts()
                 << "out of:" << MAX_SKELETON_DOWNLOAD_ATTEMPTS;
         }
     }
OpenGLDisplayPlugin.cpp

@@ -496,6 +496,17 @@ void OpenGLDisplayPlugin::submitFrame(const gpu::FramePointer& newFrame) {
         _newFrameQueue.push(newFrame);
     });
 }
+void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor) {
+    batch.enableStereo(false);
+    batch.resetViewTransform();
+    batch.setFramebuffer(gpu::FramebufferPointer());
+    batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
+    batch.setStateScissorRect(scissor);
+    batch.setViewportTransform(viewport);
+    batch.setResourceTexture(0, texture);
+    batch.setPipeline(_presentPipeline);
+    batch.draw(gpu::TRIANGLE_STRIP, 4);
+}

 void OpenGLDisplayPlugin::updateFrameData() {
     PROFILE_RANGE(render, __FUNCTION__)

@@ -605,14 +616,11 @@ void OpenGLDisplayPlugin::compositeLayers() {

 void OpenGLDisplayPlugin::internalPresent() {
     render([&](gpu::Batch& batch) {
-        batch.enableStereo(false);
-        batch.resetViewTransform();
-        batch.setFramebuffer(gpu::FramebufferPointer());
-        batch.setViewportTransform(ivec4(uvec2(0), getSurfacePixels()));
-        batch.setResourceTexture(0, _compositeFramebuffer->getRenderBuffer(0));
-        batch.setPipeline(_presentPipeline);
-        batch.draw(gpu::TRIANGLE_STRIP, 4);
+        // Note: _displayTexture must currently be the same size as the display.
+        uvec2 dims = _displayTexture ? uvec2(_displayTexture->getDimensions()) : getSurfacePixels();
+        auto viewport = ivec4(uvec2(0), dims);
+        renderFromTexture(batch, _displayTexture ? _displayTexture : _compositeFramebuffer->getRenderBuffer(0), viewport, viewport);
     });
     swapBuffers();
     _presentRate.increment();
 }
@@ -694,6 +702,22 @@ void OpenGLDisplayPlugin::withMainThreadContext(std::function<void()> f) const {
     _container->makeRenderingContextCurrent();
 }

+bool OpenGLDisplayPlugin::setDisplayTexture(const QString& name) {
+    // Note: it is the caller's responsibility to keep the network texture in cache.
+    if (name.isEmpty()) {
+        _displayTexture.reset();
+        onDisplayTextureReset();
+        return true;
+    }
+    auto textureCache = DependencyManager::get<TextureCache>();
+    auto displayNetworkTexture = textureCache->getTexture(name);
+    if (!displayNetworkTexture) {
+        return false;
+    }
+    _displayTexture = displayNetworkTexture->getGPUTexture();
+    return !!_displayTexture;
+}
+
 QImage OpenGLDisplayPlugin::getScreenshot(float aspectRatio) const {
     auto size = _compositeFramebuffer->getSize();
     if (isHmd()) {
OpenGLDisplayPlugin.h

@@ -57,6 +57,8 @@ public:
         return getSurfaceSize();
     }

+    virtual bool setDisplayTexture(const QString& name) override;
+    virtual bool onDisplayTextureReset() { return false; };
     QImage getScreenshot(float aspectRatio = 0.0f) const override;

     float presentRate() const override;

@@ -109,6 +111,7 @@ protected:
     // Plugin specific functionality to send the composed scene to the output window or device
     virtual void internalPresent();

+    void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor);
     virtual void updateFrameData();

     void withMainThreadContext(std::function<void()> f) const;

@@ -134,6 +137,7 @@ protected:
     gpu::PipelinePointer _simplePipeline;
     gpu::PipelinePointer _presentPipeline;
     gpu::PipelinePointer _cursorPipeline;
+    gpu::TexturePointer _displayTexture{};
     float _compositeOverlayAlpha { 1.0f };

     struct CursorData {
HmdDisplayPlugin.cpp

@@ -27,6 +27,7 @@
 #include <gpu/StandardShaderLib.h>
 #include <gpu/gl/GLBackend.h>

+#include <TextureCache.h>
 #include <PathUtils.h>

 #include "../Logging.h"

@@ -211,7 +212,15 @@ void HmdDisplayPlugin::internalPresent() {
     // Composite together the scene, overlay and mouse cursor
     hmdPresent();

-    if (!_disablePreview) {
+    if (_displayTexture) {
+        // Note: _displayTexture must currently be the same size as the display.
+        uvec2 dims = uvec2(_displayTexture->getDimensions());
+        auto viewport = ivec4(uvec2(0), dims);
+        render([&](gpu::Batch& batch) {
+            renderFromTexture(batch, _displayTexture, viewport, viewport);
+        });
+        swapBuffers();
+    } else if (!_disablePreview) {
         // screen preview mirroring
         auto sourceSize = _renderTargetSize;
         if (_monoPreview) {

@@ -278,16 +287,7 @@

             viewport.z *= 2;
         }

-            batch.enableStereo(false);
-            batch.resetViewTransform();
-            batch.setFramebuffer(gpu::FramebufferPointer());
-            batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
-            batch.setStateScissorRect(scissor); // was viewport
-            batch.setViewportTransform(viewport);
-            batch.setResourceTexture(0, _compositeFramebuffer->getRenderBuffer(0));
-            batch.setPipeline(_presentPipeline);
-            batch.draw(gpu::TRIANGLE_STRIP, 4);
+            renderFromTexture(batch, _compositeFramebuffer->getRenderBuffer(0), viewport, scissor);
         });
         swapBuffers();
     } else if (_clearPreviewFlag) {

@@ -316,15 +316,7 @@
         auto viewport = getViewportForSourceSize(uvec2(_previewTexture->getDimensions()));

         render([&](gpu::Batch& batch) {
-            batch.enableStereo(false);
-            batch.resetViewTransform();
-            batch.setFramebuffer(gpu::FramebufferPointer());
-            batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
-            batch.setStateScissorRect(viewport);
-            batch.setViewportTransform(viewport);
-            batch.setResourceTexture(0, _previewTexture);
-            batch.setPipeline(_presentPipeline);
-            batch.draw(gpu::TRIANGLE_STRIP, 4);
+            renderFromTexture(batch, _previewTexture, viewport, viewport);
         });
         _clearPreviewFlag = false;
         swapBuffers();
HmdDisplayPlugin.h

@@ -46,6 +46,8 @@ public:

     float stutterRate() const override;

+    virtual bool onDisplayTextureReset() override { _clearPreviewFlag = true; return true; };
+
 protected:
     virtual void hmdPresent() = 0;
     virtual bool isHmdMounted() const = 0;
GLBackend.cpp

@@ -115,6 +115,9 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =

     (&::gpu::gl::GLBackend::do_resetStages),

+    (&::gpu::gl::GLBackend::do_disableContextStereo),
+    (&::gpu::gl::GLBackend::do_restoreContextStereo),
+
     (&::gpu::gl::GLBackend::do_runLambda),

     (&::gpu::gl::GLBackend::do_startNamedCall),

@@ -224,6 +227,14 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
                 _transform.preUpdate(_commandIndex, _stereo);
                 break;

+            case Batch::COMMAND_disableContextStereo:
+                _stereo._contextDisable = true;
+                break;
+
+            case Batch::COMMAND_restoreContextStereo:
+                _stereo._contextDisable = false;
+                break;
+
             case Batch::COMMAND_setViewportTransform:
             case Batch::COMMAND_setViewTransform:
             case Batch::COMMAND_setProjectionTransform: {

@@ -308,16 +319,16 @@ void GLBackend::render(const Batch& batch) {
     }

 #ifdef GPU_STEREO_DRAWCALL_INSTANCED
-    if (_stereo._enable) {
+    if (_stereo.isStereo()) {
         glEnable(GL_CLIP_DISTANCE0);
     }
 #endif
     {
-        PROFILE_RANGE(render_gpu_gl_detail, _stereo._enable ? "Render Stereo" : "Render");
+        PROFILE_RANGE(render_gpu_gl_detail, _stereo.isStereo() ? "Render Stereo" : "Render");
         renderPassDraw(batch);
     }
 #ifdef GPU_STEREO_DRAWCALL_INSTANCED
-    if (_stereo._enable) {
+    if (_stereo.isStereo()) {
         glDisable(GL_CLIP_DISTANCE0);
     }
 #endif

@@ -358,6 +369,15 @@ void GLBackend::do_resetStages(const Batch& batch, size_t paramOffset) {
     resetStages();
 }

+void GLBackend::do_disableContextStereo(const Batch& batch, size_t paramOffset) {
+
+}
+
+void GLBackend::do_restoreContextStereo(const Batch& batch, size_t paramOffset) {
+
+}
+
 void GLBackend::do_runLambda(const Batch& batch, size_t paramOffset) {
     std::function<void()> f = batch._lambdas.get(batch._params[paramOffset]._uint);
     f();
GLBackend.h

@@ -143,6 +143,9 @@ public:
     // Reset stages
     virtual void do_resetStages(const Batch& batch, size_t paramOffset) final;

+    virtual void do_disableContextStereo(const Batch& batch, size_t paramOffset) final;
+    virtual void do_restoreContextStereo(const Batch& batch, size_t paramOffset) final;
+
     virtual void do_runLambda(const Batch& batch, size_t paramOffset) final;

     virtual void do_startNamedCall(const Batch& batch, size_t paramOffset) final;
@@ -48,7 +48,7 @@ void GLBackend::do_setFramebuffer(const Batch& batch, size_t paramOffset) {
 }

 void GLBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
-    if (_stereo._enable && !_pipeline._stateCache.scissorEnable) {
+    if (_stereo.isStereo() && !_pipeline._stateCache.scissorEnable) {
         qWarning("Clear without scissor in stereo mode");
     }

@@ -322,7 +322,7 @@ void GLBackend::do_setStateScissorRect(const Batch& batch, size_t paramOffset) {
     Vec4i rect;
     memcpy(&rect, batch.readData(batch._params[paramOffset]._uint), sizeof(Vec4i));

-    if (_stereo._enable) {
+    if (_stereo.isStereo()) {
         rect.z /= 2;
         if (_stereo._pass) {
             rect.x += rect.z;
GLBackendTransform.cpp

@@ -37,7 +37,7 @@ void GLBackend::do_setViewportTransform(const Batch& batch, size_t paramOffset)
     glViewport(vp.x, vp.y, vp.z, vp.w);

     // Where we assign the GL viewport
-    if (_stereo._enable) {
+    if (_stereo.isStereo()) {
         vp.z /= 2;
         if (_stereo._pass) {
             vp.x += vp.z;

@@ -119,7 +119,7 @@ void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const Stereo
     size_t offset = _cameraUboSize * _cameras.size();
     _cameraOffsets.push_back(TransformStageState::Pair(commandIndex, offset));

-    if (stereo._enable) {
+    if (stereo.isStereo()) {
 #ifdef GPU_STEREO_CAMERA_BUFFER
         _cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, _view), _camera.getEyeCamera(1, stereo, _view)));
 #else

@@ -151,7 +151,7 @@ void GLBackend::TransformStageState::update(size_t commandIndex, const StereoSta
 #ifdef GPU_STEREO_CAMERA_BUFFER
     bindCurrentCamera(0);
 #else
-    if (!stereo._enable) {
+    if (!stereo.isStereo()) {
         bindCurrentCamera(0);
     }
 #endif
Batch.cpp

@@ -390,6 +390,15 @@ void Batch::resetStages() {
     ADD_COMMAND(resetStages);
 }

+void Batch::disableContextStereo() {
+    ADD_COMMAND(disableContextStereo);
+}
+
+void Batch::restoreContextStereo() {
+    ADD_COMMAND(restoreContextStereo);
+}
+
 void Batch::runLambda(std::function<void()> f) {
     ADD_COMMAND(runLambda);
     _params.emplace_back(_lambdas.cache(f));
Batch.h

@@ -217,6 +217,9 @@ public:
     // Reset the stage caches and states
     void resetStages();

+    void disableContextStereo();
+    void restoreContextStereo();
+
     // Debugging
     void pushProfileRange(const char* name);
     void popProfileRange();

@@ -301,6 +304,9 @@ public:

         COMMAND_resetStages,

+        COMMAND_disableContextStereo,
+        COMMAND_restoreContextStereo,
+
         COMMAND_runLambda,

         COMMAND_startNamedCall,

@@ -467,7 +473,7 @@ public:
     NamedBatchDataMap _namedData;

     bool _enableStereo{ true };
-    bool _enableSkybox{ false };
+    bool _enableSkybox { false };

 protected:
     friend class Context;
Context.cpp

@@ -145,7 +145,7 @@ void Context::enableStereo(bool enable) {
 }

 bool Context::isStereo() {
-    return _stereo._enable;
+    return _stereo.isStereo();
 }

 void Context::setStereoProjections(const mat4 eyeProjections[2]) {
Context.h

@@ -118,7 +118,7 @@ public:

 protected:
     virtual bool isStereo() {
-        return _stereo._enable;
+        return _stereo.isStereo();
     }

     void getStereoProjections(mat4* eyeProjections) const {
@@ -93,7 +93,11 @@ namespace gpu {
     using TextureViews = std::vector<TextureView>;

     struct StereoState {
+        bool isStereo() const {
+            return _enable && !_contextDisable;
+        }
         bool _enable{ false };
+        bool _contextDisable { false };
         bool _skybox{ false };
         // 0 for left eye, 1 for right eye
         uint8 _pass{ 0 };
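The new _contextDisable flag is what lets a batch switch stereo off for a stretch of commands without touching the application-level _enable flag, which is why every `_stereo._enable` check in the backend above is rewritten to `isStereo()`. A trimmed-down, self-contained sketch of the intended semantics (illustration only, not the real gpu types):

#include <cassert>
#include <cstdint>

// Reduced copy of gpu::StereoState for illustration.
struct StereoState {
    bool isStereo() const { return _enable && !_contextDisable; }
    bool _enable { false };
    bool _contextDisable { false };
    uint8_t _pass { 0 };   // 0 for left eye, 1 for right eye
};

int main() {
    StereoState stereo;
    stereo._enable = true;             // the HMD display path enables stereo for the frame
    assert(stereo.isStereo());

    stereo._contextDisable = true;     // Batch::disableContextStereo() -> COMMAND_disableContextStereo
    assert(!stereo.isStereo());        // the secondary camera renders mono even though stereo is on

    stereo._contextDisable = false;    // Batch::restoreContextStereo() -> COMMAND_restoreContextStereo
    assert(stereo.isStereo());
    return 0;
}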
TextureCache.cpp

@@ -52,6 +52,9 @@ Q_LOGGING_CATEGORY(trace_resource_parse_image_ktx, "trace.resource.parse.image.k
 const std::string TextureCache::KTX_DIRNAME { "ktx_cache" };
 const std::string TextureCache::KTX_EXT { "ktx" };

+static const QString RESOURCE_SCHEME = "resource";
+static const QUrl SPECTATOR_CAMERA_FRAME_URL("resource://spectatorCameraFrame");
+
 static const float SKYBOX_LOAD_PRIORITY { 10.0f }; // Make sure skybox loads first
 static const float HIGH_MIPS_LOAD_PRIORITY { 9.0f }; // Make sure high mips loads after skybox but before models

@@ -182,6 +185,9 @@ ScriptableResource* TextureCache::prefetch(const QUrl& url, int type, int maxNum
 }

 NetworkTexturePointer TextureCache::getTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels) {
+    if (url.scheme() == RESOURCE_SCHEME) {
+        return getResourceTexture(url);
+    }
     TextureExtra extra = { type, content, maxNumPixels };
     return ResourceCache::getResource(url, QUrl(), &extra).staticCast<NetworkTexture>();
 }

@@ -267,6 +273,18 @@ QSharedPointer<Resource> TextureCache::createResource(const QUrl& url, const QSh
     return QSharedPointer<Resource>(texture, &Resource::deleter);
 }

+NetworkTexture::NetworkTexture(const QUrl& url) :
+    Resource(url),
+    _type(),
+    _sourceIsKTX(false),
+    _maxNumPixels(100)
+{
+    _textureSource = std::make_shared<gpu::TextureSource>();
+    _lowestRequestedMipLevel = 0;
+    _loaded = true;
+}
+
 NetworkTexture::NetworkTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels) :
     Resource(url),
     _type(type),

@@ -953,3 +971,32 @@ void ImageReader::read() {
                               Q_ARG(int, texture->getWidth()),
                               Q_ARG(int, texture->getHeight()));
 }
+
+NetworkTexturePointer TextureCache::getResourceTexture(QUrl resourceTextureUrl) {
+    gpu::TexturePointer texture;
+    if (resourceTextureUrl == SPECTATOR_CAMERA_FRAME_URL) {
+        if (!_spectatorCameraNetworkTexture) {
+            _spectatorCameraNetworkTexture.reset(new NetworkTexture(resourceTextureUrl));
+        }
+        texture = _spectatorCameraFramebuffer->getRenderBuffer(0);
+        if (texture) {
+            _spectatorCameraNetworkTexture->setImage(texture, texture->getWidth(), texture->getHeight());
+            return _spectatorCameraNetworkTexture;
+        }
+    }
+
+    return NetworkTexturePointer();
+}
+
+const gpu::FramebufferPointer& TextureCache::getSpectatorCameraFramebuffer() {
+    if (!_spectatorCameraFramebuffer) {
+        resetSpectatorCameraFramebuffer(2048, 1024);
+    }
+    return _spectatorCameraFramebuffer;
+}
+
+void TextureCache::resetSpectatorCameraFramebuffer(int width, int height) {
+    _spectatorCameraFramebuffer.reset(gpu::Framebuffer::create("spectatorCamera", gpu::Element::COLOR_SRGBA_32, width, height));
+    _spectatorCameraNetworkTexture.reset();
+}
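Taken together, the last hunk means the spectator camera frame can be fetched like any other texture through the new resource URL scheme. A hedged usage sketch follows; the caller code is invented for illustration, but the calls themselves are the ones added above:

// Sketch of a client-side lookup. getResourceTexture() assumes the framebuffer already exists,
// so it is safest to touch getSpectatorCameraFramebuffer() first (it creates a 2048x1024 one on demand).
auto textureCache = DependencyManager::get<TextureCache>();
textureCache->getSpectatorCameraFramebuffer();

NetworkTexturePointer frame = textureCache->getTexture(QUrl("resource://spectatorCameraFrame"));
if (frame) {
    // The NetworkTexture wraps the framebuffer's color render buffer (see setImage() above).
    gpu::TexturePointer gpuTexture = frame->getGPUTexture();
}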
TextureCache.h

@@ -43,6 +43,7 @@ class NetworkTexture : public Resource, public Texture {
     Q_OBJECT

 public:
+    NetworkTexture(const QUrl& url);
     NetworkTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels);
     ~NetworkTexture() override;

@@ -124,6 +125,8 @@ private:
     int _width { 0 };
     int _height { 0 };
     int _maxNumPixels { ABSOLUTE_MAX_TEXTURE_NUM_PIXELS };
+
+    friend class TextureCache;
 };

 using NetworkTexturePointer = QSharedPointer<NetworkTexture>;

@@ -162,6 +165,12 @@ public:
     gpu::TexturePointer getTextureByHash(const std::string& hash);
     gpu::TexturePointer cacheTextureByHash(const std::string& hash, const gpu::TexturePointer& texture);

+    /// SpectatorCamera rendering targets.
+    NetworkTexturePointer getResourceTexture(QUrl resourceTextureUrl);
+    const gpu::FramebufferPointer& getSpectatorCameraFramebuffer();
+    void resetSpectatorCameraFramebuffer(int width, int height);
+
 protected:
     // Overload ResourceCache::prefetch to allow specifying texture type for loads
     Q_INVOKABLE ScriptableResource* prefetch(const QUrl& url, int type, int maxNumPixels = ABSOLUTE_MAX_TEXTURE_NUM_PIXELS);

@@ -179,6 +188,7 @@ private:

     static const std::string KTX_DIRNAME;
     static const std::string KTX_EXT;

     KTXCache _ktxCache;
     // Map from image hashes to texture weak pointers
     std::unordered_map<std::string, std::weak_ptr<gpu::Texture>> _texturesByHashes;

@@ -189,6 +199,9 @@ private:
     gpu::TexturePointer _grayTexture;
     gpu::TexturePointer _blueTexture;
     gpu::TexturePointer _blackTexture;
+
+    NetworkTexturePointer _spectatorCameraNetworkTexture;
+    gpu::FramebufferPointer _spectatorCameraFramebuffer;
 };

 #endif // hifi_TextureCache_h
DisplayPlugin.h

@@ -184,6 +184,9 @@ public:
     // will query the underlying hmd api to compute the most recent head pose
     virtual bool beginFrameRender(uint32_t frameIndex) { return true; }

+    // Set the texture to display on the monitor and return true, if allowed. Empty string resets.
+    virtual bool setDisplayTexture(const QString& name) { return false; }
+
     virtual float devicePixelRatio() { return 1.0f; }
     // Rate at which we render frames
     virtual float renderRate() const { return -1.0f; }
CauterizedMeshPartPayload.cpp

@@ -29,7 +29,7 @@ void CauterizedMeshPartPayload::updateTransformForCauterizedMesh(

 void CauterizedMeshPartPayload::bindTransform(gpu::Batch& batch, const render::ShapePipeline::LocationsPointer locations, RenderArgs::RenderMode renderMode) const {
     // Still relying on the raw data from the model
-    bool useCauterizedMesh = (renderMode != RenderArgs::RenderMode::SHADOW_RENDER_MODE);
+    bool useCauterizedMesh = (renderMode != RenderArgs::RenderMode::SHADOW_RENDER_MODE && renderMode != RenderArgs::RenderMode::SECONDARY_CAMERA_RENDER_MODE);
     if (useCauterizedMesh) {
         ModelPointer model = _model.lock();
         if (model) {
@@ -61,7 +61,7 @@ namespace render {

 class Args {
 public:
-    enum RenderMode { DEFAULT_RENDER_MODE, SHADOW_RENDER_MODE, DIFFUSE_RENDER_MODE, NORMAL_RENDER_MODE, MIRROR_RENDER_MODE };
+    enum RenderMode { DEFAULT_RENDER_MODE, SHADOW_RENDER_MODE, DIFFUSE_RENDER_MODE, NORMAL_RENDER_MODE, MIRROR_RENDER_MODE, SECONDARY_CAMERA_RENDER_MODE };
     enum DisplayMode { MONO, STEREO_MONITOR, STEREO_HMD };
     enum DebugFlags {
         RENDER_DEBUG_NONE = 0,