Merge pull request #10636 from zfox23/spectatorCamera_unprototype
"Unprototype" the Spectator Camera
Commit c32f5c1f2b
7 changed files with 222 additions and 180 deletions
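Script-facing summary for reviewers: the prototype exposed its render jobs to scripts as "SelfieFrame" / "BeginSelfie" and published its output at the fake URL "http://selfieFrame"; after this PR the same jobs are configured as "SecondaryCameraFrame" / "BeginSecondaryCamera" and the output texture lives at "resource://spectatorCameraFrame". A minimal sketch of the new usage, using only names that appear in the diff below (illustrative, not part of the change):

    // Fetch the renamed render-task configs.
    var secondaryCameraConfig = Render.getConfig("SecondaryCameraFrame");
    var beginSecondaryCameraConfig = Render.getConfig("BeginSecondaryCamera");

    // Size the offscreen framebuffer to the interface window, then enable both jobs.
    var size = Controller.getViewportDimensions();
    secondaryCameraConfig.resetSizeSpectatorCamera(size.x, size.y);
    secondaryCameraConfig.enabled = beginSecondaryCameraConfig.enabled = true;

    // Point the secondary view somewhere; spectatorCamera.js feeds this from a camera entity every frame.
    beginSecondaryCameraConfig.position = MyAvatar.position;
    beginSecondaryCameraConfig.orientation = MyAvatar.orientation;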
interface/src/Application.cpp
@@ -114,7 +114,7 @@
 #include <render/RenderFetchCullSortTask.h>
 #include <RenderDeferredTask.h>
 #include <RenderForwardTask.h>
-#include <PrototypeSelfie.h>
+#include <SecondaryCamera.h>
 #include <ResourceCache.h>
 #include <ResourceRequest.h>
 #include <SandboxUtils.h>
@@ -1874,7 +1874,7 @@ void Application::initializeGL() {
     }

     _renderEngine->addJob<MainRenderTask>("MainFrame", cullFunctor, isDeferred);
-    _renderEngine->addJob<SelfieRenderTask>("SelfieFrame", cullFunctor);
+    _renderEngine->addJob<SecondaryCameraRenderTask>("SecondaryCameraFrame", cullFunctor);


     /* _renderEngine->addJob<RenderShadowTask>("RenderShadowTask", cullFunctor);
interface/src/PrototypeSelfie.cpp (deleted, 102 lines)
@@ -1,102 +0,0 @@
-#include "PrototypeSelfie.h"
-
-#include <gpu/Context.h>
-
-void MainRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, bool isDeferred) {
-
-    task.addJob<RenderShadowTask>("RenderShadowTask", cullFunctor);
-    const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor);
-    assert(items.canCast<RenderFetchCullSortTask::Output>());
-    if (!isDeferred) {
-        task.addJob<RenderForwardTask>("Forward", items);
-    } else {
-        task.addJob<RenderDeferredTask>("RenderDeferredTask", items);
-    }
-}
-
-
-#include <TextureCache.h>
-
-using RenderArgsPointer = std::shared_ptr<RenderArgs>;
-
-void SelfieRenderTaskConfig::resetSize(int width, int height) { // Carefully adjust the framebuffer / texture.
-    bool wasEnabled = isEnabled();
-    setEnabled(false);
-    auto textureCache = DependencyManager::get<TextureCache>();
-    textureCache->resetSelfieFramebuffer(width, height);
-    setEnabled(wasEnabled);
-}
-
-class BeginSelfieFrame { // Changes renderContext for our framebuffer and and view.
-    glm::vec3 _position{};
-    glm::quat _orientation{};
-public:
-    using Config = BeginSelfieFrameConfig;
-    using JobModel = render::Job::ModelO<BeginSelfieFrame, RenderArgsPointer, Config>;
-    BeginSelfieFrame() {
-        _cachedArgsPointer = std::make_shared<RenderArgs>(_cachedArgs);
-    }
-
-    void configure(const Config& config) {
-        // Why does this run all the time, even when not enabled? Should we check and bail?
-        //qDebug() << "FIXME pos" << config.position << "orient" << config.orientation;
-        _position = config.position;
-        _orientation = config.orientation;
-    }
-
-    void run(const render::RenderContextPointer& renderContext, RenderArgsPointer& cachedArgs) {
-        auto args = renderContext->args;
-        auto textureCache = DependencyManager::get<TextureCache>();
-        auto destFramebuffer = textureCache->getSelfieFramebuffer();
-        // Caching/restoring the old values doesn't seem to be needed. Is it because we happen to be last in the pipeline (which would be a bug waiting to happen)?
-        _cachedArgsPointer->_blitFramebuffer = args->_blitFramebuffer;
-        _cachedArgsPointer->_viewport = args->_viewport;
-        _cachedArgsPointer->_displayMode = args->_displayMode;
-        args->_blitFramebuffer = destFramebuffer;
-        args->_viewport = glm::ivec4(0, 0, destFramebuffer->getWidth(), destFramebuffer->getHeight());
-        args->_viewport = glm::ivec4(0, 0, destFramebuffer->getWidth(), destFramebuffer->getHeight());
-        args->_displayMode = RenderArgs::MONO;
-
-        gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
-            batch.disableContextStereo();
-        });
-
-        auto srcViewFrustum = args->getViewFrustum();
-        srcViewFrustum.setPosition(_position);
-        srcViewFrustum.setOrientation(_orientation);
-        //srcViewFrustum.calculate(); // do we need this? I don't think so
-        //qDebug() << "FIXME pos" << _position << "orient" << _orientation << "frust pos" << srcViewFrustum.getPosition() << "orient" << srcViewFrustum.getOrientation() << "direct" << srcViewFrustum.getDirection();
-        args->pushViewFrustum(srcViewFrustum);
-        cachedArgs = _cachedArgsPointer;
-    }
-
-protected:
-    RenderArgs _cachedArgs;
-    RenderArgsPointer _cachedArgsPointer;
-};
-
-class EndSelfieFrame { // Restores renderContext.
-public:
-    using JobModel = render::Job::ModelI<EndSelfieFrame, RenderArgsPointer>;
-
-    void run(const render::RenderContextPointer& renderContext, const RenderArgsPointer& cachedArgs) {
-        auto args = renderContext->args;
-        args->_blitFramebuffer = cachedArgs->_blitFramebuffer;
-        args->_viewport = cachedArgs->_viewport;
-        args->popViewFrustum();
-        args->_displayMode = cachedArgs->_displayMode;
-
-        gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
-            batch.restoreContextStereo();
-        });
-    }
-};
-
-void SelfieRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor) {
-    const auto cachedArg = task.addJob<BeginSelfieFrame>("BeginSelfie");
-    const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor);
-    assert(items.canCast<RenderFetchCullSortTask::Output>());
-    task.addJob<RenderDeferredTask>("RenderDeferredTask", items);
-    task.addJob<EndSelfieFrame>("EndSelfie", cachedArg);
-}
interface/src/SecondaryCamera.cpp (new file, 113 lines)
@@ -0,0 +1,113 @@
+//
+//  SecondaryCamera.cpp
+//  interface/src
+//
+//  Created by Samuel Gateau, Howard Stearns, and Zach Fox on 2017-06-08.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "SecondaryCamera.h"
+#include <TextureCache.h>
+#include <gpu/Context.h>
+
+using RenderArgsPointer = std::shared_ptr<RenderArgs>;
+
+void MainRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, bool isDeferred) {
+
+    task.addJob<RenderShadowTask>("RenderShadowTask", cullFunctor);
+    const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor);
+    assert(items.canCast<RenderFetchCullSortTask::Output>());
+    if (!isDeferred) {
+        task.addJob<RenderForwardTask>("Forward", items);
+    } else {
+        task.addJob<RenderDeferredTask>("RenderDeferredTask", items);
+    }
+}
+
+void SecondaryCameraRenderTaskConfig::resetSize(int width, int height) { // FIXME: Add an arg here for "destinationFramebuffer"
+    bool wasEnabled = isEnabled();
+    setEnabled(false);
+    auto textureCache = DependencyManager::get<TextureCache>();
+    textureCache->resetSpectatorCameraFramebuffer(width, height); // FIXME: Call the correct reset function based on the "destinationFramebuffer" arg
+    setEnabled(wasEnabled);
+}
+
+void SecondaryCameraRenderTaskConfig::resetSizeSpectatorCamera(int width, int height) { // Carefully adjust the framebuffer / texture.
+    resetSize(width, height);
+}
+
+class BeginSecondaryCameraFrame { // Changes renderContext for our framebuffer and and view.
+    glm::vec3 _position{};
+    glm::quat _orientation{};
+public:
+    using Config = BeginSecondaryCameraFrameConfig;
+    using JobModel = render::Job::ModelO<BeginSecondaryCameraFrame, RenderArgsPointer, Config>;
+    BeginSecondaryCameraFrame() {
+        _cachedArgsPointer = std::make_shared<RenderArgs>(_cachedArgs);
+    }
+
+    void configure(const Config& config) {
+        if (config.enabled || config.alwaysEnabled) {
+            _position = config.position;
+            _orientation = config.orientation;
+        }
+    }
+
+    void run(const render::RenderContextPointer& renderContext, RenderArgsPointer& cachedArgs) {
+        auto args = renderContext->args;
+        auto textureCache = DependencyManager::get<TextureCache>();
+        gpu::FramebufferPointer destFramebuffer;
+        destFramebuffer = textureCache->getSpectatorCameraFramebuffer(); // FIXME: Change the destination based on some unimplemented config var
+        if (destFramebuffer) {
+            // Caching/restoring the old values doesn't seem to be needed. Is it because we happen to be last in the pipeline (which would be a bug waiting to happen)?
+            _cachedArgsPointer->_blitFramebuffer = args->_blitFramebuffer;
+            _cachedArgsPointer->_viewport = args->_viewport;
+            _cachedArgsPointer->_displayMode = args->_displayMode;
+            args->_blitFramebuffer = destFramebuffer;
+            args->_viewport = glm::ivec4(0, 0, destFramebuffer->getWidth(), destFramebuffer->getHeight());
+            args->_displayMode = RenderArgs::MONO;
+
+            gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
+                batch.disableContextStereo();
+            });
+
+            auto srcViewFrustum = args->getViewFrustum();
+            srcViewFrustum.setPosition(_position);
+            srcViewFrustum.setOrientation(_orientation);
+            args->pushViewFrustum(srcViewFrustum);
+            cachedArgs = _cachedArgsPointer;
+        }
+    }
+
+protected:
+    RenderArgs _cachedArgs;
+    RenderArgsPointer _cachedArgsPointer;
+};
+
+class EndSecondaryCameraFrame { // Restores renderContext.
+public:
+    using JobModel = render::Job::ModelI<EndSecondaryCameraFrame, RenderArgsPointer>;
+
+    void run(const render::RenderContextPointer& renderContext, const RenderArgsPointer& cachedArgs) {
+        auto args = renderContext->args;
+        args->_blitFramebuffer = cachedArgs->_blitFramebuffer;
+        args->_viewport = cachedArgs->_viewport;
+        args->popViewFrustum();
+        args->_displayMode = cachedArgs->_displayMode;
+
+        gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
+            batch.restoreContextStereo();
+        });
+    }
+};
+
+void SecondaryCameraRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor) {
+    const auto cachedArg = task.addJob<BeginSecondaryCameraFrame>("BeginSecondaryCamera");
+    const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor);
+    assert(items.canCast<RenderFetchCullSortTask::Output>());
+    task.addJob<RenderDeferredTask>("RenderDeferredTask", items);
+    task.addJob<EndSecondaryCameraFrame>("EndSecondaryCamera", cachedArg);
+}
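Note on the resize path above: resetSize() keeps the enable/disable dance and carries a FIXME about choosing a destination framebuffer, while resetSizeSpectatorCamera() is the slot scripts actually call (resetSize() becomes private in SecondaryCamera.h below). A hedged sketch of the call the bundled script makes once when the camera is switched on; there is no viewport-resize signal yet, per the FIXME in spectatorCamera.js:

    // Re-fit the spectator camera framebuffer to the current interface window.
    // Assumes the "SecondaryCameraFrame" config name introduced in this PR.
    function fitSpectatorFramebufferToWindow() {
        var size = Controller.getViewportDimensions();
        Render.getConfig("SecondaryCameraFrame").resetSizeSpectatorCamera(size.x, size.y);
    }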
interface/src/SecondaryCamera.h
@@ -1,6 +1,17 @@
+//
+//  SecondaryCamera.h
+//  interface/src
+//
+//  Created by Samuel Gateau, Howard Stearns, and Zach Fox on 2017-06-08.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
 #pragma once
-#ifndef hifi_PrototypeSelfie_h
-#define hifi_PrototypeSelfie_h
+#ifndef hifi_SecondaryCamera_h
+#define hifi_SecondaryCamera_h

 #include <RenderShadowTask.h>
 #include <render/RenderFetchCullSortTask.h>

@@ -18,33 +29,35 @@ public:
     void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, bool isDeferred = true);
 };

-class BeginSelfieFrameConfig : public render::Task::Config { // Exposes view frustum position/orientation to javascript.
+class BeginSecondaryCameraFrameConfig : public render::Task::Config { // Exposes view frustum position/orientation to javascript.
     Q_OBJECT
     Q_PROPERTY(glm::vec3 position MEMBER position NOTIFY dirty) // of viewpoint to render from
     Q_PROPERTY(glm::quat orientation MEMBER orientation NOTIFY dirty) // of viewpoint to render from
 public:
     glm::vec3 position{};
     glm::quat orientation{};
-    BeginSelfieFrameConfig() : render::Task::Config(false) {}
+    BeginSecondaryCameraFrameConfig() : render::Task::Config(false) {}
 signals:
     void dirty();
 };

-class SelfieRenderTaskConfig : public render::Task::Config {
+class SecondaryCameraRenderTaskConfig : public render::Task::Config {
     Q_OBJECT
 public:
-    SelfieRenderTaskConfig() : render::Task::Config(false) {}
+    SecondaryCameraRenderTaskConfig() : render::Task::Config(false) {}
+private:
+    void resetSize(int width, int height);
 signals:
     void dirty();
 public slots:
-    void resetSize(int width, int height);
+    void resetSizeSpectatorCamera(int width, int height);
 };

-class SelfieRenderTask {
+class SecondaryCameraRenderTask {
 public:
-    using Config = SelfieRenderTaskConfig;
-    using JobModel = render::Task::Model<SelfieRenderTask, Config>;
-    SelfieRenderTask() {}
+    using Config = SecondaryCameraRenderTaskConfig;
+    using JobModel = render::Task::Model<SecondaryCameraRenderTask, Config>;
+    SecondaryCameraRenderTask() {}
     void configure(const Config& config) {}
     void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor);
 };
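BeginSecondaryCameraFrameConfig exposes the secondary viewpoint's position and orientation as script-writable properties, and the job only latches them while it is enabled (or alwaysEnabled). A simplified, hedged sketch of what updateRenderFromCamera() in spectatorCamera.js does with these properties; cameraEntityID is assumed to be an existing entity:

    var beginConfig = Render.getConfig("BeginSecondaryCamera");

    function followCameraEntity(cameraEntityID) {
        Script.update.connect(function () {
            // Copy the camera entity's pose into the render-task config each frame.
            var props = Entities.getEntityProperties(cameraEntityID, ['position', 'rotation']);
            beginConfig.position = props.position;
            beginConfig.orientation = props.rotation;
        });
    }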
TextureCache.cpp
@@ -50,7 +50,8 @@ Q_LOGGING_CATEGORY(trace_resource_parse_image_ktx, "trace.resource.parse.image.ktx")
 const std::string TextureCache::KTX_DIRNAME { "ktx_cache" };
 const std::string TextureCache::KTX_EXT { "ktx" };

-const std::string TextureCache::SELFIE_FRAME_URL { "http://selfieFrame" };
+static const QString RESOURCE_SCHEME = "resource";
+static const QUrl SPECTATOR_CAMERA_FRAME_URL("resource://spectatorCameraFrame");

 static const float SKYBOX_LOAD_PRIORITY { 10.0f }; // Make sure skybox loads first
 static const float HIGH_MIPS_LOAD_PRIORITY { 9.0f }; // Make sure high mips loads after skybox but before models
@@ -182,9 +183,8 @@ ScriptableResource* TextureCache::prefetch(const QUrl& url, int type, int maxNumPixels
 }

 NetworkTexturePointer TextureCache::getTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels) {
-    if (url == QUrl(SELFIE_FRAME_URL.c_str())) {
-
-        return getSelfieNetworkTexture();
+    if (url.scheme() == RESOURCE_SCHEME) {
+        return getResourceTexture(url);
     }
     TextureExtra extra = { type, content, maxNumPixels };
     return ResourceCache::getResource(url, QUrl(), &extra).staticCast<NetworkTexture>();
@@ -885,31 +885,30 @@ void ImageReader::read() {
 }


-NetworkTexturePointer TextureCache::getSelfieNetworkTexture() {
-    if (!_selfieNetworkTexture) {
-        _selfieNetworkTexture.reset(new NetworkTexture(QUrl(SELFIE_FRAME_URL.c_str())));
-        auto texture = getSelfieTexture();
-        _selfieNetworkTexture->setImage(texture, texture->getWidth(), texture->getHeight());
-    }
-    return _selfieNetworkTexture;
-}
-
-const gpu::TexturePointer& TextureCache::getSelfieTexture() {
-    if (!_selfieTexture) {
-        getSelfieFramebuffer();
-    }
-    return _selfieTexture;
-}
-const gpu::FramebufferPointer& TextureCache::getSelfieFramebuffer() {
-    if (!_selfieFramebuffer) {
-        resetSelfieFramebuffer(2048, 1024);
+NetworkTexturePointer TextureCache::getResourceTexture(QUrl resourceTextureUrl) {
+    gpu::TexturePointer texture;
+    if (resourceTextureUrl == SPECTATOR_CAMERA_FRAME_URL) {
+        if (!_spectatorCameraNetworkTexture) {
+            _spectatorCameraNetworkTexture.reset(new NetworkTexture(resourceTextureUrl));
+        }
+        texture = _spectatorCameraFramebuffer->getRenderBuffer(0);
+        if (texture) {
+            _spectatorCameraNetworkTexture->setImage(texture, texture->getWidth(), texture->getHeight());
+            return _spectatorCameraNetworkTexture;
+        }
     }
-    return _selfieFramebuffer;
+    return NetworkTexturePointer();
 }

-void TextureCache::resetSelfieFramebuffer(int width, int height) {
-    _selfieFramebuffer.reset(gpu::Framebuffer::create("selfie", gpu::Element::COLOR_SRGBA_32, 2048, 1024));
-    _selfieTexture = _selfieFramebuffer->getRenderBuffer(0);
-    _selfieNetworkTexture.reset();
-}
+const gpu::FramebufferPointer& TextureCache::getSpectatorCameraFramebuffer() {
+    if (!_spectatorCameraFramebuffer) {
+        resetSpectatorCameraFramebuffer(2048, 1024);
+    }
+    return _spectatorCameraFramebuffer;
+}
+
+void TextureCache::resetSpectatorCameraFramebuffer(int width, int height) {
+    _spectatorCameraFramebuffer.reset(gpu::Framebuffer::create("spectatorCamera", gpu::Element::COLOR_SRGBA_32, width, height));
+    _spectatorCameraNetworkTexture.reset();
+}
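With getResourceTexture() and the "resource" scheme in place, anything that takes a texture URL can show the spectator camera's output by pointing at "resource://spectatorCameraFrame" instead of the old "http://selfieFrame" placeholder. Two hedged sketches of how the script below consumes it (overlay properties other than the URL are illustrative):

    // In-world viewfinder: an image3d overlay textured with the live camera frame.
    var viewFinderOverlay = Overlays.addOverlay("image3d", {
        url: "resource://spectatorCameraFrame",
        emissive: true,
        alpha: 1
    });

    // Desktop monitor: mirror the camera frame to the main window; pass "" to restore the normal view.
    Window.setDisplayTexture("resource://spectatorCameraFrame");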
TextureCache.h
@@ -170,11 +170,10 @@ public:
     gpu::TexturePointer cacheTextureByHash(const std::string& hash, const gpu::TexturePointer& texture);


-    /// Selfie rendering targets.
-    NetworkTexturePointer getSelfieNetworkTexture();
-    const gpu::TexturePointer& getSelfieTexture();
-    const gpu::FramebufferPointer& getSelfieFramebuffer();
-    void resetSelfieFramebuffer(int width, int height);
+    /// SpectatorCamera rendering targets.
+    NetworkTexturePointer getResourceTexture(QUrl resourceTextureUrl);
+    const gpu::FramebufferPointer& getSpectatorCameraFramebuffer();
+    void resetSpectatorCameraFramebuffer(int width, int height);

 protected:
     // Overload ResourceCache::prefetch to allow specifying texture type for loads

@@ -193,7 +192,6 @@ private:

     static const std::string KTX_DIRNAME;
     static const std::string KTX_EXT;
-    static const std::string SELFIE_FRAME_URL;

     KTXCache _ktxCache;
     // Map from image hashes to texture weak pointers

@@ -206,10 +204,8 @@ private:
     gpu::TexturePointer _blueTexture;
     gpu::TexturePointer _blackTexture;

-
-    gpu::FramebufferPointer _selfieFramebuffer;
-    gpu::TexturePointer _selfieTexture;
-    NetworkTexturePointer _selfieNetworkTexture;
+    NetworkTexturePointer _spectatorCameraNetworkTexture;
+    gpu::FramebufferPointer _spectatorCameraFramebuffer;
 };

 #endif // hifi_TextureCache_h
spectatorCamera.js
@@ -46,6 +46,8 @@
 //     camera: The in-world entity that corresponds to the spectator camera.
 //     cameraIsDynamic: "false" for now while we figure out why dynamic, parented overlays
 //         drift with respect to their parent
+//     lastCameraPosition: Holds the last known camera position
+//     lastCameraRotation: Holds the last known camera rotation
 //
 // Arguments:
 //     None
@@ -54,16 +56,23 @@
 // The update function for the spectator camera. Modifies the camera's position
 // and orientation.
 //
-var spectatorFrameRenderConfig = Render.getConfig("SelfieFrame");
-var beginSpectatorFrameRenderConfig = Render.getConfig("BeginSelfie");
+var spectatorFrameRenderConfig = Render.getConfig("SecondaryCameraFrame");
+var beginSpectatorFrameRenderConfig = Render.getConfig("BeginSecondaryCamera");
 var viewFinderOverlay = false;
 var camera = false;
 var cameraIsDynamic = false;
+var lastCameraPosition = false;
+var lastCameraRotation = false;
 function updateRenderFromCamera() {
     var cameraData = Entities.getEntityProperties(camera, ['position', 'rotation']);
-    // FIXME: don't muck with config if properties haven't changed.
-    beginSpectatorFrameRenderConfig.position = cameraData.position;
-    beginSpectatorFrameRenderConfig.orientation = cameraData.rotation;
+    if (JSON.stringify(lastCameraRotation) !== JSON.stringify(cameraData.rotation)) {
+        lastCameraRotation = cameraData.rotation;
+        beginSpectatorFrameRenderConfig.orientation = lastCameraRotation;
+    }
+    if (JSON.stringify(lastCameraPosition) !== JSON.stringify(cameraData.position)) {
+        lastCameraPosition = cameraData.position;
+        beginSpectatorFrameRenderConfig.position = Vec3.sum(inFrontOf(0.17, lastCameraPosition, lastCameraRotation), {x: 0, y: 0.02, z: 0});
+    }
     if (cameraIsDynamic) {
         // BUG: image3d overlays don't retain their locations properly when parented to a dynamic object
         Overlays.editOverlay(viewFinderOverlay, { orientation: flip(cameraData.rotation) });
@@ -88,35 +97,49 @@
 function spectatorCameraOn() {
     // Set the special texture size based on the window in which it will eventually be displayed.
     var size = Controller.getViewportDimensions(); // FIXME: Need a signal to hook into when the dimensions change.
-    spectatorFrameRenderConfig.resetSize(size.x, size.y);
+    spectatorFrameRenderConfig.resetSizeSpectatorCamera(size.x, size.y);
     spectatorFrameRenderConfig.enabled = beginSpectatorFrameRenderConfig.enabled = true;
-    var cameraRotation = MyAvatar.orientation, cameraPosition = inFrontOf(2);
+    var cameraRotation = MyAvatar.orientation, cameraPosition = inFrontOf(1, Vec3.sum(MyAvatar.position, { x: 0, y: 0.3, z: 0 }));
     Script.update.connect(updateRenderFromCamera);
     isUpdateRenderWired = true;
     camera = Entities.addEntity({
-        type: 'Box',
-        dimensions: { x: 0.4, y: 0.2, z: 0.4 },
-        userData: '{"grabbableKey":{"grabbable":true}}',
-        dynamic: cameraIsDynamic,
-        color: { red: 255, green: 0, blue: 0 },
-        name: 'SpectatorCamera',
-        position: cameraPosition, // Put the camera in front of me so that I can find it.
-        rotation: cameraRotation
+        "angularDamping": 0.98000001907348633,
+        "collisionsWillMove": 0,
+        "damping": 0.98000001907348633,
+        "dimensions": {
+            "x": 0.2338641881942749,
+            "y": 0.407032310962677,
+            "z": 0.38702544569969177
+        },
+        "dynamic": cameraIsDynamic,
+        "modelURL": "http://hifi-content.s3.amazonaws.com/alan/dev/spectator-camera.fbx",
+        "queryAACube": {
+            "scale": 0.60840487480163574,
+            "x": -0.30420243740081787,
+            "y": -0.30420243740081787,
+            "z": -0.30420243740081787
+        },
+        "rotation": { x: 0, y: 0, z: 0 },
+        "position": { x: 0, y: 0, z: 0 },
+        "shapeType": "simple-compound",
+        "type": "Model",
+        "userData": "{\"grabbableKey\":{\"grabbable\":true}}"
+    }, true);
-    // Put an image3d overlay on the near face, as a viewFinder.
+    // This image3d overlay acts as the camera's preview screen.
     viewFinderOverlay = Overlays.addOverlay("image3d", {
-        url: "http://selfieFrame",
-        //url: "http://1.bp.blogspot.com/-1GABEq__054/T03B00j_OII/AAAAAAAAAa8/jo55LcvEPHI/s1600/Winning.jpg",
+        url: "resource://spectatorCameraFrame",
         emissive: true,
         parentID: camera,
         alpha: 1,
-        position: inFrontOf(-0.25, cameraPosition, cameraRotation),
-        // FIXME: We shouldn't need the flip and the negative scale.
-        // e.g., This isn't necessary using an ordinary .jpg with lettering, above.
-        // Must be something about the view frustum projection matrix?
-        // But don't go changing that in (c++ code) without getting all the way to a desktop display!
-        orientation: flip(cameraRotation),
-        scale: -0.35,
+        position: { x: 0.007, y: 0.15, z: -0.005 },
+        scale: -0.16,
     });
+    Entities.editEntity(camera, { position: cameraPosition, rotation: cameraRotation });
+    // FIXME: We shouldn't need the flip and the negative scale.
+    // e.g., This isn't necessary using an ordinary .jpg with lettering, above.
+    // Must be something about the view frustum projection matrix?
+    // But don't go changing that in (c++ code) without getting all the way to a desktop display!
+    Overlays.editOverlay(viewFinderOverlay, { orientation: flip(cameraRotation) });
     setDisplay(monitorShowsCameraView);
 }
@@ -141,7 +164,6 @@
     }
     if (camera) {
         Entities.deleteEntity(camera);
-        print("ZACH FOX GOODBYE");
     }
     if (viewFinderOverlay) {
         Overlays.deleteOverlay(viewFinderOverlay);
@@ -216,7 +238,7 @@

 function setDisplay(showCameraView) {
     // It would be fancy if (showCameraView && !isUpdateRenderWired) would show instructions, but that's out of scope for now.
-    var url = (showCameraView && isUpdateRenderWired) ? "http://selfieFrame" : "";
+    var url = (showCameraView && isUpdateRenderWired) ? "resource://spectatorCameraFrame" : "";
     Window.setDisplayTexture(url);
 }
 const MONITOR_SHOWS_CAMERA_VIEW_DEFAULT = false;
@@ -264,6 +286,7 @@
         tablet.loadQMLSource("../SpectatorCamera.qml");
         onSpectatorCameraScreen = true;
         sendToQml({ method: 'updateSpectatorCameraCheckbox', params: !!camera });
         sendToQml({ method: 'updateMonitorShowsSwitch', params: !!Settings.getValue('spectatorCamera/monitorShowsCameraView', false) });
+        setMonitorShowsCameraViewAndSendToQml(monitorShowsCameraView);
     }
 }