Merge branch 'master' of https://github.com/highfidelity/hifi into event-installer-interface-options

howard-stearns 2017-06-20 09:47:52 -07:00
commit 974768390c
50 changed files with 875 additions and 434 deletions


@@ -10,7 +10,6 @@
     <link href="/css/sweetalert.css" rel="stylesheet" media="screen">
     <link href="/css/bootstrap-switch.min.css" rel="stylesheet" media="screen">
 </head>
 <body>
 <nav class="navbar navbar-default" role="navigation">
@@ -38,8 +37,23 @@
             </li>
             <li><a href="/content/">Content</a></li>
             <li><a href="/settings/">Settings</a></li>
+            <li><a href="#" id="restart-server"><span class="glyphicon glyphicon-refresh"></span> Restart</a></li>
         </ul>
     </div>
 </div><!-- /.container-fluid -->
 </nav>
+<div class="modal fade" id="restart-modal">
+    <div class="modal-dialog">
+        <div class="modal-content">
+            <div class="modal-header">
+                <h4 class="modal-title">domain-server is restarting</h4>
+            </div>
+            <div class="modal-body">
+                <h5>This page will automatically refresh in <span id="refresh-time">3 seconds</span>.</h5>
+            </div>
+        </div><!-- /.modal-content -->
+    </div><!-- /.modal-dialog -->
+</div><!-- /.modal -->
 <div class="container-fluid">


@@ -1,3 +1,28 @@
+function showRestartModal() {
+    $('#restart-modal').modal({
+        backdrop: 'static',
+        keyboard: false
+    });
+
+    var secondsElapsed = 0;
+    var numberOfSecondsToWait = 3;
+
+    var refreshSpan = $('span#refresh-time')
+    refreshSpan.html(numberOfSecondsToWait + " seconds");
+
+    // call ourselves every 1 second to countdown
+    var refreshCountdown = setInterval(function(){
+        secondsElapsed++;
+        secondsLeft = numberOfSecondsToWait - secondsElapsed
+        refreshSpan.html(secondsLeft + (secondsLeft == 1 ? " second" : " seconds"))
+
+        if (secondsElapsed == numberOfSecondsToWait) {
+            location.reload(true);
+            clearInterval(refreshCountdown);
+        }
+    }, 1000);
+}
+
 $(document).ready(function(){
     var url = window.location;
     // Will only work if string in href matches with location
@@ -7,4 +32,10 @@ $(document).ready(function(){
     $('ul.nav a').filter(function() {
         return this.href == url;
     }).parent().addClass('active');
+
+    $('body').on('click', '#restart-server', function(e){
+        $.get("/restart");
+        showRestartModal();
+        return false;
+    });
 });


@@ -81,19 +81,6 @@
         </div>
     </div>
-    <div class="modal fade" id="restart-modal">
-        <div class="modal-dialog">
-            <div class="modal-content">
-                <div class="modal-header">
-                    <h4 class="modal-title">domain-server is restarting</h4>
-                </div>
-                <div class="modal-body">
-                    <h5>This page will automatically refresh in <span id="refresh-time">3 seconds</span>.</h5>
-                </div>
-            </div><!-- /.modal-content -->
-        </div><!-- /.modal-dialog -->
-    </div><!-- /.modal -->
 <!--#include virtual="footer.html"-->
 <script src='/js/underscore-min.js'></script>
 <script src='/js/underscore-keypath.min.js'></script>


@@ -1680,31 +1680,6 @@ function updateDataChangedForSiblingRows(row, forceTrue) {
     })
 }
 
-function showRestartModal() {
-    $('#restart-modal').modal({
-        backdrop: 'static',
-        keyboard: false
-    });
-
-    var secondsElapsed = 0;
-    var numberOfSecondsToWait = 3;
-
-    var refreshSpan = $('span#refresh-time')
-    refreshSpan.html(numberOfSecondsToWait + " seconds");
-
-    // call ourselves every 1 second to countdown
-    var refreshCountdown = setInterval(function(){
-        secondsElapsed++;
-        secondsLeft = numberOfSecondsToWait - secondsElapsed
-        refreshSpan.html(secondsLeft + (secondsLeft == 1 ? " second" : " seconds"))
-
-        if (secondsElapsed == numberOfSecondsToWait) {
-            location.reload(true);
-            clearInterval(refreshCountdown);
-        }
-    }, 1000);
-}
-
 function cleanupFormValues(node) {
     if (node.type && node.type === 'checkbox') {
         return { name: node.name, value: node.checked ? true : false };


@@ -1650,6 +1650,7 @@ bool DomainServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url
     const QString URI_NODES = "/nodes";
     const QString URI_SETTINGS = "/settings";
     const QString URI_ENTITY_FILE_UPLOAD = "/content/upload";
+    const QString URI_RESTART = "/restart";
     const QString UUID_REGEX_STRING = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}";
@@ -1804,6 +1805,10 @@ bool DomainServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url
             // send the response
             connection->respond(HTTPConnection::StatusCode200, nodesDocument.toJson(), qPrintable(JSON_MIME_TYPE));
 
+            return true;
+        } else if (url.path() == URI_RESTART) {
+            connection->respond(HTTPConnection::StatusCode200);
+            restart();
             return true;
         } else {
             // check if this is for json stats for a node


@@ -18,7 +18,7 @@ import QtQuick.Layouts 1.3
 import "../../styles-uit"
 import "../../controls-uit" as HifiControls
 import "../../windows"
-import "./" as Audio
+import "./" as AudioControls
 
 Rectangle {
     id: root;
@@ -57,7 +57,7 @@ Rectangle {
         x: 16; // padding does not work
         spacing: 16;
 
-        Audio.CheckBox {
+        AudioControls.CheckBox {
            text: qsTr("Mute microphone");
            checked: Audio.muted;
            onClicked: {
@@ -65,7 +65,7 @@ Rectangle {
                checked = Qt.binding(function() { return Audio.muted; }); // restore binding
            }
        }
-        Audio.CheckBox {
+        AudioControls.CheckBox {
            text: qsTr("Enable noise reduction");
            checked: Audio.noiseReduction;
            onClicked: {
@@ -73,7 +73,7 @@ Rectangle {
                checked = Qt.binding(function() { return Audio.noiseReduction; }); // restore binding
            }
        }
-        Audio.CheckBox {
+        AudioControls.CheckBox {
            text: qsTr("Show audio level meter");
            checked: AvatarInputs.showAudioTools;
            onClicked: {
@@ -110,7 +110,7 @@ Rectangle {
            delegate: Item {
                width: parent.width;
                height: 36;
-                Audio.CheckBox {
+                AudioControls.CheckBox {
                    text: display;
                    checked: selected;
                    onClicked: {
@@ -148,7 +148,7 @@ Rectangle {
            delegate: Item {
                width: parent.width;
                height: 36;
-                Audio.CheckBox {
+                AudioControls.CheckBox {
                    text: display;
                    checked: selected;
                    onClicked: {


@@ -115,6 +115,7 @@
 #include <RenderDeferredTask.h>
 #include <RenderForwardTask.h>
 #include <RenderViewTask.h>
+#include <SecondaryCamera.h>
 #include <ResourceCache.h>
 #include <ResourceRequest.h>
 #include <SandboxUtils.h>
@@ -1899,6 +1900,7 @@ void Application::initializeGL() {
     render::CullFunctor cullFunctor = LODManager::shouldRender;
     static const QString RENDER_FORWARD = "HIFI_RENDER_FORWARD";
     bool isDeferred = !QProcessEnvironment::systemEnvironment().contains(RENDER_FORWARD);
+    _renderEngine->addJob<SecondaryCameraRenderTask>("SecondaryCameraFrame", cullFunctor);
     _renderEngine->addJob<RenderViewTask>("RenderMainView", cullFunctor, isDeferred);
     _renderEngine->load();
     _renderEngine->registerScene(_main3DScene);


@@ -0,0 +1,125 @@
+//
+// SecondaryCamera.cpp
+// interface/src
+//
+// Created by Samuel Gateau, Howard Stearns, and Zach Fox on 2017-06-08.
+// Copyright 2013 High Fidelity, Inc.
+//
+// Distributed under the Apache License, Version 2.0.
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "SecondaryCamera.h"
+#include <TextureCache.h>
+#include <gpu/Context.h>
+
+using RenderArgsPointer = std::shared_ptr<RenderArgs>;
+
+void MainRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, bool isDeferred) {
+    task.addJob<RenderShadowTask>("RenderShadowTask", cullFunctor);
+    const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor);
+    assert(items.canCast<RenderFetchCullSortTask::Output>());
+    if (!isDeferred) {
+        task.addJob<RenderForwardTask>("Forward", items);
+    } else {
+        task.addJob<RenderDeferredTask>("RenderDeferredTask", items);
+    }
+}
+
+void SecondaryCameraRenderTaskConfig::resetSize(int width, int height) { // FIXME: Add an arg here for "destinationFramebuffer"
+    bool wasEnabled = isEnabled();
+    setEnabled(false);
+    auto textureCache = DependencyManager::get<TextureCache>();
+    textureCache->resetSpectatorCameraFramebuffer(width, height); // FIXME: Call the correct reset function based on the "destinationFramebuffer" arg
+    setEnabled(wasEnabled);
+}
+
+void SecondaryCameraRenderTaskConfig::resetSizeSpectatorCamera(int width, int height) { // Carefully adjust the framebuffer / texture.
+    resetSize(width, height);
+}
+
+class BeginSecondaryCameraFrame { // Changes renderContext for our framebuffer and and view.
+    glm::vec3 _position{};
+    glm::quat _orientation{};
+    float _vFoV{};
+    float _nearClipPlaneDistance{};
+    float _farClipPlaneDistance{};
+public:
+    using Config = BeginSecondaryCameraFrameConfig;
+    using JobModel = render::Job::ModelO<BeginSecondaryCameraFrame, RenderArgsPointer, Config>;
+    BeginSecondaryCameraFrame() {
+        _cachedArgsPointer = std::make_shared<RenderArgs>(_cachedArgs);
+    }
+
+    void configure(const Config& config) {
+        if (config.enabled || config.alwaysEnabled) {
+            _position = config.position;
+            _orientation = config.orientation;
+            _vFoV = config.vFoV;
+            _nearClipPlaneDistance = config.nearClipPlaneDistance;
+            _farClipPlaneDistance = config.farClipPlaneDistance;
+        }
+    }
+
+    void run(const render::RenderContextPointer& renderContext, RenderArgsPointer& cachedArgs) {
+        auto args = renderContext->args;
+        auto textureCache = DependencyManager::get<TextureCache>();
+        gpu::FramebufferPointer destFramebuffer;
+        destFramebuffer = textureCache->getSpectatorCameraFramebuffer(); // FIXME: Change the destination based on some unimplemented config var
+        if (destFramebuffer) {
+            _cachedArgsPointer->_blitFramebuffer = args->_blitFramebuffer;
+            _cachedArgsPointer->_viewport = args->_viewport;
+            _cachedArgsPointer->_displayMode = args->_displayMode;
+            _cachedArgsPointer->_renderMode = args->_renderMode;
+            args->_blitFramebuffer = destFramebuffer;
+            args->_viewport = glm::ivec4(0, 0, destFramebuffer->getWidth(), destFramebuffer->getHeight());
+            args->_displayMode = RenderArgs::MONO;
+            args->_renderMode = RenderArgs::RenderMode::SECONDARY_CAMERA_RENDER_MODE;
+
+            gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
+                batch.disableContextStereo();
+            });
+
+            auto srcViewFrustum = args->getViewFrustum();
+            srcViewFrustum.setPosition(_position);
+            srcViewFrustum.setOrientation(_orientation);
+            srcViewFrustum.setProjection(glm::perspective(glm::radians(_vFoV), ((float)args->_viewport.z / (float)args->_viewport.w), _nearClipPlaneDistance, _farClipPlaneDistance));
+            // Without calculating the bound planes, the secondary camera will use the same culling frustum as the main camera,
+            // which is not what we want here.
+            srcViewFrustum.calculate();
+            args->pushViewFrustum(srcViewFrustum);
+            cachedArgs = _cachedArgsPointer;
+        }
+    }
+
+protected:
+    RenderArgs _cachedArgs;
+    RenderArgsPointer _cachedArgsPointer;
+};
+
+class EndSecondaryCameraFrame { // Restores renderContext.
+public:
+    using JobModel = render::Job::ModelI<EndSecondaryCameraFrame, RenderArgsPointer>;
+
+    void run(const render::RenderContextPointer& renderContext, const RenderArgsPointer& cachedArgs) {
+        auto args = renderContext->args;
+        args->_blitFramebuffer = cachedArgs->_blitFramebuffer;
+        args->_viewport = cachedArgs->_viewport;
+        args->popViewFrustum();
+        args->_displayMode = cachedArgs->_displayMode;
+        args->_renderMode = cachedArgs->_renderMode;
+
+        gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
+            batch.restoreContextStereo();
+        });
+    }
+};
+
+void SecondaryCameraRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor) {
+    const auto cachedArg = task.addJob<BeginSecondaryCameraFrame>("BeginSecondaryCamera");
+    const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor);
+    assert(items.canCast<RenderFetchCullSortTask::Output>());
+    task.addJob<RenderDeferredTask>("RenderDeferredTask", items);
+    task.addJob<EndSecondaryCameraFrame>("EndSecondaryCamera", cachedArg);
+}
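The resetSizeSpectatorCamera slot above is the hook a client script would use to resize the framebuffer the secondary camera renders into. A minimal sketch of driving it, assuming render job configs are reachable through the Render scripting interface by the job name registered in Application.cpp above ("SecondaryCameraFrame"); only resetSizeSpectatorCamera(width, height) itself comes from this commit:

    // Sketch only: Render.getConfig and the config name are assumptions, not part of this diff.
    var secondaryCameraTask = Render.getConfig("SecondaryCameraFrame");
    // Reallocate the spectator camera framebuffer/texture to the desired output size.
    secondaryCameraTask.resetSizeSpectatorCamera(1280, 720);
    secondaryCameraTask.enabled = true; // the task is constructed disabled (render::Task::Config(false))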


@@ -0,0 +1,70 @@
+//
+// SecondaryCamera.h
+// interface/src
+//
+// Created by Samuel Gateau, Howard Stearns, and Zach Fox on 2017-06-08.
+// Copyright 2013 High Fidelity, Inc.
+//
+// Distributed under the Apache License, Version 2.0.
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#pragma once
+#ifndef hifi_SecondaryCamera_h
+#define hifi_SecondaryCamera_h
+
+#include <RenderShadowTask.h>
+#include <render/RenderFetchCullSortTask.h>
+#include <RenderDeferredTask.h>
+#include <RenderForwardTask.h>
+
+class MainRenderTask {
+public:
+    using JobModel = render::Task::Model<MainRenderTask>;
+
+    MainRenderTask() {}
+
+    void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, bool isDeferred = true);
+};
+
+class BeginSecondaryCameraFrameConfig : public render::Task::Config { // Exposes secondary camera parameters to JavaScript.
+    Q_OBJECT
+    Q_PROPERTY(glm::vec3 position MEMBER position NOTIFY dirty) // of viewpoint to render from
+    Q_PROPERTY(glm::quat orientation MEMBER orientation NOTIFY dirty) // of viewpoint to render from
+    Q_PROPERTY(float vFoV MEMBER vFoV NOTIFY dirty) // Secondary camera's vertical field of view. In degrees.
+    Q_PROPERTY(float nearClipPlaneDistance MEMBER nearClipPlaneDistance NOTIFY dirty) // Secondary camera's near clip plane distance. In meters.
+    Q_PROPERTY(float farClipPlaneDistance MEMBER farClipPlaneDistance NOTIFY dirty) // Secondary camera's far clip plane distance. In meters.
+public:
+    glm::vec3 position{};
+    glm::quat orientation{};
+    float vFoV{ 45.0f };
+    float nearClipPlaneDistance{ 0.1f };
+    float farClipPlaneDistance{ 100.0f };
+    BeginSecondaryCameraFrameConfig() : render::Task::Config(false) {}
+signals:
+    void dirty();
+};
+
+class SecondaryCameraRenderTaskConfig : public render::Task::Config {
+    Q_OBJECT
+public:
+    SecondaryCameraRenderTaskConfig() : render::Task::Config(false) {}
+private:
+    void resetSize(int width, int height);
+signals:
+    void dirty();
+public slots:
+    void resetSizeSpectatorCamera(int width, int height);
+};
+
+class SecondaryCameraRenderTask {
+public:
+    using Config = SecondaryCameraRenderTaskConfig;
+    using JobModel = render::Task::Model<SecondaryCameraRenderTask, Config>;
+    SecondaryCameraRenderTask() {}
+    void configure(const Config& config) {}
+    void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor);
+};
+
+#endif
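BeginSecondaryCameraFrameConfig is the piece a script would talk to when aiming the secondary camera. A hedged JavaScript sketch follows; the dotted config path "SecondaryCameraFrame.BeginSecondaryCamera" is a guess assembled from the job names used in SecondaryCameraRenderTask::build and Application.cpp, while the property names, units, and defaults are taken directly from the header above:

    // Sketch only: the Render.getConfig lookup path is an assumption.
    var beginFrameConfig = Render.getConfig("SecondaryCameraFrame.BeginSecondaryCamera");
    beginFrameConfig.position = Vec3.sum(MyAvatar.position, { x: 0, y: 2, z: -3 }); // viewpoint to render from
    beginFrameConfig.orientation = MyAvatar.orientation;
    beginFrameConfig.vFoV = 45.0;                  // vertical field of view, in degrees
    beginFrameConfig.nearClipPlaneDistance = 0.1;  // meters
    beginFrameConfig.farClipPlaneDistance = 100.0; // meters
    beginFrameConfig.enabled = true;               // constructed with render::Task::Config(false), so off by default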


@@ -1881,15 +1881,14 @@ void MyAvatar::preDisplaySide(RenderArgs* renderArgs) {
 
 const float RENDER_HEAD_CUTOFF_DISTANCE = 0.3f;
 
-bool MyAvatar::cameraInsideHead() const {
-    const glm::vec3 cameraPosition = qApp->getCamera().getPosition();
+bool MyAvatar::cameraInsideHead(const glm::vec3& cameraPosition) const {
     return glm::length(cameraPosition - getHeadPosition()) < (RENDER_HEAD_CUTOFF_DISTANCE * getUniformScale());
 }
 
 bool MyAvatar::shouldRenderHead(const RenderArgs* renderArgs) const {
     bool defaultMode = renderArgs->_renderMode == RenderArgs::DEFAULT_RENDER_MODE;
     bool firstPerson = qApp->getCamera().getMode() == CAMERA_MODE_FIRST_PERSON;
-    bool insideHead = cameraInsideHead();
+    bool insideHead = cameraInsideHead(renderArgs->getViewFrustum().getPosition());
     return !defaultMode || !firstPerson || !insideHead;
 }


@@ -615,7 +615,7 @@ private:
                         float scale = 1.0f, bool isSoft = false,
                         bool allowDuplicates = false, bool useSaved = true) override;
 
-    bool cameraInsideHead() const;
+    bool cameraInsideHead(const glm::vec3& cameraPosition) const;
 
     void updateEyeContactTarget(float deltaTime);


@@ -284,6 +284,11 @@ void WindowScriptingInterface::copyToClipboard(const QString& text) {
     QApplication::clipboard()->setText(text);
 }
 
+bool WindowScriptingInterface::setDisplayTexture(const QString& name) {
+    return qApp->getActiveDisplayPlugin()->setDisplayTexture(name); // Plugins that don't know how, answer false.
+}
+
 void WindowScriptingInterface::takeSnapshot(bool notify, bool includeAnimated, float aspectRatio) {
     qApp->takeSnapshot(notify, includeAnimated, aspectRatio);
 }


@@ -62,6 +62,7 @@ public slots:
     void displayAnnouncement(const QString& message);
     void shareSnapshot(const QString& path, const QUrl& href = QUrl(""));
     bool isPhysicsEnabled();
+    bool setDisplayTexture(const QString& name);
     int openMessageBox(QString title, QString text, int buttons, int defaultButton);
     void updateMessageBox(int id, QString title, QString text, int buttons, int defaultButton);
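setDisplayTexture gives scripts a way to ask the active display plugin to present a named texture instead of the composited scene; the plugin answers false if it does not know how. A sketch of the intended pairing with the spectator camera texture; the "resource://spectatorCameraFrame" URL is defined in TextureCache.cpp later in this commit, and the TextureCache.prefetch call is an assumption standing in for the caller's responsibility (noted in OpenGLDisplayPlugin.cpp) to keep the network texture alive in the cache:

    // Sketch only: keep the resource texture cached, then ask the display plugin to present it.
    var spectatorFrame = TextureCache.prefetch("resource://spectatorCameraFrame"); // assumed scripting API
    if (!Window.setDisplayTexture("resource://spectatorCameraFrame")) {
        print("The active display plugin cannot present a display texture.");
    }
    // Pass an empty name to return to normal presentation:
    // Window.setDisplayTexture("");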


@@ -288,6 +288,13 @@ void Avatar::updateAvatarEntities() {
                 properties.setScript(noScript);
             }
 
+            // When grabbing avatar entities, they are parented to the joint moving them, then when un-grabbed
+            // they go back to the default parent (null uuid). When un-gripped, others saw the entity disappear.
+            // The thinking here is the local position was noticed as changing, but not the parentID (since it is now
+            // back to the default), and the entity flew off somewhere. Marking all changed definitely fixes this,
+            // and seems safe (per Seth).
+            properties.markAllChanged();
+
             // try to build the entity
             EntityItemPointer entity = entityTree->findEntityByEntityItemID(EntityItemID(entityID));
             bool success = true;
@@ -1067,15 +1074,15 @@ void Avatar::setModelURLFinished(bool success) {
 
         const int MAX_SKELETON_DOWNLOAD_ATTEMPTS = 4; // NOTE: we don't want to be as generous as ResourceCache is, we only want 4 attempts
         if (_skeletonModel->getResourceDownloadAttemptsRemaining() <= 0 ||
             _skeletonModel->getResourceDownloadAttempts() > MAX_SKELETON_DOWNLOAD_ATTEMPTS) {
             qCWarning(avatars_renderer) << "Using default after failing to load Avatar model: " << _skeletonModelURL
                 << "after" << _skeletonModel->getResourceDownloadAttempts() << "attempts.";
             // call _skeletonModel.setURL, but leave our copy of _skeletonModelURL alone. This is so that
             // we don't redo this every time we receive an identity packet from the avatar with the bad url.
             QMetaObject::invokeMethod(_skeletonModel.get(), "setURL",
                 Qt::QueuedConnection, Q_ARG(QUrl, AvatarData::defaultFullAvatarModelUrl()));
         } else {
             qCWarning(avatars_renderer) << "Avatar model: " << _skeletonModelURL
                 << "failed to load... attempts:" << _skeletonModel->getResourceDownloadAttempts()
                 << "out of:" << MAX_SKELETON_DOWNLOAD_ATTEMPTS;
         }
     }


@@ -496,6 +496,17 @@ void OpenGLDisplayPlugin::submitFrame(const gpu::FramePointer& newFrame) {
         _newFrameQueue.push(newFrame);
     });
 }
 
+void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor) {
+    batch.enableStereo(false);
+    batch.resetViewTransform();
+    batch.setFramebuffer(gpu::FramebufferPointer());
+    batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
+    batch.setStateScissorRect(scissor);
+    batch.setViewportTransform(viewport);
+    batch.setResourceTexture(0, texture);
+    batch.setPipeline(_presentPipeline);
+    batch.draw(gpu::TRIANGLE_STRIP, 4);
+}
+
 void OpenGLDisplayPlugin::updateFrameData() {
     PROFILE_RANGE(render, __FUNCTION__)
@@ -605,14 +616,11 @@ void OpenGLDisplayPlugin::compositeLayers() {
 void OpenGLDisplayPlugin::internalPresent() {
     render([&](gpu::Batch& batch) {
-        batch.enableStereo(false);
-        batch.resetViewTransform();
-        batch.setFramebuffer(gpu::FramebufferPointer());
-        batch.setViewportTransform(ivec4(uvec2(0), getSurfacePixels()));
-        batch.setResourceTexture(0, _compositeFramebuffer->getRenderBuffer(0));
-        batch.setPipeline(_presentPipeline);
-        batch.draw(gpu::TRIANGLE_STRIP, 4);
+        // Note: _displayTexture must currently be the same size as the display.
+        uvec2 dims = _displayTexture ? uvec2(_displayTexture->getDimensions()) : getSurfacePixels();
+        auto viewport = ivec4(uvec2(0), dims);
+        renderFromTexture(batch, _displayTexture ? _displayTexture : _compositeFramebuffer->getRenderBuffer(0), viewport, viewport);
     });
     swapBuffers();
     _presentRate.increment();
 }
@@ -694,6 +702,22 @@ void OpenGLDisplayPlugin::withMainThreadContext(std::function<void()> f) const {
     _container->makeRenderingContextCurrent();
 }
 
+bool OpenGLDisplayPlugin::setDisplayTexture(const QString& name) {
+    // Note: it is the caller's responsibility to keep the network texture in cache.
+    if (name.isEmpty()) {
+        _displayTexture.reset();
+        onDisplayTextureReset();
+        return true;
+    }
+    auto textureCache = DependencyManager::get<TextureCache>();
+    auto displayNetworkTexture = textureCache->getTexture(name);
+    if (!displayNetworkTexture) {
+        return false;
+    }
+    _displayTexture = displayNetworkTexture->getGPUTexture();
+    return !!_displayTexture;
+}
+
 QImage OpenGLDisplayPlugin::getScreenshot(float aspectRatio) const {
     auto size = _compositeFramebuffer->getSize();
     if (isHmd()) {


@@ -57,6 +57,8 @@ public:
         return getSurfaceSize();
     }
 
+    virtual bool setDisplayTexture(const QString& name) override;
+    virtual bool onDisplayTextureReset() { return false; };
+
     QImage getScreenshot(float aspectRatio = 0.0f) const override;
 
     float presentRate() const override;
@@ -109,6 +111,7 @@ protected:
     // Plugin specific functionality to send the composed scene to the output window or device
     virtual void internalPresent();
+    void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor);
 
     virtual void updateFrameData();
 
     void withMainThreadContext(std::function<void()> f) const;
@@ -134,6 +137,7 @@ protected:
     gpu::PipelinePointer _simplePipeline;
     gpu::PipelinePointer _presentPipeline;
     gpu::PipelinePointer _cursorPipeline;
+    gpu::TexturePointer _displayTexture{};
     float _compositeOverlayAlpha { 1.0f };
 
     struct CursorData {


@@ -27,6 +27,7 @@
 #include <gpu/StandardShaderLib.h>
 #include <gpu/gl/GLBackend.h>
 
+#include <TextureCache.h>
 #include <PathUtils.h>
 
 #include "../Logging.h"
@@ -211,7 +212,15 @@ void HmdDisplayPlugin::internalPresent() {
     // Composite together the scene, overlay and mouse cursor
     hmdPresent();
 
-    if (!_disablePreview) {
+    if (_displayTexture) {
+        // Note: _displayTexture must currently be the same size as the display.
+        uvec2 dims = uvec2(_displayTexture->getDimensions());
+        auto viewport = ivec4(uvec2(0), dims);
+        render([&](gpu::Batch& batch) {
+            renderFromTexture(batch, _displayTexture, viewport, viewport);
+        });
+        swapBuffers();
+    } else if (!_disablePreview) {
         // screen preview mirroring
         auto sourceSize = _renderTargetSize;
         if (_monoPreview) {
@@ -278,16 +287,7 @@ void HmdDisplayPlugin::internalPresent() {
                 viewport.z *= 2;
             }
-
-            batch.enableStereo(false);
-            batch.resetViewTransform();
-            batch.setFramebuffer(gpu::FramebufferPointer());
-            batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
-            batch.setStateScissorRect(scissor); // was viewport
-            batch.setViewportTransform(viewport);
-            batch.setResourceTexture(0, _compositeFramebuffer->getRenderBuffer(0));
-            batch.setPipeline(_presentPipeline);
-            batch.draw(gpu::TRIANGLE_STRIP, 4);
+            renderFromTexture(batch, _compositeFramebuffer->getRenderBuffer(0), viewport, scissor);
         });
         swapBuffers();
     } else if (_clearPreviewFlag) {
@@ -316,15 +316,7 @@ void HmdDisplayPlugin::internalPresent() {
             auto viewport = getViewportForSourceSize(uvec2(_previewTexture->getDimensions()));
 
             render([&](gpu::Batch& batch) {
-                batch.enableStereo(false);
-                batch.resetViewTransform();
-                batch.setFramebuffer(gpu::FramebufferPointer());
-                batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
-                batch.setStateScissorRect(viewport);
-                batch.setViewportTransform(viewport);
-                batch.setResourceTexture(0, _previewTexture);
-                batch.setPipeline(_presentPipeline);
-                batch.draw(gpu::TRIANGLE_STRIP, 4);
+                renderFromTexture(batch, _previewTexture, viewport, viewport);
             });
             _clearPreviewFlag = false;
             swapBuffers();


@@ -46,6 +46,8 @@ public:
 
     float stutterRate() const override;
 
+    virtual bool onDisplayTextureReset() override { _clearPreviewFlag = true; return true; };
+
 protected:
     virtual void hmdPresent() = 0;
     virtual bool isHmdMounted() const = 0;


@@ -115,6 +115,9 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
 
     (&::gpu::gl::GLBackend::do_resetStages),
 
+    (&::gpu::gl::GLBackend::do_disableContextStereo),
+    (&::gpu::gl::GLBackend::do_restoreContextStereo),
+
     (&::gpu::gl::GLBackend::do_runLambda),
 
     (&::gpu::gl::GLBackend::do_startNamedCall),
@@ -224,6 +227,14 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
                 _transform.preUpdate(_commandIndex, _stereo);
                 break;
 
+            case Batch::COMMAND_disableContextStereo:
+                _stereo._contextDisable = true;
+                break;
+
+            case Batch::COMMAND_restoreContextStereo:
+                _stereo._contextDisable = false;
+                break;
+
             case Batch::COMMAND_setViewportTransform:
             case Batch::COMMAND_setViewTransform:
             case Batch::COMMAND_setProjectionTransform: {
@@ -308,16 +319,16 @@ void GLBackend::render(const Batch& batch) {
     }
 
 #ifdef GPU_STEREO_DRAWCALL_INSTANCED
-    if (_stereo._enable) {
+    if (_stereo.isStereo()) {
         glEnable(GL_CLIP_DISTANCE0);
     }
 #endif
     {
-        PROFILE_RANGE(render_gpu_gl_detail, _stereo._enable ? "Render Stereo" : "Render");
+        PROFILE_RANGE(render_gpu_gl_detail, _stereo.isStereo() ? "Render Stereo" : "Render");
         renderPassDraw(batch);
     }
 #ifdef GPU_STEREO_DRAWCALL_INSTANCED
-    if (_stereo._enable) {
+    if (_stereo.isStereo()) {
         glDisable(GL_CLIP_DISTANCE0);
     }
 #endif
@@ -358,6 +369,15 @@ void GLBackend::do_resetStages(const Batch& batch, size_t paramOffset) {
     resetStages();
 }
 
+void GLBackend::do_disableContextStereo(const Batch& batch, size_t paramOffset) {
+
+}
+
+void GLBackend::do_restoreContextStereo(const Batch& batch, size_t paramOffset) {
+
+}
+
 void GLBackend::do_runLambda(const Batch& batch, size_t paramOffset) {
     std::function<void()> f = batch._lambdas.get(batch._params[paramOffset]._uint);
     f();


@@ -143,6 +143,9 @@ public:
     // Reset stages
     virtual void do_resetStages(const Batch& batch, size_t paramOffset) final;
 
+    virtual void do_disableContextStereo(const Batch& batch, size_t paramOffset) final;
+    virtual void do_restoreContextStereo(const Batch& batch, size_t paramOffset) final;
+
     virtual void do_runLambda(const Batch& batch, size_t paramOffset) final;
 
     virtual void do_startNamedCall(const Batch& batch, size_t paramOffset) final;


@@ -48,7 +48,7 @@ void GLBackend::do_setFramebuffer(const Batch& batch, size_t paramOffset) {
 }
 
 void GLBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
-    if (_stereo._enable && !_pipeline._stateCache.scissorEnable) {
+    if (_stereo.isStereo() && !_pipeline._stateCache.scissorEnable) {
         qWarning("Clear without scissor in stereo mode");
     }


@@ -322,7 +322,7 @@ void GLBackend::do_setStateScissorRect(const Batch& batch, size_t paramOffset) {
     Vec4i rect;
     memcpy(&rect, batch.readData(batch._params[paramOffset]._uint), sizeof(Vec4i));
 
-    if (_stereo._enable) {
+    if (_stereo.isStereo()) {
         rect.z /= 2;
         if (_stereo._pass) {
             rect.x += rect.z;


@@ -37,7 +37,7 @@ void GLBackend::do_setViewportTransform(const Batch& batch, size_t paramOffset)
     glViewport(vp.x, vp.y, vp.z, vp.w);
 
     // Where we assign the GL viewport
-    if (_stereo._enable) {
+    if (_stereo.isStereo()) {
         vp.z /= 2;
         if (_stereo._pass) {
             vp.x += vp.z;
@@ -119,7 +119,7 @@ void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const Stereo
     size_t offset = _cameraUboSize * _cameras.size();
     _cameraOffsets.push_back(TransformStageState::Pair(commandIndex, offset));
 
-    if (stereo._enable) {
+    if (stereo.isStereo()) {
 #ifdef GPU_STEREO_CAMERA_BUFFER
         _cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, _view), _camera.getEyeCamera(1, stereo, _view)));
 #else
@@ -151,7 +151,7 @@ void GLBackend::TransformStageState::update(size_t commandIndex, const StereoSta
 #ifdef GPU_STEREO_CAMERA_BUFFER
     bindCurrentCamera(0);
 #else
-    if (!stereo._enable) {
+    if (!stereo.isStereo()) {
         bindCurrentCamera(0);
     }
 #endif


@@ -390,6 +390,15 @@ void Batch::resetStages() {
     ADD_COMMAND(resetStages);
 }
 
+void Batch::disableContextStereo() {
+    ADD_COMMAND(disableContextStereo);
+}
+
+void Batch::restoreContextStereo() {
+    ADD_COMMAND(restoreContextStereo);
+}
+
 void Batch::runLambda(std::function<void()> f) {
     ADD_COMMAND(runLambda);
     _params.emplace_back(_lambdas.cache(f));


@@ -217,6 +217,9 @@ public:
     // Reset the stage caches and states
     void resetStages();
 
+    void disableContextStereo();
+    void restoreContextStereo();
+
     // Debugging
     void pushProfileRange(const char* name);
     void popProfileRange();
@@ -301,6 +304,9 @@ public:
 
         COMMAND_resetStages,
 
+        COMMAND_disableContextStereo,
+        COMMAND_restoreContextStereo,
+
         COMMAND_runLambda,
 
         COMMAND_startNamedCall,
@@ -467,7 +473,7 @@ public:
     NamedBatchDataMap _namedData;
 
     bool _enableStereo{ true };
-    bool _enableSkybox{ false };
+    bool _enableSkybox { false };
 
 protected:
     friend class Context;


@@ -145,7 +145,7 @@ void Context::enableStereo(bool enable) {
 }
 
 bool Context::isStereo() {
-    return _stereo._enable;
+    return _stereo.isStereo();
 }
 
 void Context::setStereoProjections(const mat4 eyeProjections[2]) {


@@ -118,7 +118,7 @@ public:
 protected:
     virtual bool isStereo() {
-        return _stereo._enable;
+        return _stereo.isStereo();
     }
 
     void getStereoProjections(mat4* eyeProjections) const {


@@ -93,7 +93,11 @@ namespace gpu {
     using TextureViews = std::vector<TextureView>;
 
     struct StereoState {
+        bool isStereo() const {
+            return _enable && !_contextDisable;
+        }
         bool _enable{ false };
+        bool _contextDisable { false };
         bool _skybox{ false };
         // 0 for left eye, 1 for right eye
         uint8 _pass{ 0 };


@@ -16,6 +16,8 @@
 
 using namespace ktx;
 
+int ktxDescriptorMetaTypeId = qRegisterMetaType<KTXDescriptor*>();
+
 const Header::Identifier ktx::Header::IDENTIFIER {{
     0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A
 }};


@@ -387,4 +387,6 @@ namespace ktx {
 
 }
 
+Q_DECLARE_METATYPE(ktx::KTXDescriptor*);
+
 #endif // hifi_ktx_KTX_h


@@ -13,6 +13,8 @@
 
 #include <mutex>
 
+#include <QtConcurrent/QtConcurrentRun>
+
 #include <QCryptographicHash>
 #include <QImageReader>
 #include <QRunnable>
@@ -50,6 +52,9 @@ Q_LOGGING_CATEGORY(trace_resource_parse_image_ktx, "trace.resource.parse.image.k
 const std::string TextureCache::KTX_DIRNAME { "ktx_cache" };
 const std::string TextureCache::KTX_EXT { "ktx" };
 
+static const QString RESOURCE_SCHEME = "resource";
+static const QUrl SPECTATOR_CAMERA_FRAME_URL("resource://spectatorCameraFrame");
+
 static const float SKYBOX_LOAD_PRIORITY { 10.0f }; // Make sure skybox loads first
 static const float HIGH_MIPS_LOAD_PRIORITY { 9.0f }; // Make sure high mips loads after skybox but before models
@@ -180,6 +185,9 @@ ScriptableResource* TextureCache::prefetch(const QUrl& url, int type, int maxNum
 }
 
 NetworkTexturePointer TextureCache::getTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels) {
+    if (url.scheme() == RESOURCE_SCHEME) {
+        return getResourceTexture(url);
+    }
     TextureExtra extra = { type, content, maxNumPixels };
     return ResourceCache::getResource(url, QUrl(), &extra).staticCast<NetworkTexture>();
 }
@@ -265,6 +273,18 @@ QSharedPointer<Resource> TextureCache::createResource(const QUrl& url, const QSh
     return QSharedPointer<Resource>(texture, &Resource::deleter);
 }
 
+NetworkTexture::NetworkTexture(const QUrl& url) :
+    Resource(url),
+    _type(),
+    _sourceIsKTX(false),
+    _maxNumPixels(100)
+{
+    _textureSource = std::make_shared<gpu::TextureSource>();
+    _lowestRequestedMipLevel = 0;
+    _loaded = true;
+}
+
 NetworkTexture::NetworkTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels) :
     Resource(url),
     _type(type),
@@ -303,14 +323,12 @@ void NetworkTexture::setImage(gpu::TexturePointer texture, int originalWidth,
         _width = texture->getWidth();
         _height = texture->getHeight();
         setSize(texture->getStoredSize());
+        finishedLoading(true);
     } else {
-        // FIXME: If !gpuTexture, we failed to load!
         _width = _height = 0;
-        qWarning() << "Texture did not load";
+        finishedLoading(false);
     }
 
-    finishedLoading(true);
-
     emit networkTextureCreated(qWeakPointerCast<NetworkTexture, Resource> (_self));
 }
@@ -382,8 +400,7 @@ void NetworkTexture::makeRequest() {
 
         emit loading();
 
-        connect(_ktxHeaderRequest, &ResourceRequest::progress, this, &NetworkTexture::ktxHeaderRequestProgress);
-        connect(_ktxHeaderRequest, &ResourceRequest::finished, this, &NetworkTexture::ktxHeaderRequestFinished);
+        connect(_ktxHeaderRequest, &ResourceRequest::finished, this, &NetworkTexture::ktxInitialDataRequestFinished);
 
         _bytesReceived = _bytesTotal = _bytes = 0;
@@ -407,18 +424,18 @@ void NetworkTexture::makeRequest() {
 }
 
 void NetworkTexture::startRequestForNextMipLevel() {
-    if (_lowestKnownPopulatedMip == 0) {
-        qWarning(networking) << "Requesting next mip level but all have been fulfilled: " << _lowestKnownPopulatedMip
-            << " " << _textureSource->getGPUTexture()->minAvailableMipLevel() << " " << _url;
+    auto self = _self.lock();
+    if (!self) {
         return;
     }
 
-    if (_ktxResourceState == WAITING_FOR_MIP_REQUEST) {
-        auto self = _self.lock();
-        if (!self) {
-            return;
-        }
+    auto texture = _textureSource->getGPUTexture();
+    if (!texture || _ktxResourceState != WAITING_FOR_MIP_REQUEST) {
+        return;
+    }
+
+    _lowestKnownPopulatedMip = texture->minAvailableMipLevel();
+    if (_lowestRequestedMipLevel < _lowestKnownPopulatedMip) {
         _ktxResourceState = PENDING_MIP_REQUEST;
         init(false);
@@ -453,6 +470,8 @@ void NetworkTexture::startMipRangeRequest(uint16_t low, uint16_t high) {
         ByteRange range;
         range.fromInclusive = -HIGH_MIP_MAX_SIZE;
         _ktxMipRequest->setByteRange(range);
+
+        connect(_ktxMipRequest, &ResourceRequest::finished, this, &NetworkTexture::ktxInitialDataRequestFinished);
     } else {
         ByteRange range;
         range.fromInclusive = ktx::KTX_HEADER_SIZE + _originalKtxDescriptor->header.bytesOfKeyValueData
@@ -460,229 +479,315 @@ void NetworkTexture::startMipRangeRequest(uint16_t low, uint16_t high) {
         range.toExclusive = ktx::KTX_HEADER_SIZE + _originalKtxDescriptor->header.bytesOfKeyValueData
             + _originalKtxDescriptor->images[high + 1]._imageOffset;
         _ktxMipRequest->setByteRange(range);
-    }
 
-    connect(_ktxMipRequest, &ResourceRequest::progress, this, &NetworkTexture::ktxMipRequestProgress);
-    connect(_ktxMipRequest, &ResourceRequest::finished, this, &NetworkTexture::ktxMipRequestFinished);
+        connect(_ktxMipRequest, &ResourceRequest::finished, this, &NetworkTexture::ktxMipRequestFinished);
+    }
 
     _ktxMipRequest->send();
 }
-void NetworkTexture::ktxHeaderRequestFinished() {
-    Q_ASSERT(_ktxResourceState == LOADING_INITIAL_DATA);
-
-    if (!_ktxHeaderRequest) {
+// This is called when the header or top mips have been loaded
+void NetworkTexture::ktxInitialDataRequestFinished() {
+    if (!_ktxHeaderRequest || _ktxHeaderRequest->getState() != ResourceRequest::Finished ||
+        !_ktxMipRequest || _ktxMipRequest->getState() != ResourceRequest::Finished) {
+        // Wait for both request to be finished
         return;
     }
 
-    _ktxHeaderRequestFinished = true;
-    maybeHandleFinishedInitialLoad();
+    Q_ASSERT(_ktxResourceState == LOADING_INITIAL_DATA);
+    Q_ASSERT_X(_ktxHeaderRequest && _ktxMipRequest, __FUNCTION__, "Request should not be null while in ktxInitialDataRequestFinished");
+
+    PROFILE_ASYNC_END(resource, "Resource:" + getType(), QString::number(_requestID), {
+        { "from_cache", _ktxHeaderRequest->loadedFromCache() },
+        { "size_mb", _bytesTotal / 1000000.0 }
+    });
+
+    PROFILE_RANGE_EX(resource_parse_image, __FUNCTION__, 0xffff0000, 0, { { "url", _url.toString() } });
+
+    setSize(_bytesTotal);
+
+    TextureCache::requestCompleted(_self);
+
+    auto result = _ktxHeaderRequest->getResult();
+    if (result == ResourceRequest::Success) {
+        result = _ktxMipRequest->getResult();
+    }
+
+    if (result == ResourceRequest::Success) {
+        auto extraInfo = _url == _activeUrl ? "" : QString(", %1").arg(_activeUrl.toDisplayString());
+        qCDebug(networking).noquote() << QString("Request finished for %1%2").arg(_url.toDisplayString(), extraInfo);
+
+        _ktxHeaderData = _ktxHeaderRequest->getData();
+        _ktxHighMipData = _ktxMipRequest->getData();
+        handleFinishedInitialLoad();
+    } else {
+        if (handleFailedRequest(result)) {
+            _ktxResourceState = PENDING_INITIAL_LOAD;
+        } else {
+            _ktxResourceState = FAILED_TO_LOAD;
+        }
+    }
+
+    _ktxHeaderRequest->disconnect(this);
+    _ktxHeaderRequest->deleteLater();
+    _ktxHeaderRequest = nullptr;
+    _ktxMipRequest->disconnect(this);
+    _ktxMipRequest->deleteLater();
+    _ktxMipRequest = nullptr;
 }
 void NetworkTexture::ktxMipRequestFinished() {
-    Q_ASSERT(_ktxResourceState == LOADING_INITIAL_DATA || _ktxResourceState == REQUESTING_MIP);
+    Q_ASSERT_X(_ktxMipRequest, __FUNCTION__, "Request should not be null while in ktxMipRequestFinished");
+    Q_ASSERT(_ktxResourceState == REQUESTING_MIP);
 
-    if (!_ktxMipRequest) {
+    PROFILE_ASYNC_END(resource, "Resource:" + getType(), QString::number(_requestID), {
+        { "from_cache", _ktxMipRequest->loadedFromCache() },
+        { "size_mb", _bytesTotal / 1000000.0 }
+    });
+
+    PROFILE_RANGE_EX(resource_parse_image, __FUNCTION__, 0xffff0000, 0, { { "url", _url.toString() } });
+
+    setSize(_bytesTotal);
+
+    if (!_ktxMipRequest || _ktxMipRequest != sender()) {
+        // This can happen in the edge case that a request is timed out, but a `finished` signal is emitted before it is deleted.
+        qWarning(networking) << "Received signal NetworkTexture::ktxMipRequestFinished from ResourceRequest that is not the current"
+            << " request: " << sender() << ", " << _ktxMipRequest;
         return;
     }
 
-    if (_ktxResourceState == LOADING_INITIAL_DATA) {
-        _ktxHighMipRequestFinished = true;
-        maybeHandleFinishedInitialLoad();
-    } else if (_ktxResourceState == REQUESTING_MIP) {
-        Q_ASSERT(_ktxMipLevelRangeInFlight.first != NULL_MIP_LEVEL);
-        TextureCache::requestCompleted(_self);
+    TextureCache::requestCompleted(_self);
 
-        if (_ktxMipRequest->getResult() == ResourceRequest::Success) {
+    auto result = _ktxMipRequest->getResult();
+    if (result == ResourceRequest::Success) {
+        auto extraInfo = _url == _activeUrl ? "" : QString(", %1").arg(_activeUrl.toDisplayString());
+        qCDebug(networking).noquote() << QString("Request finished for %1%2").arg(_url.toDisplayString(), extraInfo);
+
+        if (_ktxResourceState == REQUESTING_MIP) {
+            Q_ASSERT(_ktxMipLevelRangeInFlight.first != NULL_MIP_LEVEL);
             Q_ASSERT(_ktxMipLevelRangeInFlight.second - _ktxMipLevelRangeInFlight.first == 0);
+            _ktxResourceState = WAITING_FOR_MIP_REQUEST;
 
+            auto self = _self;
+            auto url = _url;
+            auto data = _ktxMipRequest->getData();
+            auto mipLevel = _ktxMipLevelRangeInFlight.first;
             auto texture = _textureSource->getGPUTexture();
-            if (texture) {
-                texture->assignStoredMip(_ktxMipLevelRangeInFlight.first,
-                    _ktxMipRequest->getData().size(), reinterpret_cast<uint8_t*>(_ktxMipRequest->getData().data()));
-
-                if (texture->minAvailableMipLevel() <= _ktxMipLevelRangeInFlight.first) {
-                    _lowestKnownPopulatedMip = texture->minAvailableMipLevel();
-                    _ktxResourceState = WAITING_FOR_MIP_REQUEST;
-                } else {
-                    qWarning(networking) << "Failed to load mip: " << _url << ":" << _ktxMipLevelRangeInFlight.first;
-                    _ktxResourceState = FAILED_TO_LOAD;
-                }
-            } else {
-                _ktxResourceState = WAITING_FOR_MIP_REQUEST;
-                qWarning(networking) << "Trying to update mips but texture is null";
-            }
-            finishedLoading(true);
+            DependencyManager::get<StatTracker>()->incrementStat("PendingProcessing");
+            QtConcurrent::run(QThreadPool::globalInstance(), [self, data, mipLevel, url, texture] {
+                PROFILE_RANGE_EX(resource_parse_image, "NetworkTexture - Processing Mip Data", 0xffff0000, 0, { { "url", url.toString() } });
+                DependencyManager::get<StatTracker>()->decrementStat("PendingProcessing");
+                CounterStat counter("Processing");
+
+                auto originalPriority = QThread::currentThread()->priority();
+                if (originalPriority == QThread::InheritPriority) {
+                    originalPriority = QThread::NormalPriority;
+                }
+                QThread::currentThread()->setPriority(QThread::LowPriority);
+                Finally restorePriority([originalPriority] { QThread::currentThread()->setPriority(originalPriority); });
+
+                auto resource = self.lock();
+                if (!resource) {
+                    // Resource no longer exists, bail
+                    return;
+                }
+
+                Q_ASSERT_X(texture, "Async - NetworkTexture::ktxMipRequestFinished", "NetworkTexture should have been assigned a GPU texture by now.");
+
+                texture->assignStoredMip(mipLevel, data.size(), reinterpret_cast<const uint8_t*>(data.data()));
+
+                QMetaObject::invokeMethod(resource.data(), "setImage",
+                    Q_ARG(gpu::TexturePointer, texture),
+                    Q_ARG(int, texture->getWidth()),
+                    Q_ARG(int, texture->getHeight()));
+
+                QMetaObject::invokeMethod(resource.data(), "startRequestForNextMipLevel");
+            });
         } else {
+            qWarning(networking) << "Mip request finished in an unexpected state: " << _ktxResourceState;
             finishedLoading(false);
-            if (handleFailedRequest(_ktxMipRequest->getResult())) {
-                _ktxResourceState = PENDING_MIP_REQUEST;
-            } else {
-                qWarning(networking) << "Failed to load mip: " << _url;
-                _ktxResourceState = FAILED_TO_LOAD;
-            }
-        }
-
-        _ktxMipRequest->deleteLater();
-        _ktxMipRequest = nullptr;
-
-        if (_ktxResourceState == WAITING_FOR_MIP_REQUEST && _lowestRequestedMipLevel < _lowestKnownPopulatedMip) {
-            startRequestForNextMipLevel();
         }
     } else {
-        qWarning() << "Mip request finished in an unexpected state: " << _ktxResourceState;
+        if (handleFailedRequest(result)) {
+            _ktxResourceState = PENDING_MIP_REQUEST;
+        } else {
+            _ktxResourceState = FAILED_TO_LOAD;
+        }
     }
+
+    _ktxMipRequest->disconnect(this);
+    _ktxMipRequest->deleteLater();
+    _ktxMipRequest = nullptr;
 }
-// This is called when the header or top mips have been loaded
-void NetworkTexture::maybeHandleFinishedInitialLoad() {
+// This is called when the header and top mips have been loaded
+void NetworkTexture::handleFinishedInitialLoad() {
     Q_ASSERT(_ktxResourceState == LOADING_INITIAL_DATA);
+    Q_ASSERT(!_ktxHeaderData.isEmpty() && !_ktxHighMipData.isEmpty());
 
-    if (_ktxHeaderRequestFinished && _ktxHighMipRequestFinished) {
-        TextureCache::requestCompleted(_self);
-
-        if (_ktxHeaderRequest->getResult() != ResourceRequest::Success || _ktxMipRequest->getResult() != ResourceRequest::Success) {
-            if (handleFailedRequest(_ktxMipRequest->getResult())) {
-                _ktxResourceState = PENDING_INITIAL_LOAD;
-            }
-            else {
-                _ktxResourceState = FAILED_TO_LOAD;
-            }
-
-            _ktxHeaderRequest->deleteLater();
-            _ktxHeaderRequest = nullptr;
-            _ktxMipRequest->deleteLater();
-            _ktxMipRequest = nullptr;
-        } else {
-            // create ktx...
-            auto ktxHeaderData = _ktxHeaderRequest->getData();
-            auto ktxHighMipData = _ktxMipRequest->getData();
-            auto header = reinterpret_cast<const ktx::Header*>(ktxHeaderData.data());
-
-            if (!ktx::checkIdentifier(header->identifier)) {
-                qWarning() << "Cannot load " << _url << ", invalid header identifier";
-                _ktxResourceState = FAILED_TO_LOAD;
-                finishedLoading(false);
-                return;
-            }
-
-            auto kvSize = header->bytesOfKeyValueData;
-            if (kvSize > (ktxHeaderData.size() - ktx::KTX_HEADER_SIZE)) {
-                qWarning() << "Cannot load " << _url << ", did not receive all kv data with initial request";
-                _ktxResourceState = FAILED_TO_LOAD;
-                finishedLoading(false);
-                return;
-            }
-
-            auto keyValues = ktx::KTX::parseKeyValues(header->bytesOfKeyValueData, reinterpret_cast<const ktx::Byte*>(ktxHeaderData.data()) + ktx::KTX_HEADER_SIZE);
-
-            auto imageDescriptors = header->generateImageDescriptors();
-            if (imageDescriptors.size() == 0) {
-                qWarning(networking) << "Failed to process ktx file " << _url;
-                _ktxResourceState = FAILED_TO_LOAD;
-                finishedLoading(false);
-            }
-            _originalKtxDescriptor.reset(new ktx::KTXDescriptor(*header, keyValues, imageDescriptors));
-
-            // Create bare ktx in memory
-            auto found = std::find_if(keyValues.begin(), keyValues.end(), [](const ktx::KeyValue& val) -> bool {
-                return val._key.compare(gpu::SOURCE_HASH_KEY) == 0;
-            });
-            std::string filename;
-            std::string hash;
-            if (found == keyValues.end() || found->_value.size() != gpu::SOURCE_HASH_BYTES) {
-                qWarning("Invalid source hash key found, bailing");
-                _ktxResourceState = FAILED_TO_LOAD;
-                finishedLoading(false);
-                return;
-            } else {
-                // at this point the source hash is in binary 16-byte form
-                // and we need it in a hexadecimal string
-                auto binaryHash = QByteArray(reinterpret_cast<char*>(found->_value.data()), gpu::SOURCE_HASH_BYTES);
-                hash = filename = binaryHash.toHex().toStdString();
-            }
-
-            auto textureCache = DependencyManager::get<TextureCache>();
-            gpu::TexturePointer texture = textureCache->getTextureByHash(hash);
-            if (!texture) {
-                KTXFilePointer ktxFile = textureCache->_ktxCache.getFile(hash);
-                if (ktxFile) {
-                    texture = gpu::Texture::unserialize(ktxFile);
-                    if (texture) {
-                        texture = textureCache->cacheTextureByHash(hash, texture);
-                        _file = ktxFile;
-                    }
-                }
-            }
-
-            if (!texture) {
-                auto memKtx = ktx::KTX::createBare(*header, keyValues);
-                if (!memKtx) {
-                    qWarning() << " Ktx could not be created, bailing";
-                    finishedLoading(false);
-                    return;
-                }
-
-                // Move ktx to file
-                const char* data = reinterpret_cast<const char*>(memKtx->_storage->data());
-                size_t length = memKtx->_storage->size();
-                KTXFilePointer file;
-                auto& ktxCache = textureCache->_ktxCache;
-                if (!memKtx || !(file = ktxCache.writeFile(data, KTXCache::Metadata(filename, length)))) {
-                    qCWarning(modelnetworking) << _url << " failed to write cache file";
-                    _ktxResourceState = FAILED_TO_LOAD;
-                    finishedLoading(false);
-                    return;
-                } else {
-                    _file = file;
-                }
-
-                auto newKtxDescriptor = memKtx->toDescriptor();
-                texture = gpu::Texture::build(newKtxDescriptor);
-                texture->setKtxBacking(file);
-                texture->setSource(filename);
-
-                auto& images = _originalKtxDescriptor->images;
-                size_t imageSizeRemaining = ktxHighMipData.size();
-                uint8_t* ktxData = reinterpret_cast<uint8_t*>(ktxHighMipData.data());
-                ktxData += ktxHighMipData.size();
-                // TODO Move image offset calculation to ktx ImageDescriptor
-                for (int level = static_cast<int>(images.size()) - 1; level >= 0; --level) {
-                    auto& image = images[level];
-                    if (image._imageSize > imageSizeRemaining) {
-                        break;
-                    }
-                    ktxData -= image._imageSize;
-                    texture->assignStoredMip(static_cast<gpu::uint16>(level), image._imageSize, ktxData);
-                    ktxData -= ktx::IMAGE_SIZE_WIDTH;
-                    imageSizeRemaining -= (image._imageSize + ktx::IMAGE_SIZE_WIDTH);
-                }
-
-                // We replace the texture with the one stored in the cache. This deals with the possible race condition of two different
-                // images with the same hash being loaded concurrently. Only one of them will make it into the cache by hash first and will
-                // be the winner
-                texture = textureCache->cacheTextureByHash(filename, texture);
-            }
-
-            _lowestKnownPopulatedMip = texture->minAvailableMipLevel();
-
-            _ktxResourceState = WAITING_FOR_MIP_REQUEST;
-            setImage(texture, header->getPixelWidth(), header->getPixelHeight());
-
-            _ktxHeaderRequest->deleteLater();
-            _ktxHeaderRequest = nullptr;
-            _ktxMipRequest->deleteLater();
-            _ktxMipRequest = nullptr;
-        }
-        startRequestForNextMipLevel();
-    }
+    // create ktx...
+    auto ktxHeaderData = _ktxHeaderData;
+    auto ktxHighMipData = _ktxHighMipData;
+    _ktxHeaderData.clear();
+    _ktxHighMipData.clear();
+
+    _ktxResourceState = WAITING_FOR_MIP_REQUEST;
+
+    auto self = _self;
+    auto url = _url;
+    DependencyManager::get<StatTracker>()->incrementStat("PendingProcessing");
+    QtConcurrent::run(QThreadPool::globalInstance(), [self, ktxHeaderData, ktxHighMipData, url] {
+        PROFILE_RANGE_EX(resource_parse_image, "NetworkTexture - Processing Initial Data", 0xffff0000, 0, { { "url", url.toString() } });
+        DependencyManager::get<StatTracker>()->decrementStat("PendingProcessing");
+        CounterStat counter("Processing");
+
+        auto originalPriority = QThread::currentThread()->priority();
+        if (originalPriority == QThread::InheritPriority) {
+            originalPriority = QThread::NormalPriority;
+        }
+        QThread::currentThread()->setPriority(QThread::LowPriority);
+        Finally restorePriority([originalPriority] { QThread::currentThread()->setPriority(originalPriority); });
auto resource = self.lock();
if (!resource) {
// Resource no longer exists, bail
return;
}
auto header = reinterpret_cast<const ktx::Header*>(ktxHeaderData.data());
if (!ktx::checkIdentifier(header->identifier)) {
qWarning() << "Cannot load " << url << ", invalid header identifier";
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, nullptr),
Q_ARG(int, 0),
Q_ARG(int, 0));
return;
}
auto kvSize = header->bytesOfKeyValueData;
if (kvSize > (ktxHeaderData.size() - ktx::KTX_HEADER_SIZE)) {
qWarning() << "Cannot load " << url << ", did not receive all kv data with initial request";
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, nullptr),
Q_ARG(int, 0),
Q_ARG(int, 0));
return;
}
auto keyValues = ktx::KTX::parseKeyValues(header->bytesOfKeyValueData, reinterpret_cast<const ktx::Byte*>(ktxHeaderData.data()) + ktx::KTX_HEADER_SIZE);
auto imageDescriptors = header->generateImageDescriptors();
if (imageDescriptors.size() == 0) {
qWarning(networking) << "Failed to process ktx file " << url;
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, nullptr),
Q_ARG(int, 0),
Q_ARG(int, 0));
return;
}
auto originalKtxDescriptor = new ktx::KTXDescriptor(*header, keyValues, imageDescriptors);
QMetaObject::invokeMethod(resource.data(), "setOriginalDescriptor",
Q_ARG(ktx::KTXDescriptor*, originalKtxDescriptor));
// Create bare ktx in memory
auto found = std::find_if(keyValues.begin(), keyValues.end(), [](const ktx::KeyValue& val) -> bool {
return val._key.compare(gpu::SOURCE_HASH_KEY) == 0;
});
std::string filename;
std::string hash;
if (found == keyValues.end() || found->_value.size() != gpu::SOURCE_HASH_BYTES) {
qWarning("Invalid source hash key found, bailing");
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, nullptr),
Q_ARG(int, 0),
Q_ARG(int, 0));
return;
} else {
// at this point the source hash is in binary 16-byte form
// and we need it in a hexadecimal string
auto binaryHash = QByteArray(reinterpret_cast<char*>(found->_value.data()), gpu::SOURCE_HASH_BYTES);
hash = filename = binaryHash.toHex().toStdString();
}
auto textureCache = DependencyManager::get<TextureCache>();
gpu::TexturePointer texture = textureCache->getTextureByHash(hash);
if (!texture) {
KTXFilePointer ktxFile = textureCache->_ktxCache.getFile(hash);
if (ktxFile) {
texture = gpu::Texture::unserialize(ktxFile);
if (texture) {
texture = textureCache->cacheTextureByHash(hash, texture);
}
}
}
if (!texture) {
auto memKtx = ktx::KTX::createBare(*header, keyValues);
if (!memKtx) {
qWarning() << " Ktx could not be created, bailing";
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, nullptr),
Q_ARG(int, 0),
Q_ARG(int, 0));
return;
}
// Move ktx to file
const char* data = reinterpret_cast<const char*>(memKtx->_storage->data());
size_t length = memKtx->_storage->size();
KTXFilePointer file;
auto& ktxCache = textureCache->_ktxCache;
if (!memKtx || !(file = ktxCache.writeFile(data, KTXCache::Metadata(filename, length)))) {
qCWarning(modelnetworking) << url << " failed to write cache file";
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, nullptr),
Q_ARG(int, 0),
Q_ARG(int, 0));
return;
}
auto newKtxDescriptor = memKtx->toDescriptor();
texture = gpu::Texture::build(newKtxDescriptor);
texture->setKtxBacking(file);
texture->setSource(filename);
auto& images = originalKtxDescriptor->images;
size_t imageSizeRemaining = ktxHighMipData.size();
const uint8_t* ktxData = reinterpret_cast<const uint8_t*>(ktxHighMipData.data());
ktxData += ktxHighMipData.size();
// TODO Move image offset calculation to ktx ImageDescriptor
for (int level = static_cast<int>(images.size()) - 1; level >= 0; --level) {
auto& image = images[level];
if (image._imageSize > imageSizeRemaining) {
break;
}
ktxData -= image._imageSize;
texture->assignStoredMip(static_cast<gpu::uint16>(level), image._imageSize, ktxData);
ktxData -= ktx::IMAGE_SIZE_WIDTH;
imageSizeRemaining -= (image._imageSize + ktx::IMAGE_SIZE_WIDTH);
}
// We replace the texture with the one stored in the cache. This deals with the possible race condition of two different
// images with the same hash being loaded concurrently. Only one of them will make it into the cache by hash first and will
// be the winner
texture = textureCache->cacheTextureByHash(filename, texture);
}
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, texture),
Q_ARG(int, texture->getWidth()),
Q_ARG(int, texture->getHeight()));
QMetaObject::invokeMethod(resource.data(), "startRequestForNextMipLevel");
});
} }
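The mip-assignment loop above walks the downloaded tail of the KTX file backwards: a KTX container stores mip levels largest (level 0) first, each prefixed by a 4-byte imageSize field, so the smallest mips sit at the end of the file and each level's offset can be recovered by stepping back over its data and then its size prefix. A minimal standalone sketch of that walk, with hypothetical MipDescriptor/locateMips names standing in for ktx::ImageDescriptor and the engine code above:

    #include <cstdint>
    #include <cstddef>
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct MipDescriptor { std::size_t imageSize; };                 // stand-in for ktx::ImageDescriptor
    constexpr std::size_t IMAGE_SIZE_WIDTH = sizeof(std::uint32_t);  // the per-mip size prefix in a KTX file

    // Returns (mip level, offset into the downloaded blob) for every level fully contained in it.
    std::vector<std::pair<int, std::size_t>> locateMips(const std::vector<MipDescriptor>& mips,
                                                        std::size_t blobSize) {
        std::vector<std::pair<int, std::size_t>> found;
        std::size_t remaining = blobSize;
        std::size_t cursor = blobSize;                               // walk backwards from the end of the blob
        for (int level = static_cast<int>(mips.size()) - 1; level >= 0; --level) {
            if (mips[level].imageSize > remaining) {
                break;                                               // this level (and all larger ones) was not downloaded
            }
            cursor -= mips[level].imageSize;                         // data for this level starts here
            found.emplace_back(level, cursor);
            cursor -= IMAGE_SIZE_WIDTH;                              // step over this level's size prefix
            remaining -= mips[level].imageSize + IMAGE_SIZE_WIDTH;
        }
        return found;
    }

    int main() {
        // Mips of 64, 16 and 4 bytes; only the last two (with their 4-byte prefixes) were downloaded.
        std::vector<MipDescriptor> mips{ {64}, {16}, {4} };
        for (auto [level, offset] : locateMips(mips, 16 + 4 + 2 * IMAGE_SIZE_WIDTH)) {
            std::printf("mip %d at offset %zu\n", level, offset);
        }
        return 0;
    }

Against the example in main() this prints mip 2 at offset 24 and mip 1 at offset 4, then stops at level 0 because its 64 bytes were never downloaded, which is exactly what the image._imageSize > imageSizeRemaining check above guards against.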
void NetworkTexture::downloadFinished(const QByteArray& data) { void NetworkTexture::downloadFinished(const QByteArray& data) {
@ -845,11 +950,11 @@ void ImageReader::read() {
const char* data = reinterpret_cast<const char*>(memKtx->_storage->data()); const char* data = reinterpret_cast<const char*>(memKtx->_storage->data());
size_t length = memKtx->_storage->size(); size_t length = memKtx->_storage->size();
auto& ktxCache = textureCache->_ktxCache; auto& ktxCache = textureCache->_ktxCache;
networkTexture->_file = ktxCache.writeFile(data, KTXCache::Metadata(hash, length)); auto file = ktxCache.writeFile(data, KTXCache::Metadata(hash, length));
if (!networkTexture->_file) { if (!file) {
qCWarning(modelnetworking) << _url << "file cache failed"; qCWarning(modelnetworking) << _url << "file cache failed";
} else { } else {
texture->setKtxBacking(networkTexture->_file); texture->setKtxBacking(file);
} }
} else { } else {
qCWarning(modelnetworking) << "Unable to serialize texture to KTX " << _url; qCWarning(modelnetworking) << "Unable to serialize texture to KTX " << _url;
@ -866,3 +971,32 @@ void ImageReader::read() {
Q_ARG(int, texture->getWidth()), Q_ARG(int, texture->getWidth()),
Q_ARG(int, texture->getHeight())); Q_ARG(int, texture->getHeight()));
} }
NetworkTexturePointer TextureCache::getResourceTexture(QUrl resourceTextureUrl) {
gpu::TexturePointer texture;
if (resourceTextureUrl == SPECTATOR_CAMERA_FRAME_URL) {
if (!_spectatorCameraNetworkTexture) {
_spectatorCameraNetworkTexture.reset(new NetworkTexture(resourceTextureUrl));
}
texture = _spectatorCameraFramebuffer->getRenderBuffer(0);
if (texture) {
_spectatorCameraNetworkTexture->setImage(texture, texture->getWidth(), texture->getHeight());
return _spectatorCameraNetworkTexture;
}
}
return NetworkTexturePointer();
}
const gpu::FramebufferPointer& TextureCache::getSpectatorCameraFramebuffer() {
if (!_spectatorCameraFramebuffer) {
resetSpectatorCameraFramebuffer(2048, 1024);
}
return _spectatorCameraFramebuffer;
}
void TextureCache::resetSpectatorCameraFramebuffer(int width, int height) {
_spectatorCameraFramebuffer.reset(gpu::Framebuffer::create("spectatorCamera", gpu::Element::COLOR_SRGBA_32, width, height));
_spectatorCameraNetworkTexture.reset();
}
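The three methods added above wire a secondary-view render target into the texture cache: getSpectatorCameraFramebuffer() lazily creates a 2048x1024 SRGBA framebuffer, resetSpectatorCameraFramebuffer() rebuilds it at a new size and drops the cached NetworkTexture wrapper, and getResourceTexture() hands the framebuffer's color attachment back to anything that asks for SPECTATOR_CAMERA_FRAME_URL. A caller-side sketch of how the pieces fit together; the helper name and the render step are assumptions, and the fragment presumes the engine's TextureCache.h and DependencyManager headers:

    // Hypothetical helper, not part of this change, showing the intended call sequence.
    void renderSpectatorFrame() {
        auto textureCache = DependencyManager::get<TextureCache>();

        // Lazily created at 2048x1024 on first use; reset to change the resolution.
        const gpu::FramebufferPointer& framebuffer = textureCache->getSpectatorCameraFramebuffer();
        // textureCache->resetSpectatorCameraFramebuffer(1920, 1080);

        // ... render the secondary camera into `framebuffer`, tagging the pass with
        // RenderArgs::RenderMode::SECONDARY_CAMERA_RENDER_MODE so the avatar's head is
        // not cauterized in that view (see the CauterizedMeshPartPayload hunk below) ...

        // Consumers then resolve the live frame through the resource-texture path.
        NetworkTexturePointer frame = textureCache->getResourceTexture(SPECTATOR_CAMERA_FRAME_URL);
        if (frame) {
            // `frame` now wraps render buffer 0 of the framebuffer at its current size.
        }
    }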

View file

@ -43,6 +43,7 @@ class NetworkTexture : public Resource, public Texture {
Q_OBJECT Q_OBJECT
public: public:
NetworkTexture(const QUrl& url);
NetworkTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels); NetworkTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels);
~NetworkTexture() override; ~NetworkTexture() override;
@ -58,14 +59,13 @@ public:
void refresh() override; void refresh() override;
Q_INVOKABLE void setOriginalDescriptor(ktx::KTXDescriptor* descriptor) { _originalKtxDescriptor.reset(descriptor); }
signals: signals:
void networkTextureCreated(const QWeakPointer<NetworkTexture>& self); void networkTextureCreated(const QWeakPointer<NetworkTexture>& self);
public slots: public slots:
void ktxHeaderRequestProgress(uint64_t bytesReceived, uint64_t bytesTotal) { } void ktxInitialDataRequestFinished();
void ktxHeaderRequestFinished();
void ktxMipRequestProgress(uint64_t bytesReceived, uint64_t bytesTotal) { }
void ktxMipRequestFinished(); void ktxMipRequestFinished();
protected: protected:
@ -74,14 +74,14 @@ protected:
virtual bool isCacheable() const override { return _loaded; } virtual bool isCacheable() const override { return _loaded; }
virtual void downloadFinished(const QByteArray& data) override; virtual void downloadFinished(const QByteArray& data) override;
Q_INVOKABLE void loadContent(const QByteArray& content); Q_INVOKABLE void loadContent(const QByteArray& content);
Q_INVOKABLE void setImage(gpu::TexturePointer texture, int originalWidth, int originalHeight); Q_INVOKABLE void setImage(gpu::TexturePointer texture, int originalWidth, int originalHeight);
void startRequestForNextMipLevel(); Q_INVOKABLE void startRequestForNextMipLevel();
void startMipRangeRequest(uint16_t low, uint16_t high); void startMipRangeRequest(uint16_t low, uint16_t high);
void maybeHandleFinishedInitialLoad(); void handleFinishedInitialLoad();
private: private:
friend class KTXReader; friend class KTXReader;
@ -102,16 +102,13 @@ private:
bool _sourceIsKTX { false }; bool _sourceIsKTX { false };
KTXResourceState _ktxResourceState { PENDING_INITIAL_LOAD }; KTXResourceState _ktxResourceState { PENDING_INITIAL_LOAD };
// TODO Can this be removed?
KTXFilePointer _file;
// The current mips that are currently being requested w/ _ktxMipRequest // The current mips that are currently being requested w/ _ktxMipRequest
std::pair<uint16_t, uint16_t> _ktxMipLevelRangeInFlight{ NULL_MIP_LEVEL, NULL_MIP_LEVEL }; std::pair<uint16_t, uint16_t> _ktxMipLevelRangeInFlight{ NULL_MIP_LEVEL, NULL_MIP_LEVEL };
ResourceRequest* _ktxHeaderRequest { nullptr }; ResourceRequest* _ktxHeaderRequest { nullptr };
ResourceRequest* _ktxMipRequest { nullptr }; ResourceRequest* _ktxMipRequest { nullptr };
bool _ktxHeaderRequestFinished{ false }; QByteArray _ktxHeaderData;
bool _ktxHighMipRequestFinished{ false }; QByteArray _ktxHighMipData;
uint16_t _lowestRequestedMipLevel { NULL_MIP_LEVEL }; uint16_t _lowestRequestedMipLevel { NULL_MIP_LEVEL };
uint16_t _lowestKnownPopulatedMip { NULL_MIP_LEVEL }; uint16_t _lowestKnownPopulatedMip { NULL_MIP_LEVEL };
@ -128,6 +125,8 @@ private:
int _width { 0 }; int _width { 0 };
int _height { 0 }; int _height { 0 };
int _maxNumPixels { ABSOLUTE_MAX_TEXTURE_NUM_PIXELS }; int _maxNumPixels { ABSOLUTE_MAX_TEXTURE_NUM_PIXELS };
friend class TextureCache;
}; };
using NetworkTexturePointer = QSharedPointer<NetworkTexture>; using NetworkTexturePointer = QSharedPointer<NetworkTexture>;
@ -166,6 +165,12 @@ public:
gpu::TexturePointer getTextureByHash(const std::string& hash); gpu::TexturePointer getTextureByHash(const std::string& hash);
gpu::TexturePointer cacheTextureByHash(const std::string& hash, const gpu::TexturePointer& texture); gpu::TexturePointer cacheTextureByHash(const std::string& hash, const gpu::TexturePointer& texture);
/// SpectatorCamera rendering targets.
NetworkTexturePointer getResourceTexture(QUrl resourceTextureUrl);
const gpu::FramebufferPointer& getSpectatorCameraFramebuffer();
void resetSpectatorCameraFramebuffer(int width, int height);
protected: protected:
// Overload ResourceCache::prefetch to allow specifying texture type for loads // Overload ResourceCache::prefetch to allow specifying texture type for loads
Q_INVOKABLE ScriptableResource* prefetch(const QUrl& url, int type, int maxNumPixels = ABSOLUTE_MAX_TEXTURE_NUM_PIXELS); Q_INVOKABLE ScriptableResource* prefetch(const QUrl& url, int type, int maxNumPixels = ABSOLUTE_MAX_TEXTURE_NUM_PIXELS);
@ -183,6 +188,7 @@ private:
static const std::string KTX_DIRNAME; static const std::string KTX_DIRNAME;
static const std::string KTX_EXT; static const std::string KTX_EXT;
KTXCache _ktxCache; KTXCache _ktxCache;
// Map from image hashes to texture weak pointers // Map from image hashes to texture weak pointers
std::unordered_map<std::string, std::weak_ptr<gpu::Texture>> _texturesByHashes; std::unordered_map<std::string, std::weak_ptr<gpu::Texture>> _texturesByHashes;
@ -193,6 +199,9 @@ private:
gpu::TexturePointer _grayTexture; gpu::TexturePointer _grayTexture;
gpu::TexturePointer _blueTexture; gpu::TexturePointer _blueTexture;
gpu::TexturePointer _blackTexture; gpu::TexturePointer _blackTexture;
NetworkTexturePointer _spectatorCameraNetworkTexture;
gpu::FramebufferPointer _spectatorCameraFramebuffer;
}; };
#endif // hifi_TextureCache_h #endif // hifi_TextureCache_h

View file

@ -353,13 +353,20 @@ void AssetClient::handleAssetGetReply(QSharedPointer<ReceivedMessage> message, S
connect(message.data(), &ReceivedMessage::progress, this, [this, weakNode, messageID, length](qint64 size) { connect(message.data(), &ReceivedMessage::progress, this, [this, weakNode, messageID, length](qint64 size) {
handleProgressCallback(weakNode, messageID, size, length); handleProgressCallback(weakNode, messageID, size, length);
}); });
connect(message.data(), &ReceivedMessage::completed, this, [this, weakNode, messageID]() { connect(message.data(), &ReceivedMessage::completed, this, [this, weakNode, messageID, length]() {
handleCompleteCallback(weakNode, messageID); handleCompleteCallback(weakNode, messageID, length);
}); });
if (message->isComplete()) { if (message->isComplete()) {
disconnect(message.data(), nullptr, this, nullptr); disconnect(message.data(), nullptr, this, nullptr);
callbacks.completeCallback(true, error, message->readAll());
if (length != message->getBytesLeftToRead()) {
callbacks.completeCallback(false, error, QByteArray());
} else {
callbacks.completeCallback(true, error, message->readAll());
}
messageCallbackMap.erase(requestIt); messageCallbackMap.erase(requestIt);
} }
} }
@ -391,7 +398,7 @@ void AssetClient::handleProgressCallback(const QWeakPointer<Node>& node, Message
callbacks.progressCallback(size, length); callbacks.progressCallback(size, length);
} }
void AssetClient::handleCompleteCallback(const QWeakPointer<Node>& node, MessageID messageID) { void AssetClient::handleCompleteCallback(const QWeakPointer<Node>& node, MessageID messageID, DataOffset length) {
auto senderNode = node.toStrongRef(); auto senderNode = node.toStrongRef();
if (!senderNode) { if (!senderNode) {
@ -424,8 +431,7 @@ void AssetClient::handleCompleteCallback(const QWeakPointer<Node>& node, Message
return; return;
} }
if (message->failed() || length != message->getBytesLeftToRead()) {
if (message->failed()) {
callbacks.completeCallback(false, AssetServerError::NoError, QByteArray()); callbacks.completeCallback(false, AssetServerError::NoError, QByteArray());
} else { } else {
callbacks.completeCallback(true, AssetServerError::NoError, message->readAll()); callbacks.completeCallback(true, AssetServerError::NoError, message->readAll());
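Both completion paths above now refuse a reply unless the bytes actually buffered match the length advertised in the reply header; since only the header fields have been consumed at that point, message->getBytesLeftToRead() is the size of the buffered payload. Distilled into a hypothetical standalone predicate (not a real AssetClient method):

    #include <QtGlobal>

    // A reply only succeeds if it did not fail outright and every advertised byte arrived.
    bool assetReplyComplete(bool failed, qint64 advertisedLength, qint64 bufferedPayloadBytes) {
        return !failed && bufferedPayloadBytes == advertisedLength;
    }

Truncated transfers therefore surface as completeCallback(false, ..., QByteArray()) rather than handing a short payload to the requester.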

View file

@ -93,7 +93,7 @@ private:
bool cancelUploadAssetRequest(MessageID id); bool cancelUploadAssetRequest(MessageID id);
void handleProgressCallback(const QWeakPointer<Node>& node, MessageID messageID, qint64 size, DataOffset length); void handleProgressCallback(const QWeakPointer<Node>& node, MessageID messageID, qint64 size, DataOffset length);
void handleCompleteCallback(const QWeakPointer<Node>& node, MessageID messageID); void handleCompleteCallback(const QWeakPointer<Node>& node, MessageID messageID, DataOffset length);
void forceFailureOfPendingRequests(SharedNodePointer node); void forceFailureOfPendingRequests(SharedNodePointer node);

View file

@ -104,12 +104,7 @@ void AssetRequest::start() {
break; break;
} }
} else { } else {
if (_byteRange.isSet()) { if (!_byteRange.isSet() && hashData(data).toHex() != _hash) {
// we had a byte range, the size of the data does not match what we expect, so we return an error
if (data.size() != _byteRange.size()) {
_error = SizeVerificationFailed;
}
} else if (hashData(data).toHex() != _hash) {
// the hash of the received data does not match what we expect, so we return an error // the hash of the received data does not match what we expect, so we return an error
_error = HashVerificationFailed; _error = HashVerificationFailed;
} }
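With that completeness check living in AssetClient, AssetRequest::start() above only hash-verifies whole-asset downloads: a byte-range response is just a slice of the asset, so the requested hash cannot be checked against it, which is why the old SizeVerificationFailed branch is removed. A sketch of the resulting policy, with illustrative names (hashData and _hash are the helpers used in the code above):

    #include <QByteArray>

    // Ranged requests rely on AssetClient's length check; full requests must still match the hash.
    bool shouldAcceptAssetPayload(bool isRangeRequest, const QByteArray& payloadHashHex, const QByteArray& requestedHashHex) {
        if (isRangeRequest) {
            return true;   // no whole-asset hash to compare a partial payload against
        }
        return payloadHashHex == requestedHashHex;   // i.e. hashData(data).toHex() vs _hash above
    }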

View file

@ -184,6 +184,9 @@ public:
// will query the underlying hmd api to compute the most recent head pose // will query the underlying hmd api to compute the most recent head pose
virtual bool beginFrameRender(uint32_t frameIndex) { return true; } virtual bool beginFrameRender(uint32_t frameIndex) { return true; }
// Set the texture to display on the monitor and return true if allowed. An empty string resets it.
virtual bool setDisplayTexture(const QString& name) { return false; }
virtual float devicePixelRatio() { return 1.0f; } virtual float devicePixelRatio() { return 1.0f; }
// Rate at which we render frames // Rate at which we render frames
virtual float renderRate() const { return -1.0f; } virtual float renderRate() const { return -1.0f; }
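setDisplayTexture() is a new opt-in hook on what appears to be the display-plugin interface: the base implementation above declines by returning false, and a plugin that can mirror an arbitrary named texture to the monitor overrides it. A purely illustrative override; the class and member names are not from this change:

    #include <QString>

    class ExampleDisplayPlugin /* : public DisplayPlugin, other required overrides omitted */ {
    public:
        bool setDisplayTexture(const QString& name) /* override */ {
            _displayTextureName = name;   // an empty string resets to the normal mirror
            return true;                  // return false if arbitrary textures are not supported
        }
    private:
        QString _displayTextureName;
    };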

View file

@ -29,7 +29,7 @@ void CauterizedMeshPartPayload::updateTransformForCauterizedMesh(
void CauterizedMeshPartPayload::bindTransform(gpu::Batch& batch, const render::ShapePipeline::LocationsPointer locations, RenderArgs::RenderMode renderMode) const { void CauterizedMeshPartPayload::bindTransform(gpu::Batch& batch, const render::ShapePipeline::LocationsPointer locations, RenderArgs::RenderMode renderMode) const {
// Still relying on the raw data from the model // Still relying on the raw data from the model
bool useCauterizedMesh = (renderMode != RenderArgs::RenderMode::SHADOW_RENDER_MODE); bool useCauterizedMesh = (renderMode != RenderArgs::RenderMode::SHADOW_RENDER_MODE && renderMode != RenderArgs::RenderMode::SECONDARY_CAMERA_RENDER_MODE);
if (useCauterizedMesh) { if (useCauterizedMesh) {
ModelPointer model = _model.lock(); ModelPointer model = _model.lock();
if (model) { if (model) {

View file

@ -548,6 +548,7 @@ glm::ivec3 LightClusters::updateClusters() {
LightClusteringPass::LightClusteringPass() { LightClusteringPass::LightClusteringPass() {
_lightClusters = std::make_shared<LightClusters>();
} }
@ -566,12 +567,7 @@ void LightClusteringPass::run(const render::RenderContextPointer& renderContext,
auto deferredTransform = inputs.get0(); auto deferredTransform = inputs.get0();
auto lightingModel = inputs.get1(); auto lightingModel = inputs.get1();
auto surfaceGeometryFramebuffer = inputs.get2(); auto surfaceGeometryFramebuffer = inputs.get2();
if (!_lightClusters) {
_lightClusters = std::make_shared<LightClusters>();
}
// first update the Grid with the new frustum // first update the Grid with the new frustum
if (!_freeze) { if (!_freeze) {
_lightClusters->updateFrustum(args->getViewFrustum()); _lightClusters->updateFrustum(args->getViewFrustum());

View file

@ -61,7 +61,7 @@ namespace render {
class Args { class Args {
public: public:
enum RenderMode { DEFAULT_RENDER_MODE, SHADOW_RENDER_MODE, DIFFUSE_RENDER_MODE, NORMAL_RENDER_MODE, MIRROR_RENDER_MODE }; enum RenderMode { DEFAULT_RENDER_MODE, SHADOW_RENDER_MODE, DIFFUSE_RENDER_MODE, NORMAL_RENDER_MODE, MIRROR_RENDER_MODE, SECONDARY_CAMERA_RENDER_MODE };
enum DisplayMode { MONO, STEREO_MONITOR, STEREO_HMD }; enum DisplayMode { MONO, STEREO_MONITOR, STEREO_HMD };
enum DebugFlags { enum DebugFlags {
RENDER_DEBUG_NONE = 0, RENDER_DEBUG_NONE = 0,

View file

@ -13,6 +13,7 @@ import "configSlider"
import "../lib/plotperf" import "../lib/plotperf"
Column { Column {
property var mainViewTask: Render.getConfig("RenderMainView")
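// The deferred-render jobs now live under the "RenderMainView" task, so their configs are
// looked up through mainViewTask.getConfig(...) instead of directly via Render.getConfig(...).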
spacing: 8 spacing: 8
Column { Column {
id: surfaceGeometry id: surfaceGeometry
@ -32,7 +33,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr(modelData.split(":")[0]) label: qsTr(modelData.split(":")[0])
integral: (modelData.split(":")[3] == 'true') integral: (modelData.split(":")[3] == 'true')
config: Render.getConfig("AmbientOcclusion") config: mainViewTask.getConfig("AmbientOcclusion")
property: modelData.split(":")[1] property: modelData.split(":")[1]
max: modelData.split(":")[2] max: modelData.split(":")[2]
min: 0.0 min: 0.0
@ -50,8 +51,8 @@ Column {
] ]
CheckBox { CheckBox {
text: qsTr(modelData.split(":")[0]) text: qsTr(modelData.split(":")[0])
checked: Render.getConfig("AmbientOcclusion")[modelData.split(":")[1]] checked: mainViewTask.getConfig("AmbientOcclusion")[modelData.split(":")[1]]
onCheckedChanged: { Render.getConfig("AmbientOcclusion")[modelData.split(":")[1]] = checked } onCheckedChanged: { mainViewTask.getConfig("AmbientOcclusion")[modelData.split(":")[1]] = checked }
} }
} }
} }
@ -62,8 +63,8 @@ Column {
] ]
CheckBox { CheckBox {
text: qsTr(modelData.split(":")[0]) text: qsTr(modelData.split(":")[0])
checked: Render.getConfig("DebugAmbientOcclusion")[modelData.split(":")[1]] checked: mainViewTask.getConfig("DebugAmbientOcclusion")[modelData.split(":")[1]]
onCheckedChanged: { Render.getConfig("DebugAmbientOcclusion")[modelData.split(":")[1]] = checked } onCheckedChanged: { mainViewTask.getConfig("DebugAmbientOcclusion")[modelData.split(":")[1]] = checked }
} }
} }
} }
@ -72,7 +73,7 @@ Column {
PlotPerf { PlotPerf {
title: "Timing" title: "Timing"
height: 50 height: 50
object: Render.getConfig("AmbientOcclusion") object: mainViewTask.getConfig("AmbientOcclusion")
valueUnit: "ms" valueUnit: "ms"
valueScale: 1 valueScale: 1
valueNumDigits: "3" valueNumDigits: "3"

View file

@ -14,8 +14,9 @@ import "configSlider"
Column { Column {
id: root id: root
spacing: 8 spacing: 8
property var sceneOctree: Render.getConfig("DrawSceneOctree"); property var mainViewTask: Render.getConfig("RenderMainView");
property var itemSelection: Render.getConfig("DrawItemSelection"); property var sceneOctree: mainViewTask.getConfig("DrawSceneOctree");
property var itemSelection: mainViewTask.getConfig("DrawItemSelection");
Component.onCompleted: { Component.onCompleted: {
sceneOctree.enabled = true; sceneOctree.enabled = true;
@ -30,8 +31,8 @@ Column {
Component.onDestruction: { Component.onDestruction: {
sceneOctree.enabled = false; sceneOctree.enabled = false;
itemSelection.enabled = false; itemSelection.enabled = false;
Render.getConfig("FetchSceneSelection").freezeFrustum = false; mainViewTask.getConfig("FetchSceneSelection").freezeFrustum = false;
Render.getConfig("CullSceneSelection").freezeFrustum = false; mainViewTask.getConfig("CullSceneSelection").freezeFrustum = false;
} }
GroupBox { GroupBox {
@ -45,8 +46,8 @@ Column {
text: "Freeze Culling Frustum" text: "Freeze Culling Frustum"
checked: false checked: false
onCheckedChanged: { onCheckedChanged: {
Render.getConfig("FetchSceneSelection").freezeFrustum = checked; mainViewTask.getConfig("FetchSceneSelection").freezeFrustum = checked;
Render.getConfig("CullSceneSelection").freezeFrustum = checked; mainViewTask.getConfig("CullSceneSelection").freezeFrustum = checked;
} }
} }
Label { Label {
@ -103,7 +104,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr(modelData.split(":")[0]) label: qsTr(modelData.split(":")[0])
integral: true integral: true
config: Render.getConfig(modelData.split(":")[1]) config: mainViewTask.getConfig(modelData.split(":")[1])
property: "maxDrawn" property: "maxDrawn"
max: config.numDrawn max: config.numDrawn
min: -1 min: -1

View file

@ -34,5 +34,5 @@ function setDebugCursor(x, y) {
nx = (x / Window.innerWidth); nx = (x / Window.innerWidth);
ny = 1.0 - ((y) / (Window.innerHeight - 32)); ny = 1.0 - ((y) / (Window.innerHeight - 32));
Render.getConfig("DebugAmbientOcclusion").debugCursorTexcoord = { x: nx, y: ny }; Render.getConfig("RenderMainView").getConfig("DebugAmbientOcclusion").debugCursorTexcoord = { x: nx, y: ny };
} }

View file

@ -33,5 +33,5 @@ function setDebugCursor(x, y) {
nx = (x / Window.innerWidth); nx = (x / Window.innerWidth);
ny = 1.0 - ((y) / (Window.innerHeight - 32)); ny = 1.0 - ((y) / (Window.innerHeight - 32));
Render.getConfig("DebugScattering").debugCursorTexcoord = { x: nx, y: ny }; Render.getConfig("RenderMainView").getConfig("DebugScattering").debugCursorTexcoord = { x: nx, y: ny };
} }

View file

@ -13,6 +13,7 @@ import "configSlider"
Column { Column {
spacing: 8 spacing: 8
property var mainViewTask: Render.getConfig("RenderMainView")
Row { Row {
spacing: 8 spacing: 8
@ -29,8 +30,8 @@ Column {
] ]
CheckBox { CheckBox {
text: modelData.split(":")[0] text: modelData.split(":")[0]
checked: Render.getConfig(modelData.split(":")[1])[modelData.split(":")[2]] checked: mainViewTask.getConfig(modelData.split(":")[1])[modelData.split(":")[2]]
onCheckedChanged: { Render.getConfig(modelData.split(":")[1])[modelData.split(":")[2]] = checked } onCheckedChanged: { mainViewTask.getConfig(modelData.split(":")[1])[modelData.split(":")[2]] = checked }
} }
} }
} }
@ -49,8 +50,8 @@ Column {
] ]
CheckBox { CheckBox {
text: modelData.split(":")[0] text: modelData.split(":")[0]
checked: Render.getConfig(modelData.split(":")[1])[modelData.split(":")[2]] checked: mainViewTask.getConfig(modelData.split(":")[1])[modelData.split(":")[2]]
onCheckedChanged: { Render.getConfig(modelData.split(":")[1])[modelData.split(":")[2]] = checked } onCheckedChanged: { mainViewTask.getConfig(modelData.split(":")[1])[modelData.split(":")[2]] = checked }
} }
} }
} }
@ -69,8 +70,8 @@ Column {
] ]
CheckBox { CheckBox {
text: modelData.split(":")[0] text: modelData.split(":")[0]
checked: Render.getConfig(modelData.split(":")[1])[modelData.split(":")[2]] checked: mainViewTask.getConfig(modelData.split(":")[1])[modelData.split(":")[2]]
onCheckedChanged: { Render.getConfig(modelData.split(":")[1])[modelData.split(":")[2]] = checked } onCheckedChanged: { mainViewTask.getConfig(modelData.split(":")[1])[modelData.split(":")[2]] = checked }
} }
} }
} }
@ -83,7 +84,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr(modelData.split(":")[0]) label: qsTr(modelData.split(":")[0])
integral: false integral: false
config: Render.getConfig(modelData.split(":")[1]) config: mainViewTask.getConfig(modelData.split(":")[1])
property: modelData.split(":")[2] property: modelData.split(":")[2]
max: modelData.split(":")[3] max: modelData.split(":")[3]
min: modelData.split(":")[4] min: modelData.split(":")[4]
@ -107,7 +108,7 @@ Column {
ListElement { text: "Filmic"; color: "White" } ListElement { text: "Filmic"; color: "White" }
} }
width: 200 width: 200
onCurrentIndexChanged: { Render.getConfig("ToneMapping")["curve"] = currentIndex } onCurrentIndexChanged: { mainViewTask.getConfig("ToneMapping")["curve"] = currentIndex }
} }
} }
} }
@ -120,7 +121,7 @@ Column {
anchors.left: root.left anchors.left: root.left
} }
property var config: Render.getConfig("DebugDeferredBuffer") property var config: mainViewTask.getConfig("DebugDeferredBuffer")
function setDebugMode(mode) { function setDebugMode(mode) {
framebuffer.config.enabled = (mode != 0); framebuffer.config.enabled = (mode != 0);
@ -168,40 +169,40 @@ Column {
CheckBox { CheckBox {
text: "Opaques" text: "Opaques"
checked: Render.getConfig("DrawOpaqueBounds")["enabled"] checked: mainViewTask.getConfig("DrawOpaqueBounds")["enabled"]
onCheckedChanged: { Render.getConfig("DrawOpaqueBounds")["enabled"] = checked } onCheckedChanged: { mainViewTask.getConfig("DrawOpaqueBounds")["enabled"] = checked }
} }
CheckBox { CheckBox {
text: "Transparents" text: "Transparents"
checked: Render.getConfig("DrawTransparentBounds")["enabled"] checked: mainViewTask.getConfig("DrawTransparentBounds")["enabled"]
onCheckedChanged: { Render.getConfig("DrawTransparentBounds")["enabled"] = checked } onCheckedChanged: { mainViewTask.getConfig("DrawTransparentBounds")["enabled"] = checked }
} }
CheckBox { CheckBox {
text: "Overlay Opaques" text: "Overlay Opaques"
checked: Render.getConfig("DrawOverlayOpaqueBounds")["enabled"] checked: mainViewTask.getConfig("DrawOverlayOpaqueBounds")["enabled"]
onCheckedChanged: { Render.getConfig("DrawOverlayOpaqueBounds")["enabled"] = checked } onCheckedChanged: { mainViewTask.getConfig("DrawOverlayOpaqueBounds")["enabled"] = checked }
} }
CheckBox { CheckBox {
text: "Overlay Transparents" text: "Overlay Transparents"
checked: Render.getConfig("DrawOverlayTransparentBounds")["enabled"] checked: mainViewTask.getConfig("DrawOverlayTransparentBounds")["enabled"]
onCheckedChanged: { Render.getConfig("DrawOverlayTransparentBounds")["enabled"] = checked } onCheckedChanged: { mainViewTask.getConfig("DrawOverlayTransparentBounds")["enabled"] = checked }
} }
} }
Column { Column {
CheckBox { CheckBox {
text: "Metas" text: "Metas"
checked: Render.getConfig("DrawMetaBounds")["enabled"] checked: mainViewTask.getConfig("DrawMetaBounds")["enabled"]
onCheckedChanged: { Render.getConfig("DrawMetaBounds")["enabled"] = checked } onCheckedChanged: { mainViewTask.getConfig("DrawMetaBounds")["enabled"] = checked }
} }
CheckBox { CheckBox {
text: "Lights" text: "Lights"
checked: Render.getConfig("DrawLightBounds")["enabled"] checked: mainViewTask.getConfig("DrawLightBounds")["enabled"]
onCheckedChanged: { Render.getConfig("DrawLightBounds")["enabled"] = checked; } onCheckedChanged: { mainViewTask.getConfig("DrawLightBounds")["enabled"] = checked; }
} }
CheckBox { CheckBox {
text: "Zones" text: "Zones"
checked: Render.getConfig("DrawZones")["enabled"] checked: mainViewTask.getConfig("DrawZones")["enabled"]
onCheckedChanged: { Render.getConfig("ZoneRenderer")["enabled"] = checked; Render.getConfig("DrawZones")["enabled"] = checked; } onCheckedChanged: { mainViewTask.getConfig("ZoneRenderer")["enabled"] = checked; mainViewTask.getConfig("DrawZones")["enabled"] = checked; }
} }
} }
} }

View file

@ -17,18 +17,19 @@ Column {
Column { Column {
id: lightClustering id: lightClustering
spacing: 10 spacing: 10
property var mainViewTask: Render.getConfig("RenderMainView");
Column{ Column{
PlotPerf { PlotPerf {
title: "Light CLustering Timing" title: "Light CLustering Timing"
height: 50 height: 50
object: Render.getConfig("LightClustering") object: mainViewTask.getConfig("LightClustering")
valueUnit: "ms" valueUnit: "ms"
valueScale: 1 valueScale: 1
valueNumDigits: "4" valueNumDigits: "4"
plots: [ plots: [
{ {
object: Render.getConfig("LightClustering"), object: mainViewTask.getConfig("LightClustering"),
prop: "cpuRunTime", prop: "cpuRunTime",
label: "time", label: "time",
scale: 1, scale: 1,
@ -40,19 +41,19 @@ Column {
PlotPerf { PlotPerf {
title: "Lights" title: "Lights"
height: 50 height: 50
object: Render.getConfig("LightClustering") object: mainViewTask.getConfig("LightClustering")
valueUnit: "" valueUnit: ""
valueScale: 1 valueScale: 1
valueNumDigits: "0" valueNumDigits: "0"
plots: [ plots: [
{ {
object: Render.getConfig("LightClustering"), object: mainViewTask.getConfig("LightClustering"),
prop: "numClusteredLights", prop: "numClusteredLights",
label: "visible", label: "visible",
color: "#D959FE" color: "#D959FE"
}, },
{ {
object: Render.getConfig("LightClustering"), object: mainViewTask.getConfig("LightClustering"),
prop: "numInputLights", prop: "numInputLights",
label: "input", label: "input",
color: "#FED959" color: "#FED959"
@ -63,25 +64,25 @@ Column {
PlotPerf { PlotPerf {
title: "Scene Lights" title: "Scene Lights"
height: 80 height: 80
object: Render.getConfig("LightClustering") object: mainViewTask.getConfig("LightClustering")
valueUnit: "" valueUnit: ""
valueScale: 1 valueScale: 1
valueNumDigits: "0" valueNumDigits: "0"
plots: [ plots: [
{ {
object: Render.getConfig("LightClustering"), object: mainViewTask.getConfig("LightClustering"),
prop: "numSceneLights", prop: "numSceneLights",
label: "current", label: "current",
color: "#00B4EF" color: "#00B4EF"
}, },
{ {
object: Render.getConfig("LightClustering"), object: mainViewTask.getConfig("LightClustering"),
prop: "numFreeSceneLights", prop: "numFreeSceneLights",
label: "free", label: "free",
color: "#1AC567" color: "#1AC567"
}, },
{ {
object: Render.getConfig("LightClustering"), object: mainViewTask.getConfig("LightClustering"),
prop: "numAllocatedSceneLights", prop: "numAllocatedSceneLights",
label: "allocated", label: "allocated",
color: "#9495FF" color: "#9495FF"
@ -92,7 +93,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr("Range Near [m]") label: qsTr("Range Near [m]")
integral: false integral: false
config: Render.getConfig("LightClustering") config: mainViewTask.getConfig("LightClustering")
property: "rangeNear" property: "rangeNear"
max: 20.0 max: 20.0
min: 0.1 min: 0.1
@ -100,7 +101,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr("Range Far [m]") label: qsTr("Range Far [m]")
integral: false integral: false
config: Render.getConfig("LightClustering") config: mainViewTask.getConfig("LightClustering")
property: "rangeFar" property: "rangeFar"
max: 500.0 max: 500.0
min: 100.0 min: 100.0
@ -108,7 +109,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr("Grid X") label: qsTr("Grid X")
integral: true integral: true
config: Render.getConfig("LightClustering") config: mainViewTask.getConfig("LightClustering")
property: "dimX" property: "dimX"
max: 32 max: 32
min: 1 min: 1
@ -116,7 +117,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr("Grid Y") label: qsTr("Grid Y")
integral: true integral: true
config: Render.getConfig("LightClustering") config: mainViewTask.getConfig("LightClustering")
property: "dimY" property: "dimY"
max: 32 max: 32
min: 1 min: 1
@ -124,33 +125,33 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr("Grid Z") label: qsTr("Grid Z")
integral: true integral: true
config: Render.getConfig("LightClustering") config: mainViewTask.getConfig("LightClustering")
property: "dimZ" property: "dimZ"
max: 31 max: 31
min: 1 min: 1
} }
CheckBox { CheckBox {
text: "Freeze" text: "Freeze"
checked: Render.getConfig("LightClustering")["freeze"] checked: mainViewTask.getConfig("LightClustering")["freeze"]
onCheckedChanged: { Render.getConfig("LightClustering")["freeze"] = checked } onCheckedChanged: { mainViewTask.getConfig("LightClustering")["freeze"] = checked }
} }
CheckBox { CheckBox {
text: "Draw Grid" text: "Draw Grid"
checked: Render.getConfig("DebugLightClusters")["doDrawGrid"] checked: mainViewTask.getConfig("DebugLightClusters")["doDrawGrid"]
onCheckedChanged: { Render.getConfig("DebugLightClusters")["doDrawGrid"] = checked } onCheckedChanged: { mainViewTask.getConfig("DebugLightClusters")["doDrawGrid"] = checked }
} }
CheckBox { CheckBox {
text: "Draw Cluster From Depth" text: "Draw Cluster From Depth"
checked: Render.getConfig("DebugLightClusters")["doDrawClusterFromDepth"] checked: mainViewTask.getConfig("DebugLightClusters")["doDrawClusterFromDepth"]
onCheckedChanged: { Render.getConfig("DebugLightClusters")["doDrawClusterFromDepth"] = checked } onCheckedChanged: { mainViewTask.getConfig("DebugLightClusters")["doDrawClusterFromDepth"] = checked }
} }
CheckBox { CheckBox {
text: "Draw Content" text: "Draw Content"
checked: Render.getConfig("DebugLightClusters")["doDrawContent"] checked: mainViewTask.getConfig("DebugLightClusters")["doDrawContent"]
onCheckedChanged: { Render.getConfig("DebugLightClusters")["doDrawContent"] = checked } onCheckedChanged: { mainViewTask.getConfig("DebugLightClusters")["doDrawContent"] = checked }
} }
Label { Label {
text: "Num Cluster Items = " + Render.getConfig("LightClustering")["numClusteredLightReferences"].toFixed(0) text: "Num Cluster Items = " + mainViewTask.getConfig("LightClustering")["numClusteredLightReferences"].toFixed(0)
} }
} }

View file

@ -21,7 +21,8 @@ Item {
spacing: 8 spacing: 8
anchors.fill:parent anchors.fill:parent
property var config: Render.getConfig("Stats") property var mainViewTask: Render.getConfig("RenderMainView");
property var config: mainViewTask.getConfig("Stats")
function evalEvenHeight() { function evalEvenHeight() {
// Why do we have to do that manually ? cannot seem to find a qml / anchor / layout mode that does that ? // Why do we have to do that manually ? cannot seem to find a qml / anchor / layout mode that does that ?
@ -182,9 +183,9 @@ Item {
] ]
} }
property var drawOpaqueConfig: Render.getConfig("DrawOpaqueDeferred") property var drawOpaqueConfig: mainViewTask.getConfig("DrawOpaqueDeferred")
property var drawTransparentConfig: Render.getConfig("DrawTransparentDeferred") property var drawTransparentConfig: mainViewTask.getConfig("DrawTransparentDeferred")
property var drawLightConfig: Render.getConfig("DrawLight") property var drawLightConfig: mainViewTask.getConfig("DrawLight")
PlotPerf { PlotPerf {
title: "Items" title: "Items"
@ -199,13 +200,13 @@ Item {
color: "#1AC567" color: "#1AC567"
}, },
{ {
object: Render.getConfig("DrawTransparentDeferred"), object: mainViewTask.getConfig("DrawTransparentDeferred"),
prop: "numDrawn", prop: "numDrawn",
label: "Translucents", label: "Translucents",
color: "#00B4EF" color: "#00B4EF"
}, },
{ {
object: Render.getConfig("DrawLight"), object: mainViewTask.getConfig("DrawLight"),
prop: "numDrawn", prop: "numDrawn",
label: "Lights", label: "Lights",
color: "#FED959" color: "#FED959"
@ -222,25 +223,25 @@ Item {
valueNumDigits: "2" valueNumDigits: "2"
plots: [ plots: [
{ {
object: Render.getConfig("DrawOpaqueDeferred"), object: mainViewTask.getConfig("DrawOpaqueDeferred"),
prop: "cpuRunTime", prop: "cpuRunTime",
label: "Opaques", label: "Opaques",
color: "#1AC567" color: "#1AC567"
}, },
{ {
object: Render.getConfig("DrawTransparentDeferred"), object: mainViewTask.getConfig("DrawTransparentDeferred"),
prop: "cpuRunTime", prop: "cpuRunTime",
label: "Translucents", label: "Translucents",
color: "#00B4EF" color: "#00B4EF"
}, },
{ {
object: Render.getConfig("RenderDeferred"), object: mainViewTask.getConfig("RenderDeferred"),
prop: "cpuRunTime", prop: "cpuRunTime",
label: "Lighting", label: "Lighting",
color: "#FED959" color: "#FED959"
}, },
{ {
object: Render.getConfig("RenderDeferredTask"), object: mainViewTask.getConfig("RenderDeferredTask"),
prop: "cpuRunTime", prop: "cpuRunTime",
label: "RenderFrame", label: "RenderFrame",
color: "#E2334D" color: "#E2334D"

View file

@ -21,7 +21,8 @@ Item {
spacing: 8 spacing: 8
anchors.fill:parent anchors.fill:parent
property var config: Render.getConfig("Stats") property var mainViewTask: Render.getConfig("RenderMainView");
property var config: mainViewTask.getConfig("Stats")
function evalEvenHeight() { function evalEvenHeight() {
// Why do we have to do that manually ? cannot seem to find a qml / anchor / layout mode that does that ? // Why do we have to do that manually ? cannot seem to find a qml / anchor / layout mode that does that ?
@ -38,31 +39,31 @@ Item {
valueNumDigits: "4" valueNumDigits: "4"
plots: [ plots: [
{ {
object: Render.getConfig("OpaqueRangeTimer"), object: mainViewTask.getConfig("OpaqueRangeTimer"),
prop: "gpuRunTime", prop: "gpuRunTime",
label: "Opaque", label: "Opaque",
color: "#FFFFFF" color: "#FFFFFF"
}, },
{ {
object: Render.getConfig("LinearDepth"), object: mainViewTask.getConfig("LinearDepth"),
prop: "gpuRunTime", prop: "gpuRunTime",
label: "LinearDepth", label: "LinearDepth",
color: "#00FF00" color: "#00FF00"
},{ },{
object: Render.getConfig("SurfaceGeometry"), object: mainViewTask.getConfig("SurfaceGeometry"),
prop: "gpuRunTime", prop: "gpuRunTime",
label: "SurfaceGeometry", label: "SurfaceGeometry",
color: "#00FFFF" color: "#00FFFF"
}, },
{ {
object: Render.getConfig("RenderDeferred"), object: mainViewTask.getConfig("RenderDeferred"),
prop: "gpuRunTime", prop: "gpuRunTime",
label: "DeferredLighting", label: "DeferredLighting",
color: "#FF00FF" color: "#FF00FF"
} }
, ,
{ {
object: Render.getConfig("ToneAndPostRangeTimer"), object: mainViewTask.getConfig("ToneAndPostRangeTimer"),
prop: "gpuRunTime", prop: "gpuRunTime",
label: "tone and post", label: "tone and post",
color: "#FF0000" color: "#FF0000"
@ -78,31 +79,31 @@ Item {
valueNumDigits: "3" valueNumDigits: "3"
plots: [ plots: [
{ {
object: Render.getConfig("OpaqueRangeTimer"), object: mainViewTask.getConfig("OpaqueRangeTimer"),
prop: "batchRunTime", prop: "batchRunTime",
label: "Opaque", label: "Opaque",
color: "#FFFFFF" color: "#FFFFFF"
}, },
{ {
object: Render.getConfig("LinearDepth"), object: mainViewTask.getConfig("LinearDepth"),
prop: "batchRunTime", prop: "batchRunTime",
label: "LinearDepth", label: "LinearDepth",
color: "#00FF00" color: "#00FF00"
},{ },{
object: Render.getConfig("SurfaceGeometry"), object: mainViewTask.getConfig("SurfaceGeometry"),
prop: "batchRunTime", prop: "batchRunTime",
label: "SurfaceGeometry", label: "SurfaceGeometry",
color: "#00FFFF" color: "#00FFFF"
}, },
{ {
object: Render.getConfig("RenderDeferred"), object: mainViewTask.getConfig("RenderDeferred"),
prop: "batchRunTime", prop: "batchRunTime",
label: "DeferredLighting", label: "DeferredLighting",
color: "#FF00FF" color: "#FF00FF"
} }
, ,
{ {
object: Render.getConfig("ToneAndPostRangeTimer"), object: mainViewTask.getConfig("ToneAndPostRangeTimer"),
prop: "batchRunTime", prop: "batchRunTime",
label: "tone and post", label: "tone and post",
color: "#FF0000" color: "#FF0000"

View file

@ -16,28 +16,29 @@ Column {
Column { Column {
id: scattering id: scattering
spacing: 10 spacing: 10
property var mainViewTask: Render.getConfig("RenderMainView");
Column{ Column{
CheckBox { CheckBox {
text: "Scattering" text: "Scattering"
checked: Render.getConfig("Scattering").enableScattering checked: mainViewTask.getConfig("Scattering").enableScattering
onCheckedChanged: { Render.getConfig("Scattering").enableScattering = checked } onCheckedChanged: { mainViewTask.getConfig("Scattering").enableScattering = checked }
} }
CheckBox { CheckBox {
text: "Show Scattering BRDF" text: "Show Scattering BRDF"
checked: Render.getConfig("Scattering").showScatteringBRDF checked: mainViewTask.getConfig("Scattering").showScatteringBRDF
onCheckedChanged: { Render.getConfig("Scattering").showScatteringBRDF = checked } onCheckedChanged: { mainViewTask.getConfig("Scattering").showScatteringBRDF = checked }
} }
CheckBox { CheckBox {
text: "Show Curvature" text: "Show Curvature"
checked: Render.getConfig("Scattering").showCurvature checked: mainViewTask.getConfig("Scattering").showCurvature
onCheckedChanged: { Render.getConfig("Scattering").showCurvature = checked } onCheckedChanged: { mainViewTask.getConfig("Scattering").showCurvature = checked }
} }
CheckBox { CheckBox {
text: "Show Diffused Normal" text: "Show Diffused Normal"
checked: Render.getConfig("Scattering").showDiffusedNormal checked: mainViewTask.getConfig("Scattering").showDiffusedNormal
onCheckedChanged: { Render.getConfig("Scattering").showDiffusedNormal = checked } onCheckedChanged: { mainViewTask.getConfig("Scattering").showDiffusedNormal = checked }
} }
Repeater { Repeater {
model: [ "Scattering Bent Red:Scattering:bentRed:2.0", model: [ "Scattering Bent Red:Scattering:bentRed:2.0",
@ -50,7 +51,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr(modelData.split(":")[0]) label: qsTr(modelData.split(":")[0])
integral: false integral: false
config: Render.getConfig(modelData.split(":")[1]) config: mainViewTask.getConfig(modelData.split(":")[1])
property: modelData.split(":")[2] property: modelData.split(":")[2]
max: modelData.split(":")[3] max: modelData.split(":")[3]
min: 0.0 min: 0.0
@ -58,23 +59,23 @@ Column {
} }
CheckBox { CheckBox {
text: "Scattering Profile" text: "Scattering Profile"
checked: Render.getConfig("DebugScattering").showProfile checked: mainViewTask.getConfig("DebugScattering").showProfile
onCheckedChanged: { Render.getConfig("DebugScattering").showProfile = checked } onCheckedChanged: { mainViewTask.getConfig("DebugScattering").showProfile = checked }
} }
CheckBox { CheckBox {
text: "Scattering Table" text: "Scattering Table"
checked: Render.getConfig("DebugScattering").showLUT checked: mainViewTask.getConfig("DebugScattering").showLUT
onCheckedChanged: { Render.getConfig("DebugScattering").showLUT = checked } onCheckedChanged: { mainViewTask.getConfig("DebugScattering").showLUT = checked }
} }
CheckBox { CheckBox {
text: "Cursor Pixel" text: "Cursor Pixel"
checked: Render.getConfig("DebugScattering").showCursorPixel checked: mainViewTask.getConfig("DebugScattering").showCursorPixel
onCheckedChanged: { Render.getConfig("DebugScattering").showCursorPixel = checked } onCheckedChanged: { mainViewTask.getConfig("DebugScattering").showCursorPixel = checked }
} }
CheckBox { CheckBox {
text: "Skin Specular Beckmann" text: "Skin Specular Beckmann"
checked: Render.getConfig("DebugScattering").showSpecularTable checked: mainViewTask.getConfig("DebugScattering").showSpecularTable
onCheckedChanged: { Render.getConfig("DebugScattering").showSpecularTable = checked } onCheckedChanged: { mainViewTask.getConfig("DebugScattering").showSpecularTable = checked }
} }
} }
} }

View file

@ -16,12 +16,13 @@ Column {
Column { Column {
id: surfaceGeometry id: surfaceGeometry
spacing: 10 spacing: 10
property var mainViewTask: Render.getConfig("RenderMainView");
Column{ Column{
ConfigSlider { ConfigSlider {
label: qsTr("Depth Threshold [cm]") label: qsTr("Depth Threshold [cm]")
integral: false integral: false
config: Render.getConfig("SurfaceGeometry") config: mainViewTask.getConfig("SurfaceGeometry")
property: "depthThreshold" property: "depthThreshold"
max: 5.0 max: 5.0
min: 0.0 min: 0.0
@ -34,7 +35,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr(modelData.split(":")[0]) label: qsTr(modelData.split(":")[0])
integral: (modelData.split(":")[3] == 'true') integral: (modelData.split(":")[3] == 'true')
config: Render.getConfig("SurfaceGeometry") config: mainViewTask.getConfig("SurfaceGeometry")
property: modelData.split(":")[1] property: modelData.split(":")[1]
max: modelData.split(":")[2] max: modelData.split(":")[2]
min: 0.0 min: 0.0
@ -42,8 +43,8 @@ Column {
} }
CheckBox { CheckBox {
text: "Half Resolution" text: "Half Resolution"
checked: Render.getConfig("SurfaceGeometry")["resolutionLevel"] checked: mainViewTask.getConfig("SurfaceGeometry")["resolutionLevel"]
onCheckedChanged: { Render.getConfig("SurfaceGeometry")["resolutionLevel"] = checked } onCheckedChanged: { mainViewTask.getConfig("SurfaceGeometry")["resolutionLevel"] = checked }
} }
Repeater { Repeater {
@ -53,7 +54,7 @@ Column {
ConfigSlider { ConfigSlider {
label: qsTr(modelData.split(":")[0]) label: qsTr(modelData.split(":")[0])
integral: false integral: false
config: Render.getConfig(modelData.split(":")[1]) config: mainViewTask.getConfig(modelData.split(":")[1])
property: modelData.split(":")[2] property: modelData.split(":")[2]
max: modelData.split(":")[3] max: modelData.split(":")[3]
min: 0.0 min: 0.0

View file

@ -22,7 +22,8 @@ Item {
spacing: 8 spacing: 8
anchors.fill:parent anchors.fill:parent
property var config: Render.getConfig("Stats") property var mainViewTask: Render.getConfig("RenderMainView");
property var config: mainViewTask.getConfig("Stats")
function evalEvenHeight() { function evalEvenHeight() {
// Why do we have to do that manually ? cannot seem to find a qml / anchor / layout mode that does that ? // Why do we have to do that manually ? cannot seem to find a qml / anchor / layout mode that does that ?