Merge branch 'master' of https://github.com/highfidelity/hifi into 20116_nitpick_v1.2

This commit is contained in:
NissimHadar 2018-12-06 09:52:48 -08:00
commit 0cca388971
125 changed files with 2219 additions and 1367 deletions

View file

@ -656,6 +656,8 @@ void Agent::queryAvatars() {
ViewFrustum view;
view.setPosition(scriptedAvatar->getWorldPosition());
view.setOrientation(scriptedAvatar->getHeadOrientation());
view.setProjection(DEFAULT_FIELD_OF_VIEW_DEGREES, DEFAULT_ASPECT_RATIO,
DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP);
view.calculate();
ConicalViewFrustum conicalView { view };
@ -876,18 +878,30 @@ void Agent::aboutToFinish() {
DependencyManager::destroy<AudioInjectorManager>();
// destroy all other created dependencies
DependencyManager::destroy<ScriptCache>();
DependencyManager::destroy<ResourceCacheSharedItems>();
DependencyManager::destroy<SoundCacheScriptingInterface>();
DependencyManager::destroy<SoundCache>();
DependencyManager::destroy<AudioScriptingInterface>();
DependencyManager::destroy<RecordingScriptingInterface>();
DependencyManager::destroy<AnimationCacheScriptingInterface>();
DependencyManager::destroy<EntityScriptingInterface>();
DependencyManager::destroy<ResourceScriptingInterface>();
DependencyManager::destroy<UserActivityLoggerScriptingInterface>();
DependencyManager::destroy<ScriptCache>();
DependencyManager::destroy<SoundCache>();
DependencyManager::destroy<AnimationCache>();
DependencyManager::destroy<recording::Deck>();
DependencyManager::destroy<recording::Recorder>();
DependencyManager::destroy<recording::ClipCache>();
DependencyManager::destroy<AvatarHashMap>();
DependencyManager::destroy<AssignmentParentFinder>();
DependencyManager::destroy<MessagesClient>();
DependencyManager::destroy<ResourceManager>();
DependencyManager::destroy<ResourceCacheSharedItems>();
// drop our shared pointer to the script engine, then ask ScriptEngines to shutdown scripting
// this ensures that the ScriptEngine goes down before ScriptEngines
_scriptEngine.clear();

View file

@ -129,17 +129,12 @@ void AssignmentClient::stopAssignmentClient() {
QThread* currentAssignmentThread = _currentAssignment->thread();
// ask the current assignment to stop
BLOCKING_INVOKE_METHOD(_currentAssignment, "stop");
QMetaObject::invokeMethod(_currentAssignment, "stop");
// ask the current assignment to delete itself on its thread
_currentAssignment->deleteLater();
// when this thread is destroyed we don't need to run our assignment complete method
disconnect(currentAssignmentThread, &QThread::destroyed, this, &AssignmentClient::assignmentCompleted);
// wait on the thread from that assignment - it will be gone once the current assignment deletes
currentAssignmentThread->quit();
currentAssignmentThread->wait();
auto PROCESS_EVENTS_INTERVAL_MS = 100;
while (!currentAssignmentThread->wait(PROCESS_EVENTS_INTERVAL_MS)) {
QCoreApplication::processEvents();
}
}
}

View file

@ -152,6 +152,8 @@ void AvatarMixerClientData::processSetTraitsMessage(ReceivedMessage& message,
if (packetTraitVersion > instanceVersionRef) {
if (traitSize == AvatarTraits::DELETED_TRAIT_SIZE) {
_avatar->processDeletedTraitInstance(traitType, instanceID);
// Mixer doesn't need deleted IDs.
_avatar->getAndClearRecentlyDetachedIDs();
// to track a deleted instance but keep version information
// the avatar mixer uses the negative value of the sent version

View file

@ -21,7 +21,7 @@
#include <GLMHelpers.h>
ScriptableAvatar::ScriptableAvatar() {
_clientTraitsHandler = std::unique_ptr<ClientTraitsHandler>(new ClientTraitsHandler(this));
_clientTraitsHandler.reset(new ClientTraitsHandler(this));
}
QByteArray ScriptableAvatar::toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking) {

View file

@ -583,15 +583,29 @@ void EntityScriptServer::handleOctreePacket(QSharedPointer<ReceivedMessage> mess
void EntityScriptServer::aboutToFinish() {
shutdownScriptEngine();
DependencyManager::get<EntityScriptingInterface>()->setEntityTree(nullptr);
DependencyManager::get<ResourceManager>()->cleanup();
DependencyManager::destroy<AudioScriptingInterface>();
DependencyManager::destroy<SoundCacheScriptingInterface>();
DependencyManager::destroy<ResourceScriptingInterface>();
DependencyManager::destroy<EntityScriptingInterface>();
DependencyManager::destroy<SoundCache>();
DependencyManager::destroy<ScriptCache>();
DependencyManager::destroy<ResourceManager>();
DependencyManager::destroy<ResourceCacheSharedItems>();
DependencyManager::destroy<MessagesClient>();
DependencyManager::destroy<AssignmentDynamicFactory>();
DependencyManager::destroy<AssignmentParentFinder>();
DependencyManager::destroy<AvatarHashMap>();
DependencyManager::get<ResourceManager>()->cleanup();
DependencyManager::destroy<PluginManager>();
DependencyManager::destroy<ResourceScriptingInterface>();
DependencyManager::destroy<EntityScriptingInterface>();
// cleanup the AudioInjectorManager (and any still running injectors)
DependencyManager::destroy<AudioInjectorManager>();

View file

@ -207,12 +207,12 @@ Flickable {
width: 112
label: "Y Offset"
suffix: " cm"
minimumValue: -10
minimumValue: -50
maximumValue: 50
realStepSize: 1
realValue: -5
colorScheme: hifi.colorSchemes.dark
onEditingFinished: {
onRealValueChanged: {
sendConfigurationSettings();
openVrConfiguration.forceActiveFocus();
}
@ -223,14 +223,14 @@ Flickable {
id: headZOffset
width: 112
label: "Z Offset"
minimumValue: -10
minimumValue: -50
maximumValue: 50
realStepSize: 1
decimals: 1
suffix: " cm"
realValue: -5
colorScheme: hifi.colorSchemes.dark
onEditingFinished: {
onRealValueChanged: {
sendConfigurationSettings();
openVrConfiguration.forceActiveFocus();
}
@ -319,11 +319,12 @@ Flickable {
width: 112
suffix: " cm"
label: "Y Offset"
minimumValue: -10
minimumValue: -30
maximumValue: 30
realStepSize: 1
colorScheme: hifi.colorSchemes.dark
onEditingFinished: {
onRealValueChanged: {
sendConfigurationSettings();
openVrConfiguration.forceActiveFocus();
}
@ -335,12 +336,13 @@ Flickable {
width: 112
label: "Z Offset"
suffix: " cm"
minimumValue: -10
minimumValue: -30
maximumValue: 30
realStepSize: 1
decimals: 1
colorScheme: hifi.colorSchemes.dark
onEditingFinished: {
onRealValueChanged: {
sendConfigurationSettings();
openVrConfiguration.forceActiveFocus();
}
@ -574,7 +576,7 @@ Flickable {
colorScheme: hifi.colorSchemes.dark
realValue: 33.0
onEditingFinished: {
onRealValueChanged: {
sendConfigurationSettings();
openVrConfiguration.forceActiveFocus();
}
@ -592,7 +594,7 @@ Flickable {
colorScheme: hifi.colorSchemes.dark
realValue: 48
onEditingFinished: {
onRealValueChanged: {
sendConfigurationSettings();
openVrConfiguration.forceActiveFocus();
}
@ -771,7 +773,7 @@ Flickable {
realStepSize: 1.0
colorScheme: hifi.colorSchemes.dark
onEditingFinished: {
onRealValueChanged: {
calibrationTimer.interval = realValue * 1000;
openVrConfiguration.countDown = realValue;
numberAnimation.duration = calibrationTimer.interval;
@ -977,6 +979,13 @@ Flickable {
var configurationType = settings["trackerConfiguration"];
displayTrackerConfiguration(configurationType);
// default offset for user wearing puck on the center of their forehead.
headYOffset.realValue = 4; // (cm), puck is above the head joint.
headZOffset.realValue = 8; // (cm), puck is in front of the head joint.
// defaults for user wearing the pucks on the backs of their palms.
handYOffset.realValue = 8; // (cm), puck is past the the hand joint. (set this to zero if puck is on the wrist)
handZOffset.realValue = -4; // (cm), puck is on above hand joint.
var HmdHead = settings["HMDHead"];
var viveController = settings["handController"];

View file

@ -87,7 +87,6 @@
#include <FramebufferCache.h>
#include <gpu/Batch.h>
#include <gpu/Context.h>
#include <gpu/gl/GLBackend.h>
#include <InfoView.h>
#include <input-plugins/InputPlugin.h>
#include <controllers/UserInputMapper.h>
@ -122,8 +121,6 @@
#include <plugins/SteamClientPlugin.h>
#include <plugins/InputConfiguration.h>
#include <RecordingScriptingInterface.h>
#include <UpdateSceneTask.h>
#include <RenderViewTask.h>
#include <render/EngineStats.h>
#include <SecondaryCamera.h>
#include <ResourceCache.h>
@ -264,54 +261,7 @@ extern "C" {
#include "AndroidHelper.h"
#endif
enum ApplicationEvent {
// Execute a lambda function
Lambda = QEvent::User + 1,
// Trigger the next render
Render,
// Trigger the next idle
Idle,
};
class RenderEventHandler : public QObject {
using Parent = QObject;
Q_OBJECT
public:
RenderEventHandler() {
// Transfer to a new thread
moveToNewNamedThread(this, "RenderThread", [](QThread* renderThread) {
hifi::qt::addBlockingForbiddenThread("Render", renderThread);
qApp->_lastTimeRendered.start();
}, std::bind(&RenderEventHandler::initialize, this), QThread::HighestPriority);
}
private:
void initialize() {
setObjectName("Render");
PROFILE_SET_THREAD_NAME("Render");
setCrashAnnotation("render_thread_id", std::to_string((size_t)QThread::currentThreadId()));
}
void render() {
if (qApp->shouldPaint()) {
qApp->paintGL();
}
}
bool event(QEvent* event) override {
switch ((int)event->type()) {
case ApplicationEvent::Render:
render();
qApp->_pendingRenderEvent.store(false);
return true;
default:
break;
}
return Parent::event(event);
}
};
#include "graphics/RenderEventHandler.h"
Q_LOGGING_CATEGORY(trace_app_input_mouse, "trace.app.input.mouse")
@ -374,8 +324,6 @@ static const int THROTTLED_SIM_FRAME_PERIOD_MS = MSECS_PER_SECOND / THROTTLED_SI
static const int ENTITY_SERVER_ADDED_TIMEOUT = 5000;
static const int ENTITY_SERVER_CONNECTION_TIMEOUT = 5000;
static const uint32_t INVALID_FRAME = UINT32_MAX;
static const float INITIAL_QUERY_RADIUS = 10.0f; // priority radius for entities before physics enabled
static const QString DESKTOP_LOCATION = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation);
@ -2060,7 +2008,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
auto displayPlugin = qApp->getActiveDisplayPlugin();
properties["render_rate"] = _renderLoopCounter.rate();
properties["render_rate"] = getRenderLoopRate();
properties["target_render_rate"] = getTargetRenderFrameRate();
properties["present_rate"] = displayPlugin->presentRate();
properties["new_frame_present_rate"] = displayPlugin->newFramePresentRate();
@ -2372,7 +2320,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
DependencyManager::get<Keyboard>()->createKeyboard();
_pendingIdleEvent = false;
_pendingRenderEvent = false;
_graphicsEngine.startup();
qCDebug(interfaceapp) << "Metaverse session ID is" << uuidStringWithoutCurlyBraces(accountManager->getSessionID());
@ -2642,11 +2590,6 @@ void Application::cleanupBeforeQuit() {
// Cleanup all overlays after the scripts, as scripts might add more
_overlays.cleanupAllOverlays();
// The cleanup process enqueues the transactions but does not process them. Calling this here will force the actual
// removal of the items.
// See https://highfidelity.fogbugz.com/f/cases/5328
_main3DScene->enqueueFrame(); // flush all the transactions
_main3DScene->processTransactionQueue(); // process and apply deletions
// first stop all timers directly or by invokeMethod
// depending on what thread they run in
@ -2661,7 +2604,6 @@ void Application::cleanupBeforeQuit() {
}
_window->saveGeometry();
_gpuContext->shutdown();
// Destroy third party processes after scripts have finished using them.
#ifdef HAVE_DDE
@ -2719,10 +2661,9 @@ Application::~Application() {
_shapeManager.collectGarbage();
assert(_shapeManager.getNumShapes() == 0);
// shutdown render engine
_main3DScene = nullptr;
_renderEngine = nullptr;
// shutdown graphics engine
_graphicsEngine.shutdown();
_gameWorkload.shutdown();
DependencyManager::destroy<Preferences>();
@ -2780,8 +2721,6 @@ Application::~Application() {
// Can't log to file past this point, FileLogger about to be deleted
qInstallMessageHandler(LogHandler::verboseMessageHandler);
_renderEventHandler->deleteLater();
}
void Application::initializeGL() {
@ -2871,26 +2810,13 @@ void Application::initializeGL() {
#endif
_renderEventHandler = new RenderEventHandler();
// Build an offscreen GL context for the main thread.
_glWidget->makeCurrent();
glClearColor(0.2f, 0.2f, 0.2f, 1);
glClear(GL_COLOR_BUFFER_BIT);
_glWidget->swapBuffers();
// Create the GPU backend
// Requires the window context, because that's what's used in the actual rendering
// and the GPU backend will make things like the VAO which cannot be shared across
// contexts
_glWidget->makeCurrent();
gpu::Context::init<gpu::gl::GLBackend>();
_glWidget->makeCurrent();
_gpuContext = std::make_shared<gpu::Context>();
DependencyManager::get<TextureCache>()->setGPUContext(_gpuContext);
_graphicsEngine.initializeGPU(_glWidget);
}
static const QString SPLASH_SKYBOX{ "{\"ProceduralEntity\":{ \"version\":2, \"shaderUrl\":\"qrc:///shaders/splashSkybox.frag\" } }" };
@ -2904,7 +2830,7 @@ void Application::initializeDisplayPlugins() {
// Once time initialization code
DisplayPluginPointer targetDisplayPlugin;
foreach(auto displayPlugin, displayPlugins) {
displayPlugin->setContext(_gpuContext);
displayPlugin->setContext(_graphicsEngine.getGPUContext());
if (displayPlugin->getName() == lastActiveDisplayPluginName) {
targetDisplayPlugin = displayPlugin;
}
@ -2974,18 +2900,7 @@ void Application::initializeDisplayPlugins() {
void Application::initializeRenderEngine() {
// FIXME: on low end systems os the shaders take up to 1 minute to compile, so we pause the deadlock watchdog thread.
DeadlockWatchdogThread::withPause([&] {
// Set up the render engine
render::CullFunctor cullFunctor = LODManager::shouldRender;
_renderEngine->addJob<UpdateSceneTask>("UpdateScene");
#ifndef Q_OS_ANDROID
_renderEngine->addJob<SecondaryCameraRenderTask>("SecondaryCameraJob", cullFunctor, !DISABLE_DEFERRED);
#endif
_renderEngine->addJob<RenderViewTask>("RenderMainView", cullFunctor, !DISABLE_DEFERRED, render::ItemKey::TAG_BITS_0, render::ItemKey::TAG_BITS_0);
_renderEngine->load();
_renderEngine->registerScene(_main3DScene);
// Now that OpenGL is initialized, we are sure we have a valid context and can create the various pipeline shaders with success.
DependencyManager::get<GeometryCache>()->initializeShapePipelines();
_graphicsEngine.initializeRender(DISABLE_DEFERRED);
DependencyManager::get<Keyboard>()->registerKeyboardHighlighting();
});
}
@ -3186,7 +3101,7 @@ void Application::onDesktopRootContextCreated(QQmlContext* surfaceContext) {
surfaceContext->setContextProperty("Recording", DependencyManager::get<RecordingScriptingInterface>().data());
surfaceContext->setContextProperty("Preferences", DependencyManager::get<Preferences>().data());
surfaceContext->setContextProperty("AddressManager", DependencyManager::get<AddressManager>().data());
surfaceContext->setContextProperty("FrameTimings", &_frameTimingsScriptingInterface);
surfaceContext->setContextProperty("FrameTimings", &_graphicsEngine._frameTimingsScriptingInterface);
surfaceContext->setContextProperty("Rates", new RatesScriptingInterface(this));
surfaceContext->setContextProperty("TREE_SCALE", TREE_SCALE);
@ -3235,7 +3150,7 @@ void Application::onDesktopRootContextCreated(QQmlContext* surfaceContext) {
surfaceContext->setContextProperty("LODManager", DependencyManager::get<LODManager>().data());
surfaceContext->setContextProperty("HMD", DependencyManager::get<HMDScriptingInterface>().data());
surfaceContext->setContextProperty("Scene", DependencyManager::get<SceneScriptingInterface>().data());
surfaceContext->setContextProperty("Render", _renderEngine->getConfiguration().get());
surfaceContext->setContextProperty("Render", _graphicsEngine.getRenderEngine()->getConfiguration().get());
surfaceContext->setContextProperty("Workload", _gameWorkload._engine->getConfiguration().get());
surfaceContext->setContextProperty("Reticle", getApplicationCompositor().getReticleInterface());
surfaceContext->setContextProperty("Snapshot", DependencyManager::get<Snapshot>().data());
@ -3523,7 +3438,7 @@ void Application::resizeGL() {
auto renderResolutionScale = getRenderResolutionScale();
if (displayPlugin->getRenderResolutionScale() != renderResolutionScale) {
auto renderConfig = _renderEngine->getConfiguration();
auto renderConfig = _graphicsEngine.getRenderEngine()->getConfiguration();
assert(renderConfig);
auto mainView = renderConfig->getConfig("RenderMainView.RenderDeferredTask");
assert(mainView);
@ -3754,8 +3669,8 @@ void Application::onPresent(quint32 frameCount) {
postEvent(this, new QEvent((QEvent::Type)ApplicationEvent::Idle), Qt::HighEventPriority);
}
expected = false;
if (_renderEventHandler && !isAboutToQuit() && _pendingRenderEvent.compare_exchange_strong(expected, true)) {
postEvent(_renderEventHandler, new QEvent((QEvent::Type)ApplicationEvent::Render));
if (_graphicsEngine.checkPendingRenderEvent() && !isAboutToQuit()) {
postEvent(_graphicsEngine._renderEventHandler, new QEvent((QEvent::Type)ApplicationEvent::Render));
}
}
@ -4543,39 +4458,6 @@ bool Application::acceptSnapshot(const QString& urlString) {
return true;
}
static uint32_t _renderedFrameIndex { INVALID_FRAME };
bool Application::shouldPaint() const {
if (_aboutToQuit || _window->isMinimized()) {
return false;
}
auto displayPlugin = getActiveDisplayPlugin();
#ifdef DEBUG_PAINT_DELAY
static uint64_t paintDelaySamples{ 0 };
static uint64_t paintDelayUsecs{ 0 };
paintDelayUsecs += displayPlugin->getPaintDelayUsecs();
static const int PAINT_DELAY_THROTTLE = 1000;
if (++paintDelaySamples % PAINT_DELAY_THROTTLE == 0) {
qCDebug(interfaceapp).nospace() <<
"Paint delay (" << paintDelaySamples << " samples): " <<
(float)paintDelaySamples / paintDelayUsecs << "us";
}
#endif
// Throttle if requested
if (displayPlugin->isThrottled() && (_lastTimeRendered.elapsed() < THROTTLED_SIM_FRAME_PERIOD_MS)) {
return false;
}
// Sync up the _renderedFrameIndex
_renderedFrameIndex = displayPlugin->presentCount();
return true;
}
#ifdef Q_OS_WIN
#include <Windows.h>
#include <TCHAR.h>
@ -4829,26 +4711,13 @@ void Application::idle() {
if (displayPlugin) {
PROFILE_COUNTER_IF_CHANGED(app, "present", float, displayPlugin->presentRate());
}
PROFILE_COUNTER_IF_CHANGED(app, "renderLoopRate", float, _renderLoopCounter.rate());
PROFILE_COUNTER_IF_CHANGED(app, "currentDownloads", uint32_t, ResourceCache::getLoadingRequestCount());
PROFILE_COUNTER_IF_CHANGED(app, "renderLoopRate", float, getRenderLoopRate());
PROFILE_COUNTER_IF_CHANGED(app, "currentDownloads", uint32_t, ResourceCache::getLoadingRequests().length());
PROFILE_COUNTER_IF_CHANGED(app, "pendingDownloads", uint32_t, ResourceCache::getPendingRequestCount());
PROFILE_COUNTER_IF_CHANGED(app, "currentProcessing", int, DependencyManager::get<StatTracker>()->getStat("Processing").toInt());
PROFILE_COUNTER_IF_CHANGED(app, "pendingProcessing", int, DependencyManager::get<StatTracker>()->getStat("PendingProcessing").toInt());
auto renderConfig = _renderEngine->getConfiguration();
PROFILE_COUNTER_IF_CHANGED(render, "gpuTime", float, (float)_gpuContext->getFrameTimerGPUAverage());
auto opaqueRangeTimer = renderConfig->getConfig("OpaqueRangeTimer");
auto linearDepth = renderConfig->getConfig("LinearDepth");
auto surfaceGeometry = renderConfig->getConfig("SurfaceGeometry");
auto renderDeferred = renderConfig->getConfig("RenderDeferred");
auto toneAndPostRangeTimer = renderConfig->getConfig("ToneAndPostRangeTimer");
PROFILE_COUNTER(render_detail, "gpuTimes", {
{ "OpaqueRangeTimer", opaqueRangeTimer ? opaqueRangeTimer->property("gpuRunTime") : 0 },
{ "LinearDepth", linearDepth ? linearDepth->property("gpuRunTime") : 0 },
{ "SurfaceGeometry", surfaceGeometry ? surfaceGeometry->property("gpuRunTime") : 0 },
{ "RenderDeferred", renderDeferred ? renderDeferred->property("gpuRunTime") : 0 },
{ "ToneAndPostRangeTimer", toneAndPostRangeTimer ? toneAndPostRangeTimer->property("gpuRunTime") : 0 }
});
auto renderConfig = _graphicsEngine.getRenderEngine()->getConfiguration();
PROFILE_COUNTER_IF_CHANGED(render, "gpuTime", float, (float)_graphicsEngine.getGPUContext()->getFrameTimerGPUAverage());
PROFILE_RANGE(app, __FUNCTION__);
@ -5223,9 +5092,6 @@ void Application::init() {
#if !defined(DISABLE_QML)
DependencyManager::get<DialogsManager>()->toggleLoginDialog();
#endif
if (!DISABLE_DEFERRED) {
DependencyManager::get<DeferredLightingEffect>()->init();
}
DependencyManager::get<AvatarManager>()->init();
_timerStart.start();
@ -5294,7 +5160,7 @@ void Application::init() {
}
}, Qt::QueuedConnection);
_gameWorkload.startup(getEntities()->getWorkloadSpace(), _main3DScene, _entitySimulation);
_gameWorkload.startup(getEntities()->getWorkloadSpace(), _graphicsEngine.getRenderScene(), _entitySimulation);
_entitySimulation->setWorkloadSpace(getEntities()->getWorkloadSpace());
}
@ -5328,7 +5194,7 @@ void Application::updateLOD(float deltaTime) const {
// adjust it unless we were asked to disable this feature, or if we're currently in throttleRendering mode
if (!isThrottleRendering()) {
float presentTime = getActiveDisplayPlugin()->getAveragePresentTime();
float engineRunTime = (float)(_renderEngine->getConfiguration().get()->getCPURunTime());
float engineRunTime = (float)(_graphicsEngine.getRenderEngine()->getConfiguration().get()->getCPURunTime());
float gpuTime = getGPUContext()->getFrameTimerGPUAverage();
float batchTime = getGPUContext()->getFrameTimerBatchAverage();
auto lodManager = DependencyManager::get<LODManager>();
@ -5729,7 +5595,7 @@ void Application::updateSecondaryCameraViewFrustum() {
// camera should be.
// Code based on SecondaryCameraJob
auto renderConfig = _renderEngine->getConfiguration();
auto renderConfig = _graphicsEngine.getRenderEngine()->getConfiguration();
assert(renderConfig);
auto camera = dynamic_cast<SecondaryCameraJobConfig*>(renderConfig->getConfig("SecondaryCamera"));
@ -5798,7 +5664,7 @@ void Application::updateSecondaryCameraViewFrustum() {
static bool domainLoadingInProgress = false;
void Application::update(float deltaTime) {
PROFILE_RANGE_EX(app, __FUNCTION__, 0xffff0000, (uint64_t)_renderFrameCount + 1);
PROFILE_RANGE_EX(app, __FUNCTION__, 0xffff0000, (uint64_t)_graphicsEngine._renderFrameCount + 1);
if (_aboutToQuit) {
return;
@ -6214,7 +6080,7 @@ void Application::update(float deltaTime) {
// TODO: Fix this by modeling the way the secondary camera works on how the main camera works
// ie. Use a camera object stored in the game logic and informs the Engine on where the secondary
// camera should be.
updateSecondaryCameraViewFrustum();
// updateSecondaryCameraViewFrustum();
}
quint64 now = usecTimestampNow();
@ -6290,13 +6156,6 @@ void Application::update(float deltaTime) {
updateRenderArgs(deltaTime);
// HACK
// load the view frustum
// FIXME: This preDisplayRender call is temporary until we create a separate render::scene for the mirror rendering.
// Then we can move this logic into the Avatar::simulate call.
myAvatar->preDisplaySide(&_appRenderArgs._renderArgs);
{
PerformanceTimer perfTimer("AnimDebugDraw");
AnimDebugDraw::getInstance().update();
@ -6315,7 +6174,7 @@ void Application::update(float deltaTime) {
}
void Application::updateRenderArgs(float deltaTime) {
editRenderArgs([this, deltaTime](AppRenderArgs& appRenderArgs) {
_graphicsEngine.editRenderArgs([this, deltaTime](AppRenderArgs& appRenderArgs) {
PerformanceTimer perfTimer("editRenderArgs");
appRenderArgs._headPose = getHMDSensorPose();
@ -6344,7 +6203,7 @@ void Application::updateRenderArgs(float deltaTime) {
_viewFrustum.setProjection(adjustedProjection);
_viewFrustum.calculate();
}
appRenderArgs._renderArgs = RenderArgs(_gpuContext, lodManager->getOctreeSizeScale(),
appRenderArgs._renderArgs = RenderArgs(_graphicsEngine.getGPUContext(), lodManager->getOctreeSizeScale(),
lodManager->getBoundaryLevelAdjust(), lodManager->getLODAngleHalfTan(), RenderArgs::DEFAULT_RENDER_MODE,
RenderArgs::MONO, RenderArgs::RENDER_DEBUG_NONE);
appRenderArgs._renderArgs._scene = getMain3DScene();
@ -6429,6 +6288,13 @@ void Application::updateRenderArgs(float deltaTime) {
QMutexLocker viewLocker(&_viewMutex);
appRenderArgs._renderArgs.setViewFrustum(_displayViewFrustum);
}
// HACK
// load the view frustum
// FIXME: This preDisplayRender call is temporary until we create a separate render::scene for the mirror rendering.
// Then we can move this logic into the Avatar::simulate call.
myAvatar->preDisplaySide(&appRenderArgs._renderArgs);
});
}
@ -6638,11 +6504,16 @@ void Application::resetSensors(bool andReload) {
}
void Application::hmdVisibleChanged(bool visible) {
// TODO
// calling start and stop will change audio input and ouput to default audio devices.
// we need to add a pause/unpause functionality to AudioClient for this to work properly
#if 0
if (visible) {
QMetaObject::invokeMethod(DependencyManager::get<AudioClient>().data(), "start", Qt::QueuedConnection);
} else {
QMetaObject::invokeMethod(DependencyManager::get<AudioClient>().data(), "stop", Qt::QueuedConnection);
}
#endif
}
void Application::updateWindowTitle() const {
@ -7033,7 +6904,7 @@ void Application::registerScriptEngineWithApplicationServices(ScriptEnginePointe
scriptEngine->registerFunction("HMD", "getHUDLookAtPosition3D", HMDScriptingInterface::getHUDLookAtPosition3D, 0);
scriptEngine->registerGlobalObject("Scene", DependencyManager::get<SceneScriptingInterface>().data());
scriptEngine->registerGlobalObject("Render", _renderEngine->getConfiguration().get());
scriptEngine->registerGlobalObject("Render", _graphicsEngine.getRenderEngine()->getConfiguration().get());
scriptEngine->registerGlobalObject("Workload", _gameWorkload._engine->getConfiguration().get());
GraphicsScriptingInterface::registerMetaTypes(scriptEngine.data());

View file

@ -70,11 +70,11 @@
#include "ui/overlays/Overlays.h"
#include "workload/GameWorkload.h"
#include "graphics/GraphicsEngine.h"
#include <procedural/ProceduralSkybox.h>
#include <graphics/Skybox.h>
#include <ModelScriptingInterface.h>
#include "FrameTimingsScriptingInterface.h"
#include "Sound.h"
@ -153,7 +153,6 @@ public:
void updateSecondaryCameraViewFrustum();
void updateCamera(RenderArgs& renderArgs, float deltaTime);
void paintGL();
void resizeGL();
bool event(QEvent* event) override;
@ -203,8 +202,8 @@ public:
Overlays& getOverlays() { return _overlays; }
size_t getRenderFrameCount() const { return _renderFrameCount; }
float getRenderLoopRate() const { return _renderLoopCounter.rate(); }
size_t getRenderFrameCount() const { return _graphicsEngine.getRenderFrameCount(); }
float getRenderLoopRate() const { return _graphicsEngine.getRenderLoopRate(); }
float getNumCollisionObjects() const;
float getTargetRenderFrameRate() const; // frames/second
@ -275,10 +274,10 @@ public:
void setMaxOctreePacketsPerSecond(int maxOctreePPS);
int getMaxOctreePacketsPerSecond() const;
render::ScenePointer getMain3DScene() override { return _main3DScene; }
const render::ScenePointer& getMain3DScene() const { return _main3DScene; }
render::EnginePointer getRenderEngine() override { return _renderEngine; }
gpu::ContextPointer getGPUContext() const { return _gpuContext; }
render::ScenePointer getMain3DScene() override { return _graphicsEngine.getRenderScene(); }
render::EnginePointer getRenderEngine() override { return _graphicsEngine.getRenderEngine(); }
gpu::ContextPointer getGPUContext() const { return _graphicsEngine.getGPUContext(); }
const GameWorkload& getGameWorkload() const { return _gameWorkload; }
@ -515,7 +514,6 @@ private:
bool handleFileOpenEvent(QFileOpenEvent* event);
void cleanupBeforeQuit();
bool shouldPaint() const;
void idle();
void update(float deltaTime);
@ -535,8 +533,6 @@ private:
void initializeAcceptedFiles();
void runRenderFrame(RenderArgs* renderArgs/*, Camera& whichCamera, bool selfAvatarOnly = false*/);
bool importJSONFromURL(const QString& urlString);
bool importSVOFromURL(const QString& urlString);
bool importFromZIP(const QString& filePath);
@ -586,18 +582,12 @@ private:
bool _activatingDisplayPlugin { false };
uint32_t _renderFrameCount { 0 };
// Frame Rate Measurement
RateCounter<500> _renderLoopCounter;
RateCounter<500> _gameLoopCounter;
FrameTimingsScriptingInterface _frameTimingsScriptingInterface;
QTimer _minimizedWindowTimer;
QElapsedTimer _timerStart;
QElapsedTimer _lastTimeUpdated;
QElapsedTimer _lastTimeRendered;
int _minimumGPUTextureMemSizeStabilityCount { 30 };
@ -683,29 +673,9 @@ private:
quint64 _lastFaceTrackerUpdate;
render::ScenePointer _main3DScene{ new render::Scene(glm::vec3(-0.5f * (float)TREE_SCALE), (float)TREE_SCALE) };
render::EnginePointer _renderEngine{ new render::RenderEngine() };
gpu::ContextPointer _gpuContext; // initialized during window creation
GameWorkload _gameWorkload;
mutable QMutex _renderArgsMutex{ QMutex::Recursive };
struct AppRenderArgs {
render::Args _renderArgs;
glm::mat4 _eyeToWorld;
glm::mat4 _view;
glm::mat4 _eyeOffsets[2];
glm::mat4 _eyeProjections[2];
glm::mat4 _headPose;
glm::mat4 _sensorToWorld;
float _sensorToWorldScale { 1.0f };
bool _isStereo{ false };
};
AppRenderArgs _appRenderArgs;
using RenderArgsEditor = std::function <void (AppRenderArgs&)>;
void editRenderArgs(RenderArgsEditor editor);
GraphicsEngine _graphicsEngine;
void updateRenderArgs(float deltaTime);
@ -751,8 +721,6 @@ private:
bool _keyboardDeviceHasFocus { true };
QString _returnFromFullScreenMirrorTo;
ConnectionMonitor _connectionMonitor;
QTimer _addAssetToWorldResizeTimer;
@ -786,12 +754,8 @@ private:
QUrl _avatarOverrideUrl;
bool _saveAvatarOverrideUrl { false };
QObject* _renderEventHandler{ nullptr };
friend class RenderEventHandler;
std::atomic<bool> _pendingIdleEvent { true };
std::atomic<bool> _pendingRenderEvent { true };
bool quitWhenFinished { false };

View file

@ -19,220 +19,207 @@
#include "Util.h"
// Statically provided display and input plugins
extern DisplayPluginList getDisplayPlugins();
void Application::editRenderArgs(RenderArgsEditor editor) {
QMutexLocker renderLocker(&_renderArgsMutex);
editor(_appRenderArgs);
}
void Application::paintGL() {
// Some plugins process message events, allowing paintGL to be called reentrantly.
_renderFrameCount++;
_lastTimeRendered.start();
auto lastPaintBegin = usecTimestampNow();
PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
PerformanceTimer perfTimer("paintGL");
if (nullptr == _displayPlugin) {
return;
}
DisplayPluginPointer displayPlugin;
{
PROFILE_RANGE(render, "/getActiveDisplayPlugin");
displayPlugin = getActiveDisplayPlugin();
}
{
PROFILE_RANGE(render, "/pluginBeginFrameRender");
// If a display plugin loses it's underlying support, it
// needs to be able to signal us to not use it
if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
QMetaObject::invokeMethod(this, "updateDisplayMode");
return;
}
}
RenderArgs renderArgs;
glm::mat4 HMDSensorPose;
glm::mat4 eyeToWorld;
glm::mat4 sensorToWorld;
bool isStereo;
glm::mat4 stereoEyeOffsets[2];
glm::mat4 stereoEyeProjections[2];
{
QMutexLocker viewLocker(&_renderArgsMutex);
renderArgs = _appRenderArgs._renderArgs;
// don't render if there is no context.
if (!_appRenderArgs._renderArgs._context) {
return;
}
HMDSensorPose = _appRenderArgs._headPose;
eyeToWorld = _appRenderArgs._eyeToWorld;
sensorToWorld = _appRenderArgs._sensorToWorld;
isStereo = _appRenderArgs._isStereo;
for_each_eye([&](Eye eye) {
stereoEyeOffsets[eye] = _appRenderArgs._eyeOffsets[eye];
stereoEyeProjections[eye] = _appRenderArgs._eyeProjections[eye];
});
}
{
PROFILE_RANGE(render, "/gpuContextReset");
_gpuContext->beginFrame(_appRenderArgs._view, HMDSensorPose);
// Reset the gpu::Context Stages
// Back to the default framebuffer;
gpu::doInBatch("Application_render::gpuContextReset", _gpuContext, [&](gpu::Batch& batch) {
batch.resetStages();
});
}
{
PROFILE_RANGE(render, "/renderOverlay");
PerformanceTimer perfTimer("renderOverlay");
// NOTE: There is no batch associated with this renderArgs
// the ApplicationOverlay class assumes it's viewport is setup to be the device size
renderArgs._viewport = glm::ivec4(0, 0, getDeviceSize());
_applicationOverlay.renderOverlay(&renderArgs);
}
{
PROFILE_RANGE(render, "/updateCompositor");
getApplicationCompositor().setFrameInfo(_renderFrameCount, eyeToWorld, sensorToWorld);
}
gpu::FramebufferPointer finalFramebuffer;
QSize finalFramebufferSize;
{
PROFILE_RANGE(render, "/getOutputFramebuffer");
// Primary rendering pass
auto framebufferCache = DependencyManager::get<FramebufferCache>();
finalFramebufferSize = framebufferCache->getFrameBufferSize();
// Final framebuffer that will be handed to the display-plugin
finalFramebuffer = framebufferCache->getFramebuffer();
}
{
if (isStereo) {
renderArgs._context->enableStereo(true);
renderArgs._context->setStereoProjections(stereoEyeProjections);
renderArgs._context->setStereoViews(stereoEyeOffsets);
}
renderArgs._hudOperator = displayPlugin->getHUDOperator();
renderArgs._hudTexture = _applicationOverlay.getOverlayTexture();
renderArgs._blitFramebuffer = finalFramebuffer;
runRenderFrame(&renderArgs);
}
auto frame = _gpuContext->endFrame();
frame->frameIndex = _renderFrameCount;
frame->framebuffer = finalFramebuffer;
frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer) {
auto frameBufferCache = DependencyManager::get<FramebufferCache>();
if (frameBufferCache) {
frameBufferCache->releaseFramebuffer(framebuffer);
}
};
// deliver final scene rendering commands to the display plugin
{
PROFILE_RANGE(render, "/pluginOutput");
PerformanceTimer perfTimer("pluginOutput");
_renderLoopCounter.increment();
displayPlugin->submitFrame(frame);
}
// Reset the framebuffer and stereo state
renderArgs._blitFramebuffer.reset();
renderArgs._context->enableStereo(false);
#if !defined(DISABLE_QML)
{
auto stats = Stats::getInstance();
if (stats) {
stats->setRenderDetails(renderArgs._details);
}
}
#endif
uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
_frameTimingsScriptingInterface.addValue(lastPaintDuration);
}
//void Application::paintGL() {
// // Some plugins process message events, allowing paintGL to be called reentrantly.
//
// _renderFrameCount++;
// // SG: Moved into the RenderEventHandler
// //_lastTimeRendered.start();
//
// auto lastPaintBegin = usecTimestampNow();
// PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
// PerformanceTimer perfTimer("paintGL");
//
// if (nullptr == _displayPlugin) {
// return;
// }
//
// DisplayPluginPointer displayPlugin;
// {
// PROFILE_RANGE(render, "/getActiveDisplayPlugin");
// displayPlugin = getActiveDisplayPlugin();
// }
//
// {
// PROFILE_RANGE(render, "/pluginBeginFrameRender");
// // If a display plugin loses it's underlying support, it
// // needs to be able to signal us to not use it
// if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
// QMetaObject::invokeMethod(this, "updateDisplayMode");
// return;
// }
// }
//
// RenderArgs renderArgs;
// glm::mat4 HMDSensorPose;
// glm::mat4 eyeToWorld;
// glm::mat4 sensorToWorld;
//
// bool isStereo;
// glm::mat4 stereoEyeOffsets[2];
// glm::mat4 stereoEyeProjections[2];
//
// {
// QMutexLocker viewLocker(&_renderArgsMutex);
// renderArgs = _appRenderArgs._renderArgs;
//
// // don't render if there is no context.
// if (!_appRenderArgs._renderArgs._context) {
// return;
// }
//
// HMDSensorPose = _appRenderArgs._headPose;
// eyeToWorld = _appRenderArgs._eyeToWorld;
// sensorToWorld = _appRenderArgs._sensorToWorld;
// isStereo = _appRenderArgs._isStereo;
// for_each_eye([&](Eye eye) {
// stereoEyeOffsets[eye] = _appRenderArgs._eyeOffsets[eye];
// stereoEyeProjections[eye] = _appRenderArgs._eyeProjections[eye];
// });
// }
//
// {
// PROFILE_RANGE(render, "/gpuContextReset");
// _graphicsEngine.getGPUContext()->beginFrame(_appRenderArgs._view, HMDSensorPose);
// // Reset the gpu::Context Stages
// // Back to the default framebuffer;
// gpu::doInBatch("Application_render::gpuContextReset", _graphicsEngine.getGPUContext(), [&](gpu::Batch& batch) {
// batch.resetStages();
// });
// }
//
//
// {
// PROFILE_RANGE(render, "/renderOverlay");
// PerformanceTimer perfTimer("renderOverlay");
// // NOTE: There is no batch associated with this renderArgs
// // the ApplicationOverlay class assumes it's viewport is setup to be the device size
// renderArgs._viewport = glm::ivec4(0, 0, getDeviceSize() * getRenderResolutionScale());
// _applicationOverlay.renderOverlay(&renderArgs);
// }
//
// {
// PROFILE_RANGE(render, "/updateCompositor");
// getApplicationCompositor().setFrameInfo(_renderFrameCount, eyeToWorld, sensorToWorld);
// }
//
// gpu::FramebufferPointer finalFramebuffer;
// QSize finalFramebufferSize;
// {
// PROFILE_RANGE(render, "/getOutputFramebuffer");
// // Primary rendering pass
// auto framebufferCache = DependencyManager::get<FramebufferCache>();
// finalFramebufferSize = framebufferCache->getFrameBufferSize();
// // Final framebuffer that will be handled to the display-plugin
// finalFramebuffer = framebufferCache->getFramebuffer();
// }
//
// {
// if (isStereo) {
// renderArgs._context->enableStereo(true);
// renderArgs._context->setStereoProjections(stereoEyeProjections);
// renderArgs._context->setStereoViews(stereoEyeOffsets);
// }
//
// renderArgs._hudOperator = displayPlugin->getHUDOperator();
// renderArgs._hudTexture = _applicationOverlay.getOverlayTexture();
// renderArgs._blitFramebuffer = finalFramebuffer;
// _graphicsEngine.render_runRenderFrame(&renderArgs);
// }
//
// auto frame = _graphicsEngine.getGPUContext()->endFrame();
// frame->frameIndex = _renderFrameCount;
// frame->framebuffer = finalFramebuffer;
// frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer) {
// auto frameBufferCache = DependencyManager::get<FramebufferCache>();
// if (frameBufferCache) {
// frameBufferCache->releaseFramebuffer(framebuffer);
// }
// };
// // deliver final scene rendering commands to the display plugin
// {
// PROFILE_RANGE(render, "/pluginOutput");
// PerformanceTimer perfTimer("pluginOutput");
// _renderLoopCounter.increment();
// displayPlugin->submitFrame(frame);
// }
//
// // Reset the framebuffer and stereo state
// renderArgs._blitFramebuffer.reset();
// renderArgs._context->enableStereo(false);
//
// {
// Stats::getInstance()->setRenderDetails(renderArgs._details);
// }
//
// uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
// _frameTimingsScriptingInterface.addValue(lastPaintDuration);
//}
// WorldBox Render Data & rendering functions
// Payload data for the world-box debug render item (axes / domain bounds).
// A single instance of this item is added to the scene (see runRenderFrame).
class WorldBoxRenderData {
public:
    using Payload = render::Payload<WorldBoxRenderData>;
    using Pointer = Payload::DataPointer;

    int _val = 0;
    static render::ItemID _item; // unique WorldBoxRenderData
};
render::ItemID WorldBoxRenderData::_item{ render::Item::INVALID_ITEM_ID };
namespace render {
    // Payload function specializations for WorldBoxRenderData.
    // Key: an opaque shape tagged with both TAG_BITS_0 and TAG_BITS_1.
    template <> const ItemKey payloadGetKey(const WorldBoxRenderData::Pointer& stuff) { return ItemKey::Builder::opaqueShape().withTagBits(ItemKey::TAG_BITS_0 | ItemKey::TAG_BITS_1); }
    // The world box reports a default (empty) bound.
    template <> const Item::Bound payloadGetBound(const WorldBoxRenderData::Pointer& stuff) { return Item::Bound(); }
    // Draw the world box only when the WorldAxes menu option is checked.
    template <> void payloadRender(const WorldBoxRenderData::Pointer& stuff, RenderArgs* args) {
        if (Menu::getInstance()->isOptionChecked(MenuOption::WorldAxes)) {
            PerformanceTimer perfTimer("worldBox");

            auto& batch = *args->_batch;
            DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch);
            renderWorldBox(args, batch);
        }
    }
}
// Run one pass of the render engine over the main 3D scene using renderArgs.
// Also lazily creates the WorldBox debug render item on first invocation.
void Application::runRenderFrame(RenderArgs* renderArgs) {
    PROFILE_RANGE(render, __FUNCTION__);
    PerformanceTimer displayTimer("display");
    PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings), "Application::runRenderFrame()");

    // Collects any scene changes to enqueue this frame.
    render::Transaction sceneTransaction;

    if (DependencyManager::get<SceneScriptingInterface>()->shouldRenderEntities()) {
        // render models...
        PerformanceTimer entitiesTimer("entities");
        PerformanceWarning entitiesWarn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings),
                "Application::runRenderFrame() ... entities...");
        renderArgs->_debugFlags = RenderArgs::RENDER_DEBUG_NONE;
    }

    // Make sure the WorldBox is in the scene.
    // For the record, this one RenderItem is the first one we created and added to the scene.
    // We could move that code elsewhere but you know...
    if (!render::Item::isValidID(WorldBoxRenderData::_item)) {
        auto renderData = std::make_shared<WorldBoxRenderData>();
        auto renderPayload = std::make_shared<WorldBoxRenderData::Payload>(renderData);
        WorldBoxRenderData::_item = _main3DScene->allocateID();
        sceneTransaction.resetItem(WorldBoxRenderData::_item, renderPayload);
        _main3DScene->enqueueTransaction(sceneTransaction);
    }

    {
        PerformanceTimer engineTimer("EngineRun");
        _renderEngine->getRenderContext()->args = renderArgs;
        _renderEngine->run();
    }
}
//
//class WorldBoxRenderData {
//public:
// typedef render::Payload<WorldBoxRenderData> Payload;
// typedef Payload::DataPointer Pointer;
//
// int _val = 0;
// static render::ItemID _item; // unique WorldBoxRenderData
//};
//
//render::ItemID WorldBoxRenderData::_item{ render::Item::INVALID_ITEM_ID };
//
//namespace render {
// template <> const ItemKey payloadGetKey(const WorldBoxRenderData::Pointer& stuff) { return ItemKey::Builder::opaqueShape().withTagBits(ItemKey::TAG_BITS_0 | ItemKey::TAG_BITS_1); }
// template <> const Item::Bound payloadGetBound(const WorldBoxRenderData::Pointer& stuff) { return Item::Bound(); }
// template <> void payloadRender(const WorldBoxRenderData::Pointer& stuff, RenderArgs* args) {
// if (Menu::getInstance()->isOptionChecked(MenuOption::WorldAxes)) {
// PerformanceTimer perfTimer("worldBox");
//
// auto& batch = *args->_batch;
// DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch);
// renderWorldBox(args, batch);
// }
// }
//}
//
//void Application::runRenderFrame(RenderArgs* renderArgs) {
// PROFILE_RANGE(render, __FUNCTION__);
// PerformanceTimer perfTimer("display");
// PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings), "Application::runRenderFrame()");
//
// // The pending changes collecting the changes here
// render::Transaction transaction;
//
// if (DependencyManager::get<SceneScriptingInterface>()->shouldRenderEntities()) {
// // render models...
// PerformanceTimer perfTimer("entities");
// PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings),
// "Application::runRenderFrame() ... entities...");
//
// RenderArgs::DebugFlags renderDebugFlags = RenderArgs::RENDER_DEBUG_NONE;
//
// renderArgs->_debugFlags = renderDebugFlags;
// }
//
// // Make sure the WorldBox is in the scene
// // For the record, this one RenderItem is the first one we created and added to the scene.
// // We could move that code elsewhere but you know...
// if (!render::Item::isValidID(WorldBoxRenderData::_item)) {
// auto worldBoxRenderData = std::make_shared<WorldBoxRenderData>();
// auto worldBoxRenderPayload = std::make_shared<WorldBoxRenderData::Payload>(worldBoxRenderData);
//
// WorldBoxRenderData::_item = _main3DScene->allocateID();
//
// transaction.resetItem(WorldBoxRenderData::_item, worldBoxRenderPayload);
// _main3DScene->enqueueTransaction(transaction);
// }
//
// {
// PerformanceTimer perfTimer("EngineRun");
// _renderEngine->getRenderContext()->args = renderArgs;
// _renderEngine->run();
// }
//}

View file

@ -117,7 +117,7 @@ namespace MenuOption {
const QString FrameTimer = "Show Timer";
const QString FullscreenMirror = "Mirror";
const QString Help = "Help...";
const QString HomeLocation = "Home";
const QString HomeLocation = "Home ";
const QString IncreaseAvatarSize = "Increase Avatar Size";
const QString IndependentMode = "Independent Mode";
const QString ActionMotorControl = "Enable Default Motor Control";

View file

@ -171,7 +171,7 @@ void SecondaryCameraJobConfig::setOrientation(glm::quat orient) {
}
void SecondaryCameraJobConfig::enableSecondaryCameraRenderConfigs(bool enabled) {
qApp->getRenderEngine()->getConfiguration()->getConfig<SecondaryCameraRenderTask>()->setEnabled(enabled);
qApp->getRenderEngine()->getConfiguration()->getConfig<SecondaryCameraRenderTask>("SecondaryCameraJob")->setEnabled(enabled);
setEnabled(enabled);
}
@ -187,11 +187,13 @@ public:
void run(const render::RenderContextPointer& renderContext, const RenderArgsPointer& cachedArgs) {
auto args = renderContext->args;
if (cachedArgs) {
args->_blitFramebuffer = cachedArgs->_blitFramebuffer;
args->_viewport = cachedArgs->_viewport;
args->popViewFrustum();
args->_displayMode = cachedArgs->_displayMode;
args->_renderMode = cachedArgs->_renderMode;
}
args->popViewFrustum();
gpu::doInBatch("EndSecondaryCameraFrame::run", args->_context, [&](gpu::Batch& batch) {
batch.restoreContextStereo();

View file

@ -36,114 +36,6 @@
using namespace std;
// Draw the world-origin debug geometry: solid axis lines toward +X/+Y/+Z and
// dashed lines toward the negative axes, grid lines marking the domain
// (HALF_TREE_SCALE) boundaries, a wire cube, and meter-marker spheres.
void renderWorldBox(RenderArgs* args, gpu::Batch& batch) {
    auto geometryCache = DependencyManager::get<GeometryCache>();

    // Show center of world
    static const glm::vec3 RED(1.0f, 0.0f, 0.0f);
    static const glm::vec3 GREEN(0.0f, 1.0f, 0.0f);
    static const glm::vec3 BLUE(0.0f, 0.0f, 1.0f);
    static const glm::vec3 GREY(0.5f, 0.5f, 0.5f);
    static const glm::vec4 GREY4(0.5f, 0.5f, 0.5f, 1.0f);

    static const glm::vec4 DASHED_RED(1.0f, 0.0f, 0.0f, 1.0f);
    static const glm::vec4 DASHED_GREEN(0.0f, 1.0f, 0.0f, 1.0f);
    static const glm::vec4 DASHED_BLUE(0.0f, 0.0f, 1.0f, 1.0f);
    static const float DASH_LENGTH = 1.0f;
    static const float GAP_LENGTH = 1.0f;
    auto transform = Transform{};
    // Geometry IDs are allocated once and reused on every subsequent call.
    static std::array<int, 18> geometryIds;
    static std::once_flag initGeometryIds;
    std::call_once(initGeometryIds, [&] {
        for (size_t i = 0; i < geometryIds.size(); ++i) {
            geometryIds[i] = geometryCache->allocateID();
        }
    });

    batch.setModelTransform(transform);

    // Axis lines: solid toward the positive direction, dashed toward the negative.
    geometryCache->renderLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(HALF_TREE_SCALE, 0.0f, 0.0f), RED, geometryIds[0]);
    geometryCache->renderDashedLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(-HALF_TREE_SCALE, 0.0f, 0.0f), DASHED_RED,
        DASH_LENGTH, GAP_LENGTH, geometryIds[1]);

    geometryCache->renderLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, HALF_TREE_SCALE, 0.0f), GREEN, geometryIds[2]);
    geometryCache->renderDashedLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, -HALF_TREE_SCALE, 0.0f), DASHED_GREEN,
        DASH_LENGTH, GAP_LENGTH, geometryIds[3]);

    geometryCache->renderLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, HALF_TREE_SCALE), BLUE, geometryIds[4]);
    geometryCache->renderDashedLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, -HALF_TREE_SCALE), DASHED_BLUE,
        DASH_LENGTH, GAP_LENGTH, geometryIds[5]);

    // X center boundaries
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, -HALF_TREE_SCALE, 0.0f),
        glm::vec3(HALF_TREE_SCALE, -HALF_TREE_SCALE, 0.0f), GREY,
        geometryIds[6]);
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, -HALF_TREE_SCALE, 0.0f),
        glm::vec3(-HALF_TREE_SCALE, HALF_TREE_SCALE, 0.0f), GREY,
        geometryIds[7]);
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, HALF_TREE_SCALE, 0.0f),
        glm::vec3(HALF_TREE_SCALE, HALF_TREE_SCALE, 0.0f), GREY,
        geometryIds[8]);
    geometryCache->renderLine(batch, glm::vec3(HALF_TREE_SCALE, -HALF_TREE_SCALE, 0.0f),
        glm::vec3(HALF_TREE_SCALE, HALF_TREE_SCALE, 0.0f), GREY,
        geometryIds[9]);

    // Z center boundaries
    geometryCache->renderLine(batch, glm::vec3(0.0f, -HALF_TREE_SCALE, -HALF_TREE_SCALE),
        glm::vec3(0.0f, -HALF_TREE_SCALE, HALF_TREE_SCALE), GREY,
        geometryIds[10]);
    geometryCache->renderLine(batch, glm::vec3(0.0f, -HALF_TREE_SCALE, -HALF_TREE_SCALE),
        glm::vec3(0.0f, HALF_TREE_SCALE, -HALF_TREE_SCALE), GREY,
        geometryIds[11]);
    geometryCache->renderLine(batch, glm::vec3(0.0f, HALF_TREE_SCALE, -HALF_TREE_SCALE),
        glm::vec3(0.0f, HALF_TREE_SCALE, HALF_TREE_SCALE), GREY,
        geometryIds[12]);
    geometryCache->renderLine(batch, glm::vec3(0.0f, -HALF_TREE_SCALE, HALF_TREE_SCALE),
        glm::vec3(0.0f, HALF_TREE_SCALE, HALF_TREE_SCALE), GREY,
        geometryIds[13]);

    // Center boundaries
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, 0.0f, -HALF_TREE_SCALE),
        glm::vec3(-HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE), GREY,
        geometryIds[14]);
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, 0.0f, -HALF_TREE_SCALE),
        glm::vec3(HALF_TREE_SCALE, 0.0f, -HALF_TREE_SCALE), GREY,
        geometryIds[15]);
    geometryCache->renderLine(batch, glm::vec3(HALF_TREE_SCALE, 0.0f, -HALF_TREE_SCALE),
        glm::vec3(HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE), GREY,
        geometryIds[16]);
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE),
        glm::vec3(HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE), GREY,
        geometryIds[17]);

    geometryCache->renderWireCubeInstance(args, batch, GREY4);

    // Draw meter markers along the 3 axis to help with measuring things
    const float MARKER_DISTANCE = 1.0f;
    const float MARKER_RADIUS = 0.05f;

    // One sphere at the origin, one at 1m along each axis, one at (1m, 0, 1m).
    transform = Transform().setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, RED);

    transform = Transform().setTranslation(glm::vec3(MARKER_DISTANCE, 0.0f, 0.0f)).setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, RED);

    transform = Transform().setTranslation(glm::vec3(0.0f, MARKER_DISTANCE, 0.0f)).setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, GREEN);

    transform = Transform().setTranslation(glm::vec3(0.0f, 0.0f, MARKER_DISTANCE)).setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, BLUE);

    transform = Transform().setTranslation(glm::vec3(MARKER_DISTANCE, 0.0f, MARKER_DISTANCE)).setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, GREY);
}
// Do some basic timing tests and report the results
void runTimingTests() {
// How long does it take to make a call to get the time?

View file

@ -15,14 +15,9 @@
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <gpu/Batch.h>
#include <render/Forward.h>
class ShapeEntityItem;
class ShapeInfo;
void renderWorldBox(RenderArgs* args, gpu::Batch& batch);
void runTimingTests();
void runUnitTests();

View file

@ -139,7 +139,7 @@ MyAvatar::MyAvatar(QThread* thread) :
_flyingHMDSetting(QStringList() << AVATAR_SETTINGS_GROUP_NAME << "flyingHMD", _flyingPrefHMD),
_avatarEntityCountSetting(QStringList() << AVATAR_SETTINGS_GROUP_NAME << "avatarEntityData" << "size", 0)
{
_clientTraitsHandler = std::unique_ptr<ClientTraitsHandler>(new ClientTraitsHandler(this));
_clientTraitsHandler.reset(new ClientTraitsHandler(this));
// give the pointer to our head to inherited _headData variable from AvatarData
_headData = new MyHead(this);
@ -807,46 +807,6 @@ void MyAvatar::simulate(float deltaTime) {
// before we perform rig animations and IK.
updateSensorToWorldMatrix();
// if we detect the hand controller is at rest, i.e. lying on the table, or the hand is too far away from the hmd
// disable the associated hand controller input.
{
// NOTE: all poses are in sensor space.
auto leftHandIter = _controllerPoseMap.find(controller::Action::LEFT_HAND);
if (leftHandIter != _controllerPoseMap.end() && leftHandIter->second.isValid()) {
_leftHandAtRestDetector.update(leftHandIter->second.getTranslation(), leftHandIter->second.getRotation());
if (_leftHandAtRestDetector.isAtRest()) {
leftHandIter->second.valid = false;
}
} else {
_leftHandAtRestDetector.invalidate();
}
auto rightHandIter = _controllerPoseMap.find(controller::Action::RIGHT_HAND);
if (rightHandIter != _controllerPoseMap.end() && rightHandIter->second.isValid()) {
_rightHandAtRestDetector.update(rightHandIter->second.getTranslation(), rightHandIter->second.getRotation());
if (_rightHandAtRestDetector.isAtRest()) {
rightHandIter->second.valid = false;
}
} else {
_rightHandAtRestDetector.invalidate();
}
auto headIter = _controllerPoseMap.find(controller::Action::HEAD);
// The 99th percentile man has a spine to fingertip to height ratio of 0.45. Lets increase that by about 10% to 0.5
// then measure the distance the center of the eyes to the finger tips. To come up with this ratio.
// From "The Measure of Man and Woman: Human Factors in Design, Revised Edition" by Alvin R. Tilley, Henry Dreyfuss Associates
const float MAX_HEAD_TO_HAND_DISTANCE_RATIO = 0.52f;
float maxHeadHandDistance = getUserHeight() * MAX_HEAD_TO_HAND_DISTANCE_RATIO;
if (glm::length(headIter->second.getTranslation() - leftHandIter->second.getTranslation()) > maxHeadHandDistance) {
leftHandIter->second.valid = false;
}
if (glm::length(headIter->second.getTranslation() - rightHandIter->second.getTranslation()) > maxHeadHandDistance) {
rightHandIter->second.valid = false;
}
}
{
PerformanceTimer perfTimer("skeleton");

View file

@ -0,0 +1,301 @@
//
// GraphicsEngine.cpp
//
// Created by Sam Gateau on 29/6/2018.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GraphicsEngine.h"
#include <shared/GlobalAppProperties.h>
#include "WorldBox.h"
#include "LODManager.h"
#include <GeometryCache.h>
#include <TextureCache.h>
#include <FramebufferCache.h>
#include <UpdateSceneTask.h>
#include <RenderViewTask.h>
#include <SecondaryCamera.h>
#include "RenderEventHandler.h"
#include <gpu/Batch.h>
#include <gpu/Context.h>
#include <gpu/gl/GLBackend.h>
#include <display-plugins/DisplayPlugin.h>
#include <display-plugins/CompositorHelper.h>
#include <QMetaObject>
#include "ui/Stats.h"
#include "Application.h"
// All members are initialized in-class; nothing to construct here.
GraphicsEngine::GraphicsEngine() = default;
// Teardown happens in shutdown(); the event handler is released via deleteLater() there.
GraphicsEngine::~GraphicsEngine() = default;
// Create the render-thread event handler and bring up the GPU backend on the
// given widget's GL context, then hand the new gpu::Context to the TextureCache.
void GraphicsEngine::initializeGPU(GLWidget* glwidget) {

    _renderEventHandler = new RenderEventHandler(
        [this]() { return this->shouldPaint(); },
        [this]() { this->render_performFrame(); }
    );

    // Requires the window context, because that's what's used in the actual rendering
    // and the GPU backend will make things like the VAO which cannot be shared across
    // contexts
    glwidget->makeCurrent();
    gpu::Context::init<gpu::gl::GLBackend>();
    // NOTE(review): makeCurrent is called again after backend init before creating
    // the context — presumably to guarantee the right context is still current; confirm.
    glwidget->makeCurrent();
    _gpuContext = std::make_shared<gpu::Context>();

    DependencyManager::get<TextureCache>()->setGPUContext(_gpuContext);
}
// Build the render engine task graph (scene update, optional secondary camera,
// main view) and register the scene. Requires GL to be initialized already so
// the shape pipelines can compile their shaders.
void GraphicsEngine::initializeRender(bool disableDeferred) {

    // Set up the render engine
    render::CullFunctor cullFunctor = LODManager::shouldRender;
    _renderEngine->addJob<UpdateSceneTask>("UpdateScene");
#ifndef Q_OS_ANDROID
    // The secondary-camera pass is not built on Android.
    _renderEngine->addJob<SecondaryCameraRenderTask>("SecondaryCameraJob", cullFunctor, !disableDeferred);
#endif
    _renderEngine->addJob<RenderViewTask>("RenderMainView", cullFunctor, !disableDeferred, render::ItemKey::TAG_BITS_0, render::ItemKey::TAG_BITS_0);
    _renderEngine->load();
    _renderEngine->registerScene(_renderScene);

    // Now that OpenGL is initialized, we are sure we have a valid context and can create the various pipeline shaders with success.
    DependencyManager::get<GeometryCache>()->initializeShapePipelines();
}
// Allow the render event handler to begin processing Render events.
void GraphicsEngine::startup() {
    auto handler = static_cast<RenderEventHandler*>(_renderEventHandler);
    handler->resumeThread();
}
// Flush pending scene deletions, shut down the GPU context, and release the
// scene/engine. Order matters: the scene must be drained before it is dropped.
void GraphicsEngine::shutdown() {
    // The cleanup process enqueues the transactions but does not process them. Calling this here will force the actual
    // removal of the items.
    // See https://highfidelity.fogbugz.com/f/cases/5328
    _renderScene->enqueueFrame(); // flush all the transactions
    _renderScene->processTransactionQueue(); // process and apply deletions

    _gpuContext->shutdown();

    // shutdown render engine
    _renderScene = nullptr;
    _renderEngine = nullptr;

    // The handler lives on the render thread (see RenderEventHandler ctor),
    // so schedule its deletion rather than deleting it inline.
    _renderEventHandler->deleteLater();
}
// Execute the render engine's task graph for one frame described by renderArgs.
void GraphicsEngine::render_runRenderFrame(RenderArgs* renderArgs) {
    PROFILE_RANGE(render, __FUNCTION__);
    PerformanceTimer frameTimer("render");

    // Make sure the WorldBox is in the scene.
    // For the record, this one RenderItem is the first one we created and added to the scene.
    // We could move that code elsewhere but you know...
    if (!render::Item::isValidID(WorldBoxRenderData::_item)) {
        render::Transaction worldBoxTransaction;
        auto renderData = std::make_shared<WorldBoxRenderData>();
        auto renderPayload = std::make_shared<WorldBoxRenderData::Payload>(renderData);
        WorldBoxRenderData::_item = _renderScene->allocateID();
        worldBoxTransaction.resetItem(WorldBoxRenderData::_item, renderPayload);
        _renderScene->enqueueTransaction(worldBoxTransaction);
    }

    // Point the engine at this frame's args and run the task graph.
    _renderEngine->getRenderContext()->args = renderArgs;
    _renderEngine->run();
}
// When the active display plugin reports it is throttled, cap rendering to this
// frame rate (used by shouldPaint() below).
static const unsigned int THROTTLED_SIM_FRAMERATE = 15;
static const int THROTTLED_SIM_FRAME_PERIOD_MS = MSECS_PER_SECOND / THROTTLED_SIM_FRAMERATE;
// Decide whether the render thread should produce a frame now. Returns false
// only when the active display plugin is throttled and less than
// THROTTLED_SIM_FRAME_PERIOD_MS has elapsed since the last rendered frame.
bool GraphicsEngine::shouldPaint() const {
    auto displayPlugin = qApp->getActiveDisplayPlugin();

#ifdef DEBUG_PAINT_DELAY
    // Debug-only instrumentation: log the average paint delay every 1000 samples.
    static uint64_t paintDelaySamples{ 0 };
    static uint64_t paintDelayUsecs{ 0 };

    paintDelayUsecs += displayPlugin->getPaintDelayUsecs();

    static const int PAINT_DELAY_THROTTLE = 1000;
    if (++paintDelaySamples % PAINT_DELAY_THROTTLE == 0) {
        qCDebug(interfaceapp).nospace() <<
            "Paint delay (" << paintDelaySamples << " samples): " <<
            (float)paintDelaySamples / paintDelayUsecs << "us";
    }
#endif

    // Throttle if requested
    //if (displayPlugin->isThrottled() && (_graphicsEngine._renderEventHandler->_lastTimeRendered.elapsed() < THROTTLED_SIM_FRAME_PERIOD_MS)) {
    if ( displayPlugin->isThrottled() &&
        (static_cast<RenderEventHandler*>(_renderEventHandler)->_lastTimeRendered.elapsed() < THROTTLED_SIM_FRAME_PERIOD_MS)) {
        return false;
    }

    return true;
}
// Atomically claim the right to post the next Render event.
// Returns true only when the handler exists and no render event was already
// pending (the flag is flipped false -> true in one CAS).
bool GraphicsEngine::checkPendingRenderEvent() {
    if (!_renderEventHandler) {
        return false;
    }
    auto handler = static_cast<RenderEventHandler*>(_renderEventHandler);
    bool expected = false;
    return handler->_pendingRenderEvent.compare_exchange_strong(expected, true);
}
// Produce and submit one frame: begin the frame on the display plugin, snapshot
// the per-frame state published via editRenderArgs() under the mutex, run the
// overlay and main render passes, then hand the finished gpu frame to the
// display plugin. Invoked on the render thread by RenderEventHandler::render().
void GraphicsEngine::render_performFrame() {
    // Some plugins process message events, allowing paintGL to be called reentrantly.

    _renderFrameCount++;

    auto lastPaintBegin = usecTimestampNow();
    PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
    PerformanceTimer perfTimer("paintGL");

    DisplayPluginPointer displayPlugin;
    {
        PROFILE_RANGE(render, "/getActiveDisplayPlugin");
        displayPlugin = qApp->getActiveDisplayPlugin();
    }

    {
        PROFILE_RANGE(render, "/pluginBeginFrameRender");
        // If a display plugin loses it's underlying support, it
        // needs to be able to signal us to not use it
        if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
            QMetaObject::invokeMethod(qApp, "updateDisplayMode");
            return;
        }
    }

    RenderArgs renderArgs;
    glm::mat4 HMDSensorPose;
    glm::mat4 eyeToWorld;
    glm::mat4 sensorToWorld;

    bool isStereo;
    glm::mat4 stereoEyeOffsets[2];
    glm::mat4 stereoEyeProjections[2];

    {
        // Snapshot the frame state shared with the application (see editRenderArgs).
        QMutexLocker viewLocker(&_renderArgsMutex);
        renderArgs = _appRenderArgs._renderArgs;

        // don't render if there is no context.
        if (!_appRenderArgs._renderArgs._context) {
            return;
        }

        HMDSensorPose = _appRenderArgs._headPose;
        eyeToWorld = _appRenderArgs._eyeToWorld;
        sensorToWorld = _appRenderArgs._sensorToWorld;
        isStereo = _appRenderArgs._isStereo;
        for_each_eye([&](Eye eye) {
            stereoEyeOffsets[eye] = _appRenderArgs._eyeOffsets[eye];
            stereoEyeProjections[eye] = _appRenderArgs._eyeProjections[eye];
        });
    }

    {
        PROFILE_RANGE(render, "/gpuContextReset");
        getGPUContext()->beginFrame(_appRenderArgs._view, HMDSensorPose);
        // Reset the gpu::Context Stages
        // Back to the default framebuffer;
        gpu::doInBatch("Application_render::gpuContextReset", getGPUContext(), [&](gpu::Batch& batch) {
            batch.resetStages();
        });
    }

    {
        PROFILE_RANGE(render, "/renderOverlay");
        PerformanceTimer perfTimer("renderOverlay");
        // NOTE: There is no batch associated with this renderArgs
        // the ApplicationOverlay class assumes it's viewport is setup to be the device size
        renderArgs._viewport = glm::ivec4(0, 0, qApp->getDeviceSize());
        qApp->getApplicationOverlay().renderOverlay(&renderArgs);
    }

    {
        PROFILE_RANGE(render, "/updateCompositor");
        qApp->getApplicationCompositor().setFrameInfo(_renderFrameCount, eyeToWorld, sensorToWorld);
    }

    gpu::FramebufferPointer finalFramebuffer;
    QSize finalFramebufferSize;
    {
        PROFILE_RANGE(render, "/getOutputFramebuffer");
        // Primary rendering pass
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        finalFramebufferSize = framebufferCache->getFrameBufferSize();
        // Final framebuffer that will be handed to the display-plugin
        finalFramebuffer = framebufferCache->getFramebuffer();
    }

    {
        // Enable stereo on the context if the snapshot requested it, then run
        // the main render pass into the final framebuffer.
        if (isStereo) {
            renderArgs._context->enableStereo(true);
            renderArgs._context->setStereoProjections(stereoEyeProjections);
            renderArgs._context->setStereoViews(stereoEyeOffsets);
        }

        renderArgs._hudOperator = displayPlugin->getHUDOperator();
        renderArgs._hudTexture = qApp->getApplicationOverlay().getOverlayTexture();
        renderArgs._blitFramebuffer = finalFramebuffer;
        render_runRenderFrame(&renderArgs);
    }

    auto frame = getGPUContext()->endFrame();
    frame->frameIndex = _renderFrameCount;
    frame->framebuffer = finalFramebuffer;
    // Return the framebuffer to the cache (if the cache still exists) once the
    // consumer is done with the frame.
    frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer) {
        auto frameBufferCache = DependencyManager::get<FramebufferCache>();
        if (frameBufferCache) {
            frameBufferCache->releaseFramebuffer(framebuffer);
        }
    };
    // deliver final scene rendering commands to the display plugin
    {
        PROFILE_RANGE(render, "/pluginOutput");
        PerformanceTimer perfTimer("pluginOutput");
        _renderLoopCounter.increment();
        displayPlugin->submitFrame(frame);
    }

    // Reset the framebuffer and stereo state
    renderArgs._blitFramebuffer.reset();
    renderArgs._context->enableStereo(false);

    {
        auto stats = Stats::getInstance();
        if (stats) {
            stats->setRenderDetails(renderArgs._details);
        }
    }

    uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
    _frameTimingsScriptingInterface.addValue(lastPaintDuration);
}
// Mutate the shared per-frame render state under the lock that the render
// thread also takes in render_performFrame().
void GraphicsEngine::editRenderArgs(RenderArgsEditor editor) {
    QMutexLocker argsLock(&_renderArgsMutex);
    editor(_appRenderArgs);
}

View file

@ -0,0 +1,90 @@
//
// GraphicsEngine.h
//
// Created by Sam Gateau on 29/6/2018.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_GraphicsEngine_h
#define hifi_GraphicsEngine_h
#include <gl/OffscreenGLCanvas.h>
#include <gl/GLWidget.h>
#include <qmutex.h>
#include <render/Engine.h>
#include <OctreeConstants.h>
#include <shared/RateCounter.h>
#include "FrameTimingsScriptingInterface.h"
// Per-frame rendering state shared between the application and the render
// thread; access is guarded by GraphicsEngine::_renderArgsMutex.
struct AppRenderArgs {
    render::Args _renderArgs;
    glm::mat4 _eyeToWorld;
    glm::mat4 _view;                // view matrix handed to gpu::Context::beginFrame
    glm::mat4 _eyeOffsets[2];       // per-eye offsets, used when _isStereo
    glm::mat4 _eyeProjections[2];   // per-eye projections, used when _isStereo
    glm::mat4 _headPose;            // HMD sensor pose handed to gpu::Context::beginFrame
    glm::mat4 _sensorToWorld;
    float _sensorToWorldScale{ 1.0f };
    bool _isStereo{ false };
};

// Callback type for mutating AppRenderArgs under the lock (see editRenderArgs).
using RenderArgsEditor = std::function <void(AppRenderArgs&)>;
// Owns the render scene, render engine, GPU context, and the event handler for
// the dedicated render thread; bridges per-frame state from the application to
// the renderer via editRenderArgs().
class GraphicsEngine {
public:
    GraphicsEngine();
    ~GraphicsEngine();

    // Create the render event handler and the GPU backend on the widget's GL context.
    void initializeGPU(GLWidget*);
    // Build the render task graph and register the scene.
    void initializeRender(bool disableDeferred);
    // Allow the render event handler to start processing render events.
    void startup();
    // Flush scene deletions, shut down the GPU context, release scene and engine.
    void shutdown();

    render::ScenePointer getRenderScene() const { return _renderScene; }
    render::EnginePointer getRenderEngine() const { return _renderEngine; }
    gpu::ContextPointer getGPUContext() const { return _gpuContext; }

    // Same as the one in application
    bool shouldPaint() const;
    // Atomically claim the right to post the next Render event; false if one is pending.
    bool checkPendingRenderEvent();

    size_t getRenderFrameCount() const { return _renderFrameCount; }
    float getRenderLoopRate() const { return _renderLoopCounter.rate(); }

    // Feed the graphics engine with a new frame configuration
    void editRenderArgs(RenderArgsEditor editor);

private:
    // Thread specific calls
    void render_performFrame();
    void render_runRenderFrame(RenderArgs* renderArgs);

protected:
    // Guards _appRenderArgs, which is shared with the render thread.
    mutable QMutex _renderArgsMutex{ QMutex::Recursive };
    AppRenderArgs _appRenderArgs;

    RateCounter<500> _renderLoopCounter;

    uint32_t _renderFrameCount{ 0 };
    render::ScenePointer _renderScene{ new render::Scene(glm::vec3(-0.5f * (float)TREE_SCALE), (float)TREE_SCALE) };
    render::EnginePointer _renderEngine{ new render::RenderEngine() };

    gpu::ContextPointer _gpuContext; // initialized during window creation

    QObject* _renderEventHandler{ nullptr };
    friend class RenderEventHandler;

    FrameTimingsScriptingInterface _frameTimingsScriptingInterface;

    friend class Application;
};
#endif // hifi_GraphicsEngine_h

View file

@ -0,0 +1,58 @@
//
// RenderEventHandler.cpp
//
// Created by Bradley Austin Davis on 29/6/2018.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "RenderEventHandler.h"
#include "Application.h"
#include <shared/GlobalAppProperties.h>
#include <shared/QtHelpers.h>
#include "CrashHandler.h"
// Stores the frame-gating and frame-rendering callbacks, then migrates this
// handler to a new, highest-priority "RenderThread".
RenderEventHandler::RenderEventHandler(CheckCall checkCall, RenderCall renderCall) :
    _checkCall(checkCall),
    _renderCall(renderCall)
{
    // Transfer to a new thread
    moveToNewNamedThread(this, "RenderThread", [this](QThread* renderThread) {
        // Mark the render thread as one that must never be blocked on.
        hifi::qt::addBlockingForbiddenThread("Render", renderThread);
        // Start the frame timer before the first frame is rendered.
        _lastTimeRendered.start();
    }, std::bind(&RenderEventHandler::initialize, this), QThread::HighestPriority);
}
// Runs once on the render thread after migration: names the thread for
// debuggers/profilers and records its id in the crash annotations.
void RenderEventHandler::initialize() {
    setObjectName("Render");
    PROFILE_SET_THREAD_NAME("Render");
    setCrashAnnotation("render_thread_id", std::to_string((size_t)QThread::currentThreadId()));
}
// Clear the pending-render flag so the next Render event can be queued again.
void RenderEventHandler::resumeThread() {
    _pendingRenderEvent.store(false);
}
// Perform one frame: restart the frame timer and invoke the render callback,
// but only when the gating check allows rendering right now.
void RenderEventHandler::render() {
    if (!_checkCall()) {
        return;
    }
    _lastTimeRendered.start();
    _renderCall();
}
// Qt event hook: consume ApplicationEvent::Render events here; everything
// else is forwarded to the QObject base implementation.
bool RenderEventHandler::event(QEvent* event) {
    if ((int)event->type() == ApplicationEvent::Render) {
        render();
        // Allow the next Render event to be posted.
        _pendingRenderEvent.store(false);
        return true;
    }
    return Parent::event(event);
}

View file

@ -0,0 +1,52 @@
//
// RenderEventHandler.h
//
// Created by Bradley Austin Davis on 29/6/2018.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_RenderEventHandler_h
#define hifi_RenderEventHandler_h
#include <QEvent>
#include <QElapsedTimer>
#include "gl/OffscreenGLCanvas.h"
// Custom Qt event types used by the application's event handlers.
// Values start just past QEvent::User; declaration order fixes the values.
enum ApplicationEvent {
    // Execute a lambda function
    Lambda = QEvent::User + 1,
    // Trigger the next render
    Render,
    // Trigger the next idle
    Idle,
};
// QObject that lives on a dedicated render thread; frames are driven by
// posting ApplicationEvent::Render events to it (see RenderEventHandler.cpp).
class RenderEventHandler : public QObject {
    using Parent = QObject;
    Q_OBJECT
public:
    using CheckCall = std::function <bool()>;   // returns true when a frame should be rendered
    using RenderCall = std::function <void()>;  // performs the actual frame render

    CheckCall _checkCall;
    RenderCall _renderCall;

    RenderEventHandler(CheckCall checkCall, RenderCall renderCall);

    QElapsedTimer _lastTimeRendered;                // restarted each time a frame is rendered
    std::atomic<bool> _pendingRenderEvent{ true };  // true while a Render event is queued/handled

    // Clears _pendingRenderEvent so a new Render event may be posted.
    void resumeThread();

private:
    void initialize();                   // per-thread setup, runs on the render thread
    void render();                       // executes one frame via _renderCall
    bool event(QEvent* event) override;  // consumes ApplicationEvent::Render
};
#endif // hifi_RenderEventHandler_h

View file

@ -0,0 +1,138 @@
//
// WorldBox.cpp
//
// Created by Sam Gateau on 01/07/2018.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "WorldBox.h"
#include "OctreeConstants.h"
render::ItemID WorldBoxRenderData::_item{ render::Item::INVALID_ITEM_ID };
namespace render {
    // Payload glue so the render scene can treat WorldBoxRenderData as an
    // opaque item, tagged for both tag-0 and tag-1 views.
    template <> const ItemKey payloadGetKey(const WorldBoxRenderData::Pointer& stuff) { return ItemKey::Builder::opaqueShape().withTagBits(ItemKey::TAG_BITS_0 | ItemKey::TAG_BITS_1); }
    // Empty bound: the world box is not spatially culled.
    template <> const Item::Bound payloadGetBound(const WorldBoxRenderData::Pointer& stuff) { return Item::Bound(); }
    template <> void payloadRender(const WorldBoxRenderData::Pointer& stuff, RenderArgs* args) {
        // Only draw when the WorldAxes debug menu option is checked.
        if (Menu::getInstance()->isOptionChecked(MenuOption::WorldAxes)) {
            PerformanceTimer perfTimer("worldBox");
            auto& batch = *args->_batch;
            // Bind the simple program before issuing the line/shape draws.
            DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch);
            WorldBoxRenderData::renderWorldBox(args, batch);
        }
    }
}
// Draws the world-space debug visualization into `batch`: the three
// color-coded axes from the origin (solid toward +, dashed toward -), grey
// outlines of the domain boundary on each center plane, a wire cube for the
// whole domain, and small spheres as one-meter markers. Assumes a simple
// program is already bound (see payloadRender above).
void WorldBoxRenderData::renderWorldBox(RenderArgs* args, gpu::Batch& batch) {
    auto geometryCache = DependencyManager::get<GeometryCache>();

    // Show center of world
    static const glm::vec3 RED(1.0f, 0.0f, 0.0f);
    static const glm::vec3 GREEN(0.0f, 1.0f, 0.0f);
    static const glm::vec3 BLUE(0.0f, 0.0f, 1.0f);
    static const glm::vec3 GREY(0.5f, 0.5f, 0.5f);
    static const glm::vec4 GREY4(0.5f, 0.5f, 0.5f, 1.0f);
    static const glm::vec4 DASHED_RED(1.0f, 0.0f, 0.0f, 1.0f);
    static const glm::vec4 DASHED_GREEN(0.0f, 1.0f, 0.0f, 1.0f);
    static const glm::vec4 DASHED_BLUE(0.0f, 0.0f, 1.0f, 1.0f);
    static const float DASH_LENGTH = 1.0f;
    static const float GAP_LENGTH = 1.0f;
    auto transform = Transform{};

    // Geometry ids are allocated once on first render and reused every frame.
    static std::array<int, 18> geometryIds;
    static std::once_flag initGeometryIds;
    std::call_once(initGeometryIds, [&] {
        for (size_t i = 0; i < geometryIds.size(); ++i) {
            geometryIds[i] = geometryCache->allocateID();
        }
    });

    batch.setModelTransform(transform);

    // Axes through the origin: solid toward +, dashed toward -.
    geometryCache->renderLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(HALF_TREE_SCALE, 0.0f, 0.0f), RED, geometryIds[0]);
    geometryCache->renderDashedLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(-HALF_TREE_SCALE, 0.0f, 0.0f), DASHED_RED,
        DASH_LENGTH, GAP_LENGTH, geometryIds[1]);

    geometryCache->renderLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, HALF_TREE_SCALE, 0.0f), GREEN, geometryIds[2]);
    geometryCache->renderDashedLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, -HALF_TREE_SCALE, 0.0f), DASHED_GREEN,
        DASH_LENGTH, GAP_LENGTH, geometryIds[3]);

    geometryCache->renderLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, HALF_TREE_SCALE), BLUE, geometryIds[4]);
    geometryCache->renderDashedLine(batch, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, -HALF_TREE_SCALE), DASHED_BLUE,
        DASH_LENGTH, GAP_LENGTH, geometryIds[5]);

    // X center boundaries
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, -HALF_TREE_SCALE, 0.0f),
        glm::vec3(HALF_TREE_SCALE, -HALF_TREE_SCALE, 0.0f), GREY,
        geometryIds[6]);
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, -HALF_TREE_SCALE, 0.0f),
        glm::vec3(-HALF_TREE_SCALE, HALF_TREE_SCALE, 0.0f), GREY,
        geometryIds[7]);
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, HALF_TREE_SCALE, 0.0f),
        glm::vec3(HALF_TREE_SCALE, HALF_TREE_SCALE, 0.0f), GREY,
        geometryIds[8]);
    geometryCache->renderLine(batch, glm::vec3(HALF_TREE_SCALE, -HALF_TREE_SCALE, 0.0f),
        glm::vec3(HALF_TREE_SCALE, HALF_TREE_SCALE, 0.0f), GREY,
        geometryIds[9]);

    // Z center boundaries
    geometryCache->renderLine(batch, glm::vec3(0.0f, -HALF_TREE_SCALE, -HALF_TREE_SCALE),
        glm::vec3(0.0f, -HALF_TREE_SCALE, HALF_TREE_SCALE), GREY,
        geometryIds[10]);
    geometryCache->renderLine(batch, glm::vec3(0.0f, -HALF_TREE_SCALE, -HALF_TREE_SCALE),
        glm::vec3(0.0f, HALF_TREE_SCALE, -HALF_TREE_SCALE), GREY,
        geometryIds[11]);
    geometryCache->renderLine(batch, glm::vec3(0.0f, HALF_TREE_SCALE, -HALF_TREE_SCALE),
        glm::vec3(0.0f, HALF_TREE_SCALE, HALF_TREE_SCALE), GREY,
        geometryIds[12]);
    geometryCache->renderLine(batch, glm::vec3(0.0f, -HALF_TREE_SCALE, HALF_TREE_SCALE),
        glm::vec3(0.0f, HALF_TREE_SCALE, HALF_TREE_SCALE), GREY,
        geometryIds[13]);

    // Center boundaries
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, 0.0f, -HALF_TREE_SCALE),
        glm::vec3(-HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE), GREY,
        geometryIds[14]);
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, 0.0f, -HALF_TREE_SCALE),
        glm::vec3(HALF_TREE_SCALE, 0.0f, -HALF_TREE_SCALE), GREY,
        geometryIds[15]);
    geometryCache->renderLine(batch, glm::vec3(HALF_TREE_SCALE, 0.0f, -HALF_TREE_SCALE),
        glm::vec3(HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE), GREY,
        geometryIds[16]);
    geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE),
        glm::vec3(HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE), GREY,
        geometryIds[17]);

    // Wire cube around the whole domain.
    geometryCache->renderWireCubeInstance(args, batch, GREY4);

    // Draw meter markers along the 3 axis to help with measuring things
    const float MARKER_DISTANCE = 1.0f;
    const float MARKER_RADIUS = 0.05f;

    // Origin marker (red), then one marker one meter out along each axis,
    // plus a grey marker at (1, 0, 1).
    transform = Transform().setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, RED);

    transform = Transform().setTranslation(glm::vec3(MARKER_DISTANCE, 0.0f, 0.0f)).setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, RED);

    transform = Transform().setTranslation(glm::vec3(0.0f, MARKER_DISTANCE, 0.0f)).setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, GREEN);

    transform = Transform().setTranslation(glm::vec3(0.0f, 0.0f, MARKER_DISTANCE)).setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, BLUE);

    transform = Transform().setTranslation(glm::vec3(MARKER_DISTANCE, 0.0f, MARKER_DISTANCE)).setScale(MARKER_RADIUS);
    batch.setModelTransform(transform);
    geometryCache->renderSolidSphereInstance(args, batch, GREY);
}

View file

@ -0,0 +1,43 @@
//
// WorldBox.h
//
// Created by Sam Gateau on 01/07/2018.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_WorldBox_h
#define hifi_WorldBox_h
#include <PerfStat.h>
#include <gpu/Batch.h>
#include <render/Forward.h>
#include <render/Item.h>
#include <GeometryCache.h>
#include "Menu.h"
// Render payload that draws the world-box debug geometry (axes, domain
// boundaries, meter markers); see WorldBox.cpp for the implementation.
class WorldBoxRenderData {
public:
    typedef render::Payload<WorldBoxRenderData> Payload;
    typedef Payload::DataPointer Pointer;

    int _val = 0; // NOTE(review): not referenced by the visible renderer code -- confirm before removing
    static render::ItemID _item; // unique WorldBoxRenderData
    static void renderWorldBox(RenderArgs* args, gpu::Batch& batch);
};
// Payload trait specializations for WorldBoxRenderData (defined in WorldBox.cpp).
namespace render {
    template <> const ItemKey payloadGetKey(const WorldBoxRenderData::Pointer& stuff);
    template <> const Item::Bound payloadGetBound(const WorldBoxRenderData::Pointer& stuff);
    template <> void payloadRender(const WorldBoxRenderData::Pointer& stuff, RenderArgs* args);
}
#endif // hifi_WorldBox_h

View file

@ -55,8 +55,6 @@ ApplicationOverlay::~ApplicationOverlay() {
// Renders the overlays either to a texture or to the screen
void ApplicationOverlay::renderOverlay(RenderArgs* renderArgs) {
PROFILE_RANGE(render, __FUNCTION__);
PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings), "ApplicationOverlay::displayOverlay()");
buildFramebufferObject();
if (!_overlayFramebuffer) {

View file

@ -2103,8 +2103,9 @@ void AvatarData::setJointMappingsFromNetworkReply() {
// before we process this update, make sure that the skeleton model URL hasn't changed
// since we made the FST request
if (networkReply->url() != _skeletonModelURL) {
if (networkReply->error() != QNetworkReply::NoError || networkReply->url() != _skeletonModelURL) {
qCDebug(avatars) << "Refusing to set joint mappings for FST URL that does not match the current URL";
networkReply->deleteLater();
return;
}

View file

@ -1490,7 +1490,7 @@ protected:
bool _isClientAvatar { false };
// null unless MyAvatar or ScriptableAvatar sending traits data to mixer
std::unique_ptr<ClientTraitsHandler> _clientTraitsHandler;
std::unique_ptr<ClientTraitsHandler, LaterDeleter> _clientTraitsHandler;
template <typename T, typename F>
T readLockWithNamedJointIndex(const QString& name, const T& defaultValue, F f) const {

View file

@ -22,7 +22,7 @@ ClientTraitsHandler::ClientTraitsHandler(AvatarData* owningAvatar) :
_owningAvatar(owningAvatar)
{
auto nodeList = DependencyManager::get<NodeList>();
QObject::connect(nodeList.data(), &NodeList::nodeAdded, [this](SharedNodePointer addedNode){
QObject::connect(nodeList.data(), &NodeList::nodeAdded, this, [this](SharedNodePointer addedNode) {
if (addedNode->getType() == NodeType::AvatarMixer) {
resetForNewMixer();
}

View file

@ -14,8 +14,6 @@ void main(void) {
ivec2 texCoord = ivec2(floor(varTexCoord0 * vec2(textureData.textureSize)));
texCoord.x /= 2;
int row = int(floor(gl_FragCoord.y));
if (row % 2 > 0) {
texCoord.x += (textureData.textureSize.x / 2);
}
texCoord.x += int(row % 2 > 0) * (textureData.textureSize.x / 2);
outFragColor = vec4(pow(texelFetch(colorMap, texCoord, 0).rgb, vec3(2.2)), 1.0);
}

View file

@ -534,18 +534,26 @@ void OpenGLDisplayPlugin::updateFrameData() {
}
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> OpenGLDisplayPlugin::getHUDOperator() {
return [this](gpu::Batch& batch, const gpu::TexturePointer& hudTexture, bool mirror) {
if (_hudPipeline && hudTexture) {
auto hudPipeline = _hudPipeline;
auto hudMirrorPipeline = _mirrorHUDPipeline;
auto hudStereo = isStereo();
auto hudCompositeFramebufferSize = _compositeFramebuffer->getSize();
std::array<glm::ivec4, 2> hudEyeViewports;
for_each_eye([&](Eye eye) {
hudEyeViewports[eye] = eyeViewport(eye);
});
return [=](gpu::Batch& batch, const gpu::TexturePointer& hudTexture, bool mirror) {
if (hudPipeline && hudTexture) {
batch.enableStereo(false);
batch.setPipeline(mirror ? _mirrorHUDPipeline : _hudPipeline);
batch.setPipeline(mirror ? hudMirrorPipeline : hudPipeline);
batch.setResourceTexture(0, hudTexture);
if (isStereo()) {
if (hudStereo) {
for_each_eye([&](Eye eye) {
batch.setViewportTransform(eyeViewport(eye));
batch.setViewportTransform(hudEyeViewports[eye]);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
} else {
batch.setViewportTransform(ivec4(uvec2(0), _compositeFramebuffer->getSize()));
batch.setViewportTransform(ivec4(uvec2(0), hudCompositeFramebufferSize));
batch.draw(gpu::TRIANGLE_STRIP, 4);
}
}

View file

@ -9,7 +9,7 @@ layout(location=0) out vec4 outFragColor;
float sRGBFloatToLinear(float value) {
const float SRGB_ELBOW = 0.04045;
return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);
return mix(pow((value + 0.055) / 1.055, 2.4), value / 12.92, float(value <= SRGB_ELBOW));
}
vec3 colorToLinearRGB(vec3 srgb) {

View file

@ -420,18 +420,26 @@ void HmdDisplayPlugin::HUDRenderer::updatePipeline() {
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> HmdDisplayPlugin::HUDRenderer::render(HmdDisplayPlugin& plugin) {
updatePipeline();
return [this](gpu::Batch& batch, const gpu::TexturePointer& hudTexture, bool mirror) {
if (pipeline && hudTexture) {
batch.setPipeline(pipeline);
batch.setInputFormat(format);
gpu::BufferView posView(vertices, VERTEX_OFFSET, vertices->getSize(), VERTEX_STRIDE, format->getAttributes().at(gpu::Stream::POSITION)._element);
gpu::BufferView uvView(vertices, TEXTURE_OFFSET, vertices->getSize(), VERTEX_STRIDE, format->getAttributes().at(gpu::Stream::TEXCOORD)._element);
auto hudPipeline = pipeline;
auto hudFormat = format;
auto hudVertices = vertices;
auto hudIndices = indices;
auto hudUniformBuffer = uniformsBuffer;
auto hudUniforms = uniforms;
auto hudIndexCount = indexCount;
return [=](gpu::Batch& batch, const gpu::TexturePointer& hudTexture, bool mirror) {
if (hudPipeline && hudTexture) {
batch.setPipeline(hudPipeline);
batch.setInputFormat(hudFormat);
gpu::BufferView posView(hudVertices, VERTEX_OFFSET, hudVertices->getSize(), VERTEX_STRIDE, hudFormat->getAttributes().at(gpu::Stream::POSITION)._element);
gpu::BufferView uvView(hudVertices, TEXTURE_OFFSET, hudVertices->getSize(), VERTEX_STRIDE, hudFormat->getAttributes().at(gpu::Stream::TEXCOORD)._element);
batch.setInputBuffer(gpu::Stream::POSITION, posView);
batch.setInputBuffer(gpu::Stream::TEXCOORD, uvView);
batch.setIndexBuffer(gpu::UINT16, indices, 0);
uniformsBuffer->setSubData(0, uniforms);
batch.setUniformBuffer(0, uniformsBuffer);
batch.setIndexBuffer(gpu::UINT16, hudIndices, 0);
hudUniformBuffer->setSubData(0, hudUniforms);
batch.setUniformBuffer(0, hudUniformBuffer);
auto compositorHelper = DependencyManager::get<CompositorHelper>();
glm::mat4 modelTransform = compositorHelper->getUiTransform();
@ -441,7 +449,7 @@ std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> HmdDis
batch.setModelTransform(modelTransform);
batch.setResourceTexture(0, hudTexture);
batch.drawIndexed(gpu::TRIANGLES, indexCount);
batch.drawIndexed(gpu::TRIANGLES, hudIndexCount);
}
};
}

View file

@ -80,10 +80,11 @@ float interpolate3Points(float y1, float y2, float y3, float u) {
halfSlope = (y3 - y1) / 2.0f;
float slope12 = y2 - y1;
float slope23 = y3 - y2;
if (abs(halfSlope) > abs(slope12)) {
halfSlope = slope12;
} else if (abs(halfSlope) > abs(slope23)) {
halfSlope = slope23;
{
float check = float(abs(halfSlope) > abs(slope12));
halfSlope = mix(halfSlope, slope12, check);
halfSlope = mix(halfSlope, slope23, (1.0 - check) * float(abs(halfSlope) > abs(slope23)));
}
}

View file

@ -486,6 +486,8 @@ QUuid EntityScriptingInterface::addEntity(const EntityItemProperties& properties
propertiesWithSimID.setLastEditedBy(sessionID);
propertiesWithSimID.setActionData(QByteArray());
bool scalesWithParent = propertiesWithSimID.getScalesWithParent();
propertiesWithSimID = convertPropertiesFromScriptSemantics(propertiesWithSimID, scalesWithParent);
@ -830,6 +832,8 @@ QUuid EntityScriptingInterface::editEntity(QUuid id, const EntityItemProperties&
properties.setClientOnly(entity->getClientOnly());
properties.setOwningAvatarID(entity->getOwningAvatarID());
properties.setActionData(entity->getDynamicData());
// make sure the properties has a type, so that the encode can know which properties to include
properties.setType(entity->getType());

View file

@ -231,7 +231,8 @@ float snoise(vec2 v) {
// Other corners
vec2 i1;
i1 = (x0.x > x0.y) ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
float check = float(x0.x > x0.y);
i1 = vec2(check, 1.0 - check);
vec4 x12 = x0.xyxy + C.xxzz;
x12.xy -= i1;

View file

@ -152,28 +152,28 @@ float fetchScatteringMap(vec2 uv) {
<@func fetchMaterialTexturesCoord0(matKey, texcoord0, albedo, roughness, normal, metallic, emissive, scattering)@>
<@if albedo@>
vec4 <$albedo$> = (((<$matKey$> & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(<$texcoord0$>) : vec4(1.0));
vec4 <$albedo$> = mix(vec4(1.0), fetchAlbedoMap(<$texcoord0$>), float((<$matKey$> & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0));
<@endif@>
<@if roughness@>
float <$roughness$> = (((<$matKey$> & ROUGHNESS_MAP_BIT) != 0) ? fetchRoughnessMap(<$texcoord0$>) : 1.0);
float <$roughness$> = mix(1.0, fetchRoughnessMap(<$texcoord0$>), float((<$matKey$> & ROUGHNESS_MAP_BIT) != 0));
<@endif@>
<@if normal@>
vec3 <$normal$> = (((<$matKey$> & NORMAL_MAP_BIT) != 0) ? fetchNormalMap(<$texcoord0$>) : vec3(0.0, 1.0, 0.0));
vec3 <$normal$> = mix(vec3(0.0, 1.0, 0.0), fetchNormalMap(<$texcoord0$>), float((<$matKey$> & NORMAL_MAP_BIT) != 0));
<@endif@>
<@if metallic@>
float <$metallic$> = (((<$matKey$> & METALLIC_MAP_BIT) != 0) ? fetchMetallicMap(<$texcoord0$>) : 0.0);
float <$metallic$> = float((<$matKey$> & METALLIC_MAP_BIT) != 0) * fetchMetallicMap(<$texcoord0$>);
<@endif@>
<@if emissive@>
vec3 <$emissive$> = (((<$matKey$> & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(<$texcoord0$>) : vec3(0.0));
vec3 <$emissive$> = float((<$matKey$> & EMISSIVE_MAP_BIT) != 0) * fetchEmissiveMap(<$texcoord0$>);
<@endif@>
<@if scattering@>
float <$scattering$> = (((<$matKey$> & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(<$texcoord0$>) : 0.0);
float <$scattering$> = float((<$matKey$> & SCATTERING_MAP_BIT) != 0) * fetchScatteringMap(<$texcoord0$>);
<@endif@>
<@endfunc@>
<@func fetchMaterialTexturesCoord1(matKey, texcoord1, occlusion, lightmap)@>
<@if occlusion@>
float <$occlusion$> = (((<$matKey$> & OCCLUSION_MAP_BIT) != 0) ? fetchOcclusionMap(<$texcoord1$>) : 1.0);
float <$occlusion$> = mix(1.0, fetchOcclusionMap(<$texcoord1$>), float((<$matKey$> & OCCLUSION_MAP_BIT) != 0));
<@endif@>
<@if lightmap@>
vec3 <$lightmap$> = fetchLightmapMap(<$texcoord1$>);
@ -207,20 +207,19 @@ vec3 fetchLightmapMap(vec2 uv) {
<@func evalMaterialAlbedo(fetchedAlbedo, materialAlbedo, matKey, albedo)@>
{
<$albedo$>.xyz = (((<$matKey$> & ALBEDO_VAL_BIT) != 0) ? <$materialAlbedo$> : vec3(1.0));
if (((<$matKey$> & ALBEDO_MAP_BIT) != 0)) {
<$albedo$>.xyz *= <$fetchedAlbedo$>.xyz;
}
<$albedo$>.xyz = mix(vec3(1.0), <$materialAlbedo$>, float((<$matKey$> & ALBEDO_VAL_BIT) != 0));
<$albedo$>.xyz *= mix(vec3(1.0), <$fetchedAlbedo$>.xyz, float((<$matKey$> & ALBEDO_MAP_BIT) != 0));
}
<@endfunc@>
<@func evalMaterialOpacity(fetchedOpacity, materialOpacity, matKey, opacity)@>
{
const float OPACITY_MASK_THRESHOLD = 0.5;
<$opacity$> = (((<$matKey$> & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0) ?
(((<$matKey$> & OPACITY_MASK_MAP_BIT) != 0) ? step(OPACITY_MASK_THRESHOLD, <$fetchedOpacity$>) : <$fetchedOpacity$>) :
1.0) * <$materialOpacity$>;
<$opacity$> = mix(1.0,
mix(<$fetchedOpacity$>,
step(OPACITY_MASK_THRESHOLD, <$fetchedOpacity$>),
float((<$matKey$> & OPACITY_MASK_MAP_BIT) != 0)),
float((<$matKey$> & (OPACITY_TRANSLUCENT_MAP_BIT | OPACITY_MASK_MAP_BIT)) != 0)) * <$materialOpacity$>;
}
<@endfunc@>
@ -241,19 +240,19 @@ vec3 fetchLightmapMap(vec2 uv) {
<@func evalMaterialRoughness(fetchedRoughness, materialRoughness, matKey, roughness)@>
{
<$roughness$> = (((<$matKey$> & ROUGHNESS_MAP_BIT) != 0) ? <$fetchedRoughness$> : <$materialRoughness$>);
<$roughness$> = mix(<$materialRoughness$>, <$fetchedRoughness$>, float((<$matKey$> & ROUGHNESS_MAP_BIT) != 0));
}
<@endfunc@>
<@func evalMaterialMetallic(fetchedMetallic, materialMetallic, matKey, metallic)@>
{
<$metallic$> = (((<$matKey$> & METALLIC_MAP_BIT) != 0) ? <$fetchedMetallic$> : <$materialMetallic$>);
<$metallic$> = mix(<$materialMetallic$>, <$fetchedMetallic$>, float((<$matKey$> & METALLIC_MAP_BIT) != 0));
}
<@endfunc@>
<@func evalMaterialEmissive(fetchedEmissive, materialEmissive, matKey, emissive)@>
{
<$emissive$> = (((<$matKey$> & EMISSIVE_MAP_BIT) != 0) ? <$fetchedEmissive$> : <$materialEmissive$>);
<$emissive$> = mix(<$materialEmissive$>, <$fetchedEmissive$>, float((<$matKey$> & EMISSIVE_MAP_BIT) != 0));
}
<@endfunc@>
@ -265,7 +264,7 @@ vec3 fetchLightmapMap(vec2 uv) {
<@func evalMaterialScattering(fetchedScattering, materialScattering, matKey, scattering)@>
{
<$scattering$> = (((<$matKey$> & SCATTERING_MAP_BIT) != 0) ? <$fetchedScattering$> : <$materialScattering$>);
<$scattering$> = mix(<$materialScattering$>, <$fetchedScattering$>, float((<$matKey$> & SCATTERING_MAP_BIT) != 0));
}
<@endfunc@>

View file

@ -30,11 +30,9 @@ void main(void) {
vec3 color = skybox.color.rgb;
// blend is only set if there is a cubemap
if (skybox.color.a > 0.0) {
color = texture(cubeMap, coord).rgb;
if (skybox.color.a < 1.0) {
color *= skybox.color.rgb;
}
}
float check = float(skybox.color.a > 0.0);
color = mix(color, texture(cubeMap, coord).rgb, check);
color *= mix(vec3(1.0), skybox.color.rgb, check * float(skybox.color.a < 1.0));
_fragColor = vec4(color, 0.0);
}

View file

@ -180,9 +180,7 @@ void Deck::processFrames() {
#ifdef WANT_RECORDING_DEBUG
qCDebug(recordingLog) << "Setting timer for next processing " << nextInterval;
#endif
_timer.singleShot(nextInterval, [this] {
processFrames();
});
_timer.singleShot(nextInterval, this, &Deck::processFrames);
}
void Deck::removeClip(const ClipConstPointer& clip) {

View file

@ -303,7 +303,7 @@ AmbientOcclusionEffect::AmbientOcclusionEffect() {
}
void AmbientOcclusionEffect::configure(const Config& config) {
DependencyManager::get<DeferredLightingEffect>()->setAmbientOcclusionEnabled(config.enabled);
DependencyManager::get<DeferredLightingEffect>()->setAmbientOcclusionEnabled(config.isEnabled());
bool shouldUpdateBlurs = false;
bool shouldUpdateTechnique = false;

View file

@ -78,7 +78,6 @@ using AmbientOcclusionFramebufferPointer = std::shared_ptr<AmbientOcclusionFrame
class AmbientOcclusionEffectConfig : public render::GPUJobConfig::Persistent {
Q_OBJECT
Q_PROPERTY(bool enabled MEMBER enabled NOTIFY dirty)
Q_PROPERTY(bool horizonBased MEMBER horizonBased NOTIFY dirty)
Q_PROPERTY(bool ditheringEnabled MEMBER ditheringEnabled NOTIFY dirty)
Q_PROPERTY(bool borderingEnabled MEMBER borderingEnabled NOTIFY dirty)

View file

@ -282,7 +282,7 @@ void BloomEffect::configure(const Config& config) {
for (auto i = 0; i < BLOOM_BLUR_LEVEL_COUNT; i++) {
blurName.back() = '0' + i;
auto blurConfig = config.getConfig<render::BlurGaussian>(blurName);
blurConfig->setProperty("filterScale", 1.0f);
blurConfig->filterScale = 1.0f;
}
}

View file

@ -25,7 +25,6 @@
class DebugDeferredBufferConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(bool enabled MEMBER enabled)
Q_PROPERTY(int mode MEMBER mode WRITE setMode)
Q_PROPERTY(glm::vec4 size MEMBER size NOTIFY dirty)
public:

View file

@ -76,15 +76,10 @@ DeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {
// Diffuse color and unpack the mode and the metallicness
frag.albedo = diffuseVal.xyz;
frag.scattering = 0.0;
unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);
frag.obscurance = min(specularVal.w, frag.obscurance);
if (frag.mode == FRAG_MODE_SCATTERING) {
frag.scattering = specularVal.x;
}
frag.scattering = float(frag.mode == FRAG_MODE_SCATTERING) * specularVal.x;
frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);
return frag;
@ -122,14 +117,11 @@ DeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {
<$declareDeferredFrameTransform()$>
vec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {
int side = 0;
if (isStereo()) {
if (texcoord.x > 0.5) {
texcoord.x -= 0.5;
side = 1;
}
texcoord.x *= 2.0;
}
float check = float(isStereo());
float check2 = check * float(texcoord.x > 0.5);
texcoord.x -= check2 * 0.5;
int side = int(check2);
texcoord.x *= 1.0 + check;
return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);
}
@ -142,19 +134,17 @@ vec4 unpackDeferredPositionFromZdb(vec2 texcoord) {
vec4 unpackDeferredPositionFromZeye(vec2 texcoord) {
float Zeye = -texture(linearZeyeMap, texcoord).x;
int side = 0;
if (isStereo()) {
if (texcoord.x > 0.5) {
texcoord.x -= 0.5;
side = 1;
}
texcoord.x *= 2.0;
}
float check = float(isStereo());
float check2 = check * float(texcoord.x > 0.5);
texcoord.x -= check2 * 0.5;
int side = int(check2);
texcoord.x *= 1.0 + check;
return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);
}
DeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {
float depthValue = texture(depthMap, texcoord).r;
DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);

View file

@ -32,9 +32,11 @@ void packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness
if (alpha != 1.0) {
discard;
}
_fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));
float check = float(scattering > 0.0);
_fragColor0 = vec4(albedo, mix(packShadedMetallic(metallic), packScatteringMetallic(metallic), check));
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor2 = vec4(((scattering > 0.0) ? vec3(scattering) : emissive), occlusion);
_fragColor2 = vec4(mix(emissive, vec3(scattering), check), occlusion);
_fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);
}

View file

@ -606,11 +606,16 @@ void RenderDeferredCleanup::run(const render::RenderContextPointer& renderContex
}
}
RenderDeferred::RenderDeferred(bool renderShadows):
_renderShadows(renderShadows)
{
DependencyManager::get<DeferredLightingEffect>()->init();
}
void RenderDeferred::configure(const Config& config) {
}
void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs& inputs) {
PROFILE_RANGE(render, "DeferredLighting");
auto deferredTransform = inputs.get0();
auto deferredFramebuffer = inputs.get1();

View file

@ -189,8 +189,7 @@ public:
using Config = RenderDeferredConfig;
using JobModel = render::Job::ModelI<RenderDeferred, Inputs, Config>;
RenderDeferred() {}
RenderDeferred(bool renderShadows) : _renderShadows(renderShadows) {}
RenderDeferred(bool renderShadows = false);
void configure(const Config& config);

View file

@ -116,6 +116,7 @@ bool isStereo() {
float getStereoSideWidth(int resolutionLevel) {
return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);
}
float getStereoSideHeight(int resolutionLevel) {
return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);
}
@ -125,7 +126,7 @@ vec2 getStereoSideSize(int resolutionLevel) {
}
ivec4 getStereoSideInfoFromWidth(int xPos, int sideWidth) {
return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());
return ivec4((1 - int(xPos < sideWidth)) * ivec2(1, sideWidth), sideWidth, isStereo());
}
ivec4 getStereoSideInfo(int xPos, int resolutionLevel) {

View file

@ -85,10 +85,8 @@ float evalFadeGradient(FadeObjectParams params, vec3 position) {
}
float evalFadeAlpha(FadeObjectParams params, vec3 position) {
float alpha = evalFadeGradient(params, position)-params.threshold;
if (fadeParameters[params.category]._isInverted != 0) {
alpha = -alpha;
}
float alpha = evalFadeGradient(params, position) - params.threshold;
alpha *= 1.0 - 2.0 * float(fadeParameters[params.category]._isInverted);
return alpha;
}
@ -166,4 +164,4 @@ layout(location=RENDER_UTILS_ATTR_FADE3) out vec4 _fadeData3;
_fadeData3 = inTexCoord4;
<@endfunc@>
<@endif@>
<@endif@>

View file

@ -25,14 +25,13 @@ LAYOUT(binding=RENDER_UTILS_TEXTURE_HAZE_LINEAR_DEPTH) uniform sampler2D linearD
vec4 unpackPositionFromZeye(vec2 texcoord) {
float Zeye = -texture(linearDepthMap, texcoord).x;
int side = 0;
if (isStereo()) {
if (texcoord.x > 0.5) {
texcoord.x -= 0.5;
side = 1;
}
texcoord.x *= 2.0;
}
float check = float(isStereo());
float check2 = check * float(texcoord.x > 0.5);
texcoord.x -= check2 * 0.5;
int side = int(check2);
texcoord.x *= 1.0 + check;
return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);
}

View file

@ -63,20 +63,17 @@ vec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3
// Height at which haze density is reduced by 95% (default set to 2000.0 for safety ,this should never happen)
float height_95p = 2000.0;
const float log_p_005 = log(0.05);
if (hazeParams.hazeKeyLightAltitudeFactor > 0.0f) {
height_95p = -log_p_005 / hazeParams.hazeKeyLightAltitudeFactor;
}
// Note that we need the sine to be positive
float sin_pitch = abs(lightDirectionWS.y);
float distance;
const float minimumSinPitch = 0.001;
if (sin_pitch < minimumSinPitch) {
distance = height_95p / minimumSinPitch;
} else {
distance = height_95p / sin_pitch;
}
float sin_pitch = abs(lightDirectionWS.y);
sin_pitch = max(sin_pitch, minimumSinPitch);
float distance = height_95p / sin_pitch;
// Integration is from the fragment towards the light source
// Note that the haze base reference affects only the haze density as function of altitude
@ -128,6 +125,7 @@ vec4 computeHazeColor(vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePosition
}
vec4 potentialFragColor;
const float EPSILON = 0.0000001f;
if ((hazeParams.hazeMode & HAZE_MODE_IS_MODULATE_COLOR) == HAZE_MODE_IS_MODULATE_COLOR) {
// Compute separately for each colour
@ -143,9 +141,9 @@ vec4 computeHazeColor(vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePosition
const float slopeThreshold = 0.01;
float deltaHeight = fragPositionWS.y - eyeWorldHeight;
if (abs(deltaHeight) > slopeThreshold) {
float t = hazeParams.hazeHeightFactor * deltaHeight;
hazeIntegral *= (1.0 - exp (-t)) / t;
float t = hazeParams.hazeHeightFactor * deltaHeight;
if (abs(t) > EPSILON) {
hazeIntegral *= mix(1.0, (1.0 - exp(-t)) / t, float(abs(deltaHeight) > slopeThreshold));
}
vec3 hazeAmount = 1.0 - exp(-hazeIntegral);
@ -171,13 +169,9 @@ vec4 computeHazeColor(vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePosition
const float slopeThreshold = 0.01;
float deltaHeight = fragPositionWS.y - eyeWorldHeight;
if (abs(deltaHeight) > slopeThreshold) {
float t = hazeParams.hazeHeightFactor * deltaHeight;
// Protect from wild values
const float EPSILON = 0.0000001f;
if (abs(t) > EPSILON) {
hazeIntegral *= (1.0 - exp (-t)) / t;
}
float t = hazeParams.hazeHeightFactor * deltaHeight;
if (abs(t) > EPSILON) {
hazeIntegral *= mix(1.0, (1.0 - exp(-t)) / t, float(abs(deltaHeight) > slopeThreshold));
}
float hazeAmount = 1.0 - exp(-hazeIntegral);
@ -189,9 +183,7 @@ vec4 computeHazeColor(vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePosition
// Mix with background at far range
const float BLEND_DISTANCE = 27000.0f;
vec4 outFragColor = potentialFragColor;
if (distance > BLEND_DISTANCE) {
outFragColor.a *= hazeParams.backgroundBlend;
}
outFragColor.a *= mix(1.0, hazeParams.backgroundBlend, float(distance > BLEND_DISTANCE));
return outFragColor;
}

View file

@ -45,11 +45,9 @@ void main(void) {
highlightedDepth = -evalZeyeFromZdb(highlightedDepth);
sceneDepth = -evalZeyeFromZdb(sceneDepth);
if (sceneDepth < highlightedDepth) {
outFragColor = vec4(params._fillOccludedColor, params._fillOccludedAlpha);
} else {
outFragColor = vec4(params._fillUnoccludedColor, params._fillUnoccludedAlpha);
}
outFragColor = mix(vec4(params._fillUnoccludedColor, params._fillUnoccludedAlpha),
vec4(params._fillOccludedColor, params._fillOccludedAlpha),
float(sceneDepth < highlightedDepth));
<@else@>
discard;
<@endif@>
@ -67,14 +65,13 @@ void main(void) {
float outlinedDepth = 0.0;
float sumOutlineDepth = 0.0;
for (y=0 ; y<params._blurKernelSize ; y++) {
for (y = 0; y < params._blurKernelSize; y++) {
uv = lineStartUv;
lineStartUv.y += deltaUv.y;
if (uv.y>=0.0 && uv.y<=1.0) {
for (x=0 ; x<params._blurKernelSize ; x++) {
if (uv.x>=0.0 && uv.x<=1.0)
{
if (uv.y >= 0.0 && uv.y <= 1.0) {
for (x = 0; x < params._blurKernelSize; x++) {
if (uv.x >= 0.0 && uv.x <= 1.0) {
outlinedDepth = texture(highlightedDepthMap, uv).x;
float touch = (outlinedDepth < FAR_Z) ? 1.0 : 0.0;
sumOutlineDepth = max(outlinedDepth * touch, sumOutlineDepth);
@ -86,11 +83,7 @@ void main(void) {
}
}
if (intensity > 0.0) {
// sumOutlineDepth /= intensity;
} else {
sumOutlineDepth = FAR_Z;
}
sumOutlineDepth = mix(FAR_Z, sumOutlineDepth, float(intensity > 0.0));
intensity /= weight;
if (intensity < OPACITY_EPSILON) {
@ -106,11 +99,9 @@ void main(void) {
sceneDepth = -evalZeyeFromZdb(sceneDepth);
// Are we occluded?
if (sceneDepth < outlinedDepth) {
outFragColor = vec4(params._outlineOccludedColor, intensity * params._outlineOccludedAlpha);
} else {
outFragColor = vec4(params._outlineUnoccludedColor, intensity * params._outlineUnoccludedAlpha);
}
outFragColor = mix(vec4(params._outlineUnoccludedColor, intensity * params._outlineUnoccludedAlpha),
vec4(params._outlineOccludedColor, intensity * params._outlineOccludedAlpha),
float(sceneDepth < outlinedDepth));
}
}

View file

@ -83,7 +83,7 @@ int clusterGrid_getClusterLightId(int index, int offset) {
return element;
*/
int element = _clusterGridContent[GRID_FETCH_BUFFER((elementIndex >> 1))];
return (((elementIndex & 0x00000001) == 1) ? (element >> 16) : element) & 0x0000FFFF;
return (element >> (16 * int((elementIndex & 0x00000001) == 1))) & 0x0000FFFF;
}

View file

@ -5,6 +5,14 @@
#define float_exp2 exp2
#endif
#ifdef __cplusplus
# define _MIN glm::min
# define _ABS(x) (int)fabsf(x)
#else
# define _MIN min
# define _ABS abs
#endif
float frustumGrid_depthRampGridToVolume(float ngrid) {
// return ngrid;
// return sqrt(ngrid);
@ -87,14 +95,9 @@ ivec3 frustumGrid_indexToCluster(int index) {
}
vec3 frustumGrid_clusterPosToEye(vec3 clusterPos) {
vec3 cvpos = clusterPos;
vec3 volumePos = frustumGrid_gridToVolume(cvpos, frustumGrid.dims);
vec3 eyePos = frustumGrid_volumeToEye(volumePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);
return eyePos;
}
@ -116,27 +119,19 @@ int frustumGrid_eyeDepthToClusterLayer(float eyeZ) {
int gridZ = int(frustumGrid_volumeToGridDepth(volumeZ, frustumGrid.dims));
if (gridZ >= frustumGrid.dims.z) {
gridZ = frustumGrid.dims.z;
}
return gridZ;
return _MIN(gridZ, frustumGrid.dims.z);
}
ivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {
// make sure the frontEyePos is always in the front to eval the grid pos correctly
vec3 frontEyePos = eyePos;
frontEyePos.z = (eyePos.z > 0.0f ? -eyePos.z : eyePos.z);
frontEyePos.z = -_ABS(eyePos.z);
vec3 volumePos = frustumGrid_eyeToVolume(frontEyePos, frustumGrid.eyeToGridProj, frustumGrid.rangeNear, frustumGrid.rangeFar);
vec3 gridPos = frustumGrid_volumeToGrid(volumePos, frustumGrid.dims);
if (gridPos.z >= float(frustumGrid.dims.z)) {
gridPos.z = float(frustumGrid.dims.z);
}
gridPos.z = _MIN(gridPos.z, float(frustumGrid.dims.z));
ivec3 igridPos = ivec3(floor(gridPos));
@ -154,7 +149,7 @@ ivec3 frustumGrid_eyeToClusterPos(vec3 eyePos) {
int frustumGrid_eyeToClusterDirH(vec3 eyeDir) {
if (eyeDir.z >= 0.0f) {
return (eyeDir.x > 0.0f ? frustumGrid.dims.x : -1);
return int(eyeDir.x > 0.0f) * (frustumGrid.dims.x + 1) - 1;
}
float eyeDepth = -eyeDir.z;
@ -168,7 +163,7 @@ int frustumGrid_eyeToClusterDirH(vec3 eyeDir) {
int frustumGrid_eyeToClusterDirV(vec3 eyeDir) {
if (eyeDir.z >= 0.0f) {
return (eyeDir.y > 0.0f ? frustumGrid.dims.y : -1);
return int(eyeDir.y > 0.0f) * (frustumGrid.dims.y + 1) - 1;
}
float eyeDepth = -eyeDir.z;
@ -196,4 +191,4 @@ vec4 frustumGrid_worldToEye(vec4 worldPos) {
// <@if 1@>
// Trigger Scribe include
// <@endif@> <!def that !> End C++ compatible
// <@endif@> <!def that !> End C++ compatible

View file

@ -41,12 +41,10 @@ void evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,
specular *= lightEnergy * isSpecularEnabled();
if (isShowLightContour() > 0.0) {
// Show edge
// Show edges
float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);
if (edge < 1.0) {
float edgeCoord = exp2(-8.0*edge*edge);
diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));
}
float edgeCoord = exp2(-8.0 * edge * edge);
diffuse = mix(diffuse, vec3(edgeCoord * edgeCoord * getLightColor(light)), float(edge < 1.0));
}
}
@ -59,13 +57,11 @@ bool evalLightPointEdge(out vec3 color, Light light, vec4 fragLightDirLen, vec3
// Allright we re valid in the volume
float fragLightDistance = fragLightDirLen.w;
vec3 fragLightDir = fragLightDirLen.xyz;
// Show edges
float edge = abs(2.0 * ((lightVolume_getRadius(light.volume) - fragLightDistance) / (0.1)) - 1.0);
if (edge < 1.0) {
float edgeCoord = exp2(-8.0*edge*edge);
color = vec3(edgeCoord * edgeCoord * getLightColor(light));
}
float edgeCoord = exp2(-8.0 * edge * edge);
color = mix(color, vec3(edgeCoord * edgeCoord * getLightColor(light)), float(edge < 1.0));
return (edge < 1.0);
}

View file

@ -46,11 +46,9 @@ void evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,
float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);
float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));
float edgeDist = min(edgeDistR, edgeDistS);
float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);
if (edge < 1.0) {
float edgeCoord = exp2(-8.0*edge*edge);
diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));
}
float edge = abs(2.0 * (edgeDist * 10.0) - 1.0);
float edgeCoord = exp2(-8.0 * edge * edge);
diffuse = mix(diffuse, vec3(edgeCoord * edgeCoord * getLightColor(light)), float(edge < 1.0));
}
}
@ -67,15 +65,11 @@ bool evalLightSpotEdge(out vec3 color, Light light, vec4 fragLightDirLen, float
float edgeDistR = (lightVolume_getRadius(light.volume) - fragLightDistance);
float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -lightVolume_getSpotOutsideNormal2(light.volume));
float edgeDist = min(edgeDistR, edgeDistS);
float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);
if (edge < 1.0) {
float edgeCoord = exp2(-8.0*edge*edge);
color = vec3(edgeCoord * edgeCoord * getLightColor(light));
}
float edge = abs(2.0 * (edgeDist * 10.0) - 1.0);
float edgeCoord = exp2(-8.0 * edge * edge);
color = mix(color, vec3(edgeCoord * edgeCoord * getLightColor(light)), float(edge < 1.0));
return (edge < 1.0);
}
<@endfunc@>

View file

@ -444,10 +444,12 @@ LightStageSetup::LightStageSetup() {
}
void LightStageSetup::run(const render::RenderContextPointer& renderContext) {
auto stage = renderContext->_scene->getStage(LightStage::getName());
if (!stage) {
stage = std::make_shared<LightStage>();
renderContext->_scene->resetStage(LightStage::getName(), stage);
if (renderContext->_scene) {
auto stage = renderContext->_scene->getStage(LightStage::getName());
if (!stage) {
stage = std::make_shared<LightStage>();
renderContext->_scene->resetStage(LightStage::getName(), stage);
}
}
}

View file

@ -38,7 +38,7 @@ using namespace render;
extern void initZPassPipelines(ShapePlumber& plumber, gpu::StatePointer state, const render::ShapePipeline::BatchSetter& batchSetter, const render::ShapePipeline::ItemSetter& itemSetter);
void RenderShadowTask::configure(const Config& configuration) {
DependencyManager::get<DeferredLightingEffect>()->setShadowMapEnabled(configuration.enabled);
DependencyManager::get<DeferredLightingEffect>()->setShadowMapEnabled(configuration.isEnabled());
// This is a task, so must still propogate configure() to its Jobs
// Task::configure(configuration);
}

View file

@ -38,7 +38,6 @@ protected:
class RenderShadowTaskConfig : public render::Task::Config::Persistent {
Q_OBJECT
Q_PROPERTY(bool enabled MEMBER enabled NOTIFY dirty)
public:
RenderShadowTaskConfig() : render::Task::Config::Persistent(QStringList() << "Render" << "Engine" << "Shadows", true) {}

View file

@ -115,18 +115,10 @@ float evalShadowAttenuation(vec3 worldLightDir, vec4 worldPosition, float viewDe
isPixelOnCascade.z = isShadowCascadeProjectedOnPixel(cascadeShadowCoords[2]);
isPixelOnCascade.w = isShadowCascadeProjectedOnPixel(cascadeShadowCoords[3]);
if (isPixelOnCascade.x) {
cascadeAttenuations.x = evalShadowCascadeAttenuation(0, offsets, cascadeShadowCoords[0], oneMinusNdotL);
}
if (isPixelOnCascade.y) {
cascadeAttenuations.y = evalShadowCascadeAttenuation(1, offsets, cascadeShadowCoords[1], oneMinusNdotL);
}
if (isPixelOnCascade.z) {
cascadeAttenuations.z = evalShadowCascadeAttenuation(2, offsets, cascadeShadowCoords[2], oneMinusNdotL);
}
if (isPixelOnCascade.w) {
cascadeAttenuations.w = evalShadowCascadeAttenuation(3, offsets, cascadeShadowCoords[3], oneMinusNdotL);
}
cascadeAttenuations.x = mix(1.0, evalShadowCascadeAttenuation(0, offsets, cascadeShadowCoords[0], oneMinusNdotL), float(isPixelOnCascade.x));
cascadeAttenuations.y = mix(1.0, evalShadowCascadeAttenuation(1, offsets, cascadeShadowCoords[1], oneMinusNdotL), float(isPixelOnCascade.y));
cascadeAttenuations.z = mix(1.0, evalShadowCascadeAttenuation(2, offsets, cascadeShadowCoords[2], oneMinusNdotL), float(isPixelOnCascade.z));
cascadeAttenuations.w = mix(1.0, evalShadowCascadeAttenuation(3, offsets, cascadeShadowCoords[3], oneMinusNdotL), float(isPixelOnCascade.w));
cascadeWeights.x = evalShadowCascadeWeight(cascadeShadowCoords[0]);
cascadeWeights.y = evalShadowCascadeWeight(cascadeShadowCoords[1]);

View file

@ -81,9 +81,7 @@ void evalSkinning(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPositio
// to ensure that we rotate along the shortest arc, reverse dual quaternions with negative polarity.
float dqClusterWeight = clusterWeight;
if (dot(real, polarityReference) < 0.0) {
dqClusterWeight = -clusterWeight;
}
dqClusterWeight *= mix(1.0, -1.0, float(dot(real, polarityReference) < 0.0));
sAccum += scale * clusterWeight;
rAccum += real * dqClusterWeight;
@ -102,12 +100,11 @@ void evalSkinning(ivec4 skinClusterIndex, vec4 skinClusterWeight, vec4 inPositio
// sAccum.w indicates the amount of cauterization for this vertex.
// 0 indicates no cauterization and 1 indicates full cauterization.
// TODO: make this cauterization smoother or implement full dual-quaternion scale support.
const float CAUTERIZATION_THRESHOLD = 0.1;
if (sAccum.w > CAUTERIZATION_THRESHOLD) {
skinnedPosition = cAccum;
} else {
sAccum.w = 1.0;
skinnedPosition = m * (sAccum * inPosition);
{
const float CAUTERIZATION_THRESHOLD = 0.1;
float check = float(sAccum.w > CAUTERIZATION_THRESHOLD);
sAccum.w = mix(1.0, sAccum.w, check);
skinnedPosition = mix(m * (sAccum * inPosition), cAccum, check);
}
<@if USE_NORMAL@>

View file

@ -82,8 +82,6 @@ vec3 integrate(float cosTheta, float skinRadius) {
while (a <= (_PI)) {
float sampleAngle = theta + a;
float diffuse = clamp(cos(sampleAngle), 0.0, 1.0);
//if (diffuse < 0.0) diffuse = 0.0;
//if (diffuse > 1.0) diffuse = 1.0;
// Distance.
float sampleDist = abs(2.0 * skinRadius * sin(a * 0.5));
@ -180,7 +178,8 @@ vec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, fl
return lowNormal * 0.5 + vec3(0.5);
}
if (showCurvature()) {
return (curvature > 0.0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));
float check = float(curvature > 0.0);
return vec3(check * curvature, 0.0, (1.0 - check) * -curvature);
}
vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);

View file

@ -64,7 +64,6 @@ private:
class ToneMappingConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(bool enabled MEMBER enabled)
Q_PROPERTY(float exposure MEMBER exposure WRITE setExposure);
Q_PROPERTY(int curve MEMBER curve WRITE setCurve);
public:

View file

@ -37,9 +37,7 @@ void main(void) {
#ifdef GPU_TRANSFORM_IS_STEREO
#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN
#else
if (cam_isStereo()) {
projected.x = 0.5 * (projected.x + cam_getStereoSide());
}
projected.x = mix(projected.x, 0.5 * (projected.x + cam_getStereoSide()), float(cam_isStereo()));
#endif
#endif
_texCoord01 = vec4(projected.xy, 0.0, 1.0) * gl_Position.w;
@ -66,9 +64,7 @@ void main(void) {
#ifdef GPU_TRANSFORM_IS_STEREO
#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN
#else
if (cam_isStereo()) {
_texCoord01.x = 0.5 * (_texCoord01.x + cam_getStereoSide());
}
_texCoord01.x = mix(_texCoord01.x, 0.5 * (_texCoord01.x + cam_getStereoSide()), float(cam_isStereo()));
#endif
#endif

View file

@ -47,8 +47,6 @@ void main(void) {
vec4 projected = gl_Position / gl_Position.w;
projected.xy = (projected.xy + 1.0) * 0.5;
if (cam_isStereo()) {
projected.x = 0.5 * (projected.x + cam_getStereoSide());
}
projected.x = mix(projected.x, 0.5 * (projected.x + cam_getStereoSide()), float(cam_isStereo()));
_texCoord0 = vec4(projected.xy, 0.0, 1.0) * gl_Position.w;
}

View file

@ -60,9 +60,7 @@ void main(void) {
#ifdef GPU_TRANSFORM_IS_STEREO
#ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN
#else
if (cam_isStereo()) {
projected.x = 0.5 * (projected.x + cam_getStereoSide());
}
projected.x = mix(projected.x, 0.5 * (projected.x + cam_getStereoSide()), float(cam_isStereo()));
#endif
#endif
_texCoord0 = vec4(projected.xy, 0.0, 1.0) * gl_Position.w;

View file

@ -33,16 +33,15 @@ void main(void) {
float shadowAttenuation = 1.0;
if (frag.mode == FRAG_MODE_UNLIT) {
discard;
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
if (frag.mode == FRAG_MODE_UNLIT || frag.mode == FRAG_MODE_LIGHTMAPPED) {
discard;
} else {
vec4 midNormalCurvature = vec4(0);
vec4 lowNormalCurvature = vec4(0);
if (frag.mode == FRAG_MODE_SCATTERING) {
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
}
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
float check = float(frag.mode == FRAG_MODE_SCATTERING);
midNormalCurvature = check * midNormalCurvature;
lowNormalCurvature = check * lowNormalCurvature;
vec3 color = evalAmbientSphereGlobalColor(
getViewInverse(),

View file

@ -35,16 +35,16 @@ void main(void) {
vec3 worldLightDirection = getLightDirection(shadowLight);
float shadowAttenuation = evalShadowAttenuation(worldLightDirection, worldPos, -viewPos.z, frag.normal);
if (frag.mode == FRAG_MODE_UNLIT) {
discard;
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
if (frag.mode == FRAG_MODE_UNLIT || frag.mode == FRAG_MODE_LIGHTMAPPED) {
discard;
} else {
vec4 midNormalCurvature = vec4(0);
vec4 lowNormalCurvature = vec4(0);
if (frag.mode == FRAG_MODE_SCATTERING) {
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
}
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
float check = float(frag.mode == FRAG_MODE_SCATTERING);
midNormalCurvature = check * midNormalCurvature;
lowNormalCurvature = check * lowNormalCurvature;
vec3 color = evalAmbientSphereGlobalColor(
getViewInverse(),
shadowAttenuation,

View file

@ -31,16 +31,16 @@ void main(void) {
float shadowAttenuation = 1.0;
// Light mapped or not ?
if (frag.mode == FRAG_MODE_UNLIT) {
discard;
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
if (frag.mode == FRAG_MODE_UNLIT || frag.mode == FRAG_MODE_LIGHTMAPPED) {
discard;
} else {
vec4 midNormalCurvature = vec4(0);
vec4 lowNormalCurvature = vec4(0);
if (frag.mode == FRAG_MODE_SCATTERING) {
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
}
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
float check = float(frag.mode == FRAG_MODE_SCATTERING);
midNormalCurvature = check * midNormalCurvature;
lowNormalCurvature = check * lowNormalCurvature;
vec3 color = evalSkyboxGlobalColor(
getViewInverse(),
shadowAttenuation,

View file

@ -36,16 +36,16 @@ void main(void) {
float shadowAttenuation = evalShadowAttenuation(worldLightDirection, worldPos, -viewPos.z, frag.normal);
// Light mapped or not ?
if (frag.mode == FRAG_MODE_UNLIT) {
discard;
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
if (frag.mode == FRAG_MODE_UNLIT || frag.mode == FRAG_MODE_LIGHTMAPPED) {
discard;
} else {
vec4 midNormalCurvature = vec4(0);
vec4 lowNormalCurvature = vec4(0);
if (frag.mode == FRAG_MODE_SCATTERING) {
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
}
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
float check = float(frag.mode == FRAG_MODE_SCATTERING);
midNormalCurvature = check * midNormalCurvature;
lowNormalCurvature = check * lowNormalCurvature;
vec3 color = evalSkyboxGlobalColor(
getViewInverse(),
shadowAttenuation,

View file

@ -62,7 +62,5 @@ void main(void) {
varColor = vec4(REGION_COLOR[region].xyz, proxy.sphere.w);
if (region == 4) {
gl_Position = vec4(0.0);
}
gl_Position = mix(gl_Position, vec4(0.0), float(region == 4));
}

View file

@ -53,14 +53,9 @@ void main(void) {
// Add or subtract the orthogonal vector based on a different vertex ID
// calculation
if (gl_VertexID < 2) {
distanceFromCenter = -1.0;
eye.xyz -= orthogonal;
} else {
distanceFromCenter = 1.0;
eye.xyz += orthogonal;
}
distanceFromCenter = 1.0 - 2.0 * float(gl_VertexID < 2);
eye.xyz += distanceFromCenter * orthogonal;
// Finally, put the eyespace vertex into clip space
<$transformEyeToClipPos(cam, eye, gl_Position)$>
}
}

View file

@ -34,12 +34,10 @@ layout(location=0) out vec4 outFragColor;
void main(void) {
Grid grid = getGrid();
float alpha;
if (grid.edge.z == 0.0) {
alpha = paintGrid(varTexCoord0, grid.offset.xy, grid.period.xy, grid.edge.xy);
} else {
alpha = paintGridMajorMinor(varTexCoord0, grid.offset, grid.period, grid.edge);
}
float alpha = mix(paintGridMajorMinor(varTexCoord0, grid.offset, grid.period, grid.edge),
paintGrid(varTexCoord0, grid.offset.xy, grid.period.xy, grid.edge.xy),
float(grid.edge.z == 0.0));
if (alpha == 0.0) {
discard;
}

View file

@ -90,6 +90,6 @@ void main(void) {
numLightTouching++;
}
_fragColor = vec4(colorRamp(1.0 - (float(numLightTouching) / 12.0f)), (numLightTouching > 0 ? 0.5 + 0.5 * numLightsScale : 0.0));
_fragColor = vec4(colorRamp(1.0 - (float(numLightTouching) / 12.0f)), float(numLightTouching > 0) * (0.5 + 0.5 * numLightsScale));
}

View file

@ -69,5 +69,5 @@ void main(void) {
TransformCamera cam = getTransformCamera();
<$transformWorldToClipPos(cam, worldPos, gl_Position)$>
varColor = vec4(colorWheel(fract(float(gpu_InstanceID()) / float(frustumGrid_numClusters()))), (numLights >0 ? 0.9 : 0.1));
varColor = vec4(colorWheel(fract(float(gpu_InstanceID()) / float(frustumGrid_numClusters()))), 0.1 + 0.8 * float(numLights > 0));
}

View file

@ -62,12 +62,10 @@ void main(void) {
float relClusterId = float(clusterPos.z * summedDims.x + clusterPos.y * summedDims.y + clusterPos.x) / float(frustumGrid_numClusters());
if (relClusterId < 0.0) {
_fragColor = vec4(0.0);
} else if (relClusterId >= 1.0) {
_fragColor = vec4(vec3(1.0), 0.2);
} else {
_fragColor = vec4(colorWheel(fract(relClusterId)), (numLights > 0 ? 0.05 + 0.95 * numLightsScale : 0.0));
}
_fragColor = mix(mix(vec4(colorWheel(fract(relClusterId)), float(numLights > 0) * (0.05 + 0.95 * numLightsScale)),
vec4(vec3(1.0), 0.2),
float(relClusterId >= 1.0)),
vec4(0.0),
float(relClusterId < 0.0));
}

View file

@ -62,5 +62,5 @@ void main(void) {
TransformCamera cam = getTransformCamera();
<$transformWorldToClipPos(cam, worldPos, gl_Position)$>
varColor = vec4(colorWheel(fract(float(gpu_InstanceID()) / float(frustumGrid_numClusters()))), 0.9);
varColor = vec4(colorWheel(fract(float(gpu_InstanceID()) / float(frustumGrid_numClusters()))), 0.9);
}

View file

@ -69,5 +69,5 @@ void main(void) {
TransformCamera cam = getTransformCamera();
<$transformWorldToClipPos(cam, worldPos, gl_Position)$>
varColor = vec4(colorWheel(fract(float(gpu_InstanceID()) / float(frustumGrid_numClusters()))), (numLights > 0 ? 0.9 : 0.0));
varColor = vec4(colorWheel(fract(float(gpu_InstanceID()) / float(frustumGrid_numClusters()))), float(numLights > 0) * 0.9);
}

View file

@ -96,9 +96,8 @@ void main(void) {
vec3 fragLightDir = fragLightDirLen.xyz;
vec3 color = vec3(0.0);
if (evalLightPointEdge(color, light, fragLightDirLen, fragEyeDir)) {
_fragColor.rgb += color;
}
float check = float(evalLightPointEdge(color, light, fragLightDirLen, fragEyeDir));
_fragColor.rgb += check * color;
}
for (int i = cluster.x; i < numLights; i++) {
@ -130,10 +129,8 @@ void main(void) {
numLightTouching++;
vec3 color = vec3(0.0);
if (evalLightSpotEdge(color, light, fragLightDirLen, cosSpotAngle, fragEyeDir)) {
_fragColor.rgb += color;
}
float check = float(evalLightSpotEdge(color, light, fragLightDirLen, cosSpotAngle, fragEyeDir));
_fragColor.rgb += check * color;
}
}

View file

@ -48,11 +48,8 @@ void main(void) {
} else {
normal = vec4(normalize(cross(_parabolaData.velocity, _parabolaData.acceleration)), 0);
}
if (gl_VertexID % 2 == 0) {
pos += 0.5 * _parabolaData.width * normal;
} else {
pos -= 0.5 * _parabolaData.width * normal;
}
pos += 0.5 * _parabolaData.width * normal * (-1.0 + 2.0 * float(gl_VertexID % 2 == 0));
<$transformModelToClipPos(cam, obj, pos, gl_Position)$>
}
}

View file

@ -39,13 +39,8 @@ const float taaBias = pow(2.0, TAA_TEXTURE_LOD_BIAS);
float evalSDF(vec2 texCoord) {
// retrieve signed distance
float sdf = textureLod(Font, texCoord, TAA_TEXTURE_LOD_BIAS).g;
if (params.outline.x > 0.0) {
if (sdf > interiorCutoff) {
sdf = 1.0 - sdf;
} else {
sdf += outlineExpansion;
}
}
sdf = mix(sdf, mix(sdf + outlineExpansion, 1.0 - sdf, float(sdf > interiorCutoff)), float(params.outline.x > 0.0));
// Rely on TAA for anti-aliasing
return step(0.5, sdf);
}
@ -57,16 +52,11 @@ void main() {
// Perform 4x supersampling for anisotropic filtering
float a;
a = evalSDF(_texCoord0);
a += evalSDF(_texCoord0+dxTexCoord);
a += evalSDF(_texCoord0+dyTexCoord);
a += evalSDF(_texCoord0+dxTexCoord+dyTexCoord);
a += evalSDF(_texCoord0 + dxTexCoord);
a += evalSDF(_texCoord0 + dyTexCoord);
a += evalSDF(_texCoord0 + dxTexCoord + dyTexCoord);
a *= 0.25;
// discard if invisible
if (a < 0.01) {
discard;
}
packDeferredFragment(
normalize(_normalWS),
a * params.color.a,

View file

@ -30,31 +30,32 @@ layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec4 _texCoord01;
#define _texCoord0 _texCoord01.xy
#define _texCoord1 _texCoord01.zw
const float gamma = 2.2;
const float smoothing = 32.0;
#define TAA_TEXTURE_LOD_BIAS -3.0
const float interiorCutoff = 0.8;
const float outlineExpansion = 0.2;
const float taaBias = pow(2.0, TAA_TEXTURE_LOD_BIAS);
float evalSDF(vec2 texCoord) {
// retrieve signed distance
float sdf = textureLod(Font, texCoord, TAA_TEXTURE_LOD_BIAS).g;
sdf = mix(sdf, mix(sdf + outlineExpansion, 1.0 - sdf, float(sdf > interiorCutoff)), float(params.outline.x > 0.0));
// Rely on TAA for anti-aliasing
return step(0.5, sdf);
}
void main() {
// retrieve signed distance
float sdf = texture(Font, _texCoord0).g;
if (params.outline.x > 0.0) {
if (sdf > interiorCutoff) {
sdf = 1.0 - sdf;
} else {
sdf += outlineExpansion;
}
}
// perform adaptive anti-aliasing of the edges
// The larger we're rendering, the less anti-aliasing we need
float s = smoothing * length(fwidth(_texCoord0));
float w = clamp(s, 0.0, 0.5);
float a = smoothstep(0.5 - w, 0.5 + w, sdf);
// discard if invisible
if (a < 0.01) {
discard;
}
vec2 dxTexCoord = dFdx(_texCoord0) * 0.5 * taaBias;
vec2 dyTexCoord = dFdy(_texCoord0) * 0.5 * taaBias;
// Perform 4x supersampling for anisotropic filtering
float a;
a = evalSDF(_texCoord0);
a += evalSDF(_texCoord0 + dxTexCoord);
a += evalSDF(_texCoord0 + dyTexCoord);
a += evalSDF(_texCoord0 + dxTexCoord + dyTexCoord);
a *= 0.25;
packDeferredFragmentTranslucent(
normalize(_normalWS),

View file

@ -54,7 +54,7 @@ void main(void) {
vec3 specular = DEFAULT_SPECULAR;
float shininess = DEFAULT_SHININESS;
float emissiveAmount = 0.0;
#ifdef PROCEDURAL
#ifdef PROCEDURAL_V1

View file

@ -12,6 +12,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Color.slh@>
<@include DeferredBufferWrite.slh@>
<@include render-utils/ShaderConstants.h@>
@ -28,11 +29,13 @@ layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec4 _texCoord01;
void main(void) {
vec4 texel = texture(originalTexture, _texCoord0);
texel = mix(color_sRGBAToLinear(texel), texel, float(_color.a <= 0.0));
texel.rgb *= _color.rgb;
packDeferredFragment(
normalize(_normalWS),
1.0,
_color.rgb * texel.rgb,
texel.rgb,
DEFAULT_ROUGHNESS,
DEFAULT_METALLIC,
DEFAULT_EMISSIVE,

View file

@ -39,26 +39,24 @@ void main(void) {
<$fetchFadeObjectParamsInstanced(fadeParams)$>
applyFade(fadeParams, _positionWS.xyz, fadeEmissive);
vec4 texel = texture(originalTexture, _texCoord0);
float colorAlpha = _color.a;
if (_color.a <= 0.0) {
texel = color_sRGBAToLinear(texel);
colorAlpha = -_color.a;
}
texel = mix(color_sRGBAToLinear(texel), texel, float(_color.a <= 0.0));
texel.rgb *= _color.rgb;
texel.a = abs(_color.a);
const float ALPHA_THRESHOLD = 0.999;
if (colorAlpha * texel.a < ALPHA_THRESHOLD) {
if (texel.a < ALPHA_THRESHOLD) {
packDeferredFragmentTranslucent(
normalize(_normalWS),
colorAlpha * texel.a,
_color.rgb * texel.rgb + fadeEmissive,
texel.a,
texel.rgb + fadeEmissive,
DEFAULT_ROUGHNESS);
} else {
packDeferredFragment(
normalize(_normalWS),
1.0,
_color.rgb * texel.rgb,
texel.rgb,
DEFAULT_ROUGHNESS,
DEFAULT_METALLIC,
DEFAULT_EMISSIVE + fadeEmissive,

View file

@ -28,24 +28,22 @@ layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec4 _texCoord01;
#define _texCoord1 _texCoord01.zw
void main(void) {
vec4 texel = texture(originalTexture, _texCoord0.st);
float colorAlpha = _color.a;
if (_color.a <= 0.0) {
texel = color_sRGBAToLinear(texel);
colorAlpha = -_color.a;
}
vec4 texel = texture(originalTexture, _texCoord0);
texel = mix(color_sRGBAToLinear(texel), texel, float(_color.a <= 0.0));
texel.rgb *= _color.rgb;
texel.a = abs(_color.a);
const float ALPHA_THRESHOLD = 0.999;
if (colorAlpha * texel.a < ALPHA_THRESHOLD) {
if (texel.a < ALPHA_THRESHOLD) {
packDeferredFragmentTranslucent(
normalize(_normalWS),
colorAlpha * texel.a,
_color.rgb * texel.rgb,
texel.a,
texel.rgb,
DEFAULT_ROUGHNESS);
} else {
packDeferredFragmentUnlit(
normalize(_normalWS),
1.0,
_color.rgb * texel.rgb);
texel.rgb);
}
}

View file

@ -40,24 +40,22 @@ void main(void) {
<$fetchFadeObjectParamsInstanced(fadeParams)$>
applyFade(fadeParams, _positionWS.xyz, fadeEmissive);
vec4 texel = texture(originalTexture, _texCoord0.st);
float colorAlpha = _color.a;
if (_color.a <= 0.0) {
texel = color_sRGBAToLinear(texel);
colorAlpha = -_color.a;
}
vec4 texel = texture(originalTexture, _texCoord0);
texel = mix(color_sRGBAToLinear(texel), texel, float(_color.a <= 0.0));
texel.rgb *= _color.rgb;
texel.a = abs(_color.a);
const float ALPHA_THRESHOLD = 0.999;
if (colorAlpha * texel.a < ALPHA_THRESHOLD) {
if (texel.a < ALPHA_THRESHOLD) {
packDeferredFragmentTranslucent(
normalize(_normalWS),
colorAlpha * texel.a,
_color.rgb * texel.rgb+fadeEmissive,
texel.a,
texel.rgb + fadeEmissive,
DEFAULT_ROUGHNESS);
} else {
packDeferredFragmentUnlit(
normalize(_normalWS),
1.0,
_color.rgb * texel.rgb+fadeEmissive);
texel.rgb + fadeEmissive);
}
}

View file

@ -12,6 +12,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Color.slh@>
<@include DeferredBufferWrite.slh@>
<@include render-utils/ShaderConstants.h@>
@ -28,11 +29,13 @@ layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec4 _texCoord01;
void main(void) {
vec4 texel = texture(originalTexture, _texCoord0);
float colorAlpha = _color.a * texel.a;
texel = mix(color_sRGBAToLinear(texel), texel, float(_color.a <= 0.0));
texel.rgb *= _color.rgb;
texel.a *= abs(_color.a);
packDeferredFragmentTranslucent(
normalize(_normalWS),
colorAlpha,
_color.rgb * texel.rgb,
texel.a,
texel.rgb,
DEFAULT_ROUGHNESS);
}

View file

@ -46,14 +46,10 @@ void main(void) {
<$fetchFadeObjectParamsInstanced(fadeParams)$>
applyFade(fadeParams, _positionWS.xyz, fadeEmissive);
vec4 texel = texture(originalTexture, _texCoord0.st);
float opacity = _color.a;
if (_color.a <= 0.0) {
texel = color_sRGBAToLinear(texel);
opacity = -_color.a;
}
opacity *= texel.a;
vec3 albedo = _color.rgb * texel.rgb;
vec4 texel = texture(originalTexture, _texCoord0);
texel = mix(color_sRGBAToLinear(texel), texel, float(_color.a <= 0.0));
texel.rgb *= _color.rgb;
texel.a *= abs(_color.a);
vec3 fragPosition = _positionES.xyz;
vec3 fragNormal = normalize(_normalWS);
@ -66,12 +62,11 @@ void main(void) {
1.0,
fragPosition,
fragNormal,
albedo,
texel.rgb,
DEFAULT_FRESNEL,
0.0f,
fadeEmissive,
DEFAULT_ROUGHNESS,
opacity),
opacity);
texel.a),
texel.a);
}

View file

@ -27,11 +27,10 @@ layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec4 _texCoord01;
layout(location=0) out vec4 _fragColor0;
void main(void) {
vec4 texel = texture(originalTexture, _texCoord0.st);
float colorAlpha = _color.a;
if (_color.a <= 0.0) {
texel = color_sRGBAToLinear(texel);
colorAlpha = -_color.a;
}
_fragColor0 = vec4(_color.rgb * texel.rgb, colorAlpha * texel.a);
vec4 texel = texture(originalTexture, _texCoord0);
texel = mix(color_sRGBAToLinear(texel), texel, float(_color.a <= 0.0));
texel.rgb *= _color.rgb;
texel.a *= abs(_color.a);
_fragColor0 = texel;
}

View file

@ -39,11 +39,10 @@ void main(void) {
<$fetchFadeObjectParamsInstanced(fadeParams)$>
applyFade(fadeParams, _positionWS.xyz, fadeEmissive);
vec4 texel = texture(originalTexture, _texCoord0.st);
float colorAlpha = _color.a;
if (_color.a <= 0.0) {
texel = color_sRGBAToLinear(texel);
colorAlpha = -_color.a;
}
_fragColor0 = vec4(_color.rgb * texel.rgb + fadeEmissive, colorAlpha * texel.a);
vec4 texel = texture(originalTexture, _texCoord0);
texel = mix(color_sRGBAToLinear(texel), texel, float(_color.a <= 0.0));
texel.rgb *= _color.rgb;
texel.a *= abs(_color.a);
_fragColor0 = vec4(texel.rgb + fadeEmissive, texel.a);
}

View file

@ -219,26 +219,24 @@ vec3 getTapLocationClampedSSAO(int sampleNumber, float spinAngle, float outerRad
}
bool redoTap = false;
if ((tapPos.x < 0.5)) {
tapPos.x = -tapPos.x;
redoTap = true;
} else if ((tapPos.x > sideImageSize.x - 0.5)) {
tapPos.x -= (sideImageSize.x - tapPos.x);
redoTap = true;
{
float check1 = float(tapPos.x < 0.5);
float check2 = (1.0 - check1) * float(tapPos.x > sideImageSize.x - 0.5);
tapPos.x = tapPos.x - 2.0 * tapPos.x * check1 - check2 * (sideImageSize.x - tapPos.x);
redoTap = (check1 > 0.0 || check2 > 0.0);
}
if ((tapPos.y < 0.5)) {
tapPos.y = -tapPos.y;
redoTap = true;
} else if ((tapPos.y > sideImageSize.y - 0.5)) {
tapPos.y -= (sideImageSize.y - tapPos.y);
redoTap = true;
{
float check1 = float(tapPos.y < 0.5);
float check2 = (1.0 - check1) * float(tapPos.y > sideImageSize.y - 0.5);
tapPos.y = tapPos.y - 2.0 * tapPos.y * check1 - check2 * (sideImageSize.y - tapPos.y);
redoTap = (check1 > 0.0 || check2 > 0.0);
}
if (redoTap) {
tap.xy = tapPos - pixelPos;
tap.z = length(tap.xy);
tap.z = 0.0;
{
float check = float(redoTap);
tap.xy = mix(tap.xy, tapPos - pixelPos, check);
tap.z = (1.0 - check) * tap.z;
}
return tap;

View file

@ -39,6 +39,7 @@ layout(location=0) out vec4 outFragColor;
void main(void) {
// Stereo side info based on the real viewport size of this pass
vec2 sideDepthSize = getDepthTextureSideSize(0);
// Pixel Debugged
vec2 cursorUV = getDebugCursorTexcoord();
vec2 cursorPixelPos = cursorUV * sideDepthSize;
@ -46,6 +47,5 @@ void main(void) {
ivec2 fragUVPos = ivec2(cursorPixelPos);
// TODO
outFragColor = packOcclusionOutput(0.0, 0.0, vec3(0.0, 0.0, 1.0));
}

View file

@ -43,6 +43,7 @@ void main(void) {
} else {
sideOcclusionSize = ivec2( getOcclusionSideSize() );
}
ivec4 side = getStereoSideInfoFromWidth(fragPixelPos.x, sideOcclusionSize.x);
// From now on, fragUVPos is the UV pos in the side
fragUVPos = getSideUVFromFramebufferUV(side, fragUVPos);

View file

@ -18,6 +18,9 @@ float aspectRatio = 0.95;
void main(void) {
vec2 pos = varTexCoord0 * 2.0 - vec2(1.0);
pos.x = aspectRatio * (pos.x * (pos.x > 0.0 ? 2.0 : -2.0) - 1.0);
if (1.0 - dot(pos.xy, pos.xy) > 0.0 ) discard;
pos.x = aspectRatio * (pos.x * mix(-2.0, 2.0, float(pos.x > 0.0)) - 1.0);
if (1.0 - dot(pos.xy, pos.xy) > 0.0) {
discard;
}
}

View file

@ -93,14 +93,14 @@ vec3 drawScatteringTableUV(vec2 cursor, vec2 texcoord) {
vec3 color = vec3(0.0);
bool keep = false;
for (int c = 0; c < 3; c++) {
if (distance[c] > threshold) {
keep = true;
color[c] += 1.0;
}
bool check = distance[c] > threshold;
keep = keep || check;
color[c] += float(check);
}
if (!keep)
discard;
if (!keep) {
discard;
}
return color;
}

View file

@ -67,11 +67,7 @@ vec3 getRawNormal(vec2 texcoord) {
vec3 getWorldNormal(vec2 texcoord) {
vec3 rawNormal = getRawNormal(texcoord);
if (isFullResolution()) {
return unpackNormal(rawNormal);
} else {
return normalize((rawNormal - vec3(0.5)) * 2.0);
}
return mix(normalize((rawNormal - vec3(0.5)) * 2.0), unpackNormal(rawNormal), float(isFullResolution()));
}
vec3 getWorldNormalDiff(vec2 texcoord, vec2 delta) {
@ -93,7 +89,7 @@ void main(void) {
vec2 texcoordPos;
ivec4 stereoSide;
ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);
vec2 stereoSideClip = vec2(stereoSide.x, (isStereo() ? 0.5 : 1.0));
vec2 stereoSideClip = vec2(stereoSide.x, 1.0 - 0.5 * float(isStereo()));
// Texcoord to fetch in the deferred texture are the exact UVs comming from vertex shader
// sideToFrameTexcoord(stereoSideClip, texcoordPos);
@ -128,8 +124,8 @@ void main(void) {
// Calculate dF/du and dF/dv
vec2 viewportScale = perspectiveScale * getInvWidthHeight();
vec2 du = vec2( viewportScale.x * (float(stereoSide.w) > 0.0 ? 0.5 : 1.0), 0.0f );
vec2 dv = vec2( 0.0f, viewportScale.y );
vec2 du = vec2(viewportScale.x * (1.0 - 0.5 * float(stereoSide.w > 0)), 0.0);
vec2 dv = vec2( 0.0f, viewportScale.y);
vec4 dFdu = vec4(getWorldNormalDiff(frameTexcoordPos, du), getEyeDepthDiff(frameTexcoordPos, du));
vec4 dFdv = vec4(getWorldNormalDiff(frameTexcoordPos, dv), getEyeDepthDiff(frameTexcoordPos, dv));

View file

@ -35,17 +35,11 @@ void main() {
vec2 prevFragUV = taa_fetchSourceAndHistory(fragUV, fragVel, sourceColor, historyColor);
vec3 nextColor = sourceColor;
if (taa_constrainColor()) {
// clamp history to neighbourhood of current sample
historyColor = taa_evalConstrainColor(sourceColor, fragUV, fragVel, historyColor);
}
if (taa_feedbackColor()) {
nextColor = taa_evalFeedbackColor(sourceColor, historyColor, params.blend);
} else {
nextColor = mix(historyColor, sourceColor, params.blend);
}
// clamp history to neighbourhood of current sample
historyColor = mix(historyColor, taa_evalConstrainColor(sourceColor, fragUV, fragVel, historyColor), float(taa_constrainColor()));
nextColor = mix(mix(historyColor, sourceColor, params.blend), taa_evalFeedbackColor(sourceColor, historyColor, params.blend), float(taa_feedbackColor()));
outFragColor = vec4(taa_resolveColor(nextColor), 1.0);
}

View file

@ -121,21 +121,17 @@ float taa_fetchDepth(vec2 uv) {
}
#define ZCMP_GT(a, b) (a > b)
#define ZCMP_GT(a, b) float(a > b)
vec2 taa_getImageSize() {
vec2 imageSize = getWidthHeight(0);
if (isStereo()) {
imageSize.x *= 2.0;
}
imageSize.x *= 1.0 + float(isStereo());
return imageSize;
}
vec2 taa_getTexelSize() {
vec2 texelSize = getInvWidthHeight();
if (isStereo()) {
texelSize.x *= 0.5;
}
texelSize.x *= 1.0 - 0.5 * float(isStereo());
return texelSize;
}
@ -158,16 +154,16 @@ vec3 taa_findClosestFragment3x3(vec2 uv)
vec3 dbr = vec3( 1, 1, taa_fetchDepth(uv + dv + du));
vec3 dmin = dtl;
if (ZCMP_GT(dmin.z, dtc.z)) dmin = dtc;
if (ZCMP_GT(dmin.z, dtr.z)) dmin = dtr;
dmin = mix(dmin, dtc, ZCMP_GT(dmin.z, dtc.z));
dmin = mix(dmin, dtr, ZCMP_GT(dmin.z, dtr.z));
if (ZCMP_GT(dmin.z, dml.z)) dmin = dml;
if (ZCMP_GT(dmin.z, dmc.z)) dmin = dmc;
if (ZCMP_GT(dmin.z, dmr.z)) dmin = dmr;
dmin = mix(dmin, dml, ZCMP_GT(dmin.z, dml.z));
dmin = mix(dmin, dmc, ZCMP_GT(dmin.z, dmc.z));
dmin = mix(dmin, dmr, ZCMP_GT(dmin.z, dmr.z));
if (ZCMP_GT(dmin.z, dbl.z)) dmin = dbl;
if (ZCMP_GT(dmin.z, dbc.z)) dmin = dbc;
if (ZCMP_GT(dmin.z, dbr.z)) dmin = dbr;
dmin = mix(dmin, dbl, ZCMP_GT(dmin.z, dbl.z));
dmin = mix(dmin, dbc, ZCMP_GT(dmin.z, dbc.z));
dmin = mix(dmin, dbr, ZCMP_GT(dmin.z, dbr.z));
return vec3(uv + dd.xy * dmin.xy, dmin.z);
}
@ -189,49 +185,46 @@ vec2 taa_fetchVelocityMapBest(vec2 uv) {
vec2 dbc = taa_fetchVelocityMap(uv + dv);
vec2 dbr = taa_fetchVelocityMap(uv + dv + du);
vec3 best = vec3(dtl, dot(dtl,dtl));
vec3 best = vec3(dtl, dot(dtl, dtl));
float testSpeed = dot(dtc,dtc);
if (testSpeed > best.z) { best = vec3(dtc, testSpeed); }
testSpeed = dot(dtr,dtr);
if (testSpeed > best.z) { best = vec3(dtr, testSpeed); }
float testSpeed = dot(dtc, dtc);
mix(best, vec3(dtc, testSpeed), float(testSpeed > best.z));
testSpeed = dot(dtr, dtr);
mix(best, vec3(dtr, testSpeed), float(testSpeed > best.z));
testSpeed = dot(dml,dml);
if (testSpeed > best.z) { best = vec3(dml, testSpeed); }
testSpeed = dot(dmc,dmc);
if (testSpeed > best.z) { best = vec3(dmc, testSpeed); }
testSpeed = dot(dmr,dmr);
if (testSpeed > best.z) { best = vec3(dmr, testSpeed); }
testSpeed = dot(dml, dml);
mix(best, vec3(dml, testSpeed), float(testSpeed > best.z));
testSpeed = dot(dmc, dmc);
mix(best, vec3(dmc, testSpeed), float(testSpeed > best.z));
testSpeed = dot(dmr, dmr);
mix(best, vec3(dmr, testSpeed), float(testSpeed > best.z));
testSpeed = dot(dbl,dbl);
if (testSpeed > best.z) { best = vec3(dbl, testSpeed); }
testSpeed = dot(dbc,dbc);
if (testSpeed > best.z) { best = vec3(dbc, testSpeed); }
testSpeed = dot(dbr,dbr);
if (testSpeed > best.z) { best = vec3(dbr, testSpeed); }
testSpeed = dot(dbl, dbl);
mix(best, vec3(dbl, testSpeed), float(testSpeed > best.z));
testSpeed = dot(dbc, dbc);
mix(best, vec3(dbc, testSpeed), float(testSpeed > best.z));
testSpeed = dot(dbr, dbr);
mix(best, vec3(dbr, testSpeed), float(testSpeed > best.z));
return best.xy;
}
vec2 taa_fromFragUVToEyeUVAndSide(vec2 fragUV, out int stereoSide) {
vec2 eyeUV = fragUV;
stereoSide = 0;
if (isStereo()) {
if (eyeUV.x > 0.5) {
eyeUV.x -= 0.5;
stereoSide = 1;
}
eyeUV.x *= 2.0;
}
float check = float(isStereo());
float check2 = float(eyeUV.x > 0.5);
eyeUV.x -= check * check2 * 0.5;
stereoSide = int(check * check2);
eyeUV.x *= 1.0 + check;
return eyeUV;
}
vec2 taa_fromEyeUVToFragUV(vec2 eyeUV, int stereoSide) {
vec2 fragUV = eyeUV;
if (isStereo()) {
fragUV.x *= 0.5;
fragUV.x += float(stereoSide)*0.5;
}
float check = float(isStereo());
fragUV.x *= 1.0 - 0.5 * check;
fragUV.x += check * float(stereoSide) * 0.5;
return fragUV;
}
@ -247,10 +240,8 @@ vec2 taa_fetchSourceAndHistory(vec2 fragUV, vec2 fragVelocity, out vec3 sourceCo
vec2 prevFragUV = taa_computePrevFragAndEyeUV(fragUV, fragVelocity, prevEyeUV);
sourceColor = taa_fetchSourceMap(fragUV).xyz;
historyColor = sourceColor;
if (!(any(lessThan(prevEyeUV, vec2(0.0))) || any(greaterThan(prevEyeUV, vec2(1.0))))) {
historyColor = taa_fetchHistoryMap(prevFragUV).xyz;
}
historyColor = mix(sourceColor, taa_fetchHistoryMap(prevFragUV).xyz, float(!(any(lessThan(prevEyeUV, vec2(0.0))) || any(greaterThan(prevEyeUV, vec2(1.0))))));
return prevFragUV;
}
@ -405,10 +396,11 @@ vec3 taa_clampColor(vec3 colorMin, vec3 colorMax, vec3 colorSource, vec3 color)
vec3 a_unit = abs(v_unit);
float ma_unit = max(a_unit.x, max(a_unit.y, a_unit.z));
if (ma_unit > 1.0)
if (ma_unit > 1.0) {
return p_clip + v_clip / ma_unit;
else
return q;// point inside aabb
} else {
return q;// point inside aabb
}
}
vec3 taa_evalConstrainColor(vec3 sourceColor, vec2 sourceUV, vec2 sourceVel, vec3 candidateColor) {
@ -514,10 +506,6 @@ vec3 taa_evalFXAA(vec2 fragUV) {
// compare luma of new samples to the luma range of the original neighborhood
// if the new samples exceed this range, just use the first two samples instead of all four
if (lumaB < lumaMin || lumaB > lumaMax) {
return rgbA;
} else {
return rgbB;
}
return mix(rgbB, rgbA, float(lumaB < lumaMin || lumaB > lumaMax));
}

View file

@ -83,32 +83,21 @@ void main(void) {
if ((prevOrbPosToPixLength < tenPercentHeight) && (cursorVelocityLength > 0.5)) {
vec2 prevOrbPosToPix_uv = cursorPrevUV + prevOrbPosToPix * texelSize / taa_getDebugOrbZoom();
vec3 preOrbColor = vec3(0.0);
if (!(any(lessThan(prevOrbPosToPix_uv, vec2(0.0))) || any(greaterThan(prevOrbPosToPix_uv, vec2(1.0))))) {
preOrbColor = texture(historyMap, prevOrbPosToPix_uv).xyz;
}
if (prevOrbPosToPixLength < orbPixThreshold) {
preOrbColor = vec3(1.0, 0.0, 1.0);
}
preOrbColor = mix(preOrbColor, texture(historyMap, prevOrbPosToPix_uv).xyz, float(!(any(lessThan(prevOrbPosToPix_uv, vec2(0.0))) || any(greaterThan(prevOrbPosToPix_uv, vec2(1.0))))));
preOrbColor = mix(preOrbColor, vec3(1.0, 0.0, 1.0), float(prevOrbPosToPixLength < orbPixThreshold));
float distanceToNext = length(imageSize * (cursorUV - prevOrbPosToPix_uv));
if (distanceToNext < orbPixThreshold) {
preOrbColor = vec3(1.0, 0.5, 0.0);
}
preOrbColor = mix(preOrbColor, vec3(1.0, 0.5, 0.0), float(distanceToNext < orbPixThreshold));
outFragColor = vec4(preOrbColor, 1.0);
return;
}
if (nextOrbPosToPixLength < tenPercentHeight) {
vec2 nextOrbPosToPix_uv = cursorUV + nextOrbPosToPix * texelSize / taa_getDebugOrbZoom();
vec3 nextOrbColor = vec3(0.0);
if (!(any(lessThan(nextOrbPosToPix_uv, vec2(0.0))) || any(greaterThan(nextOrbPosToPix_uv, vec2(1.0))))) {
nextOrbColor = texture(nextMap, nextOrbPosToPix_uv).xyz;
}
nextOrbColor = mix(nextOrbColor, texture(nextMap, nextOrbPosToPix_uv).xyz, float(!(any(lessThan(nextOrbPosToPix_uv, vec2(0.0))) || any(greaterThan(nextOrbPosToPix_uv, vec2(1.0))))));
float distanceToPrev = length(imageSize * (cursorPrevUV - nextOrbPosToPix_uv));
if (distanceToPrev < orbPixThreshold) {
nextOrbColor = vec3(1.0, 0.0, 1.0);
}
if (nextOrbPosToPixLength < orbPixThreshold) {
nextOrbColor = vec3(1.0, 0.5, 0.0);
}
nextOrbColor = mix(nextOrbColor, vec3(1.0, 0.0, 1.0), float(distanceToPrev < orbPixThreshold));
nextOrbColor = mix(nextOrbColor, vec3(1.0, 0.5, 0.0), float(nextOrbPosToPixLength < orbPixThreshold));
outFragColor = vec4(nextOrbColor, 1.0);
return;
@ -141,16 +130,9 @@ void main(void) {
outFragColor = vec4(nextColor, 1.0);
vec3 prevColor = nextColor;
prevColor = mix(prevColor, texture(historyMap, prevTexCoord).xyz, float(!(any(lessThan(prevTexCoord, vec2(0.0))) || any(greaterThan(prevTexCoord, vec2(1.0))))));
if (!(any(lessThan(prevTexCoord, vec2(0.0))) || any(greaterThan(prevTexCoord, vec2(1.0))))) {
prevColor = texture(historyMap, prevTexCoord).xyz;
}
outFragColor.xyz = mix(prevColor, vec3(1, 0, 1), clamp(distance(prevColor, nextColor) - 0.01, 0.0, 1.0));
outFragColor.xyz = mix(prevColor, vec3(1,0,1), clamp(distance(prevColor, nextColor) - 0.01, 0.0, 1.0));
if (pixVelocityLength > params.debugShowVelocityThreshold) {
vec3 speedColor = taa_getVelocityColorAboveThreshold(pixVelocityLength);
outFragColor = vec4(0.0, 1.0, 1.0, 1.0);
}
outFragColor = mix(outFragColor, vec4(0.0, 1.0, 1.0, 1.0), float(pixVelocityLength > params.debugShowVelocityThreshold));
}

View file

@ -44,7 +44,7 @@ void main(void) {
// vec3 ambient = sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(lightAmbient), fragNormal).xyz;
// _fragColor = vec4( 0.5 * (fragNormal + vec3(1.0)), 1.0);
vec3 color = (sphereUV.x > 0.0 ? ambientMap : ambientSH);
vec3 color = mix(ambientSH, ambientMap, float(sphereUV.x > 0.0));
color = color * 1.0 - base.w + base.xyz * base.w;
const float INV_GAMMA_22 = 1.0 / 2.2;

View file

@ -35,16 +35,11 @@ void main(void) {
vec3 color = skybox.color.rgb;
// blend is only set if there is a cubemap
if (skybox.color.a > 0.0) {
color = texture(skyboxMap, direction).rgb;
if (skybox.color.a < 1.0) {
color *= skybox.color.rgb;
}
}
float check = float(skybox.color.a > 0.0);
color = mix(color, texture(skyboxMap, direction).rgb, check);
color *= mix(vec3(1.0), skybox.color.rgb, check * float(skybox.color.a < 1.0));
color = color * 1.0 - base.w + base.xyz * base.w;
const float INV_GAMMA_22 = 1.0 / 2.2;
_fragColor = vec4(pow(color, vec3(INV_GAMMA_22)), 1.0);
}

View file

@ -98,10 +98,11 @@ vec4 pixelShaderGaussian(vec2 texcoord, vec2 direction, vec2 pixelStep) {
totalWeight += weight;
}
}
if (totalWeight>0.0) {
if (totalWeight > 0.0) {
srcBlurred /= totalWeight;
}
srcBlurred.a = getOutputAlpha();
return srcBlurred;
}
@ -159,10 +160,11 @@ vec4 pixelShaderGaussianDepthAware(vec2 texcoord, vec2 direction, vec2 pixelStep
totalWeight += weight;
}
}
if (totalWeight>0.0) {
if (totalWeight > 0.0) {
srcBlurred /= totalWeight;
}
return srcBlurred;
}

View file

@ -20,7 +20,6 @@
namespace render {
class DrawSceneOctreeConfig : public Job::Config {
Q_OBJECT
Q_PROPERTY(bool enabled MEMBER enabled NOTIFY dirty())
Q_PROPERTY(bool showVisibleCells READ getShowVisibleCells WRITE setShowVisibleCells NOTIFY dirty())
Q_PROPERTY(bool showEmptyCells READ getShowEmptyCells WRITE setShowEmptyCells NOTIFY dirty())
Q_PROPERTY(int numAllocatedCells READ getNumAllocatedCells)
@ -77,7 +76,6 @@ namespace render {
class DrawItemSelectionConfig : public Job::Config {
Q_OBJECT
Q_PROPERTY(bool enabled MEMBER enabled NOTIFY dirty())
Q_PROPERTY(bool showInsideItems READ getShowInsideItems WRITE setShowInsideItems NOTIFY dirty())
Q_PROPERTY(bool showInsideSubcellItems READ getShowInsideSubcellItems WRITE setShowInsideSubcellItems NOTIFY dirty())
Q_PROPERTY(bool showPartialItems READ getShowPartialItems WRITE setShowPartialItems NOTIFY dirty())

View file

@ -26,7 +26,7 @@
using namespace render;
void DrawStatusConfig::dirtyHelper() {
enabled = showNetwork || showDisplay;
_isEnabled = showNetwork || showDisplay;
emit dirty();
}

Some files were not shown because too many files have changed in this diff Show more