diff --git a/interface/resources/qml/hifi/dialogs/graphics/GraphicsSettings.qml b/interface/resources/qml/hifi/dialogs/graphics/GraphicsSettings.qml index a928b1379f..ba12fdf7e4 100644 --- a/interface/resources/qml/hifi/dialogs/graphics/GraphicsSettings.qml +++ b/interface/resources/qml/hifi/dialogs/graphics/GraphicsSettings.qml @@ -658,6 +658,36 @@ Flickable { } } } + Item { + Layout.preferredWidth: parent.width + Layout.preferredHeight: 35 + Layout.topMargin: 16 + + HifiStylesUit.RalewayRegular { + id: enableCameraClippingHeader + text: "3rd Person Camera Clipping" + anchors.left: parent.left + anchors.top: parent.top + width: 200 + height: parent.height + size: 16 + color: "#FFFFFF" + } + + HifiControlsUit.CheckBox { + id: enableCameraClipping + checked: Render.cameraClippingEnabled + boxSize: 16 + spacing: -1 + colorScheme: hifi.colorSchemes.dark + anchors.left: enableCameraClippingHeader.right + anchors.leftMargin: 20 + anchors.top: parent.top + onCheckedChanged: { + Render.cameraClippingEnabled = enableCameraClipping.checked; + } + } + } } ColumnLayout { @@ -683,7 +713,7 @@ Flickable { ListModel { id: antialiasingModel - // Maintain same order as "AntialiasingConfig::Mode". + // Maintain same order as "AntialiasingSetupConfig::Mode". ListElement { text: "None" } diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index 6bc57bd200..538b17f3a6 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -234,7 +234,8 @@ Application::Application( _maxOctreePacketsPerSecond("maxOctreePPS", DEFAULT_MAX_OCTREE_PPS), _maxOctreePPS(_maxOctreePacketsPerSecond.get()), // Camera - _fieldOfView("fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES) + _fieldOfView("fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES), + _cameraClippingEnabled("cameraClippingEnabled", false) { setProperty(hifi::properties::CRASHED, _previousSessionCrashed); diff --git a/interface/src/Application.h b/interface/src/Application.h index df72535b0d..613ad3f1b8 100644 --- a/interface/src/Application.h +++ b/interface/src/Application.h @@ -300,6 +300,9 @@ public: float getFieldOfView() { return _fieldOfView.get(); } void setFieldOfView(float fov); + bool getCameraClippingEnabled() { return _cameraClippingEnabled.get(); } + void setCameraClippingEnabled(bool enabled); + void updateMyAvatarLookAtPosition(float deltaTime); @@ -880,6 +883,10 @@ private: ConicalViewFrustums _lastQueriedViews; // last views used to query servers Setting::Handle _fieldOfView; + Setting::Handle _cameraClippingEnabled; + + bool _prevCameraClippingEnabled { false }; + unsigned int _cameraClippingRayPickID; // Graphics diff --git a/interface/src/Application_Camera.cpp b/interface/src/Application_Camera.cpp index c989ed5dc6..26a0504c61 100644 --- a/interface/src/Application_Camera.cpp +++ b/interface/src/Application_Camera.cpp @@ -18,6 +18,8 @@ #include #include +#include +#include #include #include "avatar/MyAvatar.h" @@ -40,9 +42,7 @@ void Application::updateCamera(RenderArgs& renderArgs, float deltaTime) { PROFILE_RANGE(render, __FUNCTION__); PerformanceTimer perfTimer("updateCamera"); - glm::vec3 boomOffset; auto myAvatar = getMyAvatar(); - boomOffset = myAvatar->getModelScale() * myAvatar->getBoomLength() * -IDENTITY_FORWARD; // The render mode is default or mirror if the camera is in mirror mode, assigned further below renderArgs._renderMode = RenderArgs::DEFAULT_RENDER_MODE; @@ -81,6 +81,16 @@ void Application::updateCamera(RenderArgs& renderArgs, float deltaTime) { 
_myCamera.setOrientation(glm::normalize(glmExtractRotation(worldCameraMat))); _myCamera.setPosition(extractTranslation(worldCameraMat)); } else { + float boomLength = myAvatar->getBoomLength(); + if (getCameraClippingEnabled()) { + auto result = + DependencyManager::get()->getPrevPickResultTyped(_cameraClippingRayPickID); + if (result && result->doesIntersect()) { + const float CAMERA_CLIPPING_EPSILON = 0.1f; + boomLength = std::min(boomLength, result->distance - CAMERA_CLIPPING_EPSILON); + } + } + glm::vec3 boomOffset = myAvatar->getModelScale() * boomLength * -IDENTITY_FORWARD; _thirdPersonHMDCameraBoomValid = false; if (mode == CAMERA_MODE_THIRD_PERSON) { _myCamera.setOrientation(myAvatar->getHead()->getOrientation()); @@ -158,7 +168,19 @@ void Application::updateCamera(RenderArgs& renderArgs, float deltaTime) { _myCamera.update(); } - renderArgs._cameraMode = (int8_t)_myCamera.getMode(); + renderArgs._cameraMode = (int8_t)mode; + + const bool shouldEnableCameraClipping = + (mode == CAMERA_MODE_THIRD_PERSON || mode == CAMERA_MODE_LOOK_AT || mode == CAMERA_MODE_SELFIE) && !isHMDMode() && + getCameraClippingEnabled(); + if (_prevCameraClippingEnabled != shouldEnableCameraClipping) { + if (shouldEnableCameraClipping) { + DependencyManager::get()->enablePick(_cameraClippingRayPickID); + } else { + DependencyManager::get()->disablePick(_cameraClippingRayPickID); + } + _prevCameraClippingEnabled = shouldEnableCameraClipping; + } } void Application::updateSecondaryCameraViewFrustum() { @@ -277,6 +299,16 @@ void Application::setFieldOfView(float fov) { } } +void Application::setCameraClippingEnabled(bool enabled) { + _cameraClippingEnabled.set(enabled); + _prevCameraClippingEnabled = enabled; + if (enabled) { + DependencyManager::get()->enablePick(_cameraClippingRayPickID); + } else { + DependencyManager::get()->disablePick(_cameraClippingRayPickID); + } +} + // Called during Application::update immediately before AvatarManager::updateMyAvatar, updating my data that is then sent // to everyone. // The principal result is to call updateLookAtTargetAvatar() and then setLookAtPosition(). 
diff --git a/interface/src/Application_Setup.cpp b/interface/src/Application_Setup.cpp index c62a9e0130..901fcda90a 100644 --- a/interface/src/Application_Setup.cpp +++ b/interface/src/Application_Setup.cpp @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -1277,6 +1278,17 @@ void Application::initialize(const QCommandLineParser &parser) { DependencyManager::get()->setMouseRayPickID(mouseRayPickID); } + // Setup the camera clipping ray pick + { + _prevCameraClippingEnabled = _cameraClippingEnabled.get(); + auto cameraRayPick = std::make_shared(Vectors::ZERO, -Vectors::UP, + PickFilter(PickScriptingInterface::getPickEntities() | + PickScriptingInterface::getPickLocalEntities()), + MyAvatar::ZOOM_MAX, 0.0f, _prevCameraClippingEnabled); + cameraRayPick->parentTransform = std::make_shared(); + _cameraClippingRayPickID = DependencyManager::get()->addPick(PickQuery::Ray, cameraRayPick); + } + // Preload Tablet sounds DependencyManager::get()->setEntityTree(qApp->getEntities()->getTree()); DependencyManager::get()->preloadSounds(); @@ -1656,8 +1668,10 @@ void Application::setupSignalsAndOperators() { return nullptr; }); - Procedural::opaqueStencil = [](gpu::StatePointer state) { PrepareStencil::testMaskDrawShape(*state); }; - Procedural::transparentStencil = [](gpu::StatePointer state) { PrepareStencil::testMask(*state); }; + Procedural::opaqueStencil = [](gpu::StatePointer state, bool useAA) { + useAA ? PrepareStencil::testMaskDrawShape(*state) : PrepareStencil::testMaskDrawShapeNoAA(*state); + }; + Procedural::transparentStencil = [](gpu::StatePointer state) { PrepareStencil::testMaskResetNoAA(*state); }; EntityTree::setGetEntityObjectOperator([this](const QUuid& id) -> QObject* { auto entities = getEntities(); diff --git a/interface/src/CameraRootTransformNode.cpp b/interface/src/CameraRootTransformNode.cpp new file mode 100644 index 0000000000..596bdab3d3 --- /dev/null +++ b/interface/src/CameraRootTransformNode.cpp @@ -0,0 +1,48 @@ +// +// Created by HifiExperiments on 10/30/2024 +// Copyright 2024 Overte e.V. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include "CameraRootTransformNode.h" + +#include "Application.h" +#include "DependencyManager.h" +#include "avatar/AvatarManager.h" +#include "avatar/MyAvatar.h" + +Transform CameraRootTransformNode::getTransform() { + auto myAvatar = DependencyManager::get()->getMyAvatar(); + + glm::vec3 pos; + glm::quat ori; + + CameraMode mode = qApp->getCamera().getMode(); + if (mode == CAMERA_MODE_FIRST_PERSON || mode == CAMERA_MODE_THIRD_PERSON) { + pos = myAvatar->getDefaultEyePosition(); + ori = myAvatar->getHeadOrientation(); + } else if (mode == CAMERA_MODE_FIRST_PERSON_LOOK_AT) { + pos = myAvatar->getCameraEyesPosition(0.0f); + ori = myAvatar->getLookAtRotation(); + } else { + ori = myAvatar->getLookAtRotation(); + pos = myAvatar->getLookAtPivotPoint(); + + if (mode == CAMERA_MODE_SELFIE) { + ori = ori * glm::angleAxis(PI, ori * Vectors::UP); + } + } + + ori = ori * glm::angleAxis(-PI / 2.0f, Vectors::RIGHT); + + glm::vec3 scale = glm::vec3(myAvatar->scaleForChildren()); + return Transform(ori, scale, pos); +} + +QVariantMap CameraRootTransformNode::toVariantMap() const { + QVariantMap map; + map["joint"] = "CameraRoot"; + return map; +} diff --git a/interface/src/CameraRootTransformNode.h b/interface/src/CameraRootTransformNode.h new file mode 100644 index 0000000000..6a0f58f42e --- /dev/null +++ b/interface/src/CameraRootTransformNode.h @@ -0,0 +1,20 @@ +// +// Created by HifiExperiments on 10/30/2024 +// Copyright 2024 Overte e.V. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +#ifndef hifi_CameraRootTransformNode_h +#define hifi_CameraRootTransformNode_h + +#include "TransformNode.h" + +class CameraRootTransformNode : public TransformNode { +public: + CameraRootTransformNode() {} + Transform getTransform() override; + QVariantMap toVariantMap() const override; +}; + +#endif // hifi_CameraRootTransformNode_h diff --git a/interface/src/SecondaryCamera.cpp b/interface/src/SecondaryCamera.cpp index 130b8c77ea..f520f5a407 100644 --- a/interface/src/SecondaryCamera.cpp +++ b/interface/src/SecondaryCamera.cpp @@ -274,7 +274,7 @@ public: void SecondaryCameraRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor) { const auto cachedArg = task.addJob("SecondaryCamera"); - task.addJob("RenderSecondView", cullFunctor, render::ItemKey::TAG_BITS_1, render::ItemKey::TAG_BITS_1); + task.addJob("RenderSecondView", cullFunctor, render::ItemKey::TAG_BITS_1, render::ItemKey::TAG_BITS_1, RenderViewTask::TransformOffset::SECONDARY_VIEW); task.addJob("EndSecondaryCamera", cachedArg); } \ No newline at end of file diff --git a/interface/src/graphics/GraphicsEngine.cpp b/interface/src/graphics/GraphicsEngine.cpp index 5075d9b57f..129f32a939 100644 --- a/interface/src/graphics/GraphicsEngine.cpp +++ b/interface/src/graphics/GraphicsEngine.cpp @@ -262,14 +262,14 @@ void GraphicsEngine::render_performFrame() { batch.enableStereo(isStereo); batch.clearDepthStencilFramebuffer(1.0, 0); batch.setViewportTransform({ 0, 0, finalFramebuffer->getSize() }); - _splashScreen->render(batch, viewFrustum, renderArgs._renderMethod == RenderArgs::RenderMethod::FORWARD); + _splashScreen->render(batch, viewFrustum, renderArgs._renderMethod == RenderArgs::RenderMethod::FORWARD, render::RenderEngine::TS_BACKGROUND_VIEW); }); } else { { PROFILE_RANGE(render, 
"/renderOverlay"); PerformanceTimer perfTimer("renderOverlay"); // NOTE: There is no batch associated with this renderArgs - // the ApplicationOverlay class assumes it's viewport is set up to be the device size + // the ApplicationOverlay class assumes its viewport is set up to be the device size renderArgs._viewport = glm::ivec4(0, 0, qApp->getDeviceSize()); qApp->getApplicationOverlay().renderOverlay(&renderArgs); } diff --git a/interface/src/raypick/ParabolaPointer.cpp b/interface/src/raypick/ParabolaPointer.cpp index a047519373..c9b6434bc2 100644 --- a/interface/src/raypick/ParabolaPointer.cpp +++ b/interface/src/raypick/ParabolaPointer.cpp @@ -446,7 +446,10 @@ void ParabolaPointer::RenderState::ParabolaRenderItem::render(RenderArgs* args) Transform transform; transform.setTranslation(_origin); - batch.setModelTransform(transform); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == RenderArgs::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == RenderArgs::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } batch.setPipeline(getParabolaPipeline(args->_renderMethod == render::Args::RenderMethod::FORWARD)); @@ -481,4 +484,4 @@ namespace render { template <> const ShapeKey shapeGetShapeKey(const ParabolaPointer::RenderState::ParabolaRenderItem::Pointer& payload) { return ShapeKey::Builder::ownPipeline(); } -} \ No newline at end of file +} diff --git a/interface/src/raypick/ParabolaPointer.h b/interface/src/raypick/ParabolaPointer.h index 6415baac14..836e03d2b3 100644 --- a/interface/src/raypick/ParabolaPointer.h +++ b/interface/src/raypick/ParabolaPointer.h @@ -1,6 +1,7 @@ // // Created by Sam Gondelman 7/17/2018 // Copyright 2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -62,6 +63,7 @@ public: render::ItemKey _key; glm::vec3 _origin { 0.0f }; + Transform _prevRenderTransform; bool _isVisibleInSecondaryCamera { DEFAULT_PARABOLA_ISVISIBLEINSECONDARYCAMERA }; bool _drawInFront { DEFAULT_PARABOLA_DRAWINFRONT }; bool _visible { false }; diff --git a/interface/src/raypick/PickScriptingInterface.cpp b/interface/src/raypick/PickScriptingInterface.cpp index 5323c52faf..221b0fcb63 100644 --- a/interface/src/raypick/PickScriptingInterface.cpp +++ b/interface/src/raypick/PickScriptingInterface.cpp @@ -21,6 +21,7 @@ #include "ParabolaPick.h" #include "CollisionPick.h" +#include "CameraRootTransformNode.h" #include "SpatialParentFinder.h" #include "PickTransformNode.h" #include "MouseTransformNode.h" @@ -537,6 +538,9 @@ void PickScriptingInterface::setParentTransform(std::shared_ptr pick, } else if (joint == "Avatar") { pick->parentTransform = std::make_shared(); return; + } else if (joint == "CameraRoot") { + pick->parentTransform = std::make_shared(); + return; } else { parentUuid = myAvatar->getSessionUUID(); parentJointIndex = myAvatar->getJointIndex(joint); diff --git a/interface/src/scripting/RenderScriptingInterface.cpp b/interface/src/scripting/RenderScriptingInterface.cpp index d311ca9afe..60781ab19f 100644 --- a/interface/src/scripting/RenderScriptingInterface.cpp +++ b/interface/src/scripting/RenderScriptingInterface.cpp @@ -25,14 +25,14 @@ STATIC_SCRIPT_TYPES_INITIALIZER((+[](ScriptManager* manager){ auto scriptEngine = manager->engine().get(); scriptRegisterMetaType, scriptValueToEnumClass >(scriptEngine, "RenderMethod"); - scriptRegisterMetaType, scriptValueToEnumClass >(scriptEngine, "Mode"); + scriptRegisterMetaType, scriptValueToEnumClass >(scriptEngine, "Mode"); })); STATIC_SCRIPT_INITIALIZER(+[](ScriptManager* manager){ auto scriptEngine = manager->engine().get(); scriptEngine->registerEnum("Render.RenderMethod",QMetaEnum::fromType()); - scriptEngine->registerEnum("AntialiasingMode",QMetaEnum::fromType()); + scriptEngine->registerEnum("AntialiasingMode",QMetaEnum::fromType()); }); RenderScriptingInterface* RenderScriptingInterface::getInstance() { @@ -56,7 +56,7 @@ void RenderScriptingInterface::loadSettings() { _bloomEnabled = _bloomEnabledSetting.get(); _ambientOcclusionEnabled = _ambientOcclusionEnabledSetting.get(); _proceduralMaterialsEnabled = _proceduralMaterialsEnabledSetting.get(); - _antialiasingMode = static_cast(_antialiasingModeSetting.get()); + _antialiasingMode = static_cast(_antialiasingModeSetting.get()); _viewportResolutionScale = _viewportResolutionScaleSetting.get(); _fullScreenScreen = _fullScreenScreenSetting.get(); }); @@ -95,9 +95,10 @@ void recursivelyUpdateMirrorRenderMethods(const QString& parentTaskName, int ren return; } + auto renderConfig = qApp->getRenderEngine()->getConfiguration(); for (size_t mirrorIndex = 0; mirrorIndex < RenderMirrorTask::MAX_MIRRORS_PER_LEVEL; mirrorIndex++) { std::string mirrorTaskString = parentTaskName.toStdString() + ".RenderMirrorView" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth) + ".DeferredForwardSwitch"; - auto mirrorConfig = dynamic_cast(qApp->getRenderEngine()->getConfiguration()->getConfig(QString::fromStdString(mirrorTaskString))); + auto mirrorConfig = dynamic_cast(renderConfig->getConfig(QString::fromStdString(mirrorTaskString))); if (mirrorConfig) { mirrorConfig->setBranch((int)renderMethod); recursivelyUpdateMirrorRenderMethods(QString::fromStdString(mirrorTaskString) + 
(renderMethod == 1 ? ".RenderForwardTask" : ".RenderShadowsAndDeferredTask.RenderDeferredTask"), @@ -111,14 +112,20 @@ void RenderScriptingInterface::forceRenderMethod(RenderMethod renderMethod) { _renderMethod = (int)renderMethod; _renderMethodSetting.set((int)renderMethod); + auto renderConfig = qApp->getRenderEngine()->getConfiguration(); QString configName = "RenderMainView.DeferredForwardSwitch"; - auto config = dynamic_cast(qApp->getRenderEngine()->getConfiguration()->getConfig(configName)); + auto config = dynamic_cast(renderConfig->getConfig(configName)); if (config) { config->setBranch((int)renderMethod); recursivelyUpdateMirrorRenderMethods(configName + (renderMethod == RenderMethod::FORWARD ? ".RenderForwardTask" : ".RenderShadowsAndDeferredTask.RenderDeferredTask"), (int)renderMethod, 0); } + + auto secondaryConfig = dynamic_cast(renderConfig->getConfig("RenderSecondView.DeferredForwardSwitch")); + if (secondaryConfig) { + secondaryConfig->setBranch((int)renderMethod); + } }); } @@ -128,13 +135,14 @@ QStringList RenderScriptingInterface::getRenderMethodNames() const { } void recursivelyUpdateLightingModel(const QString& parentTaskName, std::function updateLambda, int depth = -1) { + auto renderConfig = qApp->getRenderEngine()->getConfiguration(); if (depth == -1) { - auto secondaryLightingModelConfig = qApp->getRenderEngine()->getConfiguration()->getConfig("RenderSecondView.LightingModel"); + auto secondaryLightingModelConfig = renderConfig->getConfig("RenderSecondView.LightingModel"); if (secondaryLightingModelConfig) { updateLambda(secondaryLightingModelConfig); } - auto mainLightingModelConfig = qApp->getRenderEngine()->getConfiguration()->getConfig("RenderMainView.LightingModel"); + auto mainLightingModelConfig = renderConfig->getConfig("RenderMainView.LightingModel"); if (mainLightingModelConfig) { updateLambda(mainLightingModelConfig); } @@ -146,7 +154,7 @@ void recursivelyUpdateLightingModel(const QString& parentTaskName, std::function for (size_t mirrorIndex = 0; mirrorIndex < RenderMirrorTask::MAX_MIRRORS_PER_LEVEL; mirrorIndex++) { std::string mirrorTaskString = parentTaskName.toStdString() + ".RenderMirrorView" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth); - auto lightingModelConfig = qApp->getRenderEngine()->getConfiguration()->getConfig(mirrorTaskString + ".LightingModel"); + auto lightingModelConfig = renderConfig->getConfig(mirrorTaskString + ".LightingModel"); if (lightingModelConfig) { updateLambda(lightingModelConfig); recursivelyUpdateLightingModel(QString::fromStdString(mirrorTaskString), updateLambda, depth + 1); @@ -169,7 +177,6 @@ void RenderScriptingInterface::forceShadowsEnabled(bool enabled) { _renderSettingLock.withWriteLock([&] { _shadowsEnabled = (enabled); _shadowsEnabledSetting.set(enabled); - Menu::getInstance()->setIsOptionChecked(MenuOption::Shadows, enabled); recursivelyUpdateLightingModel("", [enabled] (MakeLightingModelConfig *config) { config->setShadow(enabled); }); @@ -231,7 +238,6 @@ void RenderScriptingInterface::forceAmbientOcclusionEnabled(bool enabled) { _renderSettingLock.withWriteLock([&] { _ambientOcclusionEnabled = (enabled); _ambientOcclusionEnabledSetting.set(enabled); - Menu::getInstance()->setIsOptionChecked(MenuOption::AmbientOcclusion, enabled); recursivelyUpdateLightingModel("", [enabled] (MakeLightingModelConfig *config) { config->setAmbientOcclusion(enabled); }); @@ -259,52 +265,53 @@ void RenderScriptingInterface::forceProceduralMaterialsEnabled(bool enabled) { }); } -AntialiasingConfig::Mode 
RenderScriptingInterface::getAntialiasingMode() const { +AntialiasingSetupConfig::Mode RenderScriptingInterface::getAntialiasingMode() const { return _antialiasingMode; } -void RenderScriptingInterface::setAntialiasingMode(AntialiasingConfig::Mode mode) { +void RenderScriptingInterface::setAntialiasingMode(AntialiasingSetupConfig::Mode mode) { if (_antialiasingMode != mode) { forceAntialiasingMode(mode); emit settingsChanged(); } } -void setAntialiasingModeForView(AntialiasingConfig::Mode mode, JitterSampleConfig *jitterCamConfig, AntialiasingConfig *antialiasingConfig) { +void setAntialiasingModeForView(AntialiasingSetupConfig::Mode mode, AntialiasingSetupConfig *antialiasingSetupConfig, AntialiasingConfig *antialiasingConfig) { switch (mode) { - case AntialiasingConfig::Mode::NONE: - jitterCamConfig->none(); + case AntialiasingSetupConfig::Mode::NONE: + antialiasingSetupConfig->none(); antialiasingConfig->blend = 1; antialiasingConfig->setDebugFXAA(false); break; - case AntialiasingConfig::Mode::TAA: - jitterCamConfig->play(); + case AntialiasingSetupConfig::Mode::TAA: + antialiasingSetupConfig->play(); antialiasingConfig->blend = 0.25; antialiasingConfig->setDebugFXAA(false); break; - case AntialiasingConfig::Mode::FXAA: - jitterCamConfig->none(); + case AntialiasingSetupConfig::Mode::FXAA: + antialiasingSetupConfig->none(); antialiasingConfig->blend = 0.25; antialiasingConfig->setDebugFXAA(true); break; default: - jitterCamConfig->none(); + antialiasingSetupConfig->none(); antialiasingConfig->blend = 1; antialiasingConfig->setDebugFXAA(false); break; } } -void recursivelyUpdateAntialiasingMode(const QString& parentTaskName, AntialiasingConfig::Mode mode, int depth = -1) { +void recursivelyUpdateAntialiasingMode(const QString& parentTaskName, AntialiasingSetupConfig::Mode mode, int depth = -1) { + auto renderConfig = qApp->getRenderEngine()->getConfiguration(); if (depth == -1) { - auto secondViewJitterCamConfig = qApp->getRenderEngine()->getConfiguration()->getConfig("RenderSecondView.JitterCam"); - auto secondViewAntialiasingConfig = qApp->getRenderEngine()->getConfiguration()->getConfig("RenderSecondView.Antialiasing"); + auto secondViewJitterCamConfig = renderConfig->getConfig("RenderSecondView.AntialiasingSetup"); + auto secondViewAntialiasingConfig = renderConfig->getConfig("RenderSecondView.Antialiasing"); if (secondViewJitterCamConfig && secondViewAntialiasingConfig) { setAntialiasingModeForView(mode, secondViewJitterCamConfig, secondViewAntialiasingConfig); } - auto mainViewJitterCamConfig = qApp->getRenderEngine()->getConfiguration()->getConfig("RenderMainView.JitterCam"); - auto mainViewAntialiasingConfig = qApp->getRenderEngine()->getConfiguration()->getConfig("RenderMainView.Antialiasing"); + auto mainViewJitterCamConfig = renderConfig->getConfig("RenderMainView.AntialiasingSetup"); + auto mainViewAntialiasingConfig = renderConfig->getConfig("RenderMainView.Antialiasing"); if (mainViewJitterCamConfig && mainViewAntialiasingConfig) { setAntialiasingModeForView( mode, mainViewJitterCamConfig, mainViewAntialiasingConfig); } @@ -316,8 +323,8 @@ void recursivelyUpdateAntialiasingMode(const QString& parentTaskName, Antialiasi for (size_t mirrorIndex = 0; mirrorIndex < RenderMirrorTask::MAX_MIRRORS_PER_LEVEL; mirrorIndex++) { std::string mirrorTaskString = parentTaskName.toStdString() + ".RenderMirrorView" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth); - auto jitterCamConfig = qApp->getRenderEngine()->getConfiguration()->getConfig(mirrorTaskString + 
".JitterCam"); - auto antialiasingConfig = qApp->getRenderEngine()->getConfiguration()->getConfig(mirrorTaskString + ".Antialiasing"); + auto jitterCamConfig = renderConfig->getConfig(mirrorTaskString + ".AntialiasingSetup"); + auto antialiasingConfig = renderConfig->getConfig(mirrorTaskString + ".Antialiasing"); if (jitterCamConfig && antialiasingConfig) { setAntialiasingModeForView(mode, jitterCamConfig, antialiasingConfig); recursivelyUpdateAntialiasingMode(QString::fromStdString(mirrorTaskString), mode, depth + 1); @@ -325,14 +332,14 @@ void recursivelyUpdateAntialiasingMode(const QString& parentTaskName, Antialiasi } } -void RenderScriptingInterface::forceAntialiasingMode(AntialiasingConfig::Mode mode) { - if ((int)mode < 0 || mode >= AntialiasingConfig::Mode::MODE_COUNT) { - mode = AntialiasingConfig::Mode::NONE; +void RenderScriptingInterface::forceAntialiasingMode(AntialiasingSetupConfig::Mode mode) { + if ((int)mode < 0 || mode >= AntialiasingSetupConfig::Mode::MODE_COUNT) { + mode = AntialiasingSetupConfig::Mode::NONE; } _renderSettingLock.withWriteLock([&] { _antialiasingMode = mode; - _antialiasingModeSetting.set(_antialiasingMode); + _antialiasingModeSetting.set((int)_antialiasingMode); recursivelyUpdateAntialiasingMode("", _antialiasingMode); }); @@ -345,6 +352,13 @@ void RenderScriptingInterface::setVerticalFieldOfView(float fieldOfView) { } } +void RenderScriptingInterface::setCameraClippingEnabled(bool enabled) { + if (qApp->getCameraClippingEnabled() != enabled) { + qApp->setCameraClippingEnabled(enabled); + emit settingsChanged(); + } +} + QStringList RenderScriptingInterface::getScreens() const { QStringList screens; @@ -396,20 +410,27 @@ void RenderScriptingInterface::forceViewportResolutionScale(float scale) { return; } _renderSettingLock.withWriteLock([&] { - _viewportResolutionScale = (scale); + _viewportResolutionScale = scale; _viewportResolutionScaleSetting.set(scale); auto renderConfig = qApp->getRenderEngine()->getConfiguration(); assert(renderConfig); auto deferredView = renderConfig->getConfig("RenderMainView.RenderDeferredTask"); - // mainView can be null if we're rendering in forward mode if (deferredView) { - deferredView->setProperty("resolutionScale", _viewportResolutionScale); + deferredView->setProperty("resolutionScale", scale); } auto forwardView = renderConfig->getConfig("RenderMainView.RenderForwardTask"); - // mainView can be null if we're rendering in forward mode if (forwardView) { - forwardView->setProperty("resolutionScale", _viewportResolutionScale); + forwardView->setProperty("resolutionScale", scale); + } + + auto deferredSecondView = renderConfig->getConfig("RenderSecondView.RenderDeferredTask"); + if (deferredSecondView) { + deferredSecondView->setProperty("resolutionScale", scale); + } + auto forwardSecondView = renderConfig->getConfig("RenderSecondView.RenderForwardTask"); + if (forwardSecondView) { + forwardSecondView->setProperty("resolutionScale", scale); } }); } diff --git a/interface/src/scripting/RenderScriptingInterface.h b/interface/src/scripting/RenderScriptingInterface.h index 56b474cf31..9ea757b0f3 100644 --- a/interface/src/scripting/RenderScriptingInterface.h +++ b/interface/src/scripting/RenderScriptingInterface.h @@ -37,6 +37,7 @@ * they're disabled. * @property {integer} antialiasingMode - The active anti-aliasing mode. * @property {number} viewportResolutionScale - The view port resolution scale, > 0.0. 
+ * @property {boolean} cameraClippingEnabled - true if third person camera clipping is enabled, false if it's disabled. */ class RenderScriptingInterface : public QObject { Q_OBJECT @@ -45,10 +46,11 @@ class RenderScriptingInterface : public QObject { Q_PROPERTY(bool hazeEnabled READ getHazeEnabled WRITE setHazeEnabled NOTIFY settingsChanged) Q_PROPERTY(bool bloomEnabled READ getBloomEnabled WRITE setBloomEnabled NOTIFY settingsChanged) Q_PROPERTY(bool ambientOcclusionEnabled READ getAmbientOcclusionEnabled WRITE setAmbientOcclusionEnabled NOTIFY settingsChanged) + Q_PROPERTY(AntialiasingSetupConfig::Mode antialiasingMode READ getAntialiasingMode WRITE setAntialiasingMode NOTIFY settingsChanged) Q_PROPERTY(bool proceduralMaterialsEnabled READ getProceduralMaterialsEnabled WRITE setProceduralMaterialsEnabled NOTIFY settingsChanged) - Q_PROPERTY(AntialiasingConfig::Mode antialiasingMode READ getAntialiasingMode WRITE setAntialiasingMode NOTIFY settingsChanged) Q_PROPERTY(float viewportResolutionScale READ getViewportResolutionScale WRITE setViewportResolutionScale NOTIFY settingsChanged) Q_PROPERTY(float verticalFieldOfView READ getVerticalFieldOfView WRITE setVerticalFieldOfView NOTIFY settingsChanged) + Q_PROPERTY(bool cameraClippingEnabled READ getCameraClippingEnabled WRITE setCameraClippingEnabled NOTIFY settingsChanged) public: RenderScriptingInterface(); @@ -202,14 +204,14 @@ public slots: * @function Render.getAntialiasingMode * @returns {AntialiasingMode} The active anti-aliasing mode. */ - AntialiasingConfig::Mode getAntialiasingMode() const; + AntialiasingSetupConfig::Mode getAntialiasingMode() const; /*@jsdoc * Sets the active anti-aliasing mode. * @function Render.setAntialiasingMode * @param {AntialiasingMode} The active anti-aliasing mode. */ - void setAntialiasingMode(AntialiasingConfig::Mode mode); + void setAntialiasingMode(AntialiasingSetupConfig::Mode mode); /*@jsdoc * Gets the view port resolution scale. @@ -261,7 +263,21 @@ public slots: * @function Render.setVerticalFieldOfView * @param {number} fieldOfView - The vertical field of view in degrees to set. */ - void setVerticalFieldOfView( float fieldOfView ); + void setVerticalFieldOfView(float fieldOfView); + + /*@jsdoc + * Gets whether or not third person camera clipping is enabled. + * @function Render.getCameraClippingEnabled + * @returns {boolean} true if camera clipping is enabled, false if it's disabled. + */ + bool getCameraClippingEnabled() { return qApp->getCameraClippingEnabled(); } + + /*@jsdoc + * Sets whether or not third person camera clipping is enabled. + * @function Render.setCameraClippingEnabled + * @param {boolean} enabled - true to enable third person camera clipping, false to disable. 
+ */ + void setCameraClippingEnabled(bool enabled); signals: @@ -288,7 +304,7 @@ private: bool _bloomEnabled { true }; bool _ambientOcclusionEnabled { true }; bool _proceduralMaterialsEnabled { true }; - AntialiasingConfig::Mode _antialiasingMode { AntialiasingConfig::Mode::NONE }; + AntialiasingSetupConfig::Mode _antialiasingMode { AntialiasingSetupConfig::Mode::NONE }; float _viewportResolutionScale { 1.0f }; QString _fullScreenScreen; @@ -299,7 +315,7 @@ private: Setting::Handle _bloomEnabledSetting { "bloomEnabled", true }; Setting::Handle _ambientOcclusionEnabledSetting { "ambientOcclusionEnabled", true }; Setting::Handle _proceduralMaterialsEnabledSetting { "proceduralMaterialsEnabled", true }; - Setting::Handle _antialiasingModeSetting { "antialiasingMode", AntialiasingConfig::Mode::NONE }; + Setting::Handle _antialiasingModeSetting { "antialiasingMode", (int)AntialiasingSetupConfig::Mode::NONE }; Setting::Handle _viewportResolutionScaleSetting { "viewportResolutionScale", 1.0f }; Setting::Handle _fullScreenScreenSetting { "fullScreenScreen", "" }; @@ -310,7 +326,7 @@ private: void forceBloomEnabled(bool enabled); void forceAmbientOcclusionEnabled(bool enabled); void forceProceduralMaterialsEnabled(bool enabled); - void forceAntialiasingMode(AntialiasingConfig::Mode mode); + void forceAntialiasingMode(AntialiasingSetupConfig::Mode mode); void forceViewportResolutionScale(float scale); static std::once_flag registry_flag; diff --git a/interface/src/ui/PreferencesDialog.cpp b/interface/src/ui/PreferencesDialog.cpp index 4c87f12998..623aede2fa 100644 --- a/interface/src/ui/PreferencesDialog.cpp +++ b/interface/src/ui/PreferencesDialog.cpp @@ -231,7 +231,7 @@ void setupPreferences() { preferences->addPreference(new CheckPreference(UI_CATEGORY, "Show Graphics icon on tablet and toolbar", getter, setter)); } - static const QString VIEW_CATEGORY{ "View" }; + static const QString VIEW_CATEGORY { "View" }; { auto getter = [myAvatar]()->float { return myAvatar->getRealWorldFieldOfView(); }; auto setter = [myAvatar](float value) { myAvatar->setRealWorldFieldOfView(value); }; @@ -249,6 +249,11 @@ void setupPreferences() { preference->setStep(1); preferences->addPreference(preference); } + { + auto getter = []()->bool { return qApp->getCameraClippingEnabled(); }; + auto setter = [](bool value) { qApp->setCameraClippingEnabled(value); }; + preferences->addPreference(new CheckPreference(VIEW_CATEGORY, "Enable 3rd Person Camera Clipping?", getter, setter)); + } // Snapshots static const QString SNAPSHOTS { "Snapshots" }; diff --git a/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.cpp b/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.cpp index 00771927fb..d5ce52de18 100644 --- a/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.cpp +++ b/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.cpp @@ -360,7 +360,7 @@ void OpenGLDisplayPlugin::customizeContext() { auto presentThread = DependencyManager::get(); Q_ASSERT(thread() == presentThread->thread()); - getGLBackend()->setCameraCorrection(mat4(), mat4(), true, true); + getGLBackend()->updatePresentFrame(); for (auto& cursorValue : _cursorsData) { auto& cursorData = cursorValue.second; @@ -704,8 +704,7 @@ void OpenGLDisplayPlugin::present(const std::shared_ptr& if (_currentFrame) { auto correction = getViewCorrection(); - getGLBackend()->setCameraCorrection(correction, _prevRenderView, true); - _prevRenderView = correction * _currentFrame->view; + 
getGLBackend()->updatePresentFrame(correction); { withPresentThreadLock([&] { _renderRate.increment(); diff --git a/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.h b/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.h index 0df0d9ac3e..4dc10a7aa1 100644 --- a/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.h +++ b/libraries/display-plugins/src/display-plugins/OpenGLDisplayPlugin.h @@ -154,7 +154,6 @@ protected: gpu::FramePointer _currentFrame; gpu::Frame* _lastFrame{ nullptr }; - mat4 _prevRenderView; gpu::FramebufferPointer _compositeFramebuffer; gpu::PipelinePointer _hudPipeline; gpu::PipelinePointer _mirrorHUDPipeline; diff --git a/libraries/entities-renderer/src/RenderableEntityItem.h b/libraries/entities-renderer/src/RenderableEntityItem.h index 949590c472..bc442d772b 100644 --- a/libraries/entities-renderer/src/RenderableEntityItem.h +++ b/libraries/entities-renderer/src/RenderableEntityItem.h @@ -166,6 +166,7 @@ protected: MirrorMode _mirrorMode { MirrorMode::NONE }; QUuid _portalExitID; Transform _renderTransform; + Transform _prevRenderTransform; // each subclass is responsible for updating this after they render because they all handle transforms differently MaterialMap _materials; mutable std::mutex _materialsLock; diff --git a/libraries/entities-renderer/src/RenderableGizmoEntityItem.cpp b/libraries/entities-renderer/src/RenderableGizmoEntityItem.cpp index 10ae144334..80beee47fd 100644 --- a/libraries/entities-renderer/src/RenderableGizmoEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableGizmoEntityItem.cpp @@ -1,6 +1,7 @@ // // Created by Sam Gondelman on 1/22/19 // Copyright 2019 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -266,7 +267,10 @@ void GizmoEntityRenderer::doRender(RenderArgs* args) { bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0; transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode, usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition(), true)); - batch.setModelTransform(transform); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } Pipeline pipelineType = getPipelineType(materials); if (pipelineType == Pipeline::PROCEDURAL) { diff --git a/libraries/entities-renderer/src/RenderableGridEntityItem.cpp b/libraries/entities-renderer/src/RenderableGridEntityItem.cpp index 3f40218d46..c79c47efa8 100644 --- a/libraries/entities-renderer/src/RenderableGridEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableGridEntityItem.cpp @@ -1,6 +1,7 @@ // // Created by Sam Gondelman on 11/29/18 // Copyright 2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -106,7 +107,10 @@ void GridEntityRenderer::doRender(RenderArgs* args) { bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0; transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode, usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition())); - batch->setModelTransform(transform); + batch->setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } auto minCorner = glm::vec2(-0.5f, -0.5f); auto maxCorner = glm::vec2(0.5f, 0.5f); @@ -121,4 +125,4 @@ void GridEntityRenderer::doRender(RenderArgs* args) { minorGridRowDivisions, minorGridColDivisions, MINOR_GRID_EDGE, majorGridRowDivisions, majorGridColDivisions, MAJOR_GRID_EDGE, color, forward, _geometryId); -} \ No newline at end of file +} diff --git a/libraries/entities-renderer/src/RenderableImageEntityItem.cpp b/libraries/entities-renderer/src/RenderableImageEntityItem.cpp index 0b76038cd4..483006449f 100644 --- a/libraries/entities-renderer/src/RenderableImageEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableImageEntityItem.cpp @@ -1,6 +1,7 @@ // // Created by Sam Gondelman on 11/29/18 // Copyright 2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -189,7 +190,10 @@ void ImageEntityRenderer::doRender(RenderArgs* args) { } transform.setScale(scale); } - batch->setModelTransform(transform); + batch->setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } Pipeline pipelineType = getPipelineType(materials); if (pipelineType == Pipeline::PROCEDURAL) { diff --git a/libraries/entities-renderer/src/RenderableLineEntityItem.cpp b/libraries/entities-renderer/src/RenderableLineEntityItem.cpp index 1117c97c75..2baed1afd9 100644 --- a/libraries/entities-renderer/src/RenderableLineEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableLineEntityItem.cpp @@ -4,6 +4,7 @@ // // Created by Seth Alves on 5/11/15. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -44,13 +45,18 @@ void LineEntityRenderer::doRender(RenderArgs* args) { PerformanceTimer perfTimer("RenderableLineEntityItem::render"); Q_ASSERT(args->_batch); gpu::Batch& batch = *args->_batch; + const auto& modelTransform = getModelTransform(); - Transform transform = Transform(); + Transform transform; transform.setTranslation(modelTransform.getTranslation()); bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0; transform.setRotation(BillboardModeHelpers::getBillboardRotation(modelTransform.getTranslation(), modelTransform.getRotation(), _billboardMode, usePrimaryFrustum ? 
BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition())); - batch.setModelTransform(transform); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } + if (_linePoints.size() > 1) { DependencyManager::get()->bindSimpleProgram(batch, false, false, false, false, true, _renderLayer != RenderLayer::WORLD || args->_renderMethod == Args::RenderMethod::FORWARD); diff --git a/libraries/entities-renderer/src/RenderableMaterialEntityItem.cpp b/libraries/entities-renderer/src/RenderableMaterialEntityItem.cpp index 576e842f84..21e6cfece9 100644 --- a/libraries/entities-renderer/src/RenderableMaterialEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableMaterialEntityItem.cpp @@ -328,7 +328,10 @@ void MaterialEntityRenderer::doRender(RenderArgs* args) { bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0; transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode, usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition())); - batch.setModelTransform(transform); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } if (!proceduralRender) { drawMaterial->setTextureTransforms(textureTransform, MaterialMappingMode::UV, true); diff --git a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp index 793871e55a..eeb7945660 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp @@ -4,6 +4,7 @@ // // Created by Brad Hefta-Gaub on 8/6/14. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -1510,7 +1511,11 @@ void ModelEntityRenderer::doRender(RenderArgs* args) { // If the model doesn't have visual geometry, render our bounding box as green wireframe static glm::vec4 greenColor(0.0f, 1.0f, 0.0f, 1.0f); gpu::Batch& batch = *args->_batch; - batch.setModelTransform(getModelTransform()); // we want to include the scale as well + Transform transform = getModelTransform(); + batch.setModelTransform(transform, _prevRenderTransform); // we want to include the scale as well + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } auto geometryCache = DependencyManager::get(); geometryCache->renderWireCubeInstance(args, batch, greenColor, geometryCache->getShapePipelinePointer(false, false, args->_renderMethod == Args::RenderMethod::FORWARD)); diff --git a/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp b/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp index 6393f76603..71d4ec5374 100644 --- a/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp @@ -3,6 +3,7 @@ // interface/src // // Created by Jason Rickwald on 3/2/15. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -53,7 +54,7 @@ static ShapePipelinePointer shapePipelineFactory(const ShapePlumber& plumber, co state->setDepthTest(true, !transparent, gpu::LESS_EQUAL); state->setBlendFunction(transparent, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE, gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE); - transparent ? PrepareStencil::testMask(*state) : PrepareStencil::testMaskDrawShape(*state); + transparent ? PrepareStencil::testMaskResetNoAA(*state) : PrepareStencil::testMaskDrawShapeNoAA(*state); auto program = gpu::Shader::createProgram(std::get<3>(key)); _pipelines[std::make_tuple(std::get<0>(key), transparent, std::get<2>(key), wireframe)] = gpu::Pipeline::create(program, state); @@ -620,4 +621,4 @@ void ParticleEffectEntityRenderer::computeTriangles(const hfm::Model& hfmModel) glm::vec3 scale = bounds.getScale(); _triangleInfo.transform = glm::scale(1.0f / scale) * glm::translate(-bounds.calcCenter()); -} \ No newline at end of file +} diff --git a/libraries/entities-renderer/src/RenderablePolyLineEntityItem.cpp b/libraries/entities-renderer/src/RenderablePolyLineEntityItem.cpp index a75beb143f..1d57f51ae7 100644 --- a/libraries/entities-renderer/src/RenderablePolyLineEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderablePolyLineEntityItem.cpp @@ -4,6 +4,7 @@ // // Created by Eric Levin on 8/10/15 // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -331,6 +332,10 @@ void PolyLineEntityRenderer::doRender(RenderArgs* args) { batch.setModelTransform(transform); batch.setPipeline(_pipelines[{args->_renderMethod, isTransparent()}]); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } batch.setResourceTexture(0, texture); batch.draw(gpu::TRIANGLE_STRIP, (gpu::uint32)(2 * _numVertices), 0); } diff --git a/libraries/entities-renderer/src/RenderablePolyVoxEntityItem.cpp b/libraries/entities-renderer/src/RenderablePolyVoxEntityItem.cpp index 5d0d337cda..ef0d8d4449 100644 --- a/libraries/entities-renderer/src/RenderablePolyVoxEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderablePolyVoxEntityItem.cpp @@ -1848,7 +1848,10 @@ void PolyVoxEntityRenderer::doRender(RenderArgs* args) { glm::mat4 rotation = glm::mat4_cast(BillboardModeHelpers::getBillboardRotation(_position, _orientation, _billboardMode, usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition())); Transform transform(glm::translate(_position) * rotation * _lastVoxelToLocalMatrix); - batch.setModelTransform(transform); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } batch.setInputFormat(_vertexFormat); batch.setInputBuffer(gpu::Stream::POSITION, _mesh->getVertexBuffer()._buffer, 0, sizeof(PolyVox::PositionMaterialNormal)); diff --git a/libraries/entities-renderer/src/RenderableProceduralParticleEffectEntityItem.h b/libraries/entities-renderer/src/RenderableProceduralParticleEffectEntityItem.h index deb3f70f33..3f16e574ee 100644 --- a/libraries/entities-renderer/src/RenderableProceduralParticleEffectEntityItem.h +++ b/libraries/entities-renderer/src/RenderableProceduralParticleEffectEntityItem.h @@ -46,7 +46,7 @@ private: QString _particleUpdateData; Procedural _updateProcedural; QString _particleRenderData; - Procedural _renderProcedural; + Procedural _renderProcedural { false }; // No AA on Particles size_t _numParticles { 0 }; size_t _particlePropTextureDim { 128 }; // 2^ceil(log2(sqrt(10,000))) diff --git a/libraries/entities-renderer/src/RenderableShapeEntityItem.cpp b/libraries/entities-renderer/src/RenderableShapeEntityItem.cpp index e4f42b6133..e7c671cb1b 100644 --- a/libraries/entities-renderer/src/RenderableShapeEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableShapeEntityItem.cpp @@ -1,6 +1,7 @@ // // Created by Bradley Austin Davis on 2016/05/09 // Copyright 2013 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -128,7 +129,10 @@ void ShapeEntityRenderer::doRender(RenderArgs* args) { transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode, usePrimaryFrustum ? 
BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition(), _shape < EntityShape::Cube || _shape > EntityShape::Icosahedron)); - batch.setModelTransform(transform); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } Pipeline pipelineType = getPipelineType(materials); if (pipelineType == Pipeline::PROCEDURAL) { diff --git a/libraries/entities-renderer/src/RenderableTextEntityItem.cpp b/libraries/entities-renderer/src/RenderableTextEntityItem.cpp index 9aec4c47f1..e644a5fdc7 100644 --- a/libraries/entities-renderer/src/RenderableTextEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableTextEntityItem.cpp @@ -4,6 +4,7 @@ // // Created by Brad Hefta-Gaub on 8/6/14. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -163,7 +164,10 @@ void TextEntityRenderer::doRender(RenderArgs* args) { bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0; transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode, usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition())); - batch.setModelTransform(transform); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } Pipeline pipelineType = getPipelineType(materials); if (pipelineType == Pipeline::PROCEDURAL) { @@ -374,7 +378,10 @@ void entities::TextPayload::render(RenderArgs* args) { } transform.postTranslate(glm::vec3(-0.5, 0.5, 1.0f + EPSILON / dimensions.z)); transform.setScale(scale); - batch.setModelTransform(transform); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } glm::vec2 bounds = glm::vec2(dimensions.x - (textRenderable->_leftMargin + textRenderable->_rightMargin), dimensions.y - (textRenderable->_topMargin + textRenderable->_bottomMargin)); textRenderer->draw(batch, textRenderable->_font, { textRenderable->_text, textColor, effectColor, { textRenderable->_leftMargin / scale, -textRenderable->_topMargin / scale }, diff --git a/libraries/entities-renderer/src/RenderableTextEntityItem.h b/libraries/entities-renderer/src/RenderableTextEntityItem.h index 782b4d4f34..e537ef9ca3 100644 --- a/libraries/entities-renderer/src/RenderableTextEntityItem.h +++ b/libraries/entities-renderer/src/RenderableTextEntityItem.h @@ -107,6 +107,7 @@ public: protected: QUuid _entityID; std::weak_ptr _textRenderer; + Transform _prevRenderTransform; int _geometryID { 0 }; }; diff --git a/libraries/entities-renderer/src/RenderableWebEntityItem.cpp b/libraries/entities-renderer/src/RenderableWebEntityItem.cpp index 400de6cf72..b9ba013eee 100644 --- a/libraries/entities-renderer/src/RenderableWebEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableWebEntityItem.cpp @@ -2,6 +2,7 @@ // Created by Bradley Austin Davis on 2015/05/12 // Copyright 2013 High 
Fidelity, Inc. // Copyright 2020 Vircadia contributors. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -324,13 +325,16 @@ void WebEntityRenderer::doRender(RenderArgs* args) { bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0; transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode, usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition())); - batch.setModelTransform(transform); + batch.setModelTransform(transform, _prevRenderTransform); + if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _prevRenderTransform = transform; + } // Turn off jitter for these entities - batch.pushProjectionJitter(); + batch.pushProjectionJitterEnabled(false); DependencyManager::get()->bindWebBrowserProgram(batch, transparent, forward); DependencyManager::get()->renderQuad(batch, topLeft, bottomRight, texMin, texMax, color, _geometryId); - batch.popProjectionJitter(); + batch.popProjectionJitterEnabled(); batch.setResourceTexture(0, nullptr); } diff --git a/libraries/entities-renderer/src/paintStroke.slf b/libraries/entities-renderer/src/paintStroke.slf index eb46be1e20..2d4d8e4d26 100644 --- a/libraries/entities-renderer/src/paintStroke.slf +++ b/libraries/entities-renderer/src/paintStroke.slf @@ -5,6 +5,7 @@ // // Created by Eric Levin on 8/10/2015 // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -23,12 +24,15 @@ LAYOUT(binding=0) uniform sampler2D _texture; +<@include render-utils/ShaderConstants.h@> + <@if not HIFI_USE_FORWARD@> - layout(location=0) in vec3 _normalWS; + layout(location=RENDER_UTILS_ATTR_NORMAL_WS) in vec3 _normalWS; + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS; <@endif@> -layout(location=1) in vec2 _texCoord; -layout(location=2) in vec4 _color; -layout(location=3) in float _distanceFromCenter; +layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec2 _texCoord; +layout(location=RENDER_UTILS_ATTR_COLOR) in vec4 _color; +layout(location=2) in float _distanceFromCenter; void main(void) { vec4 texel = texture(_texture, _texCoord); @@ -37,9 +41,9 @@ void main(void) { <@if not HIFI_USE_FORWARD@> <@if HIFI_USE_TRANSLUCENT@> - packDeferredFragmentTranslucent(evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb, DEFAULT_ROUGHNESS); + packDeferredFragmentTranslucentUnlit(_prevPositionCS, evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb); <@else@> - packDeferredFragmentUnlit(evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb); + packDeferredFragmentUnlit(_prevPositionCS, evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb); <@endif@> <@else@> _fragColor0 = texel; diff --git a/libraries/entities-renderer/src/paintStroke.slv b/libraries/entities-renderer/src/paintStroke.slv index cf91438746..bdf31012f0 100644 --- a/libraries/entities-renderer/src/paintStroke.slv +++ b/libraries/entities-renderer/src/paintStroke.slv @@ -5,6 +5,7 @@ // // Created by Eric Levin on 7/20/15. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. 
// // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -12,18 +13,22 @@ <@include gpu/Inputs.slh@> <@include gpu/Color.slh@> + <@include gpu/Transform.slh@> <$declareStandardTransform()$> <@include paintStroke.slh@> <$declarePolyLineBuffers()$> +<@include render-utils/ShaderConstants.h@> + <@if not HIFI_USE_FORWARD@> - layout(location=0) out vec3 _normalWS; + layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS; + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS; <@endif@> -layout(location=1) out vec2 _texCoord; -layout(location=2) out vec4 _color; -layout(location=3) out float _distanceFromCenter; +layout(location=RENDER_UTILS_ATTR_TEXCOORD01) out vec2 _texCoord; +layout(location=RENDER_UTILS_ATTR_COLOR) out vec4 _color; +layout(location=2) out float _distanceFromCenter; void main(void) { PolylineVertex vertex = getPolylineVertex(gl_VertexID / 2); @@ -54,14 +59,17 @@ void main(void) { posEye.z += _distanceFromCenter * vertex.binormalAndHalfWidth.w * binormalEye.z; <$transformEyeToClipPos(cam, posEye, gl_Position)$> <@if not HIFI_USE_FORWARD@> + <$transformEyeToPrevClipPos(cam, posEye, _prevPositionCS)$> <$transformEyeToWorldDir(cam, normalEye, _normalWS)$> <@endif@> } else { vec3 normal = vertex.normal.xyz; position.xyz += _distanceFromCenter * vertex.binormalAndHalfWidth.w * binormal; +<@if HIFI_USE_FORWARD@> <$transformModelToClipPos(cam, obj, position, gl_Position)$> -<@if not HIFI_USE_FORWARD@> +<@else@> + <$transformModelToClipPosAndPrevClipPos(cam, obj, position, gl_Position, _prevPositionCS)$> <$transformModelToWorldDir(cam, obj, normal, _normalWS)$> <@endif@> } -} \ No newline at end of file +} diff --git a/libraries/entities-renderer/src/polyvox.slf b/libraries/entities-renderer/src/polyvox.slf index 3456823081..f5251fe1f8 100644 --- a/libraries/entities-renderer/src/polyvox.slf +++ b/libraries/entities-renderer/src/polyvox.slf @@ -5,6 +5,7 @@ // // Created by Seth Alves on 2015-8-3 // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -36,6 +37,8 @@ <@if HIFI_USE_FORWARD@> layout(location=RENDER_UTILS_ATTR_POSITION_ES) in vec4 _positionES; + <@else@> + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS; <@endif@> layout(location=RENDER_UTILS_ATTR_POSITION_MS) in vec3 _positionMS; layout(location=RENDER_UTILS_ATTR_NORMAL_WS) in vec3 _normalWS; @@ -88,6 +91,7 @@ void main(void) { <@if not HIFI_USE_FORWARD@> packDeferredFragment( + _prevPositionCS, normalize(_normalWS), 1.0, diffuse, diff --git a/libraries/entities-renderer/src/polyvox.slv b/libraries/entities-renderer/src/polyvox.slv index 34547cef1a..794d56645d 100644 --- a/libraries/entities-renderer/src/polyvox.slv +++ b/libraries/entities-renderer/src/polyvox.slv @@ -4,6 +4,7 @@ // Generated on <$_SCRIBE_DATE$> // // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -23,6 +24,7 @@ layout(location=RENDER_UTILS_ATTR_POSITION_ES) out vec4 _positionES; <@endif@> layout(location=RENDER_UTILS_ATTR_POSITION_MS) out vec3 _positionMS; + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS; layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS; <@endif@> @@ -34,7 +36,7 @@ void main(void) { <$transformModelToClipPos(cam, obj, inPosition, gl_Position)$> <@else@> <@if not HIFI_USE_FORWARD@> - <$transformModelToClipPos(cam, obj, inPosition, gl_Position)$> + <$transformModelToClipPosAndPrevClipPos(cam, obj, inPosition, gl_Position, _prevPositionCS)$> <@else@> <$transformModelToEyeAndClipPos(cam, obj, inPosition, _positionES, gl_Position)$> <@endif@> diff --git a/libraries/entities-renderer/src/proceduralParticle.slf b/libraries/entities-renderer/src/proceduralParticle.slf index e2ad5bf7ff..7f752e844f 100644 --- a/libraries/entities-renderer/src/proceduralParticle.slf +++ b/libraries/entities-renderer/src/proceduralParticle.slf @@ -132,8 +132,11 @@ void main(void) { #endif <@if not HIFI_USE_TRANSLUCENT@> + // Particles have AA disabled so this doesn't matter + vec4 PREV_POSITION_CS = vec4(0.0, 0.0, 0.0, 1.0); if (emissiveAmount > 0.0) { packDeferredFragmentLightmap( + PREV_POSITION_CS, normal, 1.0, diffuse, @@ -142,6 +145,7 @@ void main(void) { emissive); } else { packDeferredFragment( + PREV_POSITION_CS, normal, 1.0, diffuse, diff --git a/libraries/entities-renderer/src/textured_particle.slf b/libraries/entities-renderer/src/textured_particle.slf index 04b74771a0..1f7b95d201 100644 --- a/libraries/entities-renderer/src/textured_particle.slf +++ b/libraries/entities-renderer/src/textured_particle.slf @@ -5,6 +5,7 @@ // textured_particle.frag // // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -38,11 +39,13 @@ void main(void) { _fragColor0 = vec4(1.0); <@endif@> <@else@> + // Particles have AA disabled so this doesn't matter + vec4 PREV_POSITION_CS = vec4(0.0, 0.0, 0.0, 1.0); vec3 NORMAL = vec3(1.0, 0.0, 0.0); <@if not HIFI_USE_TRANSLUCENT@> - packDeferredFragmentUnlit(NORMAL, albedo.a, albedo.rgb); + packDeferredFragmentUnlit(PREV_POSITION_CS, NORMAL, albedo.a, albedo.rgb); <@else@> - packDeferredFragmentTranslucent(NORMAL, albedo.a, albedo.rgb, DEFAULT_ROUGHNESS); + packDeferredFragmentTranslucent(PREV_POSITION_CS, NORMAL, albedo.a, albedo.rgb, DEFAULT_ROUGHNESS); <@endif@> <@endif@> } diff --git a/libraries/entities/src/EntityTypes.h b/libraries/entities/src/EntityTypes.h index ab3233e639..2b14e417df 100644 --- a/libraries/entities/src/EntityTypes.h +++ b/libraries/entities/src/EntityTypes.h @@ -2,9 +2,9 @@ // EntityTypes.h // libraries/entities/src // -// Created by Brad Hefta-Gaub on 12/4/13. +// Created by Brad Hefta-Gaub on December 4th, 2013. // Copyright 2013 High Fidelity, Inc. -// Copyright 2023 Overte e.V. +// Copyright 2023-2025 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -91,7 +91,7 @@ public: * "Material"Modifies the existing materials on entities and avatars. * {@link Entities.EntityProperties-Material|EntityProperties-Material} * "Sound"Plays a sound. 
- * {@link Entities.EntityProperties-Material|EntityProperties-Sound} + * {@link Entities.EntityProperties-Sound|EntityProperties-Sound} * * * @typedef {string} Entities.EntityType diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp index 86b0df982a..931d6ad4e5 100644 --- a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp +++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 10/27/2014. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -50,10 +51,16 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] = (&::gpu::gl::GLBackend::do_setModelTransform), (&::gpu::gl::GLBackend::do_setViewTransform), (&::gpu::gl::GLBackend::do_setProjectionTransform), - (&::gpu::gl::GLBackend::do_setProjectionJitter), + (&::gpu::gl::GLBackend::do_setProjectionJitterEnabled), + (&::gpu::gl::GLBackend::do_setProjectionJitterSequence), + (&::gpu::gl::GLBackend::do_setProjectionJitterScale), (&::gpu::gl::GLBackend::do_setViewportTransform), (&::gpu::gl::GLBackend::do_setDepthRangeTransform), + (&::gpu::gl::GLBackend::do_saveViewProjectionTransform), + (&::gpu::gl::GLBackend::do_setSavedViewProjectionTransform), + (&::gpu::gl::GLBackend::do_copySavedViewProjectionTransformToBuffer), + (&::gpu::gl::GLBackend::do_setPipeline), (&::gpu::gl::GLBackend::do_setStateBlendFactor), (&::gpu::gl::GLBackend::do_setStateScissorRect), @@ -268,12 +275,10 @@ bool GLBackend::availableMemoryKnown() { } GLBackend::GLBackend(bool syncCache) { - _pipeline._cameraCorrectionBuffer._buffer->flush(); initShaderBinaryCache(); } GLBackend::GLBackend() { - _pipeline._cameraCorrectionBuffer._buffer->flush(); initShaderBinaryCache(); } @@ -319,19 +324,8 @@ void GLBackend::renderPassTransfer(const Batch& batch) { case Batch::COMMAND_drawIndexedInstanced: case Batch::COMMAND_multiDrawIndirect: case Batch::COMMAND_multiDrawIndexedIndirect: - { - Vec2u outputSize{ 1,1 }; - - auto framebuffer = acquire(_output._framebuffer); - if (framebuffer) { - outputSize.x = framebuffer->getWidth(); - outputSize.y = framebuffer->getHeight(); - } else if (glm::dot(_transform._projectionJitter, _transform._projectionJitter)>0.0f) { - qCWarning(gpugllogging) << "Jittering needs to have a frame buffer to be set"; - } - - _transform.preUpdate(_commandIndex, _stereo, outputSize); - } + case Batch::COMMAND_copySavedViewProjectionTransformToBuffer: // We need to store this transform state in the transform buffer + preUpdateTransform(); break; case Batch::COMMAND_disableContextStereo: @@ -346,7 +340,11 @@ void GLBackend::renderPassTransfer(const Batch& batch) { case Batch::COMMAND_setViewportTransform: case Batch::COMMAND_setViewTransform: case Batch::COMMAND_setProjectionTransform: - case Batch::COMMAND_setProjectionJitter: + case Batch::COMMAND_setProjectionJitterEnabled: + case Batch::COMMAND_setProjectionJitterSequence: + case Batch::COMMAND_setProjectionJitterScale: + case Batch::COMMAND_saveViewProjectionTransform: + case Batch::COMMAND_setSavedViewProjectionTransform: case Batch::COMMAND_setContextMirrorViewCorrection: { CommandCall call = _commandCalls[(*command)]; @@ -385,6 +383,9 @@ void GLBackend::renderPassDraw(const Batch& batch) { case Batch::COMMAND_setModelTransform: case Batch::COMMAND_setViewTransform: case Batch::COMMAND_setProjectionTransform: + case 
Batch::COMMAND_saveViewProjectionTransform: + case Batch::COMMAND_setSavedViewProjectionTransform: + case Batch::COMMAND_setProjectionJitterSequence: break; case Batch::COMMAND_draw: @@ -410,7 +411,6 @@ void GLBackend::renderPassDraw(const Batch& batch) { //case Batch::COMMAND_setModelTransform: //case Batch::COMMAND_setViewTransform: //case Batch::COMMAND_setProjectionTransform: - case Batch::COMMAND_setProjectionJitter: case Batch::COMMAND_setViewportTransform: case Batch::COMMAND_setDepthRangeTransform: case Batch::COMMAND_setContextMirrorViewCorrection: @@ -555,7 +555,7 @@ void GLBackend::render(const Batch& batch) { _stereo._enable = false; } // Reset jitter - _transform._projectionJitter = Vec2(0.0f, 0.0f); + _transform._projectionJitter._isEnabled = false; { GL_PROFILE_RANGE(render_gpu_gl_detail, "Transfer"); @@ -579,6 +579,14 @@ void GLBackend::render(const Batch& batch) { // Restore the saved stereo state for the next batch _stereo._enable = savedStereo; + + if (batch._mustUpdatePreviousModels) { + // Update object transform history for when the batch will be reexecuted + for (auto& objectTransform : batch._objects) { + objectTransform._previousModel = objectTransform._model; + } + batch._mustUpdatePreviousModels = false; + } } @@ -621,11 +629,11 @@ void GLBackend::do_restoreContextViewCorrection(const Batch& batch, size_t param } void GLBackend::do_setContextMirrorViewCorrection(const Batch& batch, size_t paramOffset) { - bool prevMirrorViewCorrection = _transform._mirrorViewCorrection; - _transform._mirrorViewCorrection = batch._params[paramOffset]._uint != 0; + bool prevMirrorViewCorrection = _transform._presentFrame.mirrorViewCorrection; + _transform._presentFrame.mirrorViewCorrection = batch._params[paramOffset]._uint != 0; - if (_transform._correction.correction != glm::mat4()) { - setCameraCorrection(_transform._mirrorViewCorrection ? _transform._flippedCorrection : _transform._unflippedCorrection, _transform._correction.prevView, false); + if (_transform._presentFrame.correction != glm::mat4()) { + updatePresentFrame(_transform._presentFrame.mirrorViewCorrection ? _transform._presentFrame.flippedCorrection : _transform._presentFrame.unflippedCorrection, false); _transform._invalidView = true; } } @@ -992,28 +1000,26 @@ void GLBackend::recycle() const { _textureManagement._transferEngine->manageMemory(); } -void GLBackend::setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool primary, bool reset) { - auto invCorrection = glm::inverse(correction); - auto invPrevView = glm::inverse(prevRenderView); - _transform._correction.prevView = (reset ? Mat4() : prevRenderView); - _transform._correction.prevViewInverse = (reset ? 
Mat4() : invPrevView); - _transform._correction.correction = correction; - _transform._correction.correctionInverse = invCorrection; +void GLBackend::updatePresentFrame(const Mat4& correction, bool primary) { + _transform._presentFrame.correction = correction; + _transform._presentFrame.correctionInverse = glm::inverse(correction); - if (!_inRenderTransferPass) { - _pipeline._cameraCorrectionBuffer._buffer->setSubData(0, _transform._correction); - _pipeline._cameraCorrectionBuffer._buffer->flush(); + // Update previous views of saved transforms + for (auto& viewProjState : _transform._savedTransforms) { + viewProjState._state._previousCorrectedView = viewProjState._state._correctedView; + viewProjState._state._previousProjection = viewProjState._state._projection; } if (primary) { - _transform._unflippedCorrection = _transform._correction.correction; - quat flippedRotation = glm::quat_cast(_transform._unflippedCorrection); + _transform._projectionJitter._currentSampleIndex++; + _transform._presentFrame.unflippedCorrection = _transform._presentFrame.correction; + quat flippedRotation = glm::quat_cast(_transform._presentFrame.unflippedCorrection); flippedRotation.y *= -1.0f; flippedRotation.z *= -1.0f; - vec3 flippedTranslation = _transform._unflippedCorrection[3]; + vec3 flippedTranslation = _transform._presentFrame.unflippedCorrection[3]; flippedTranslation.x *= -1.0f; - _transform._flippedCorrection = glm::translate(glm::mat4_cast(flippedRotation), flippedTranslation); - _transform._mirrorViewCorrection = false; + _transform._presentFrame.flippedCorrection = glm::translate(glm::mat4_cast(flippedRotation), flippedTranslation); + _transform._presentFrame.mirrorViewCorrection = false; } } diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h index 8a1648a01b..ec85d0fb90 100644 --- a/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h +++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackend.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 10/27/2014. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -26,7 +27,7 @@ #include #include -#include +#include #include "GLShared.h" @@ -121,7 +122,8 @@ public: // Shutdown rendering and persist any required resources void shutdown() override; - void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool primary, bool reset = false) override; + void updatePresentFrame(const Mat4& correction = Mat4(), bool primary = true) override; + void render(const Batch& batch) final override; // This call synchronize the Full Backend cache with the current GLState @@ -177,10 +179,16 @@ public: virtual void do_setModelTransform(const Batch& batch, size_t paramOffset) final; virtual void do_setViewTransform(const Batch& batch, size_t paramOffset) final; virtual void do_setProjectionTransform(const Batch& batch, size_t paramOffset) final; - virtual void do_setProjectionJitter(const Batch& batch, size_t paramOffset) final; + virtual void do_setProjectionJitterEnabled(const Batch& batch, size_t paramOffset) final; + virtual void do_setProjectionJitterSequence(const Batch& batch, size_t paramOffset) final; + virtual void do_setProjectionJitterScale(const Batch& batch, size_t paramOffset) final; virtual void do_setViewportTransform(const Batch& batch, size_t paramOffset) final; virtual void do_setDepthRangeTransform(const Batch& batch, size_t paramOffset) final; + virtual void do_saveViewProjectionTransform(const Batch& batch, size_t paramOffset) final; + virtual void do_setSavedViewProjectionTransform(const Batch& batch, size_t paramOffset) final; + virtual void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) = 0; + // Uniform Stage virtual void do_setUniformBuffer(const Batch& batch, size_t paramOffset) final; @@ -299,8 +307,9 @@ protected: virtual bool supportsBindless() const { return false; } static const size_t INVALID_OFFSET = (size_t)-1; - bool _inRenderTransferPass{ false }; - int _currentDraw{ -1 }; + static const uint INVALID_SAVED_CAMERA_SLOT = (uint)-1; + bool _inRenderTransferPass { false }; + int _currentDraw { -1 }; struct FrameTrash { GLsync fence = nullptr; @@ -387,11 +396,13 @@ protected: // between the time when a was recorded and the time(s) when it is // executed // Prev is the previous correction used at previous frame - struct CameraCorrection { + struct PresentFrame { mat4 correction; mat4 correctionInverse; - mat4 prevView; - mat4 prevViewInverse; + + mat4 unflippedCorrection; + mat4 flippedCorrection; + bool mirrorViewCorrection { false }; }; struct TransformStageState { @@ -413,34 +424,60 @@ protected: #endif using TransformCameras = std::vector; + struct ViewProjectionState { + Transform _view; + Transform _correctedView; + Transform _previousCorrectedView; + Mat4 _projection; + Mat4 _previousProjection; + bool _viewIsCamera; + + void copyExceptPrevious(const ViewProjectionState& other) { + _view = other._view; + _correctedView = other._correctedView; + _projection = other._projection; + _viewIsCamera = other._viewIsCamera; + } + }; + + struct SaveTransform { + ViewProjectionState _state; + size_t _cameraOffset { INVALID_OFFSET }; + }; + TransformCamera _camera; TransformCameras _cameras; + std::array _savedTransforms; mutable std::map _drawCallInfoOffsets; - GLuint _objectBuffer{ 0 }; - GLuint _cameraBuffer{ 0 }; - GLuint _drawCallInfoBuffer{ 0 }; - GLuint _objectBufferTexture{ 0 }; - size_t _cameraUboSize{ 0 }; - bool _viewIsCamera{ false }; - bool _skybox{ false }; - Transform _view; - 
CameraCorrection _correction; - bool _viewCorrectionEnabled{ true }; - mat4 _unflippedCorrection; - mat4 _flippedCorrection; - bool _mirrorViewCorrection{ false }; + GLuint _objectBuffer { 0 }; + GLuint _cameraBuffer { 0 }; + GLuint _drawCallInfoBuffer { 0 }; + GLuint _objectBufferTexture { 0 }; + size_t _cameraUboSize { 0 }; + ViewProjectionState _viewProjectionState; + uint _currentSavedTransformSlot { INVALID_SAVED_CAMERA_SLOT }; + bool _skybox { false }; + PresentFrame _presentFrame; + bool _viewCorrectionEnabled { true }; - Mat4 _projection; - Vec4i _viewport{ 0, 0, 1, 1 }; - Vec2 _depthRange{ 0.0f, 1.0f }; - Vec2 _projectionJitter{ 0.0f, 0.0f }; - bool _invalidView{ false }; - bool _invalidProj{ false }; - bool _invalidViewport{ false }; + struct Jitter { + std::vector _offsetSequence; + Vec2 _offset { 0.0f }; + float _scale { 0.f }; + unsigned int _currentSampleIndex { 0 }; + bool _isEnabled { false }; + }; - bool _enabledDrawcallInfoBuffer{ false }; + Jitter _projectionJitter; + Vec4i _viewport { 0, 0, 1, 1 }; + Vec2 _depthRange { 0.0f, 1.0f }; + bool _invalidView { false }; + bool _invalidProj { false }; + bool _invalidViewport { false }; + + bool _enabledDrawcallInfoBuffer { false }; using Pair = std::pair; using List = std::list; @@ -448,11 +485,13 @@ protected: mutable List::const_iterator _camerasItr; mutable size_t _currentCameraOffset{ INVALID_OFFSET }; - void preUpdate(size_t commandIndex, const StereoState& stereo, Vec2u framebufferSize); + void pushCameraBufferElement(const StereoState& stereo, const StereoState& prevStereo, TransformCameras& cameras) const; + void preUpdate(size_t commandIndex, const StereoState& stereo, const StereoState& prevStereo); void update(size_t commandIndex, const StereoState& stereo) const; void bindCurrentCamera(int stereoSide) const; } _transform; + void preUpdateTransform(); virtual void transferTransformState(const Batch& batch) const = 0; struct UniformStageState { @@ -522,25 +561,16 @@ protected: PipelineReference _pipeline{}; GLuint _program{ 0 }; - bool _cameraCorrection{ false }; - GLShader* _programShader{ nullptr }; - bool _invalidProgram{ false }; + GLShader* _programShader { nullptr }; + bool _invalidProgram { false }; - BufferView _cameraCorrectionBuffer{ gpu::BufferView(std::make_shared(sizeof(CameraCorrection), nullptr)) }; - BufferView _cameraCorrectionBufferIdentity{ gpu::BufferView( - std::make_shared(sizeof(CameraCorrection), nullptr)) }; + State::Data _stateCache { State::DEFAULT }; + State::Signature _stateSignatureCache { 0 }; - State::Data _stateCache{ State::DEFAULT }; - State::Signature _stateSignatureCache{ 0 }; + GLState* _state { nullptr }; + bool _invalidState { false }; - GLState* _state{ nullptr }; - bool _invalidState{ false }; - - PipelineStageState() { - _cameraCorrectionBuffer.edit() = CameraCorrection(); - _cameraCorrectionBufferIdentity.edit() = CameraCorrection(); - _cameraCorrectionBufferIdentity._buffer->flush(); - } + PipelineStageState() {} } _pipeline; // Backend dependent compilation of the shader diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackendPipeline.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLBackendPipeline.cpp index e94d2986ee..5c2219e720 100644 --- a/libraries/gpu-gl-common/src/gpu/gl/GLBackendPipeline.cpp +++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackendPipeline.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 3/8/2015. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
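The Jitter state above is fed from the recording side through the new Batch calls: a fixed sub-pixel offset sequence is uploaded once, an amplitude is set, and jitter is toggled around the draws that want it. A minimal sketch of how a render task might configure it, assuming a TAA-style Halton(2,3) sequence; setupJitter and SEQUENCE_LENGTH are illustrative names, not mandated by this patch:

    // Radical inverse in the given base; consecutive samples form a low-discrepancy sequence.
    static float haltonSample(int index, int base) {
        float result = 0.0f;
        float f = 1.0f;
        while (index > 0) {
            f /= base;
            result += f * (index % base);
            index /= base;
        }
        return result;
    }

    void setupJitter(gpu::Batch& batch) {
        const int SEQUENCE_LENGTH = 8;
        std::vector<glm::vec2> sequence;
        sequence.reserve(SEQUENCE_LENGTH);
        for (int i = 0; i < SEQUENCE_LENGTH; i++) {
            // Offsets roughly in [-0.5, 0.5] pixels; the backend rescales them to clip space per viewport.
            sequence.emplace_back(haltonSample(i + 1, 2) - 0.5f, haltonSample(i + 1, 3) - 0.5f);
        }
        batch.setProjectionJitterSequence(sequence.data(), sequence.size());
        batch.setProjectionJitterScale(0.5f);    // smaller values mean less blur but more aliasing
        batch.setProjectionJitterEnabled(true);  // jitter the projections of subsequent draws
    }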
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -37,7 +38,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) { reset(_pipeline._pipeline); _pipeline._program = 0; - _pipeline._cameraCorrection = false; _pipeline._programShader = nullptr; _pipeline._invalidProgram = true; @@ -63,7 +63,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) { _pipeline._program = glprogram; _pipeline._programShader = pipelineObject->_program; _pipeline._invalidProgram = true; - _pipeline._cameraCorrection = pipelineObject->_cameraCorrection; } // Now for the state @@ -79,16 +78,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) { // THis should be done on Pipeline::update... if (_pipeline._invalidProgram) { glUseProgram(_pipeline._program); - if (_pipeline._cameraCorrection) { - // Invalidate uniform buffer cache slot - _uniform._buffers[gpu::slot::buffer::CameraCorrection].reset(); - auto& cameraCorrectionBuffer = _transform._viewCorrectionEnabled ? - _pipeline._cameraCorrectionBuffer._buffer : - _pipeline._cameraCorrectionBufferIdentity._buffer; - // Because we don't sync Buffers in the bindUniformBuffer, let s force this buffer synced - getBufferID(*cameraCorrectionBuffer); - bindUniformBuffer(gpu::slot::buffer::CameraCorrection, cameraCorrectionBuffer, 0, sizeof(CameraCorrection)); - } (void)CHECK_GL_ERROR(); _pipeline._invalidProgram = false; } diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackendTransform.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLBackendTransform.cpp index 67ab502b6b..db228281e3 100644 --- a/libraries/gpu-gl-common/src/gpu/gl/GLBackendTransform.cpp +++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackendTransform.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 3/8/2015. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -18,20 +19,48 @@ void GLBackend::do_setModelTransform(const Batch& batch, size_t paramOffset) { } void GLBackend::do_setViewTransform(const Batch& batch, size_t paramOffset) { - _transform._view = batch._transforms.get(batch._params[paramOffset]._uint); - _transform._viewIsCamera = batch._params[paramOffset + 1]._uint != 0; + _transform._viewProjectionState._view = batch._transforms.get(batch._params[paramOffset]._uint); + // View history is only supported with saved transforms. If setViewTransform is called (rather than setSavedViewProjectionTransform), + // the view will NOT be corrected in the present thread, so the previousCorrectedView + // should be the same as the view. 
+ _transform._viewProjectionState._previousCorrectedView = _transform._viewProjectionState._view; + _transform._viewProjectionState._previousProjection = _transform._viewProjectionState._projection; + _transform._viewProjectionState._viewIsCamera = batch._params[paramOffset + 1]._uint != 0; _transform._invalidView = true; + // The current view / proj doesn't correspond to a saved camera slot + _transform._currentSavedTransformSlot = INVALID_SAVED_CAMERA_SLOT; } void GLBackend::do_setProjectionTransform(const Batch& batch, size_t paramOffset) { - memcpy(glm::value_ptr(_transform._projection), batch.readData(batch._params[paramOffset]._uint), sizeof(Mat4)); + memcpy(glm::value_ptr(_transform._viewProjectionState._projection), batch.readData(batch._params[paramOffset]._uint), sizeof(Mat4)); _transform._invalidProj = true; + // The current view / proj doesn't correspond to a saved camera slot + _transform._currentSavedTransformSlot = INVALID_SAVED_CAMERA_SLOT; } -void GLBackend::do_setProjectionJitter(const Batch& batch, size_t paramOffset) { - _transform._projectionJitter.x = batch._params[paramOffset]._float; - _transform._projectionJitter.y = batch._params[paramOffset+1]._float; +void GLBackend::do_setProjectionJitterEnabled(const Batch& batch, size_t paramOffset) { + _transform._projectionJitter._isEnabled = (batch._params[paramOffset]._int & 1) != 0; _transform._invalidProj = true; + // The current view / proj doesn't correspond to a saved camera slot + _transform._currentSavedTransformSlot = INVALID_SAVED_CAMERA_SLOT; +} + +void GLBackend::do_setProjectionJitterSequence(const Batch& batch, size_t paramOffset) { + auto count = batch._params[paramOffset + 0]._uint; + auto& projectionJitter = _transform._projectionJitter; + projectionJitter._offsetSequence.resize(count); + if (count) { + memcpy(projectionJitter._offsetSequence.data(), batch.readData(batch._params[paramOffset + 1]._uint), sizeof(Vec2) * count); + projectionJitter._offset = projectionJitter._offsetSequence[projectionJitter._currentSampleIndex % count]; + } else { + projectionJitter._offset = Vec2(0.0f); + } +} + +void GLBackend::do_setProjectionJitterScale(const Batch& batch, size_t paramOffset) { + // Should be 2 for one pixel amplitude as clip space is between -1 and 1, but lower values give less blur + // but more aliasing... 
+ _transform._projectionJitter._scale = 2.0f * batch._params[paramOffset + 0]._float; } void GLBackend::do_setViewportTransform(const Batch& batch, size_t paramOffset) { @@ -90,55 +119,80 @@ void GLBackend::syncTransformStateCache() { Mat4 modelView; auto modelViewInv = glm::inverse(modelView); - _transform._view.evalFromRawMatrix(modelViewInv); + _transform._viewProjectionState._view.evalFromRawMatrix(modelViewInv); glDisableVertexAttribArray(gpu::Stream::DRAW_CALL_INFO); _transform._enabledDrawcallInfoBuffer = false; } -void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const StereoState& stereo, Vec2u framebufferSize) { +void GLBackend::TransformStageState::pushCameraBufferElement(const StereoState& stereo, const StereoState& prevStereo, TransformCameras& cameras) const { + const float jitterAmplitude = _projectionJitter._scale; + const Vec2 jitterScale = Vec2(jitterAmplitude * float(_projectionJitter._isEnabled & 1)) / Vec2(_viewport.z, _viewport.w); + const Vec2 jitter = jitterScale * _projectionJitter._offset; + + if (stereo.isStereo()) { +#ifdef GPU_STEREO_CAMERA_BUFFER + cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, prevStereo, _viewProjectionState._correctedView, + _viewProjectionState._previousCorrectedView, jitter), + _camera.getEyeCamera(1, stereo, prevStereo, _viewProjectionState._correctedView, + _viewProjectionState._previousCorrectedView, jitter))); +#else + cameras.push_back((_camera.getEyeCamera(0, stereo, prevStereo, _viewProjectionState._correctedView, + _viewProjectionState._previousCorrectedView, jitter))); + cameras.push_back((_camera.getEyeCamera(1, stereo, prevStereo, _viewProjectionState._correctedView, + _viewProjectionState._previousCorrectedView, jitter))); +#endif + } else { +#ifdef GPU_STEREO_CAMERA_BUFFER + cameras.push_back(CameraBufferElement( + _camera.getMonoCamera(_skybox, _viewProjectionState._correctedView, _viewProjectionState._previousCorrectedView, + _viewProjectionState._previousProjection, jitter))); +#else + cameras.push_back((_camera.getMonoCamera(_skybox, _viewProjectionState._correctedView, + _viewProjectionState._previousCorrectedView, _viewProjectionState._previousProjection, + jitter))); +#endif + } +} + +void GLBackend::preUpdateTransform() { + _transform.preUpdate(_commandIndex, _stereo, _prevStereo); +} + +void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const StereoState& stereo, const StereoState& prevStereo) { // Check all the dirty flags and update the state accordingly if (_invalidViewport) { _camera._viewport = glm::vec4(_viewport); } if (_invalidProj) { - _camera._projection = _projection; + _camera._projection = _viewProjectionState._projection; } if (_invalidView) { // Apply the correction - if (_viewIsCamera && (_viewCorrectionEnabled && _correction.correction != glm::mat4())) { - // FIXME should I switch to using the camera correction buffer in Transform.slf and leave this out? 
- Transform result; - _view.mult(result, _view, _correction.correctionInverse); - if (_skybox) { - result.setTranslation(vec3()); - } - _view = result; + if (_viewProjectionState._viewIsCamera && (_viewCorrectionEnabled && _presentFrame.correction != glm::mat4())) { + Transform::mult(_viewProjectionState._correctedView, _viewProjectionState._view, _presentFrame.correctionInverse); + } else { + _viewProjectionState._correctedView = _viewProjectionState._view; + } + + if (_skybox) { + _viewProjectionState._correctedView.setTranslation(vec3()); } // This is when the _view matrix gets assigned - _view.getInverseMatrix(_camera._view); + _viewProjectionState._correctedView.getInverseMatrix(_camera._view); } if (_invalidView || _invalidProj || _invalidViewport) { size_t offset = _cameraUboSize * _cameras.size(); - Vec2 finalJitter = _projectionJitter / Vec2(framebufferSize); _cameraOffsets.push_back(TransformStageState::Pair(commandIndex, offset)); - if (stereo.isStereo()) { -#ifdef GPU_STEREO_CAMERA_BUFFER - _cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, _view, finalJitter), _camera.getEyeCamera(1, stereo, _view, finalJitter))); -#else - _cameras.push_back((_camera.getEyeCamera(0, stereo, _view, finalJitter))); - _cameras.push_back((_camera.getEyeCamera(1, stereo, _view, finalJitter))); -#endif - } else { -#ifdef GPU_STEREO_CAMERA_BUFFER - _cameras.push_back(CameraBufferElement(_camera.getMonoCamera(_view, finalJitter))); -#else - _cameras.push_back((_camera.getMonoCamera(_view, finalJitter))); -#endif + pushCameraBufferElement(stereo, prevStereo, _cameras); + if (_currentSavedTransformSlot != INVALID_SAVED_CAMERA_SLOT) { + // Save the offset of the saved camera slot in the camera buffer. Can be used to copy + // that data, or (in the future) to reuse the offset. + _savedTransforms[_currentSavedTransformSlot]._cameraOffset = offset; } } @@ -177,3 +231,28 @@ void GLBackend::resetTransformStage() { glDisableVertexAttribArray(gpu::Stream::DRAW_CALL_INFO); _transform._enabledDrawcallInfoBuffer = false; } + +void GLBackend::do_saveViewProjectionTransform(const Batch& batch, size_t paramOffset) { + auto slotId = batch._params[paramOffset + 0]._uint; + slotId = std::min(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT); + + auto& savedTransform = _transform._savedTransforms[slotId]; + savedTransform._cameraOffset = INVALID_OFFSET; + _transform._currentSavedTransformSlot = slotId; + // If we are saving this transform to a save slot, then it means we are tracking the history of the view + // so copy the previous corrected view to the transform state. 
+ _transform._viewProjectionState._previousCorrectedView = savedTransform._state._previousCorrectedView; + _transform._viewProjectionState._previousProjection = savedTransform._state._previousProjection; + preUpdateTransform(); + savedTransform._state.copyExceptPrevious(_transform._viewProjectionState); +} + +void GLBackend::do_setSavedViewProjectionTransform(const Batch& batch, size_t paramOffset) { + auto slotId = batch._params[paramOffset + 0]._uint; + slotId = std::min(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT); + + _transform._viewProjectionState = _transform._savedTransforms[slotId]._state; + _transform._invalidView = true; + _transform._invalidProj = true; + _transform._currentSavedTransformSlot = slotId; +} diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLPipeline.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLPipeline.cpp index 52e10eb417..0d7b28e223 100644 --- a/libraries/gpu-gl-common/src/gpu/gl/GLPipeline.cpp +++ b/libraries/gpu-gl-common/src/gpu/gl/GLPipeline.cpp @@ -1,6 +1,7 @@ // // Created by Bradley Austin Davis on 2016/05/15 // Copyright 2013-2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -49,11 +50,6 @@ GLPipeline* GLPipeline::sync(GLBackend& backend, const Pipeline& pipeline) { Backend::setGPUObject(pipeline, object); } - // Special case for view correction matrices, any pipeline that declares the correction buffer - // uniform will automatically have it provided without any client code necessary. - // Required for stable lighting in the HMD. - auto reflection = shader->getReflection(backend.getShaderDialect(), backend.getShaderVariant()); - object->_cameraCorrection = reflection.validUniformBuffer(gpu::slot::buffer::CameraCorrection); object->_program = programObject; object->_state = stateObject; diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLPipeline.h b/libraries/gpu-gl-common/src/gpu/gl/GLPipeline.h index a102e33b14..b58ca3be6d 100644 --- a/libraries/gpu-gl-common/src/gpu/gl/GLPipeline.h +++ b/libraries/gpu-gl-common/src/gpu/gl/GLPipeline.h @@ -1,6 +1,7 @@ // // Created by Bradley Austin Davis on 2016/05/15 // Copyright 2013-2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -18,9 +19,6 @@ public: GLShader* _program { nullptr }; GLState* _state { nullptr }; - // Bit of a hack, any pipeline can need the camera correction buffer at execution time, so - // we store whether a given pipeline has declared the uniform buffer for it. - bool _cameraCorrection{ false }; }; } } diff --git a/libraries/gpu-gl/src/gpu/gl41/GL41Backend.h b/libraries/gpu-gl/src/gpu/gl41/GL41Backend.h index 881487c9db..967d94a687 100644 --- a/libraries/gpu-gl/src/gpu/gl41/GL41Backend.h +++ b/libraries/gpu-gl/src/gpu/gl41/GL41Backend.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 10/27/2014. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -167,6 +168,8 @@ protected: bool bindResourceBuffer(uint32_t slot, const BufferPointer& buffer) override; void releaseResourceBuffer(uint32_t slot) override; + void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) override; + // Output stage void do_blit(const Batch& batch, size_t paramOffset) override; diff --git a/libraries/gpu-gl/src/gpu/gl41/GL41BackendTransform.cpp b/libraries/gpu-gl/src/gpu/gl41/GL41BackendTransform.cpp index b11707eba2..e49405d3cf 100644 --- a/libraries/gpu-gl/src/gpu/gl41/GL41BackendTransform.cpp +++ b/libraries/gpu-gl/src/gpu/gl41/GL41BackendTransform.cpp @@ -4,12 +4,15 @@ // // Created by Sam Gateau on 3/8/2015. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // #include "GL41Backend.h" +#include "gpu/gl/GLBuffer.h" + using namespace gpu; using namespace gpu::gl41; @@ -97,4 +100,34 @@ void GL41Backend::updateTransform(const Batch& batch) { } (void)CHECK_GL_ERROR(); -} \ No newline at end of file +} + +void GL41Backend::do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) { + auto slotId = batch._params[paramOffset + 0]._uint; + BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 1]._uint); + auto dstOffset = batch._params[paramOffset + 2]._uint; + size_t size = _transform._cameraUboSize; + + slotId = std::min(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT); + const auto& savedTransform = _transform._savedTransforms[slotId]; + + if ((dstOffset + size) > buffer->getBufferCPUMemSize()) { + qCWarning(gpugllogging) << "Copying saved TransformCamera data out of bounds of uniform buffer"; + size = (size_t)std::max((ptrdiff_t)buffer->getBufferCPUMemSize() - (ptrdiff_t)dstOffset, 0); + } + if (savedTransform._cameraOffset == INVALID_OFFSET) { + qCWarning(gpugllogging) << "Saved TransformCamera data has an invalid transform offset. Copy aborted."; + return; + } + + // Sync BufferObject + auto* object = syncGPUObject(*buffer); + if (object) { + glBindBuffer(GL_COPY_READ_BUFFER, _transform._cameraBuffer); + glBindBuffer(GL_COPY_WRITE_BUFFER, object->_buffer); + glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, savedTransform._cameraOffset, dstOffset, size); + glBindBuffer(GL_COPY_READ_BUFFER, 0); + glBindBuffer(GL_COPY_WRITE_BUFFER, 0); + (void)CHECK_GL_ERROR(); + } +} diff --git a/libraries/gpu-gl/src/gpu/gl45/GL45Backend.h b/libraries/gpu-gl/src/gpu/gl45/GL45Backend.h index 3e7392e366..e0b921237e 100644 --- a/libraries/gpu-gl/src/gpu/gl45/GL45Backend.h +++ b/libraries/gpu-gl/src/gpu/gl45/GL45Backend.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 10/27/2014. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -269,6 +270,8 @@ protected: bool bindResourceBuffer(uint32_t slot, const BufferPointer& buffer) override; void releaseResourceBuffer(uint32_t slot) override; + void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) override; + // Output stage void do_blit(const Batch& batch, size_t paramOffset) override; diff --git a/libraries/gpu-gl/src/gpu/gl45/GL45BackendTexture.cpp b/libraries/gpu-gl/src/gpu/gl45/GL45BackendTexture.cpp index bb31903d8e..3d30ebf03e 100644 --- a/libraries/gpu-gl/src/gpu/gl45/GL45BackendTexture.cpp +++ b/libraries/gpu-gl/src/gpu/gl45/GL45BackendTexture.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 1/19/2015. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -167,7 +168,7 @@ public: glSamplerParameteri(result, GL_TEXTURE_WRAP_T, GLTexture::WRAP_MODES[sampler.getWrapModeV()]); glSamplerParameteri(result, GL_TEXTURE_WRAP_R, GLTexture::WRAP_MODES[sampler.getWrapModeW()]); - glSamplerParameterf(result, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy()); + glSamplerParameterf(result, GL_TEXTURE_MAX_ANISOTROPY, sampler.getMaxAnisotropy()); glSamplerParameterfv(result, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor()); glSamplerParameterf(result, GL_TEXTURE_MIN_LOD, sampler.getMinMip()); @@ -314,7 +315,7 @@ void GL45Texture::syncSampler() const { glTextureParameteri(_id, GL_TEXTURE_WRAP_T, WRAP_MODES[sampler.getWrapModeV()]); glTextureParameteri(_id, GL_TEXTURE_WRAP_R, WRAP_MODES[sampler.getWrapModeW()]); - glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy()); + glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY, sampler.getMaxAnisotropy()); glTextureParameterfv(_id, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor()); glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, sampler.getMinMip()); diff --git a/libraries/gpu-gl/src/gpu/gl45/GL45BackendTransform.cpp b/libraries/gpu-gl/src/gpu/gl45/GL45BackendTransform.cpp index f389c5f62c..0fd328f353 100644 --- a/libraries/gpu-gl/src/gpu/gl45/GL45BackendTransform.cpp +++ b/libraries/gpu-gl/src/gpu/gl45/GL45BackendTransform.cpp @@ -4,12 +4,15 @@ // // Created by Sam Gateau on 3/8/2015. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
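GL_TEXTURE_MAX_ANISOTROPY is the core spelling (OpenGL 4.6 / ARB_texture_filter_anisotropic) of the old EXT token and has the same numeric value, so the sampler changes above are a rename rather than a behavior change. The maximum supported degree still varies per driver, so sampler setup code often clamps against the queried limit; a minimal sketch, assuming GL 4.6 headers and the result/sampler variables from the sampler-creation code above:

    GLfloat requested = sampler.getMaxAnisotropy();
    GLfloat supported = 1.0f;
    glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY, &supported);
    // Anisotropy is only meaningful in [1, supported]; 1.0 effectively disables anisotropic filtering.
    glSamplerParameterf(result, GL_TEXTURE_MAX_ANISOTROPY, std::min(std::max(requested, 1.0f), supported));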
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // #include "GL45Backend.h" +#include "gpu/gl/GLBuffer.h" + using namespace gpu; using namespace gpu::gl45; @@ -101,4 +104,30 @@ void GL45Backend::updateTransform(const Batch& batch) { } (void)CHECK_GL_ERROR(); -} \ No newline at end of file +} + +void GL45Backend::do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) { + auto slotId = batch._params[paramOffset + 0]._uint; + BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 1]._uint); + auto dstOffset = batch._params[paramOffset + 2]._uint; + size_t size = _transform._cameraUboSize; + + slotId = std::min(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT); + const auto& savedTransform = _transform._savedTransforms[slotId]; + + if ((dstOffset + size) > buffer->getBufferCPUMemSize()) { + qCWarning(gpugllogging) << "Copying saved TransformCamera data out of bounds of uniform buffer"; + size = (size_t)std::max((ptrdiff_t)buffer->getBufferCPUMemSize() - (ptrdiff_t)dstOffset, 0); + } + if (savedTransform._cameraOffset == INVALID_OFFSET) { + qCWarning(gpugllogging) << "Saved TransformCamera data has an invalid transform offset. Copy aborted."; + return; + } + + // Sync BufferObject + auto* object = syncGPUObject(*buffer); + if (object) { + glCopyNamedBufferSubData(_transform._cameraBuffer, object->_buffer, savedTransform._cameraOffset, dstOffset, size); + (void)CHECK_GL_ERROR(); + } +} diff --git a/libraries/gpu-gles/src/gpu/gles/GLESBackend.h b/libraries/gpu-gles/src/gpu/gles/GLESBackend.h index 636518c85a..65eb85fd6b 100644 --- a/libraries/gpu-gles/src/gpu/gles/GLESBackend.h +++ b/libraries/gpu-gles/src/gpu/gles/GLESBackend.h @@ -164,6 +164,8 @@ protected: bool bindResourceBuffer(uint32_t slot, const BufferPointer& buffer) override; void releaseResourceBuffer(uint32_t slot) override; + void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) override; + // Output stage void do_blit(const Batch& batch, size_t paramOffset) override; diff --git a/libraries/gpu-gles/src/gpu/gles/GLESBackendTransform.cpp b/libraries/gpu-gles/src/gpu/gles/GLESBackendTransform.cpp index 7e1ee0da3b..14cc9a5a4d 100644 --- a/libraries/gpu-gles/src/gpu/gles/GLESBackendTransform.cpp +++ b/libraries/gpu-gles/src/gpu/gles/GLESBackendTransform.cpp @@ -10,6 +10,8 @@ // #include "GLESBackend.h" +#include "gpu/gl/GLBuffer.h" + using namespace gpu; using namespace gpu::gles; @@ -99,4 +101,34 @@ void GLESBackend::updateTransform(const Batch& batch) { } (void)CHECK_GL_ERROR(); -} \ No newline at end of file +} + +void GLESBackend::do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) { + auto slotId = batch._params[paramOffset + 0]._uint; + BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 1]._uint); + auto dstOffset = batch._params[paramOffset + 2]._uint; + size_t size = _transform._cameraUboSize; + + slotId = std::min(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT); + const auto& savedTransform = _transform._savedTransforms[slotId]; + + if ((dstOffset + size) > buffer->getBufferCPUMemSize()) { + qCWarning(gpugllogging) << "Copying saved TransformCamera data out of bounds of uniform buffer"; + size = (size_t)std::max((ptrdiff_t)buffer->getBufferCPUMemSize() - (ptrdiff_t)dstOffset, 0); + } + if (savedTransform._cameraOffset == INVALID_OFFSET) { + qCWarning(gpugllogging) << "Saved TransformCamera data has an invalid transform offset. 
Copy aborted."; + return; + } + + // Sync BufferObject + auto* object = syncGPUObject(*buffer); + if (object) { + glBindBuffer(GL_COPY_READ_BUFFER, _transform._cameraBuffer); + glBindBuffer(GL_COPY_WRITE_BUFFER, object->_buffer); + glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, savedTransform._cameraOffset, dstOffset, size); + glBindBuffer(GL_COPY_READ_BUFFER, 0); + glBindBuffer(GL_COPY_WRITE_BUFFER, 0); + (void)CHECK_GL_ERROR(); + } +} diff --git a/libraries/gpu/src/gpu/Backend.cpp b/libraries/gpu/src/gpu/Backend.cpp new file mode 100644 index 0000000000..ddda894306 --- /dev/null +++ b/libraries/gpu/src/gpu/Backend.cpp @@ -0,0 +1,126 @@ +// +// Backend.cpp +// interface/src/gpu +// +// Created by Olivier Prat on 05/25/2018. +// Copyright 2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +#include "Backend.h" + +using namespace gpu; + +// Counters for Buffer and Texture usage in GPU/Context + +ContextMetricSize Backend::freeGPUMemSize; + +ContextMetricCount Backend::bufferCount; +ContextMetricSize Backend::bufferGPUMemSize; + +ContextMetricCount Backend::textureResidentCount; +ContextMetricCount Backend::textureFramebufferCount; +ContextMetricCount Backend::textureResourceCount; +ContextMetricCount Backend::textureExternalCount; + +ContextMetricSize Backend::textureResidentGPUMemSize; +ContextMetricSize Backend::textureFramebufferGPUMemSize; +ContextMetricSize Backend::textureResourceGPUMemSize; +ContextMetricSize Backend::textureExternalGPUMemSize; + +ContextMetricCount Backend::texturePendingGPUTransferCount; +ContextMetricSize Backend::texturePendingGPUTransferMemSize; + +ContextMetricSize Backend::textureResourcePopulatedGPUMemSize; +ContextMetricSize Backend::textureResourceIdealGPUMemSize; + +void Backend::setStereoState(const StereoState& stereo) { + _prevStereo = _stereo; + _stereo = stereo; +} + +Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, + const StereoState& stereo, + const StereoState& prevStereo, + const Transform& view, + const Transform& previousView, + Vec2 normalizedJitter) const { + TransformCamera result = *this; + Transform eyeView = view; + Transform eyePreviousView = previousView; + if (!stereo._skybox) { + eyeView.postTranslate(-Vec3(stereo._eyeViews[eye][3])); + eyePreviousView.postTranslate(-Vec3(prevStereo._eyeViews[eye][3])); + } else { + // FIXME: If "skybox" the ipd is set to 0 for now, let s try to propose a better solution for this in the future + eyePreviousView.setTranslation(vec3()); + } + result._projection = stereo._eyeProjections[eye]; + Mat4 previousProjection = prevStereo._eyeProjections[eye]; + + // Apply jitter to projections + // We divided by the framebuffer size, which was double-sized, to normalize the jitter, but we want a normal amount of jitter + // for each eye, so we multiply by 2 to get back to normal + //normalizedJitter.x *= 2.0f; + result._projection[2][0] += normalizedJitter.x; + result._projection[2][1] += normalizedJitter.y; + + previousProjection[2][0] += normalizedJitter.x; + previousProjection[2][1] += normalizedJitter.y; + + result.recomputeDerived(eyeView, eyePreviousView, previousProjection); + + result._stereoInfo = Vec4(1.0f, (float)eye, 1.0f / result._viewport.z, 1.0f / result._viewport.w); + + return result; +} + +Backend::TransformCamera Backend::TransformCamera::getMonoCamera(bool isSkybox, + const Transform& view, + 
Transform previousView, + Mat4 previousProjection, + Vec2 normalizedJitter) const { + TransformCamera result = *this; + + if (isSkybox) { + previousView.setTranslation(vec3()); + } + result._projection[2][0] += normalizedJitter.x; + result._projection[2][1] += normalizedJitter.y; + + previousProjection[2][0] += normalizedJitter.x; + previousProjection[2][1] += normalizedJitter.y; + + result.recomputeDerived(view, previousView, previousProjection); + + result._stereoInfo = Vec4(0.0f, 0.0f, 1.0f / result._viewport.z, 1.0f / result._viewport.w); + return result; +} + +const Backend::TransformCamera& Backend::TransformCamera::recomputeDerived(const Transform& view, + const Transform& previousView, + const Mat4& previousProjection) const { + _projectionInverse = glm::inverse(_projection); + + // Get the viewEyeToWorld matrix from the transformView as passed to the gpu::Batch + // this is the "_viewInverse" fed to the shader + // Generate the "_view" matrix as well from the xform + view.getMatrix(_viewInverse); + _view = glm::inverse(_viewInverse); + previousView.getMatrix(_previousViewInverse); + _previousView = glm::inverse(_previousViewInverse); + + Mat4 viewUntranslated = _view; + viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f); + _projectionViewUntranslated = _projection * viewUntranslated; + + viewUntranslated = _previousView; + viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f); + _previousProjectionViewUntranslated = previousProjection * viewUntranslated; + + _stereoInfo = Vec4(0.0f); + + return *this; +} diff --git a/libraries/gpu/src/gpu/Backend.h b/libraries/gpu/src/gpu/Backend.h new file mode 100644 index 0000000000..1a4acd6a12 --- /dev/null +++ b/libraries/gpu/src/gpu/Backend.h @@ -0,0 +1,141 @@ +// +// Backend.h +// interface/src/gpu +// +// Created by Olivier Prat on 05/18/2018. +// Copyright 2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. +// +// Distributed under the Apache License, Version 2.0.
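The pair of view-projection matrices computed above (current and previous, both untranslated) is what a velocity/motion-vector pass ultimately consumes: reprojecting the same point with both cameras and subtracting the results gives its screen-space motion. Note that the same jitter offset is added to the current and previous projections, so it cancels out of that difference. A self-contained GLM sketch of the underlying math, with the camera-relative (untranslated) handling simplified away:

    #include <glm/glm.hpp>

    // Screen-space motion (in [0, 1] UV units) of a world-space point between two frames.
    glm::vec2 motionVector(const glm::vec3& positionWS,
                           const glm::mat4& currentViewProj,
                           const glm::mat4& previousViewProj) {
        glm::vec4 currentCS = currentViewProj * glm::vec4(positionWS, 1.0f);
        glm::vec4 previousCS = previousViewProj * glm::vec4(positionWS, 1.0f);
        // Perspective divide to NDC, then remap [-1, 1] to [0, 1] UVs.
        glm::vec2 currentUV = glm::vec2(currentCS) / currentCS.w * 0.5f + 0.5f;
        glm::vec2 previousUV = glm::vec2(previousCS) / previousCS.w * 0.5f + 0.5f;
        return currentUV - previousUV;
    }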
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +#ifndef hifi_gpu_Backend_h +#define hifi_gpu_Backend_h + +#include + +#include "Forward.h" +#include "Batch.h" +#include "Buffer.h" +#include "Framebuffer.h" + +class QImage; + +namespace gpu { +class Context; + +struct ContextStats { +public: + int _ISNumFormatChanges = 0; + int _ISNumInputBufferChanges = 0; + int _ISNumIndexBufferChanges = 0; + + int _RSNumResourceBufferBounded = 0; + int _RSNumTextureBounded = 0; + int _RSAmountTextureMemoryBounded = 0; + + int _DSNumAPIDrawcalls = 0; + int _DSNumDrawcalls = 0; + int _DSNumTriangles = 0; + + int _PSNumSetPipelines = 0; + + ContextStats() {} + ContextStats(const ContextStats& stats) = default; + + void evalDelta(const ContextStats& begin, const ContextStats& end); +}; + +class Backend { +public: + virtual ~Backend() {} + + virtual void shutdown() {} + virtual const std::string& getVersion() const = 0; + + void setStereoState(const StereoState& stereo); + + virtual void render(const Batch& batch) = 0; + virtual void syncCache() = 0; + virtual void syncProgram(const gpu::ShaderPointer& program) = 0; + virtual void recycle() const = 0; + virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) = 0; + virtual void updatePresentFrame(const Mat4& correction = Mat4(), bool primary = true) = 0; + + virtual bool supportedTextureFormat(const gpu::Element& format) = 0; + + // Shared header between C++ and GLSL +#include "TransformCamera_shared.slh" + + class TransformCamera : public _TransformCamera { + public: + const Backend::TransformCamera& recomputeDerived(const Transform& view, const Transform& previousView, const Mat4& previousProjection) const; + // Jitter should be divided by framebuffer size + TransformCamera getMonoCamera(bool isSkybox, const Transform& view, Transform previousView, Mat4 previousProjection, Vec2 normalizedJitter) const; + // Jitter should be divided by framebuffer size + TransformCamera getEyeCamera(int eye, const StereoState& stereo, const StereoState& prevStereo, const Transform& view, const Transform& previousView, + Vec2 normalizedJitter) const; + }; + + template + static void setGPUObject(const U& object, T* gpuObject) { + object.gpuObject.setGPUObject(gpuObject); + } + template + static T* getGPUObject(const U& object) { + return reinterpret_cast(object.gpuObject.getGPUObject()); + } + + void resetStats() const { _stats = ContextStats(); } + void getStats(ContextStats& stats) const { stats = _stats; } + + virtual bool isTextureManagementSparseEnabled() const = 0; + + // These should only be accessed by Backend implementation to report the buffer and texture allocations, + // they are NOT public objects + static ContextMetricSize freeGPUMemSize; + + static ContextMetricCount bufferCount; + static ContextMetricSize bufferGPUMemSize; + + static ContextMetricCount textureResidentCount; + static ContextMetricCount textureFramebufferCount; + static ContextMetricCount textureResourceCount; + static ContextMetricCount textureExternalCount; + + static ContextMetricSize textureResidentGPUMemSize; + static ContextMetricSize textureFramebufferGPUMemSize; + static ContextMetricSize textureResourceGPUMemSize; + static ContextMetricSize textureExternalGPUMemSize; + + static ContextMetricCount texturePendingGPUTransferCount; + static ContextMetricSize texturePendingGPUTransferMemSize; + static ContextMetricSize textureResourcePopulatedGPUMemSize; + static ContextMetricSize 
textureResourceIdealGPUMemSize; + +protected: + virtual bool isStereo() const { + return _stereo.isStereo(); + } + + void getStereoProjections(mat4* eyeProjections) const { + for (int i = 0; i < 2; ++i) { + eyeProjections[i] = _stereo._eyeProjections[i]; + } + } + + void getStereoViews(mat4* eyeViews) const { + for (int i = 0; i < 2; ++i) { + eyeViews[i] = _stereo._eyeViews[i]; + } + } + + friend class Context; + mutable ContextStats _stats; + StereoState _stereo; + StereoState _prevStereo; +}; + +} + +#endif diff --git a/libraries/gpu/src/gpu/Batch.cpp b/libraries/gpu/src/gpu/Batch.cpp index bd4aef9768..0e1da3c4f0 100644 --- a/libraries/gpu/src/gpu/Batch.cpp +++ b/libraries/gpu/src/gpu/Batch.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 10/14/2014. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -53,6 +54,7 @@ Batch::Batch(const std::string& name) { _data.reserve(_dataMax); _objects.reserve(_objectsMax); _drawCallInfos.reserve(_drawCallInfosMax); + _mustUpdatePreviousModels = true; } Batch::~Batch() { @@ -101,17 +103,18 @@ void Batch::clear() { _currentModel = Transform(); _drawcallUniform = 0; _drawcallUniformReset = 0; - _projectionJitter = glm::vec2(0.0f); _enableStereo = true; _enableSkybox = false; + _mustUpdatePreviousModels = true; } size_t Batch::cacheData(size_t size, const void* data) { size_t offset = _data.size(); size_t numBytes = size; _data.resize(offset + numBytes); - memcpy(_data.data() + offset, data, size); - + if (data) { + memcpy(_data.data() + offset, data, size); + } return offset; } @@ -236,6 +239,15 @@ void Batch::setModelTransform(const Transform& model) { ADD_COMMAND(setModelTransform); _currentModel = model; + _previousModel = model; + _invalidModel = true; +} + +void Batch::setModelTransform(const Transform& model, const Transform& previousModel) { + ADD_COMMAND(setModelTransform); + + _currentModel = model; + _previousModel = previousModel; _invalidModel = true; } @@ -252,20 +264,29 @@ void Batch::setProjectionTransform(const Mat4& proj) { _params.emplace_back(cacheData(sizeof(Mat4), &proj)); } -void Batch::setProjectionJitter(float jx, float jy) { - _projectionJitter.x = jx; - _projectionJitter.y = jy; - pushProjectionJitter(jx, jy); +void Batch::setProjectionJitterEnabled(bool isProjectionEnabled) { + _isJitterOnProjectionEnabled = isProjectionEnabled; + pushProjectionJitterEnabled(_isJitterOnProjectionEnabled); } -void Batch::pushProjectionJitter(float jx, float jy) { - ADD_COMMAND(setProjectionJitter); - _params.emplace_back(jx); - _params.emplace_back(jy); +void Batch::pushProjectionJitterEnabled(bool isProjectionEnabled) { + ADD_COMMAND(setProjectionJitterEnabled); + _params.emplace_back(isProjectionEnabled & 1); } -void Batch::popProjectionJitter() { - pushProjectionJitter(_projectionJitter.x, _projectionJitter.y); +void Batch::popProjectionJitterEnabled() { + pushProjectionJitterEnabled(_isJitterOnProjectionEnabled); +} + +void Batch::setProjectionJitterSequence(const Vec2* sequence, size_t count) { + ADD_COMMAND(setProjectionJitterSequence); + _params.emplace_back((uint)count); + _params.emplace_back(cacheData(sizeof(Vec2) * count, sequence)); +} + +void Batch::setProjectionJitterScale(float scale) { + ADD_COMMAND(setProjectionJitterScale); + _params.emplace_back(scale); } void Batch::setViewportTransform(const Vec4i& viewport) { @@ -281,6 +302,34 @@ void 
Batch::setDepthRangeTransform(float nearDepth, float farDepth) { _params.emplace_back(nearDepth); } +void Batch::saveViewProjectionTransform(uint saveSlot) { + ADD_COMMAND(saveViewProjectionTransform); + if (saveSlot >= MAX_TRANSFORM_SAVE_SLOT_COUNT) { + qCWarning(gpulogging) << "Transform save slot" << saveSlot << "exceeds max save slot count of" << MAX_TRANSFORM_SAVE_SLOT_COUNT; + } + _params.emplace_back(saveSlot); +} + +void Batch::setSavedViewProjectionTransform(uint saveSlot) { + ADD_COMMAND(setSavedViewProjectionTransform); + if (saveSlot >= MAX_TRANSFORM_SAVE_SLOT_COUNT) { + qCWarning(gpulogging) << "Transform save slot" << saveSlot << "exceeds max save slot count of" + << MAX_TRANSFORM_SAVE_SLOT_COUNT; + } + _params.emplace_back(saveSlot); +} + +void Batch::copySavedViewProjectionTransformToBuffer(uint saveSlot, const BufferPointer& buffer, Offset offset) { + ADD_COMMAND(copySavedViewProjectionTransformToBuffer); + if (saveSlot >= MAX_TRANSFORM_SAVE_SLOT_COUNT) { + qCWarning(gpulogging) << "Transform save slot" << saveSlot << "exceeds max save slot count of" + << MAX_TRANSFORM_SAVE_SLOT_COUNT; + } + _params.emplace_back(saveSlot); + _params.emplace_back(_buffers.cache(buffer)); + _params.emplace_back(offset); +} + void Batch::setPipeline(const PipelinePointer& pipeline) { ADD_COMMAND(setPipeline); @@ -554,12 +603,15 @@ void Batch::captureDrawCallInfoImpl() { if (_invalidModel) { TransformObject object; _currentModel.getMatrix(object._model); + _previousModel.getMatrix(object._previousModel); // FIXME - we don't want to be using glm::inverse() here but it fixes the flickering issue we are // seeing with planky blocks in toybox. Our implementation of getInverseMatrix() is buggy in cases // of non-uniform scale. We need to fix that. In the mean time, glm::inverse() works. //_model.getInverseMatrix(_object._modelInverse); + //_previousModel.getInverseMatrix(_object._previousModelInverse); object._modelInverse = glm::inverse(object._model); + object._previousModelInverse = glm::inverse(object._previousModel); _objects.emplace_back(object); @@ -757,4 +809,4 @@ void Batch::flush() { } buffer->flush(); } -} \ No newline at end of file +} diff --git a/libraries/gpu/src/gpu/Batch.h b/libraries/gpu/src/gpu/Batch.h index a4e6bf6e05..765fb23964 100644 --- a/libraries/gpu/src/gpu/Batch.h +++ b/libraries/gpu/src/gpu/Batch.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 10/14/2014. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
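Together, the three commands implemented above let a batch capture the exact camera used for one pass, restore it later, and export the backend's matching TransformCamera data into a buffer that a shader binds explicitly; the save slots are also what carry per-view history for motion vectors. A minimal usage sketch; the slot constant, pass structure, and parameter names are illustrative, and the destination buffer is assumed to be large enough to hold a TransformCamera at the given offset:

    const uint MAIN_VIEW_SLOT = 0;  // must be below gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT

    void recordPasses(gpu::Batch& batch, const gpu::BufferPointer& cameraCopy,
                      const Transform& mainView, const glm::mat4& mainProjection,
                      const Transform& secondaryView) {
        // Main pass: set the camera, then snapshot it into a slot (the slot also keeps that camera's previous-frame view).
        batch.setViewTransform(mainView);
        batch.setProjectionTransform(mainProjection);
        batch.saveViewProjectionTransform(MAIN_VIEW_SLOT);
        // ... draw the main view ...

        // Another pass switches the camera.
        batch.setViewTransform(secondaryView);
        // ... draw ...

        // Later: restore the saved camera and copy its TransformCamera into a buffer for explicit binding.
        batch.setSavedViewProjectionTransform(MAIN_VIEW_SLOT);
        batch.copySavedViewProjectionTransformToBuffer(MAIN_VIEW_SLOT, cameraCopy, 0);
    }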
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -42,6 +43,13 @@ class Batch { public: typedef Stream::Slot Slot; + enum { + // This is tied to RenderMirrorTask::MAX_MIRROR_DEPTH and RenderMirrorTask::MAX_MIRRORS_PER_LEVEL + // We have 1 view at mirror depth 0, 3 more at mirror depth 1, 9 more at mirror depth 2, and 27 more at mirror depth 3 + // For each view, we have one slot for the background and one for the primary view, and that's all repeated for the secondary camera + // So this is 2 slots/view/camera * 2 cameras * (1 + 3 + 9 + 27) views + MAX_TRANSFORM_SAVE_SLOT_COUNT = 160 + }; class DrawCallInfo { public: @@ -151,20 +159,20 @@ public: // multi command desctription for multiDrawIndexedIndirect class DrawIndirectCommand { public: - uint _count{ 0 }; - uint _instanceCount{ 0 }; - uint _firstIndex{ 0 }; - uint _baseInstance{ 0 }; + uint _count { 0 }; + uint _instanceCount { 0 }; + uint _firstIndex { 0 }; + uint _baseInstance { 0 }; }; // multi command desctription for multiDrawIndexedIndirect class DrawIndexedIndirectCommand { public: - uint _count{ 0 }; - uint _instanceCount{ 0 }; - uint _firstIndex{ 0 }; - uint _baseVertex{ 0 }; - uint _baseInstance{ 0 }; + uint _count { 0 }; + uint _instanceCount { 0 }; + uint _firstIndex { 0 }; + uint _baseVertex { 0 }; + uint _baseInstance { 0 }; }; // Transform Stage @@ -174,17 +182,24 @@ public: // WARNING: ViewTransform transform from eye space to world space, its inverse is composed // with the ModelTransform to create the equivalent of the gl ModelViewMatrix void setModelTransform(const Transform& model); + void setModelTransform(const Transform& model, const Transform& previousModel); void resetViewTransform() { setViewTransform(Transform(), false); } void setViewTransform(const Transform& view, bool camera = true); void setProjectionTransform(const Mat4& proj); - void setProjectionJitter(float jx = 0.0f, float jy = 0.0f); + void setProjectionJitterEnabled(bool isProjectionEnabled); + void setProjectionJitterSequence(const Vec2* sequence, size_t count); + void setProjectionJitterScale(float scale); // Very simple 1 level stack management of jitter. 
- void pushProjectionJitter(float jx = 0.0f, float jy = 0.0f); - void popProjectionJitter(); + void pushProjectionJitterEnabled(bool isProjectionEnabled); + void popProjectionJitterEnabled(); // Viewport is xy = low left corner in framebuffer, zw = width height of the viewport, expressed in pixels void setViewportTransform(const Vec4i& viewport); void setDepthRangeTransform(float nearDepth, float farDepth); + void saveViewProjectionTransform(uint saveSlot); + void setSavedViewProjectionTransform(uint saveSlot); + void copySavedViewProjectionTransformToBuffer(uint saveSlot, const BufferPointer& buffer, Offset offset); + // Pipeline Stage void setPipeline(const PipelinePointer& pipeline); @@ -202,7 +217,7 @@ public: void setResourceTexture(uint32 slot, const TexturePointer& texture); void setResourceTexture(uint32 slot, const TextureView& view); // not a command, just a shortcut from a TextureView void setResourceTextureTable(const TextureTablePointer& table, uint32 slot = 0); - void setResourceFramebufferSwapChainTexture(uint32 slot, const FramebufferSwapChainPointer& framebuffer, unsigned int swpaChainIndex, unsigned int renderBufferSlot = 0U); // not a command, just a shortcut from a TextureView + void setResourceFramebufferSwapChainTexture(uint32 slot, const FramebufferSwapChainPointer& framebuffer, unsigned int swapChainIndex, unsigned int renderBufferSlot = 0U); // not a command, just a shortcut from a TextureView // Ouput Stage void setFramebuffer(const FramebufferPointer& framebuffer); @@ -312,10 +327,16 @@ public: COMMAND_setModelTransform, COMMAND_setViewTransform, COMMAND_setProjectionTransform, - COMMAND_setProjectionJitter, + COMMAND_setProjectionJitterEnabled, + COMMAND_setProjectionJitterSequence, + COMMAND_setProjectionJitterScale, COMMAND_setViewportTransform, COMMAND_setDepthRangeTransform, + COMMAND_saveViewProjectionTransform, + COMMAND_setSavedViewProjectionTransform, + COMMAND_copySavedViewProjectionTransformToBuffer, + COMMAND_setPipeline, COMMAND_setStateBlendFactor, COMMAND_setStateScissorRect, @@ -497,17 +518,14 @@ public: Bytes _data; static size_t _dataMax; - // SSBO class... layout MUST match the layout in Transform.slh - class TransformObject { - public: - Mat4 _model; - Mat4 _modelInverse; - }; +#include "TransformObject_shared.slh" using TransformObjects = std::vector; bool _invalidModel { true }; Transform _currentModel; - TransformObjects _objects; + Transform _previousModel; + mutable bool _mustUpdatePreviousModels; + mutable TransformObjects _objects; static size_t _objectsMax; BufferCaches _buffers; @@ -525,11 +543,12 @@ public: NamedBatchDataMap _namedData; - uint16_t _drawcallUniform{ 0 }; - uint16_t _drawcallUniformReset{ 0 }; + bool _isJitterOnProjectionEnabled { false }; - glm::vec2 _projectionJitter{ 0.0f, 0.0f }; - bool _enableStereo{ true }; + uint16_t _drawcallUniform { 0 }; + uint16_t _drawcallUniformReset { 0 }; + + bool _enableStereo { true }; bool _enableSkybox { false }; protected: @@ -558,7 +577,7 @@ protected: template size_t Batch::Cache::_max = BATCH_PREALLOCATE_MIN; -} +} // namespace gpu #if defined(NSIGHT_FOUND) diff --git a/libraries/gpu/src/gpu/Context.cpp b/libraries/gpu/src/gpu/Context.cpp index 8dee120555..cb85923033 100644 --- a/libraries/gpu/src/gpu/Context.cpp +++ b/libraries/gpu/src/gpu/Context.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 10/27/2014. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
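Taken together, the transform-stage additions in Batch.h let a caller supply both the current and the previous frame's state for every draw, which is what the velocity/TAA work needs. A hedged sketch that groups the new calls in one place; the jitter table, pipeline and transforms are placeholders, and in the engine the jitter calls would most likely be issued once per frame by the antialiasing setup rather than per draw:

#include <gpu/Batch.h>

// Not code from this PR; purely an illustration of the new transform-stage API.
void recordJitteredDraw(gpu::Batch& batch, const gpu::PipelinePointer& pipeline,
                        const Transform& model, const Transform& previousModel) {
    static const gpu::Vec2 JITTER_TABLE[4] = {          // hypothetical 4-sample sub-pixel pattern
        { 0.500f, 0.333f }, { 0.250f, 0.667f }, { 0.750f, 0.111f }, { 0.125f, 0.444f }
    };
    batch.setProjectionJitterSequence(JITTER_TABLE, 4); // backend cycles through these offsets frame to frame
    batch.setProjectionJitterScale(1.0f);               // global scale applied to the offsets
    batch.setProjectionJitterEnabled(true);

    batch.setPipeline(pipeline);
    batch.setModelTransform(model, previousModel);      // current + last frame's transform -> motion vectors
    batch.draw(gpu::TRIANGLES, 36, 0);                  // vertex setup omitted
}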
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -217,74 +218,6 @@ double Context::getFrameTimerBatchAverage() const { return 0.0; } -const Backend::TransformCamera& Backend::TransformCamera::recomputeDerived(const Transform& xformView) const { - _projectionInverse = glm::inverse(_projection); - - // Get the viewEyeToWorld matrix from the transformView as passed to the gpu::Batch - // this is the "_viewInverse" fed to the shader - // Genetrate the "_view" matrix as well from the xform - xformView.getMatrix(_viewInverse); - _view = glm::inverse(_viewInverse); - - Mat4 viewUntranslated = _view; - viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f); - _projectionViewUntranslated = _projection * viewUntranslated; - - _stereoInfo = Vec4(0.0f); - - return *this; -} - -Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const StereoState& _stereo, const Transform& xformView, Vec2 normalizedJitter) const { - TransformCamera result = *this; - Transform offsetTransform = xformView; - if (!_stereo._skybox) { - offsetTransform.postTranslate(-Vec3(_stereo._eyeViews[eye][3])); - } else { - // FIXME: If "skybox" the ipd is set to 0 for now, let s try to propose a better solution for this in the future - } - result._projection = _stereo._eyeProjections[eye]; - normalizedJitter.x *= 2.0f; - result._projection[2][0] += normalizedJitter.x; - result._projection[2][1] += normalizedJitter.y; - result.recomputeDerived(offsetTransform); - - result._stereoInfo = Vec4(1.0f, (float)eye, 0.0f, 0.0f); - - return result; -} - -Backend::TransformCamera Backend::TransformCamera::getMonoCamera(const Transform& xformView, Vec2 normalizedJitter) const { - TransformCamera result = *this; - result._projection[2][0] += normalizedJitter.x; - result._projection[2][1] += normalizedJitter.y; - result.recomputeDerived(xformView); - return result; -} - -// Counters for Buffer and Texture usage in GPU/Context - -ContextMetricSize Backend::freeGPUMemSize; - -ContextMetricCount Backend::bufferCount; -ContextMetricSize Backend::bufferGPUMemSize; - -ContextMetricCount Backend::textureResidentCount; -ContextMetricCount Backend::textureFramebufferCount; -ContextMetricCount Backend::textureResourceCount; -ContextMetricCount Backend::textureExternalCount; - -ContextMetricSize Backend::textureResidentGPUMemSize; -ContextMetricSize Backend::textureFramebufferGPUMemSize; -ContextMetricSize Backend::textureResourceGPUMemSize; -ContextMetricSize Backend::textureExternalGPUMemSize; - -ContextMetricCount Backend::texturePendingGPUTransferCount; -ContextMetricSize Backend::texturePendingGPUTransferMemSize; - -ContextMetricSize Backend::textureResourcePopulatedGPUMemSize; -ContextMetricSize Backend::textureResourceIdealGPUMemSize; - Size Context::getFreeGPUMemSize() { return Backend::freeGPUMemSize.getValue(); } diff --git a/libraries/gpu/src/gpu/Context.h b/libraries/gpu/src/gpu/Context.h index ebc81f14e9..439aeec6bf 100644 --- a/libraries/gpu/src/gpu/Context.h +++ b/libraries/gpu/src/gpu/Context.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 10/27/2014. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
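The TransformCamera helpers removed here (presumably relocated alongside the new Backend.h that Context.h now includes) applied the jitter by offsetting the third column of the projection matrix, which after the perspective divide amounts to a uniform sub-pixel shift of the image. A stand-alone illustration of that arithmetic with glm, assuming the jitter starts out in pixels:

#include <glm/glm.hpp>

// Mirrors what getMonoCamera()/getEyeCamera() did with their normalizedJitter parameter.
glm::mat4 jitterProjection(glm::mat4 proj, glm::vec2 jitterPixels, glm::vec2 framebufferSize) {
    glm::vec2 ndcOffset = 2.0f * jitterPixels / framebufferSize;  // one pixel is 2/size in NDC
    // Column 2 multiplies eye-space z; since clip-space w is proportional to -z for a perspective
    // projection, the divide by w turns this into a constant sub-pixel shift of the whole image.
    proj[2][0] += ndcOffset.x;
    proj[2][1] += ndcOffset.y;
    return proj;
}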
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -15,131 +16,14 @@ #include #include -#include - -#include "Forward.h" -#include "Batch.h" -#include "Buffer.h" #include "Texture.h" #include "Pipeline.h" -#include "Framebuffer.h" #include "Frame.h" #include "PointerStorage.h" - -class QImage; +#include "Backend.h" namespace gpu { -struct ContextStats { -public: - uint32_t _ISNumFormatChanges { 0 }; - uint32_t _ISNumInputBufferChanges { 0 }; - uint32_t _ISNumIndexBufferChanges { 0 }; - - uint32_t _RSNumResourceBufferBounded { 0 }; - uint32_t _RSNumTextureBounded { 0 }; - uint64_t _RSAmountTextureMemoryBounded { 0 }; - - uint32_t _DSNumAPIDrawcalls { 0 }; - uint32_t _DSNumDrawcalls { 0 }; - uint32_t _DSNumTriangles { 0 }; - - uint32_t _PSNumSetPipelines { 0 }; - - ContextStats() {} - ContextStats(const ContextStats& stats) = default; - - void evalDelta(const ContextStats& begin, const ContextStats& end); -}; - -class Backend { -public: - virtual ~Backend(){}; - - virtual void shutdown() {} - virtual const std::string& getVersion() const = 0; - - void setStereoState(const StereoState& stereo) { _stereo = stereo; } - - virtual void render(const Batch& batch) = 0; - virtual void syncCache() = 0; - virtual void syncProgram(const gpu::ShaderPointer& program) = 0; - virtual void recycle() const = 0; - virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) = 0; - virtual void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool primary, bool reset = false) {} - - virtual bool supportedTextureFormat(const gpu::Element& format) = 0; - - // Shared header between C++ and GLSL -#include "TransformCamera_shared.slh" - - class TransformCamera : public _TransformCamera { - public: - const Backend::TransformCamera& recomputeDerived(const Transform& xformView) const; - // Jitter should be divided by framebuffer size - TransformCamera getMonoCamera(const Transform& xformView, Vec2 normalizedJitter) const; - // Jitter should be divided by framebuffer size - TransformCamera getEyeCamera(int eye, const StereoState& stereo, const Transform& xformView, Vec2 normalizedJitter) const; - }; - - template - static void setGPUObject(const U& object, T* gpuObject) { - object.gpuObject.setGPUObject(gpuObject); - } - template - static T* getGPUObject(const U& object) { - return reinterpret_cast(object.gpuObject.getGPUObject()); - } - - void resetStats() const { _stats = ContextStats(); } - void getStats(ContextStats& stats) const { stats = _stats; } - - virtual bool isTextureManagementSparseEnabled() const = 0; - - // These should only be accessed by Backend implementation to report the buffer and texture allocations, - // they are NOT public objects - static ContextMetricSize freeGPUMemSize; - - static ContextMetricCount bufferCount; - static ContextMetricSize bufferGPUMemSize; - - static ContextMetricCount textureResidentCount; - static ContextMetricCount textureFramebufferCount; - static ContextMetricCount textureResourceCount; - static ContextMetricCount textureExternalCount; - - static ContextMetricSize textureResidentGPUMemSize; - static ContextMetricSize textureFramebufferGPUMemSize; - static ContextMetricSize textureResourceGPUMemSize; - static ContextMetricSize textureExternalGPUMemSize; - - static ContextMetricCount texturePendingGPUTransferCount; - static ContextMetricSize texturePendingGPUTransferMemSize; - static ContextMetricSize textureResourcePopulatedGPUMemSize; - static 
ContextMetricSize textureResourceIdealGPUMemSize; - - virtual bool isStereo() const { - return _stereo.isStereo(); - } - - void getStereoProjections(mat4* eyeProjections) const { - for (int i = 0; i < 2; ++i) { - eyeProjections[i] = _stereo._eyeProjections[i]; - } - } -protected: - - void getStereoViews(mat4* eyeViews) const { - for (int i = 0; i < 2; ++i) { - eyeViews[i] = _stereo._eyeViews[i]; - } - } - - friend class Context; - mutable ContextStats _stats; - StereoState _stereo; -}; - class Context { public: using Size = Resource::Size; diff --git a/libraries/gpu/src/gpu/DrawUnitQuad.slv b/libraries/gpu/src/gpu/DrawUnitQuad.slv new file mode 100644 index 0000000000..b87b9d0e8d --- /dev/null +++ b/libraries/gpu/src/gpu/DrawUnitQuad.slv @@ -0,0 +1,28 @@ +<@include gpu/Config.slh@> +<$VERSION_HEADER$> +// <$_SCRIBE_FILENAME$> +// Generated on <$_SCRIBE_DATE$> +// Draw the unit quad [-1,-1 -> 1,1]. +// No transform used. +// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed +// +// Created by Olivier Prat on 10/22/2018 +// Copyright 2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +void main(void) { + const float depth = 1.0; + const vec4 UNIT_QUAD[4] = vec4[4]( + vec4(-1.0, -1.0, depth, 1.0), + vec4(1.0, -1.0, depth, 1.0), + vec4(-1.0, 1.0, depth, 1.0), + vec4(1.0, 1.0, depth, 1.0) + ); + vec4 pos = UNIT_QUAD[gl_VertexID]; + + gl_Position = pos; +} diff --git a/libraries/gpu/src/gpu/FrameIOKeys.h b/libraries/gpu/src/gpu/FrameIOKeys.h index 5a5cfdf2b1..3df1ef9d9a 100644 --- a/libraries/gpu/src/gpu/FrameIOKeys.h +++ b/libraries/gpu/src/gpu/FrameIOKeys.h @@ -1,6 +1,7 @@ // // Created by Bradley Austin Davis on 2018/10/14 // Copyright 2013-2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -80,7 +81,7 @@ constexpr const char* pose = "pose"; constexpr const char* profileRanges = "profileRanges"; constexpr const char* program = "program"; constexpr const char* programs = "programs"; -constexpr const char* projectionJitter = "projectionJitter"; +constexpr const char* isJitterOnProjectionEnabled = "isJitterOnProjectionEnabled"; constexpr const char* queries = "queries"; constexpr const char* sampleCount = "sampleCount"; constexpr const char* sampleMask = "sampleMask"; @@ -150,10 +151,16 @@ constexpr const char* COMMAND_NAMES[] = { "setModelTransform", "setViewTransform", "setProjectionTransform", - "setProjectionJitter", + "setProjectionJitterEnabled", + "setProjectionJitterSequence", + "setProjectionJitterScale", "setViewportTransform", "setDepthRangeTransform", + "saveViewProjectionTransform", + "setSavedViewProjectionTransform", + "copySavedViewProjectionTransformToBuffer", + "setPipeline", "setStateBlendFactor", "setStateScissorRect", diff --git a/libraries/gpu/src/gpu/FrameReader.cpp b/libraries/gpu/src/gpu/FrameReader.cpp index 96f6b99f7a..d156720a1f 100644 --- a/libraries/gpu/src/gpu/FrameReader.cpp +++ b/libraries/gpu/src/gpu/FrameReader.cpp @@ -1,6 +1,7 @@ // // Created by Bradley Austin Davis on 2018/10/14 // Copyright 2013-2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
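DrawUnitQuad.slv builds its four corners from gl_VertexID, so a pass that uses it binds no vertex or index buffers at all; it only needs a pipeline built on the program and a four-vertex triangle strip. A sketch of the C++ side, with the pipeline left as a placeholder:

#include <gpu/Batch.h>

// Sketch for any program built on the new DrawUnitQuad vertex stage (for example the new drawWhite program).
void drawFullScreenQuad(gpu::Batch& batch, const gpu::PipelinePointer& pipeline) {
    batch.setPipeline(pipeline);
    // No vertex or index buffers are bound: the shader expands gl_VertexID 0..3 into the unit quad
    // [-1,-1]..[1,1] at depth 1.0, so a 4-vertex triangle strip covers the viewport.
    batch.draw(gpu::TRIANGLE_STRIP, 4, 0);
}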
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -740,7 +741,7 @@ BatchPointer Deserializer::readBatch(const json& node) { auto& batch = *result; readOptional(batch._enableStereo, node, keys::stereo); readOptional(batch._enableSkybox, node, keys::skybox); - readOptionalTransformed(batch._projectionJitter, node, keys::projectionJitter, &readVec2); + readOptional(batch._isJitterOnProjectionEnabled, node, keys::isJitterOnProjectionEnabled); readOptional(batch._drawcallUniform, node, keys::drawcallUniform); readOptional(batch._drawcallUniformReset, node, keys::drawcallUniformReset); readPointerCache(batch._textures, node, keys::textures, textures); diff --git a/libraries/gpu/src/gpu/FrameWriter.cpp b/libraries/gpu/src/gpu/FrameWriter.cpp index 761f37a620..f3e632bcad 100644 --- a/libraries/gpu/src/gpu/FrameWriter.cpp +++ b/libraries/gpu/src/gpu/FrameWriter.cpp @@ -1,6 +1,7 @@ // // Created by Bradley Austin Davis on 2018/10/14 // Copyright 2013-2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -220,8 +221,8 @@ json Serializer::writeBatch(const Batch& batch) { if (batch._enableStereo != DEFAULT_BATCH._enableStereo) { batchNode[keys::stereo] = batch._enableStereo; } - if (batch._projectionJitter != DEFAULT_BATCH._projectionJitter) { - batchNode[keys::projectionJitter] = writeVec2(batch._projectionJitter); + if (batch._isJitterOnProjectionEnabled != DEFAULT_BATCH._isJitterOnProjectionEnabled) { + batchNode[keys::isJitterOnProjectionEnabled] = batch._isJitterOnProjectionEnabled; } if (batch._drawcallUniform != DEFAULT_BATCH._drawcallUniform) { batchNode[keys::drawcallUniform] = batch._drawcallUniform; diff --git a/libraries/gpu/src/gpu/Query.h b/libraries/gpu/src/gpu/Query.h index 912901951c..e327e14cc0 100644 --- a/libraries/gpu/src/gpu/Query.h +++ b/libraries/gpu/src/gpu/Query.h @@ -4,6 +4,7 @@ // // Created by Niraj Venkat on 7/7/2015. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -66,6 +67,8 @@ namespace gpu { double getGPUAverage() const; double getBatchAverage() const; + const std::string& name() const { return _name; } + protected: static const int QUERY_QUEUE_SIZE { 4 }; diff --git a/libraries/gpu/src/gpu/Texture.h b/libraries/gpu/src/gpu/Texture.h index 907f9ff392..a6f527e657 100644 --- a/libraries/gpu/src/gpu/Texture.h +++ b/libraries/gpu/src/gpu/Texture.h @@ -24,6 +24,7 @@ #include "Forward.h" #include "Resource.h" #include "Metric.h" +#include "SerDes.h" const int ABSOLUTE_MAX_TEXTURE_NUM_PIXELS = 8192 * 8192; @@ -91,6 +92,37 @@ public: }; typedef std::shared_ptr< SphericalHarmonics > SHPointer; + +inline DataSerializer &operator<<(DataSerializer &ser, const SphericalHarmonics &h) { + DataSerializer::SizeTracker tracker(ser); + + ser << h.L00 << h.spare0; + ser << h.L1m1 << h.spare1; + ser << h.L10 << h.spare2; + ser << h.L11 << h.spare3; + ser << h.L2m2 << h.spare4; + ser << h.L2m1 << h.spare5; + ser << h.L20 << h.spare6; + ser << h.L21 << h.spare7; + ser << h.L22 << h.spare8; + return ser; +} + +inline DataDeserializer &operator>>(DataDeserializer &des, SphericalHarmonics &h) { + DataDeserializer::SizeTracker tracker(des); + + des >> h.L00 >> h.spare0; + des >> h.L1m1 >> h.spare1; + des >> h.L10 >> h.spare2; + des >> h.L11 >> h.spare3; + des >> h.L2m2 >> h.spare4; + des >> h.L2m1 >> h.spare5; + des >> h.L20 >> h.spare6; + des >> h.L21 >> h.spare7; + des >> h.L22 >> h.spare8; + return des; +} + class Sampler { public: @@ -136,7 +168,7 @@ public: uint8 _wrapModeU = WRAP_REPEAT; uint8 _wrapModeV = WRAP_REPEAT; uint8 _wrapModeW = WRAP_REPEAT; - + uint8 _mipOffset = 0; uint8 _minMip = 0; uint8 _maxMip = MAX_MIP_LEVEL; @@ -193,6 +225,35 @@ protected: friend class Deserializer; }; +inline DataSerializer &operator<<(DataSerializer &ser, const Sampler::Desc &d) { + DataSerializer::SizeTracker tracker(ser); + ser << d._borderColor; + ser << d._maxAnisotropy; + ser << d._filter; + ser << d._comparisonFunc; + ser << d._wrapModeU; + ser << d._wrapModeV; + ser << d._wrapModeW; + ser << d._mipOffset; + ser << d._minMip; + ser << d._maxMip; + return ser; +} + +inline DataDeserializer &operator>>(DataDeserializer &dsr, Sampler::Desc &d) { + DataDeserializer::SizeTracker tracker(dsr); + dsr >> d._borderColor; + dsr >> d._maxAnisotropy; + dsr >> d._filter; + dsr >> d._comparisonFunc; + dsr >> d._wrapModeU; + dsr >> d._wrapModeV; + dsr >> d._wrapModeW; + dsr >> d._mipOffset; + dsr >> d._minMip; + dsr >> d._maxMip; + return dsr; +} enum class TextureUsageType : uint8 { RENDERBUFFER, // Used as attachments to a framebuffer RESOURCE, // Resource textures, like materials... 
subject to memory manipulation @@ -230,7 +291,7 @@ public: NORMAL, // Texture is a normal map ALPHA, // Texture has an alpha channel ALPHA_MASK, // Texture alpha channel is a Mask 0/1 - NUM_FLAGS, + NUM_FLAGS, }; typedef std::bitset Flags; @@ -478,7 +539,7 @@ public: uint16 evalMipDepth(uint16 level) const { return std::max(_depth >> level, 1); } // The true size of an image line or surface depends on the format, tiling and padding rules - // + // // Here are the static function to compute the different sizes from parametered dimensions and format // Tile size must be a power of 2 static uint16 evalTiledPadding(uint16 length, int tile) { int tileMinusOne = (tile - 1); return (tileMinusOne - (length + tileMinusOne) % tile); } @@ -507,7 +568,7 @@ public: uint32 evalMipFaceNumTexels(uint16 level) const { return evalMipWidth(level) * evalMipHeight(level) * evalMipDepth(level); } uint32 evalMipNumTexels(uint16 level) const { return evalMipFaceNumTexels(level) * getNumFaces(); } - // For convenience assign a source name + // For convenience assign a source name const std::string& source() const { return _source; } void setSource(const std::string& source) { _source = source; } const std::string& sourceHash() const { return _sourceHash; } @@ -633,7 +694,7 @@ protected: uint16 _maxMipLevel { 0 }; uint16 _minMip { 0 }; - + Type _type { TEX_1D }; Usage _usage; @@ -643,7 +704,7 @@ protected: bool _isIrradianceValid = false; bool _defined = false; bool _important = false; - + static TexturePointer create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips, const Sampler& sampler); Size resize(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips); diff --git a/libraries/gpu/src/gpu/Texture_ktx.cpp b/libraries/gpu/src/gpu/Texture_ktx.cpp index c4b674a917..2a4d678208 100644 --- a/libraries/gpu/src/gpu/Texture_ktx.cpp +++ b/libraries/gpu/src/gpu/Texture_ktx.cpp @@ -18,6 +18,7 @@ #include #include "GPULogging.h" +#include "SerDes.h" using namespace gpu; @@ -27,71 +28,94 @@ using KtxStorage = Texture::KtxStorage; std::vector, std::shared_ptr>> KtxStorage::_cachedKtxFiles; std::mutex KtxStorage::_cachedKtxFilesMutex; + +/** + * @brief Payload for a KTX (texture) + * + * This contains a ready to use texture. This is both used for the local cache, and for baked textures. + * + * @note The usage for textures means breaking compatibility is a bad idea, and that the implementation + * should just keep on adding extra data at the bottom of the structure, and remain able to read old + * formats. In fact, version 1 KTX can be found in older baked assets. 
+ */ struct GPUKTXPayload { using Version = uint8; static const std::string KEY; static const Version CURRENT_VERSION { 2 }; static const size_t PADDING { 2 }; - static const size_t SIZE { sizeof(Version) + sizeof(Sampler::Desc) + sizeof(uint32) + sizeof(TextureUsageType) + sizeof(glm::ivec2) + PADDING }; + static const size_t SIZE { sizeof(Version) + sizeof(Sampler::Desc) + sizeof(uint32_t) + sizeof(TextureUsageType) + sizeof(glm::ivec2) + PADDING }; + static_assert(GPUKTXPayload::SIZE == 44, "Packing size may differ between platforms"); - static_assert(GPUKTXPayload::SIZE % 4 == 0, "GPUKTXPayload is not 4 bytes aligned"); Sampler::Desc _samplerDesc; Texture::Usage _usage; TextureUsageType _usageType; glm::ivec2 _originalSize { 0, 0 }; - Byte* serialize(Byte* data) const { - *(Version*)data = CURRENT_VERSION; - data += sizeof(Version); + /** + * @brief Serialize the KTX payload + * + * @warning Be careful modifying this code, as it influences baked assets. + * Backwards compatibility must be maintained. + * + * @param ser Destination serializer + */ + void serialize(DataSerializer &ser) { - memcpy(data, &_samplerDesc, sizeof(Sampler::Desc)); - data += sizeof(Sampler::Desc); + ser << CURRENT_VERSION; - // We can't copy the bitset in Texture::Usage in a crossplateform manner - // So serialize it manually - uint32 usageData = _usage._flags.to_ulong(); - memcpy(data, &usageData, sizeof(uint32)); - data += sizeof(uint32); + ser << _samplerDesc; - memcpy(data, &_usageType, sizeof(TextureUsageType)); - data += sizeof(TextureUsageType); + uint32_t usageData = (uint32_t)_usage._flags.to_ulong(); + ser << usageData; + ser << ((uint8_t)_usageType); + ser << _originalSize; - memcpy(data, glm::value_ptr(_originalSize), sizeof(glm::ivec2)); - data += sizeof(glm::ivec2); + ser.addPadding(PADDING); - return data + PADDING; + assert(ser.length() == GPUKTXPayload::SIZE); } - bool unserialize(const Byte* data, size_t size) { - Version version = *(const Version*)data; - data += sizeof(Version); + /** + * @brief Deserialize the KTX payload + * + * @warning Be careful modifying this code, as it influences baked assets. + * Backwards compatibility must be maintained. 
+ * + * @param dsr Deserializer object + * @return true Successful + * @return false Version check failed + */ + bool unserialize(DataDeserializer &dsr) { + Version version = 0; + uint32_t usageData = 0; + uint8_t usagetype = 0; + + dsr >> version; if (version > CURRENT_VERSION) { // If we try to load a version that we don't know how to parse, // it will render incorrectly + qCWarning(gpulogging) << "KTX version" << version << "is newer than our own," << CURRENT_VERSION; + qCWarning(gpulogging) << dsr; return false; } - memcpy(&_samplerDesc, data, sizeof(Sampler::Desc)); - data += sizeof(Sampler::Desc); + dsr >> _samplerDesc; - // We can't copy the bitset in Texture::Usage in a crossplateform manner - // So unserialize it manually - uint32 usageData; - memcpy(&usageData, data, sizeof(uint32)); - _usage = Texture::Usage(usageData); - data += sizeof(uint32); + dsr >> usageData; + _usage = gpu::Texture::Usage(usageData); - memcpy(&_usageType, data, sizeof(TextureUsageType)); - data += sizeof(TextureUsageType); + dsr >> usagetype; + _usageType = (TextureUsageType)usagetype; if (version >= 2) { - memcpy(&_originalSize, data, sizeof(glm::ivec2)); - data += sizeof(glm::ivec2); + dsr >> _originalSize; } + dsr.skipPadding(PADDING); + return true; } @@ -103,7 +127,8 @@ struct GPUKTXPayload { auto found = std::find_if(keyValues.begin(), keyValues.end(), isGPUKTX); if (found != keyValues.end()) { auto value = found->_value; - return payload.unserialize(value.data(), value.size()); + DataDeserializer dsr(value.data(), value.size()); + return payload.unserialize(dsr); } return false; } @@ -123,29 +148,24 @@ struct IrradianceKTXPayload { SphericalHarmonics _irradianceSH; - Byte* serialize(Byte* data) const { - *(Version*)data = CURRENT_VERSION; - data += sizeof(Version); - - memcpy(data, &_irradianceSH, sizeof(SphericalHarmonics)); - data += sizeof(SphericalHarmonics); - - return data + PADDING; + void serialize(DataSerializer &ser) const { + ser << CURRENT_VERSION; + ser << _irradianceSH; + ser.addPadding(PADDING); } - bool unserialize(const Byte* data, size_t size) { - if (size != SIZE) { + bool unserialize(DataDeserializer &des) { + Version version; + if (des.length() != SIZE) { return false; } - Version version = *(const Version*)data; + des >> version; if (version != CURRENT_VERSION) { return false; } - data += sizeof(Version); - - memcpy(&_irradianceSH, data, sizeof(SphericalHarmonics)); + des >> _irradianceSH; return true; } @@ -157,7 +177,8 @@ struct IrradianceKTXPayload { auto found = std::find_if(keyValues.begin(), keyValues.end(), isIrradianceKTX); if (found != keyValues.end()) { auto value = found->_value; - return payload.unserialize(value.data(), value.size()); + DataDeserializer des(value.data(), value.size()); + return payload.unserialize(des); } return false; } @@ -467,7 +488,9 @@ ktx::KTXUniquePointer Texture::serialize(const Texture& texture, const glm::ivec gpuKeyval._originalSize = originalSize; Byte keyvalPayload[GPUKTXPayload::SIZE]; - gpuKeyval.serialize(keyvalPayload); + DataSerializer ser(keyvalPayload, sizeof(keyvalPayload)); + + gpuKeyval.serialize(ser); ktx::KeyValues keyValues; keyValues.emplace_back(GPUKTXPayload::KEY, (uint32)GPUKTXPayload::SIZE, (ktx::Byte*) &keyvalPayload); @@ -477,7 +500,8 @@ ktx::KTXUniquePointer Texture::serialize(const Texture& texture, const glm::ivec irradianceKeyval._irradianceSH = *texture.getIrradiance(); Byte irradianceKeyvalPayload[IrradianceKTXPayload::SIZE]; - irradianceKeyval.serialize(irradianceKeyvalPayload); + DataSerializer 
ser(irradianceKeyvalPayload, sizeof(irradianceKeyvalPayload)); + irradianceKeyval.serialize(ser); keyValues.emplace_back(IrradianceKTXPayload::KEY, (uint32)IrradianceKTXPayload::SIZE, (ktx::Byte*) &irradianceKeyvalPayload); } diff --git a/libraries/gpu/src/gpu/Transform.slh b/libraries/gpu/src/gpu/Transform.slh index 767db13595..42dcc4ec6a 100644 --- a/libraries/gpu/src/gpu/Transform.slh +++ b/libraries/gpu/src/gpu/Transform.slh @@ -3,6 +3,7 @@ // // Created by Sam Gateau on 2/10/15. // Copyright 2013 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -10,8 +11,12 @@ <@if not GPU_TRANSFORM_STATE_SLH@> <@def GPU_TRANSFORM_STATE_SLH@> -<@include gpu/ShaderConstants.h@> <@func declareStandardCameraTransform()@> + +#ifndef STANDARD_TRANSFORM_CAMERA +#define STANDARD_TRANSFORM_CAMERA + +<@include gpu/ShaderConstants.h@> <@include gpu/TransformCamera_shared.slh@> #define TransformCamera _TransformCamera @@ -90,32 +95,23 @@ vec3 getEyeWorldPos() { } bool cam_isStereo() { -#ifdef GPU_TRANSFORM_IS_STEREO return getTransformCamera()._stereoInfo.x > 0.0; -#else - return _cameraBlock._camera._stereoInfo.x > 0.0; -#endif } float cam_getStereoSide() { -#ifdef GPU_TRANSFORM_IS_STEREO -#ifdef GPU_TRANSFORM_STEREO_CAMERA return getTransformCamera()._stereoInfo.y; -#else - return _cameraBlock._camera._stereoInfo.y; -#endif -#else - return _cameraBlock._camera._stereoInfo.y; -#endif } +vec2 cam_getInvWidthHeight() { + return getTransformCamera()._stereoInfo.zw; +} + +#endif // STANDARD_TRANSFORM_CAMERA + <@endfunc@> <@func declareStandardObjectTransform()@> -struct TransformObject { - mat4 _model; - mat4 _modelInverse; -}; +<@include gpu/TransformObject_shared.slh@> layout(location=GPU_ATTR_DRAW_CALL_INFO) in ivec2 _drawCallInfo; @@ -155,11 +151,7 @@ TransformObject getTransformObject() { <$declareStandardObjectTransform()$> <@endfunc@> -<@func transformCameraViewport(cameraTransform, viewport)@> - <$viewport$> = <$cameraTransform$>._viewport; -<@endfunc@> - -<@func transformStereoClipsSpace(cameraTransform, clipPos)@> +<@func transformStereoClipSpace(clipPos)@> { #ifdef GPU_TRANSFORM_IS_STEREO @@ -190,6 +182,18 @@ TransformObject getTransformObject() { } <@endfunc@> +<@func transformModelToEyeAndPrevEyeWorldAlignedPos(cameraTransform, objectTransform, modelPos, eyeWAPos, prevEyeWAPos)@> + + { // transformModelToEyeAndPrevEyeWorldAlignedPos + highp mat4 _mv = <$objectTransform$>._model; + highp mat4 _pmv = <$objectTransform$>._previousModel; + _mv[3].xyz -= <$cameraTransform$>._viewInverse[3].xyz; + _pmv[3].xyz -= <$cameraTransform$>._previousViewInverse[3].xyz; + <$eyeWAPos$> = (_mv * <$modelPos$>); + <$prevEyeWAPos$> = (_pmv * <$modelPos$>); + } +<@endfunc@> + <@func transformModelToMonoClipPos(cameraTransform, objectTransform, modelPos, clipPos)@> { // transformModelToMonoClipPos vec4 eyeWAPos; @@ -201,7 +205,7 @@ TransformObject getTransformObject() { <@func transformModelToClipPos(cameraTransform, objectTransform, modelPos, clipPos)@> { // transformModelToClipPos <$transformModelToMonoClipPos($cameraTransform$, $objectTransform$, $modelPos$, $clipPos$)$> - <$transformStereoClipsSpace($cameraTransform$, $clipPos$)$> + <$transformStereoClipSpace($clipPos$)$> } <@endfunc@> @@ -212,19 +216,59 @@ TransformObject getTransformObject() { <$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos; <$eyePos$> = vec4((<$cameraTransform$>._view * 
vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0); - <$transformStereoClipsSpace($cameraTransform$, $clipPos$)$> + <$transformStereoClipSpace($clipPos$)$> } <@endfunc@> -<@func transformModelToWorldAndEyeAndClipPos(cameraTransform, objectTransform, modelPos, worldPos, eyePos, clipPos)@> - { // transformModelToEyeAndClipPos +<@func transformModelToEyeClipPosAndPrevClipPos(cameraTransform, objectTransform, modelPos, eyePos, clipPos, prevClipPos)@> + { // transformModelToEyeClipPosAndPrevClipPos + vec4 eyeWAPos; + vec4 prevEyeWAPos; + <$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$> + <$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0); + <$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos; + <$transformStereoClipSpace($clipPos$)$> + + <$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * prevEyeWAPos; + // Prev clip pos is in mono clip space + } +<@endfunc@> + +<@func transformModelToClipPosAndPrevClipPos(cameraTransform, objectTransform, modelPos, clipPos, prevClipPos)@> + { // transformModelToClipPosAndPrevClipPos + vec4 eyeWAPos; + vec4 prevEyeWAPos; + <$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$> + <$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos; + <$transformStereoClipSpace($clipPos$)$> + + <$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * prevEyeWAPos; + // Prev clip pos is in mono clip space + } +<@endfunc@> + +<@func transformModelToWorldEyeAndClipPos(cameraTransform, objectTransform, modelPos, worldPos, eyePos, clipPos)@> + { // transformModelToWorldEyeAndClipPos vec4 eyeWAPos; <$transformModelToEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos)$> <$worldPos$> = vec4(eyeWAPos.xyz + <$cameraTransform$>._viewInverse[3].xyz, 1.0); <$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos; <$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0); - <$transformStereoClipsSpace($cameraTransform$, $clipPos$)$> + <$transformStereoClipSpace($clipPos$)$> + } +<@endfunc@> + +<@func transformModelToWorldEyeClipPosAndPrevClipPos(cameraTransform, objectTransform, modelPos, worldPos, eyePos, clipPos, prevClipPos)@> + { // transformModelToWorldEyeClipPosAndPrevClipPos + vec4 eyeWAPos; + vec4 prevEyeWAPos; + <$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$> + <$worldPos$> = vec4(eyeWAPos.xyz + <$cameraTransform$>._viewInverse[3].xyz, 1.0); + <$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos; + <$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0); + <$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * prevEyeWAPos; + <$transformStereoClipSpace($clipPos$)$> } <@endfunc@> @@ -236,13 +280,22 @@ TransformObject getTransformObject() { } <@endfunc@> +<@func transformModelToEyePosAndPrevEyePos(cameraTransform, objectTransform, modelPos, eyePos, prevEyePos)@> + { // transformModelToEyePosAndPrevEyePos + vec4 eyeWAPos; + vec4 prevEyeWAPos; + <$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$> + <$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0); + <$prevEyePos$> = vec4((<$cameraTransform$>._previousView * vec4(prevEyeWAPos.xyz, 0.0)).xyz, 
1.0); + } +<@endfunc@> <@func transformWorldToClipPos(cameraTransform, worldPos, clipPos)@> { // transformWorldToClipPos vec4 eyeWAPos = <$worldPos$> - vec4(<$cameraTransform$>._viewInverse[3].xyz, 0.0); <$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos; - <$transformStereoClipsSpace($cameraTransform$, $clipPos$)$> + <$transformStereoClipSpace($clipPos$)$> } <@endfunc@> @@ -285,6 +338,20 @@ TransformObject getTransformObject() { } <@endfunc@> +<@func transformModelToPrevEyeDir(cameraTransform, objectTransform, modelDir, prevEyeDir)@> + { // transformModelToPrevEyeDir + vec3 mr0 = vec3(<$objectTransform$>._previousModelInverse[0].x, <$objectTransform$>._previousModelInverse[1].x, <$objectTransform$>._previousModelInverse[2].x); + vec3 mr1 = vec3(<$objectTransform$>._previousModelInverse[0].y, <$objectTransform$>._previousModelInverse[1].y, <$objectTransform$>._previousModelInverse[2].y); + vec3 mr2 = vec3(<$objectTransform$>._previousModelInverse[0].z, <$objectTransform$>._previousModelInverse[1].z, <$objectTransform$>._previousModelInverse[2].z); + + vec3 mvc0 = vec3(dot(<$cameraTransform$>._previousViewInverse[0].xyz, mr0), dot(<$cameraTransform$>._previousViewInverse[0].xyz, mr1), dot(<$cameraTransform$>._previousViewInverse[0].xyz, mr2)); + vec3 mvc1 = vec3(dot(<$cameraTransform$>._previousViewInverse[1].xyz, mr0), dot(<$cameraTransform$>._previousViewInverse[1].xyz, mr1), dot(<$cameraTransform$>._previousViewInverse[1].xyz, mr2)); + vec3 mvc2 = vec3(dot(<$cameraTransform$>._previousViewInverse[2].xyz, mr0), dot(<$cameraTransform$>._previousViewInverse[2].xyz, mr1), dot(<$cameraTransform$>._previousViewInverse[2].xyz, mr2)); + + <$prevEyeDir$> = vec3(dot(mvc0, <$modelDir$>), dot(mvc1, <$modelDir$>), dot(mvc2, <$modelDir$>)); + } +<@endfunc@> + <@func transformEyeToWorldDir(cameraTransform, eyeDir, worldDir)@> { // transformEyeToWorldDir <$worldDir$> = vec3(<$cameraTransform$>._viewInverse * vec4(<$eyeDir$>.xyz, 0.0)); @@ -301,7 +368,34 @@ TransformObject getTransformObject() { { // transformEyeToClipPos <$clipPos$> = <$cameraTransform$>._projection * vec4(<$eyePos$>.xyz, 1.0); - <$transformStereoClipsSpace($cameraTransform$, $clipPos$)$> + <$transformStereoClipSpace($clipPos$)$> + } +<@endfunc@> + +<@func transformEyeToPrevClipPos(cameraTransform, eyePos, prevClipPos)@> + { // transformEyeToPrevClipPos + vec4 worldPos = <$cameraTransform$>._viewInverse * vec4(<$eyePos$>.xyz, 1.0); + <$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * worldPos; + // Prev clip pos is in mono clip space + } +<@endfunc@> + +<@func transformEyeToClipPosAndPrevClipPos(cameraTransform, eyePos, clipPos, prevClipPos)@> + { // transformEyeToClipPosAndPrevClipPos + <$clipPos$> = <$cameraTransform$>._projection * vec4(<$eyePos$>.xyz, 1.0); + + <$transformStereoClipSpace($clipPos$)$> + + vec4 worldPos = <$cameraTransform$>._viewInverse * vec4(<$eyePos$>.xyz, 1.0); + <$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * worldPos; + // Prev clip pos is in mono clip space + } +<@endfunc@> + +<@func transformPrevEyeToPrevClipPos(cameraTransform, prevEyePos, prevClipPos)@> + { // transformPrevEyeToPrevClipPos + <$prevClipPos$> = <$cameraTransform$>._previousViewInverse * vec4(<$prevEyePos$>.xyz, 1.0) - vec4(<$cameraTransform$>._previousViewInverse[3].xyz, 0.0); + <$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * <$prevClipPos$>; } <@endfunc@> diff --git a/libraries/gpu/src/gpu/TransformCamera_shared.slh 
b/libraries/gpu/src/gpu/TransformCamera_shared.slh index e4a0f8c2cc..86a3b06db2 100644 --- a/libraries/gpu/src/gpu/TransformCamera_shared.slh +++ b/libraries/gpu/src/gpu/TransformCamera_shared.slh @@ -1,22 +1,26 @@ -// glsl / C++ compatible source as interface for FadeEffect +// glsl / C++ compatible source as interface for TransformCamera #ifdef __cplusplus -# define _MAT4 Mat4 -# define _VEC4 Vec4 -# define _MUTABLE mutable +# define TC_MAT4 gpu::Mat4 +# define TC_VEC4 gpu::Vec4 +# define TC_MUTABLE mutable #else -# define _MAT4 mat4 -# define _VEC4 vec4 -# define _MUTABLE +# define TC_MAT4 mat4 +# define TC_VEC4 vec4 +# define TC_MUTABLE #endif struct _TransformCamera { - _MUTABLE _MAT4 _view; - _MUTABLE _MAT4 _viewInverse; - _MUTABLE _MAT4 _projectionViewUntranslated; - _MAT4 _projection; - _MUTABLE _MAT4 _projectionInverse; - _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations. - _MUTABLE _VEC4 _stereoInfo; + TC_MUTABLE TC_MAT4 _view; + TC_MUTABLE TC_MAT4 _viewInverse; + TC_MUTABLE TC_MAT4 _previousView; + TC_MUTABLE TC_MAT4 _previousViewInverse; + TC_MAT4 _projection; + TC_MUTABLE TC_MAT4 _projectionInverse; + TC_MUTABLE TC_MAT4 _projectionViewUntranslated; + // Previous projection view untranslated AND jittered with current jitter + TC_MUTABLE TC_MAT4 _previousProjectionViewUntranslated; + TC_VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations. + TC_MUTABLE TC_VEC4 _stereoInfo; }; // <@if 1@> diff --git a/libraries/gpu/src/gpu/TransformObject_shared.slh b/libraries/gpu/src/gpu/TransformObject_shared.slh new file mode 100644 index 0000000000..edb7ca4a5b --- /dev/null +++ b/libraries/gpu/src/gpu/TransformObject_shared.slh @@ -0,0 +1,19 @@ +// glsl / C++ compatible source as interface for TransformCamera +#ifdef __cplusplus +# define TO_MAT4 Mat4 +#else +# define TO_MAT4 mat4 +#endif + +struct TransformObject { + TO_MAT4 _model; + TO_MAT4 _modelInverse; + TO_MAT4 _previousModel; + TO_MAT4 _previousModelInverse; +}; + + // <@if 1@> + // Trigger Scribe include + // <@endif@> +// + diff --git a/libraries/gpu/src/gpu/drawColor.slp b/libraries/gpu/src/gpu/drawColor.slp index 1c81242fed..8289f46550 100644 --- a/libraries/gpu/src/gpu/drawColor.slp +++ b/libraries/gpu/src/gpu/drawColor.slp @@ -1,3 +1,2 @@ -VERTEX DrawTransformVertexPosition +VERTEX DrawUnitQuad FRAGMENT DrawColor -r diff --git a/libraries/gpu/src/gpu/drawWhite.slp b/libraries/gpu/src/gpu/drawWhite.slp new file mode 100644 index 0000000000..c3e7ec1121 --- /dev/null +++ b/libraries/gpu/src/gpu/drawWhite.slp @@ -0,0 +1,2 @@ +VERTEX DrawUnitQuad +FRAGMENT DrawWhite diff --git a/libraries/graphics/src/graphics/Haze.slh b/libraries/graphics/src/graphics/Haze.slh index a2d8bb0523..e10f1b1449 100644 --- a/libraries/graphics/src/graphics/Haze.slh +++ b/libraries/graphics/src/graphics/Haze.slh @@ -3,6 +3,7 @@ // // Created by Nissim Hadar on 9/13/2017 // Copyright 2013 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
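TransformCamera_shared.slh and the new TransformObject_shared.slh are single sources of truth for both languages: under __cplusplus the TC_/TO_ macros expand to the gpu math typedefs, otherwise to the GLSL mat4/vec4, so the C++ structs stay in lock-step with the uniform/storage blocks the shaders read. A simplified sketch of the C++ side (in the real code the object header is included inside the gpu::Batch class body rather than a free namespace):

namespace example {
    using Mat4 = glm::mat4;                   // TO_MAT4 expands to Mat4 on the C++ side
    #include "TransformObject_shared.slh"     // yields: struct TransformObject { Mat4 _model; ... };

    // Four mat4 members, no padding, so the C++ size matches the SSBO layout used in Transform.slh.
    static_assert(sizeof(TransformObject) == 4 * sizeof(Mat4), "must match the shader-side layout");
}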
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -181,10 +182,9 @@ vec4 computeHazeColor(vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePosition // Mix with background at far range const float BLEND_DISTANCE = 27000.0f; - vec4 outFragColor = potentialFragColor; - outFragColor.a *= mix(1.0, hazeParams.backgroundBlend, float(distance > BLEND_DISTANCE)); + potentialFragColor.a *= mix(1.0, hazeParams.backgroundBlend, float(distance > BLEND_DISTANCE)); - return outFragColor; + return potentialFragColor; } <@endif@> diff --git a/libraries/graphics/src/graphics/Skybox.cpp b/libraries/graphics/src/graphics/Skybox.cpp index 7d7fd35018..b9232418fc 100644 --- a/libraries/graphics/src/graphics/Skybox.cpp +++ b/libraries/graphics/src/graphics/Skybox.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 5/4/2015. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -73,14 +74,14 @@ void Skybox::prepare(gpu::Batch& batch) const { } } -void Skybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const { +void Skybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const { updateSchemaBuffer(); - Skybox::render(batch, frustum, (*this), forward); + Skybox::render(batch, frustum, (*this), forward, transformSlot); } static std::map _pipelines; -void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Skybox& skybox, bool forward) { +void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Skybox& skybox, bool forward, uint transformSlot) { if (_pipelines.empty()) { static const std::vector> keys = { std::make_tuple(false, shader::graphics::program::skybox), @@ -109,6 +110,8 @@ void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Sky batch.setProjectionTransform(projMat); batch.setViewTransform(viewTransform); + // This is needed if we want to have motion vectors on the sky + batch.saveViewProjectionTransform(transformSlot); batch.setModelTransform(Transform()); // only for Mac batch.setPipeline(_pipelines[forward]); diff --git a/libraries/graphics/src/graphics/Skybox.h b/libraries/graphics/src/graphics/Skybox.h index 5668604c8b..6dc788c48c 100644 --- a/libraries/graphics/src/graphics/Skybox.h +++ b/libraries/graphics/src/graphics/Skybox.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 5/4/2015. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
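Skybox::render() now threads a transformSlot through to saveViewProjectionTransform() so the sky's camera can be re-read by later velocity/TAA work; the slot budget quoted in Batch.h works out to 2 slots per view per camera * 2 cameras * (1 + 3 + 9 + 27) views = 160. A hedged sketch of a caller; the slot value is hypothetical and would really come from the render task (mirror depth, main vs. secondary camera, background vs. main view):

// Illustrative only.
void drawSky(gpu::Batch& batch, const ViewFrustum& frustum, const graphics::Skybox& skybox,
             unsigned int transformSlot) {
    // render() saves the view/projection it sets up into `transformSlot`, so a later velocity/TAA
    // pass can fetch exactly the camera the sky was drawn with.
    skybox.render(batch, frustum, /*forward=*/false, transformSlot);
}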
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -44,9 +45,9 @@ public: virtual void clear(); void prepare(gpu::Batch& batch) const; - virtual void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const; + virtual void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const; - static void render(gpu::Batch& batch, const ViewFrustum& frustum, const Skybox& skybox, bool forward); + static void render(gpu::Batch& batch, const ViewFrustum& frustum, const Skybox& skybox, bool forward, uint transformSlot); const UniformBufferView& getSchemaBuffer() const { return _schemaBuffer; } diff --git a/libraries/graphics/src/graphics/skybox.slf b/libraries/graphics/src/graphics/skybox.slf index 2d5ba4d26b..a7e0f9ef8b 100755 --- a/libraries/graphics/src/graphics/skybox.slf +++ b/libraries/graphics/src/graphics/skybox.slf @@ -10,7 +10,7 @@ // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -<@include graphics/ShaderConstants.h@> +<@include skybox.slh@> <@include graphics/Light.slh@> <@if HIFI_USE_FORWARD@> @@ -20,28 +20,25 @@ <$declareLightBuffer()$> <@include graphics/Haze.slh@> + + layout(location=0) out vec4 _fragColor; +<@else@> + <$declarePackDeferredFragmentSky()$> <@endif@> -LAYOUT(binding=GRAPHICS_TEXTURE_SKYBOX) uniform samplerCube cubeMap; - -struct Skybox { - vec4 color; -}; - -LAYOUT(binding=GRAPHICS_BUFFER_SKYBOX_PARAMS) uniform skyboxBuffer { - Skybox skybox; -}; - -layout(location=0) in vec3 _normal; -layout(location=0) out vec4 _fragColor; +layout(location=0) in vec3 _normal; +<@if not HIFI_USE_FORWARD@> + layout(location=1) in vec4 _prevPositionCS; +<@endif@> void main(void) { vec3 normal = normalize(_normal); vec3 skyboxTexel = texture(cubeMap, normal).rgb; vec3 skyboxColor = skybox.color.rgb; - _fragColor = vec4(applySkyboxColorMix(skyboxTexel, skyboxColor, skybox.color.a), 1.0); + vec3 color = applySkyboxColorMix(skyboxTexel, skyboxColor, skybox.color.a); <@if HIFI_USE_FORWARD@> + _fragColor = vec4(color, 1.0); // FIXME: either move this elsewhere or give it access to isHazeEnabled() (which is in render-utils/LightingModel.slh) if (/*(isHazeEnabled() > 0.0) && */(hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) { TransformCamera cam = getTransformCamera(); @@ -57,6 +54,8 @@ void main(void) { vec4 hazeColor = computeHazeColor(fragPositionES.xyz, fragPositionWS.xyz, eyePositionWS.xyz, lightDirectionWS); _fragColor.rgb = mix(_fragColor.rgb, hazeColor.rgb, hazeColor.a); } +<@else@> + packDeferredFragmentSky(_prevPositionCS, color); <@endif@> } diff --git a/libraries/graphics/src/graphics/skybox.slh b/libraries/graphics/src/graphics/skybox.slh new file mode 100644 index 0000000000..d6dfc8046e --- /dev/null +++ b/libraries/graphics/src/graphics/skybox.slh @@ -0,0 +1,60 @@ + +<@if not SKYBOX_SLH@> +<@def SKYBOX_SLH@> + +<@include graphics/ShaderConstants.h@> + +<@include gpu/Transform.slh@> +<$declareStandardCameraTransform()$> + +<@include gpu/PackedNormal.slh@> + +LAYOUT(binding=GRAPHICS_TEXTURE_SKYBOX) uniform samplerCube cubeMap; + +struct Skybox { + vec4 color; +}; + +LAYOUT(binding=GRAPHICS_BUFFER_SKYBOX_PARAMS) uniform skyboxBuffer { + Skybox skybox; +}; + +<@func declarePackDeferredFragmentSky()@> +// This code belongs in render-utils/VelocityWrite.slh but because graphics can't include render-utils, we have to have it here + +vec2 getEyeTexcoordPos() { + // No need to add 0.5 
as, by default, frag coords are pixel centered at (0.5, 0.5) + vec2 texCoordPos = gl_FragCoord.xy; + texCoordPos *= cam_getInvWidthHeight(); + texCoordPos.x -= cam_getStereoSide(); + return texCoordPos; +} + +vec2 packVelocity(vec4 prevPositionCS) { + vec2 uv = getEyeTexcoordPos(); + vec2 prevUV = (prevPositionCS.xy / prevPositionCS.w) * 0.5 + 0.5; + vec2 deltaUV = uv - prevUV; + // Velocity should be computed without any jitter inside. + return deltaUV; +} + +layout(location = 0) out vec4 _lighting; // calculated lighting +layout(location = 1) out vec4 _velocity; // velocity + +void packDeferredFragmentSky(vec4 prevPositionCS, vec3 color) { + _lighting = vec4(color, 1.0f); + _velocity = vec4(packVelocity(prevPositionCS), 0.0f, 0.0f); +} + +<@endfunc@> + +<@endif@> diff --git a/libraries/graphics/src/graphics/skybox.slv b/libraries/graphics/src/graphics/skybox.slv index a6e6930d22..5c60cf73a9 100755 --- a/libraries/graphics/src/graphics/skybox.slv +++ b/libraries/graphics/src/graphics/skybox.slv @@ -5,6 +5,7 @@ // // Created by Sam Gateau on 5/5/2015. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -14,6 +15,9 @@ <$declareStandardTransform()$> layout(location=0) out vec3 _normal; +<@if not HIFI_USE_FORWARD@> + layout(location=1) out vec4 _prevPositionCS; +<@endif@> void main(void) { const float depth = 0.0; @@ -23,17 +27,21 @@ void main(void) { vec4(-1.0, 1.0, depth, 1.0), vec4(1.0, 1.0, depth, 1.0) ); - vec4 inPosition = UNIT_QUAD[gl_VertexID]; // standard transform TransformCamera cam = getTransformCamera(); - vec3 clipDir = vec3(inPosition.xy, 0.0); + vec3 clipDir = UNIT_QUAD[gl_VertexID].xyz; vec3 eyeDir; <$transformClipToEyeDir(cam, clipDir, eyeDir)$> <$transformEyeToWorldDir(cam, eyeDir, _normal)$> - - // Position is supposed to come in clip space - gl_Position = vec4(inPosition.xy, 0.0, 1.0); - <$transformStereoClipsSpace(cam, gl_Position)$> +<@if not HIFI_USE_FORWARD@> + // FIXME: this is probably wrong + _prevPositionCS = cam._previousProjectionViewUntranslated * (cam._viewInverse * (cam._projectionInverse * vec4(clipDir, 1.0))); +<@endif@> + + // Position is supposed to come in clip space + gl_Position = vec4(clipDir, 1.0); + + <$transformStereoClipSpace(gl_Position)$> } diff --git a/libraries/octree/src/OctreePacketData.cpp b/libraries/octree/src/OctreePacketData.cpp index c13d58226b..3745582728 100644 --- a/libraries/octree/src/OctreePacketData.cpp +++ b/libraries/octree/src/OctreePacketData.cpp @@ -17,6 +17,7 @@ #include "OctreeLogging.h" #include "NumericalConstants.h" #include +#include "SerDes.h" bool OctreePacketData::_debug = false; AtomicUIntStat OctreePacketData::_totalBytesOfOctalCodes { 0 }; @@ -847,10 +848,10 @@ int OctreePacketData::unpackDataFromBytes(const unsigned char* dataBytes, QByteA } int OctreePacketData::unpackDataFromBytes(const unsigned char* dataBytes, AACube& result) { - aaCubeData cube; - memcpy(&cube, dataBytes, sizeof(aaCubeData)); - result = AACube(cube.corner, cube.scale); - return sizeof(aaCubeData); + DataDeserializer des(dataBytes, sizeof(aaCubeData)); + des >> result; + + return des.length(); } int OctreePacketData::unpackDataFromBytes(const unsigned char* dataBytes, QRect& result) { diff --git a/libraries/procedural/src/procedural/Procedural.cpp b/libraries/procedural/src/procedural/Procedural.cpp index 7b202ad625..b6550516e4 100644 --- 
a/libraries/procedural/src/procedural/Procedural.cpp +++ b/libraries/procedural/src/procedural/Procedural.cpp @@ -115,16 +115,16 @@ void ProceduralData::parse(const QJsonObject& proceduralData) { channels = proceduralData[CHANNELS_KEY].toArray(); } -std::function Procedural::opaqueStencil = [](gpu::StatePointer state) {}; +std::function Procedural::opaqueStencil = [](gpu::StatePointer state, bool useAA) {}; std::function Procedural::transparentStencil = [](gpu::StatePointer state) {}; -Procedural::Procedural() { +Procedural::Procedural(bool useAA) { _opaqueState->setCullMode(gpu::State::CULL_NONE); _opaqueState->setDepthTest(true, true, gpu::LESS_EQUAL); _opaqueState->setBlendFunction(false, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA, gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE); - opaqueStencil(_opaqueState); + opaqueStencil(_opaqueState, useAA); _transparentState->setCullMode(gpu::State::CULL_NONE); _transparentState->setDepthTest(true, false, gpu::LESS_EQUAL); diff --git a/libraries/procedural/src/procedural/Procedural.h b/libraries/procedural/src/procedural/Procedural.h index c1836095a7..b4d6c55f3d 100644 --- a/libraries/procedural/src/procedural/Procedural.h +++ b/libraries/procedural/src/procedural/Procedural.h @@ -99,7 +99,7 @@ inline bool operator!=(const ProceduralProgramKey& a, const ProceduralProgramKey // FIXME better mechanism for extending to things rendered using shaders other than simple.slv struct Procedural { public: - Procedural(); + Procedural(bool useAA = true); void setProceduralData(const ProceduralData& proceduralData); bool isReady() const; @@ -132,7 +132,7 @@ public: gpu::StatePointer _opaqueState { std::make_shared() }; gpu::StatePointer _transparentState { std::make_shared() }; - static std::function opaqueStencil; + static std::function opaqueStencil; static std::function transparentStencil; static bool enableProceduralShaders; diff --git a/libraries/procedural/src/procedural/ProceduralSkybox.cpp b/libraries/procedural/src/procedural/ProceduralSkybox.cpp index 5cbf11f298..67f3972400 100644 --- a/libraries/procedural/src/procedural/ProceduralSkybox.cpp +++ b/libraries/procedural/src/procedural/ProceduralSkybox.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 9/21/2015. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
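Procedural's constructor now forwards a useAA flag into the opaque stencil hook, so whichever library installs the hook can pick a different stencil setup for the no-AA path. A sketch of what that wiring might look like; the PrepareStencil helper names below are assumptions on my part, not taken from this diff:

// Hypothetical wiring of the updated hook.
Procedural::opaqueStencil = [](gpu::StatePointer state, bool useAA) {
    if (useAA) {
        PrepareStencil::testMaskDrawShape(*state);      // assumed AA-friendly stencil setup
    } else {
        PrepareStencil::testMaskDrawShapeNoAA(*state);  // assumed no-AA variant
    }
};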
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -45,15 +46,15 @@ void ProceduralSkybox::clear() { Skybox::clear(); } -void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const { +void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const { if (_procedural.isReady()) { - ProceduralSkybox::render(batch, frustum, (*this), forward); + ProceduralSkybox::render(batch, frustum, (*this), forward, transformSlot); } else { - Skybox::render(batch, frustum, forward); + Skybox::render(batch, frustum, forward, transformSlot); } } -void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const ProceduralSkybox& skybox, bool forward) { +void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const ProceduralSkybox& skybox, bool forward, uint transformSlot) { glm::mat4 projMat; viewFrustum.evalProjectionMatrix(projMat); @@ -61,6 +62,8 @@ void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, viewFrustum.evalViewTransform(viewTransform); batch.setProjectionTransform(projMat); batch.setViewTransform(viewTransform); + // This is needed if we want to have motion vectors on the sky + batch.saveViewProjectionTransform(transformSlot); batch.setModelTransform(Transform()); // only for Mac auto& procedural = skybox._procedural; diff --git a/libraries/procedural/src/procedural/ProceduralSkybox.h b/libraries/procedural/src/procedural/ProceduralSkybox.h index 983b432089..5aef9171d9 100644 --- a/libraries/procedural/src/procedural/ProceduralSkybox.h +++ b/libraries/procedural/src/procedural/ProceduralSkybox.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 9/21/15. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -26,8 +27,8 @@ public: bool empty() override; void clear() override; - void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const override; - static void render(gpu::Batch& batch, const ViewFrustum& frustum, const ProceduralSkybox& skybox, bool forward); + void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const override; + static void render(gpu::Batch& batch, const ViewFrustum& frustum, const ProceduralSkybox& skybox, bool forward, uint transformSlot); uint64_t getCreated() const { return _created; } diff --git a/libraries/procedural/src/procedural/proceduralSkybox.slf b/libraries/procedural/src/procedural/proceduralSkybox.slf index f938e0b9a2..f7b40d22cc 100644 --- a/libraries/procedural/src/procedural/proceduralSkybox.slf +++ b/libraries/procedural/src/procedural/proceduralSkybox.slf @@ -6,27 +6,19 @@ // // Created by Sam Gateau on 5/5/2015. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
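Both skybox fragment shaders now emit a velocity: the previous clip-space position is perspective-divided, remapped from [-1,1] to [0,1], and subtracted from the current fragment's UV. The same arithmetic written out in C++/glm as a worked illustration (not engine code):

#include <glm/glm.hpp>

// Mirrors packVelocity() from skybox.slh: per-fragment motion in UV space.
glm::vec2 packVelocity(glm::vec2 currentUV, glm::vec4 prevPositionCS) {
    glm::vec2 prevNDC = glm::vec2(prevPositionCS) / prevPositionCS.w;  // [-1, 1] after the divide
    glm::vec2 prevUV  = prevNDC * 0.5f + 0.5f;                         // remap to texture coordinates
    return currentUV - prevUV;                                         // delta consumed by the TAA/velocity buffer
}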
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -<@include graphics/ShaderConstants.h@> - -LAYOUT(binding=GRAPHICS_TEXTURE_SKYBOX) uniform samplerCube cubeMap; - -struct Skybox { - vec4 color; -}; - -LAYOUT(binding=GRAPHICS_BUFFER_SKYBOX_PARAMS) uniform skyboxBuffer { - Skybox skybox; -}; - -layout(location=0) in vec3 _normal; -layout(location=0) out vec4 _fragColor; +<@include graphics/skybox.slh@> +<$declarePackDeferredFragmentSky()$> <@include procedural/ProceduralCommon.slh@> +layout(location=0) in vec3 _normal; +layout(location=1) in vec4 _prevPositionCS; + #line 1001 //PROCEDURAL_BLOCK_BEGIN vec3 getSkyboxColor() { @@ -42,5 +34,6 @@ void main(void) { color = max(color, vec3(0)); // Procedural Shaders are expected to be Gamma corrected so let's bring back the RGB in linear space for the rest of the pipeline color = pow(color, vec3(2.2)); - _fragColor = vec4(color, 1.0); + + packDeferredFragmentSky(_prevPositionCS, color); } diff --git a/libraries/render-utils/src/AntialiasingEffect.cpp b/libraries/render-utils/src/AntialiasingEffect.cpp index 599c28ceca..01b17e5a9d 100644 --- a/libraries/render-utils/src/AntialiasingEffect.cpp +++ b/libraries/render-utils/src/AntialiasingEffect.cpp @@ -4,6 +4,7 @@ // // Created by Raffi Bedikian on 8/30/15 // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -13,7 +14,6 @@ #include -#include #include #include #include @@ -21,11 +21,6 @@ #include "render-utils/ShaderConstants.h" #include "StencilMaskPass.h" -#include "TextureCache.h" -#include "DependencyManager.h" -#include "ViewFrustum.h" -#include "GeometryCache.h" -#include "FramebufferCache.h" #include "RandomAndNoise.h" namespace ru { @@ -38,136 +33,129 @@ namespace gr { using graphics::slot::buffer::Buffer; } -#if !ANTIALIASING_USE_TAA - gpu::PipelinePointer Antialiasing::_antialiasingPipeline; +gpu::PipelinePointer Antialiasing::_intensityPipeline; gpu::PipelinePointer Antialiasing::_blendPipeline; +gpu::PipelinePointer Antialiasing::_debugBlendPipeline; -Antialiasing::Antialiasing() { - _geometryId = DependencyManager::get()->allocateID(); -} + #define TAA_JITTER_SEQUENCE_LENGTH 16 -Antialiasing::~Antialiasing() { - auto geometryCache = DependencyManager::get(); - if (geometryCache) { - geometryCache->releaseID(_geometryId); - } -} - -const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() { - if (!_antialiasingPipeline) { - gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::fxaa); - gpu::StatePointer state = std::make_shared(); - - state->setDepthTest(false, false, gpu::LESS_EQUAL); - PrepareStencil::testNoAA(*state); - - // Good to go add the brand new pipeline - _antialiasingPipeline = gpu::Pipeline::create(program, state); - } - - return _antialiasingPipeline; -} - -const gpu::PipelinePointer& Antialiasing::getBlendPipeline() { - if (!_blendPipeline) { - gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::fxaa_blend); - gpu::StatePointer state = std::make_shared(); - state->setDepthTest(false, false, gpu::LESS_EQUAL); - PrepareStencil::testNoAA(*state); - - // Good to go add the brand new pipeline - _blendPipeline = gpu::Pipeline::create(program, state); - } - return _blendPipeline; -} - -void Antialiasing::run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer) 
{ - assert(renderContext->args); - assert(renderContext->args->hasViewFrustum()); - - RenderArgs* args = renderContext->args; - - gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) { - batch.enableStereo(false); - batch.setViewportTransform(args->_viewport); - - if (!_paramsBuffer) { - _paramsBuffer = std::make_shared(sizeof(glm::vec4), nullptr); - } - - { - int width = args->_viewport.z; - int height = args->_viewport.w; - if (_antialiasingBuffer && _antialiasingBuffer->getSize() != uvec2(width, height)) { - _antialiasingBuffer.reset(); - } - - if (!_antialiasingBuffer) { - // Link the antialiasing FBO to texture - _antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("antialiasing")); - auto format = gpu::Element::COLOR_SRGBA_32; - auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT); - _antialiasingTexture = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler); - _antialiasingBuffer->setRenderBuffer(0, _antialiasingTexture); - glm::vec2 fbExtent { args->_viewport.z, args->_viewport.w }; - glm::vec2 inverseFbExtent = 1.0f / fbExtent; - _paramsBuffer->setSubData(0, glm::vec4(inverseFbExtent, 0.0, 0.0)); - } - } - - - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - batch.setProjectionTransform(projMat); - batch.setViewTransform(viewMat, true); - batch.setModelTransform(Transform()); - - // FXAA step - auto pipeline = getAntialiasingPipeline(); - batch.setResourceTexture(0, sourceBuffer->getRenderBuffer(0)); - batch.setFramebuffer(_antialiasingBuffer); - batch.setPipeline(pipeline); - batch.setUniformBuffer(0, _paramsBuffer); - batch.draw(gpu::TRIANGLE_STRIP, 4); - - // Blend step - batch.setResourceTexture(0, _antialiasingTexture); - batch.setFramebuffer(sourceBuffer); - batch.setPipeline(getBlendPipeline()); - batch.draw(gpu::TRIANGLE_STRIP, 4); - }); -} -#else - -void AntialiasingConfig::setAAMode(int mode) { - _mode = std::min((int)AntialiasingConfig::MODE_COUNT, std::max(0, mode)); // Just use unsigned? 
+void AntialiasingSetupConfig::setIndex(int current) { + _index = (current + TAA_JITTER_SEQUENCE_LENGTH) % TAA_JITTER_SEQUENCE_LENGTH; emit dirty(); } -gpu::PipelinePointer Antialiasing::_antialiasingPipeline; -gpu::PipelinePointer Antialiasing::_blendPipeline; -gpu::PipelinePointer Antialiasing::_debugBlendPipeline; +void AntialiasingSetupConfig::setState(State state) { + _state = (State)((int)state % (int)State::STATE_COUNT); + switch (_state) { + case State::NONE: { + none(); + break; + } + case State::PAUSE: { + pause(); + break; + } + case State::PLAY: + default: { + play(); + break; + } + } + emit dirty(); +} + +int AntialiasingSetupConfig::prev() { + setIndex(_index - 1); + return _index; +} + +int AntialiasingSetupConfig::next() { + setIndex(_index + 1); + return _index; +} + +AntialiasingSetupConfig::State AntialiasingSetupConfig::none() { + _state = State::NONE; + stop = true; + freeze = false; + setIndex(-1); + return _state; +} + +AntialiasingSetupConfig::State AntialiasingSetupConfig::pause() { + _state = State::PAUSE; + stop = false; + freeze = true; + setIndex(0); + return _state; +} + +AntialiasingSetupConfig::State AntialiasingSetupConfig::play() { + _state = State::PLAY; + stop = false; + freeze = false; + setIndex(0); + return _state; +} + +void AntialiasingSetupConfig::setAAMode(Mode mode) { + this->mode = (Mode)glm::clamp((int)mode, 0, (int)AntialiasingSetupConfig::Mode::MODE_COUNT); + emit dirty(); +} + +AntialiasingSetup::AntialiasingSetup() { + _sampleSequence.reserve(TAA_JITTER_SEQUENCE_LENGTH + 1); + // Fill in with jitter samples + for (int i = 0; i < TAA_JITTER_SEQUENCE_LENGTH; i++) { + _sampleSequence.emplace_back(glm::vec2(halton::evaluate<2>(i), halton::evaluate<3>(i)) - vec2(0.5f)); + } +} + +void AntialiasingSetup::configure(const Config& config) { + _isStopped = config.stop; + _isFrozen = config.freeze; + + if (config.freeze) { + _freezedSampleIndex = config.getIndex(); + } + _scale = config.scale; + + _mode = config.mode; +} + +void AntialiasingSetup::run(const render::RenderContextPointer& renderContext, Output& output) { + assert(renderContext->args); + if (!_isStopped && _mode == AntialiasingSetupConfig::Mode::TAA) { + RenderArgs* args = renderContext->args; + + gpu::doInBatch("AntialiasingSetup::run", args->_context, [&](gpu::Batch& batch) { + auto offset = 0; + auto count = _sampleSequence.size(); + if (_isFrozen) { + count = 1; + offset = _freezedSampleIndex; + } + batch.setProjectionJitterSequence(_sampleSequence.data() + offset, count); + batch.setProjectionJitterScale(_scale); + }); + } + + output = _mode; +} Antialiasing::Antialiasing(bool isSharpenEnabled) : _isSharpenEnabled{ isSharpenEnabled } { } Antialiasing::~Antialiasing() { - _antialiasingBuffers.reset(); - _antialiasingTextures[0].reset(); - _antialiasingTextures[1].reset(); + _antialiasingBuffers.clear(); } -gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() { - +const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() { if (!_antialiasingPipeline) { gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::taa); gpu::StatePointer state = std::make_shared(); - - PrepareStencil::testNoAA(*state); // Good to go add the brand new pipeline _antialiasingPipeline = gpu::Pipeline::create(program, state); @@ -176,24 +164,36 @@ gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() { return _antialiasingPipeline; } -gpu::PipelinePointer& Antialiasing::getBlendPipeline() { - if (!_blendPipeline) { - gpu::ShaderPointer program = 
gpu::Shader::createProgram(shader::render_utils::program::fxaa_blend); +const gpu::PipelinePointer& Antialiasing::getIntensityPipeline() { + if (!_intensityPipeline) { + gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::drawWhite); gpu::StatePointer state = std::make_shared(); + PrepareStencil::testNoAA(*state); + + // Good to go add the brand new pipeline + _intensityPipeline = gpu::Pipeline::create(program, state); + } + + return _intensityPipeline; +} + +const gpu::PipelinePointer& Antialiasing::getBlendPipeline() { + if (!_blendPipeline) { + gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::aa_blend); + gpu::StatePointer state = std::make_shared(); // Good to go add the brand new pipeline _blendPipeline = gpu::Pipeline::create(program, state); } return _blendPipeline; } -gpu::PipelinePointer& Antialiasing::getDebugBlendPipeline() { +const gpu::PipelinePointer& Antialiasing::getDebugBlendPipeline() { if (!_debugBlendPipeline) { gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::taa_blend); gpu::StatePointer state = std::make_shared(); PrepareStencil::testNoAA(*state); - // Good to go add the brand new pipeline _debugBlendPipeline = gpu::Pipeline::create(program, state); } @@ -201,12 +201,11 @@ gpu::PipelinePointer& Antialiasing::getDebugBlendPipeline() { } void Antialiasing::configure(const Config& config) { - _mode = (AntialiasingConfig::Mode) config.getAAMode(); - _sharpen = config.sharpen * 0.25f; if (!_isSharpenEnabled) { _sharpen = 0.0f; } + _params.edit().setSharpenedOutput(_sharpen > 0.0f); _params.edit().blend = config.blend * config.blend; _params.edit().covarianceGamma = config.covarianceGamma; @@ -216,7 +215,9 @@ void Antialiasing::configure(const Config& config) { _params.edit().debugShowVelocityThreshold = config.debugShowVelocityThreshold; _params.edit().regionInfo.x = config.debugX; - _params.edit().regionInfo.z = config.debugFXAAX; + _debugFXAAX = config.debugFXAAX; + + _params.edit().setBicubicHistoryFetch(config.bicubicHistoryFetch); _params.edit().setDebug(config.debug); _params.edit().setShowDebugCursor(config.showCursorPixel); @@ -227,56 +228,82 @@ void Antialiasing::configure(const Config& config) { } -void Antialiasing::run(const render::RenderContextPointer& renderContext, const Inputs& inputs) { +void Antialiasing::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& output) { assert(renderContext->args); - assert(renderContext->args->hasViewFrustum()); RenderArgs* args = renderContext->args; auto& deferredFrameTransform = inputs.get0(); - auto& sourceBuffer = inputs.get1(); - auto& linearDepthBuffer = inputs.get2(); - auto& velocityBuffer = inputs.get3(); - + const auto& deferredFrameBuffer = inputs.get1(); + const auto& sourceBuffer = deferredFrameBuffer->getLightingFramebuffer(); + const auto& linearDepthBuffer = inputs.get2(); + const auto& velocityTexture = deferredFrameBuffer->getDeferredVelocityTexture(); + const auto& mode = inputs.get3(); + + _params.edit().regionInfo.z = mode == AntialiasingSetupConfig::Mode::TAA ? 
_debugFXAAX : 0.0f; + int width = sourceBuffer->getWidth(); int height = sourceBuffer->getHeight(); - if (_antialiasingBuffers && _antialiasingBuffers->get(0) && _antialiasingBuffers->get(0)->getSize() != uvec2(width, height)) { - _antialiasingBuffers.reset(); - _antialiasingTextures[0].reset(); - _antialiasingTextures[1].reset(); + if (_antialiasingBuffers._swapChain && _antialiasingBuffers._swapChain->get(0) && _antialiasingBuffers._swapChain->get(0)->getSize() != uvec2(width, height)) { + _antialiasingBuffers.clear(); } - - if (!_antialiasingBuffers) { + if (!_antialiasingBuffers._swapChain || !_intensityFramebuffer) { std::vector antiAliasingBuffers; // Link the antialiasing FBO to texture - auto format = sourceBuffer->getRenderBuffer(0)->getTexelFormat(); + auto format = gpu::Element(gpu::VEC4, gpu::HALF, gpu::RGBA); auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP); for (int i = 0; i < 2; i++) { antiAliasingBuffers.emplace_back(gpu::Framebuffer::create("antialiasing")); const auto& antiAliasingBuffer = antiAliasingBuffers.back(); - _antialiasingTextures[i] = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler); - antiAliasingBuffer->setRenderBuffer(0, _antialiasingTextures[i]); + _antialiasingBuffers._textures[i] = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler); + antiAliasingBuffer->setRenderBuffer(0, _antialiasingBuffers._textures[i]); } - _antialiasingBuffers = std::make_shared(antiAliasingBuffers); + _antialiasingBuffers._swapChain = std::make_shared(antiAliasingBuffers); + + _intensityTexture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_R_8, width, height, gpu::Texture::SINGLE_MIP, defaultSampler); + _intensityFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("taaIntensity")); + _intensityFramebuffer->setRenderBuffer(0, _intensityTexture); + _intensityFramebuffer->setStencilBuffer(deferredFrameBuffer->getDeferredFramebuffer()->getDepthStencilBuffer(), deferredFrameBuffer->getDeferredFramebuffer()->getDepthStencilBufferFormat()); } - + + output = _intensityTexture; + gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) { + PROFILE_RANGE_BATCH(batch, "TAA"); + batch.enableStereo(false); batch.setViewportTransform(args->_viewport); + // Set the intensity buffer to 1 except when the stencil is masked as NoAA, where it should be 0 + // This is a bit of a hack as it is not possible and not portable to use the stencil value directly + // as a texture + batch.setFramebuffer(_intensityFramebuffer); + batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, gpu::Vec4(0.0f)); + batch.setResourceTexture(0, nullptr); + batch.setPipeline(getIntensityPipeline()); + batch.draw(gpu::TRIANGLE_STRIP, 4); + // TAA step - batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaHistory, _antialiasingBuffers, 0); + if (!_params->isFXAAEnabled()) { + batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaHistory, _antialiasingBuffers._swapChain, 0); + batch.setResourceTexture(ru::Texture::TaaVelocity, velocityTexture); + } else { + batch.setResourceTexture(ru::Texture::TaaHistory, nullptr); + batch.setResourceTexture(ru::Texture::TaaVelocity, nullptr); + } + batch.setResourceTexture(ru::Texture::TaaSource, sourceBuffer->getRenderBuffer(0)); - batch.setResourceTexture(ru::Texture::TaaVelocity, velocityBuffer->getVelocityTexture()); - // This is only used during debug + 
batch.setResourceTexture(ru::Texture::TaaIntensity, _intensityTexture); + + // This is only used during debug batch.setResourceTexture(ru::Texture::TaaDepth, linearDepthBuffer->getLinearDepthTexture()); batch.setUniformBuffer(ru::Buffer::TaaParams, _params); batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, deferredFrameTransform->getFrameTransformBuffer()); - batch.setFramebufferSwapChain(_antialiasingBuffers, 1); + batch.setFramebufferSwapChain(_antialiasingBuffers._swapChain, 1); batch.setPipeline(getAntialiasingPipeline()); batch.draw(gpu::TRIANGLE_STRIP, 4); @@ -286,11 +313,11 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const batch.setFramebuffer(sourceBuffer); if (_params->isDebug()) { batch.setPipeline(getDebugBlendPipeline()); - batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaNext, _antialiasingBuffers, 1); - } else { + batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaNext, _antialiasingBuffers._swapChain, 1); + } else { batch.setPipeline(getBlendPipeline()); - // Must match the bindg point in the fxaa_blend.slf shader - batch.setResourceFramebufferSwapChainTexture(0, _antialiasingBuffers, 1); + // Must match the binding point in the aa_blend.slf shader + batch.setResourceFramebufferSwapChainTexture(0, _antialiasingBuffers._swapChain, 1); // Disable sharpen if FXAA if (!_blendParamsBuffer) { _blendParamsBuffer = std::make_shared(sizeof(glm::vec4), nullptr); @@ -299,8 +326,8 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const batch.setUniformBuffer(0, _blendParamsBuffer); } batch.draw(gpu::TRIANGLE_STRIP, 4); - batch.advance(_antialiasingBuffers); - + batch.advance(_antialiasingBuffers._swapChain); + batch.setUniformBuffer(ru::Buffer::TaaParams, nullptr); batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, nullptr); @@ -308,114 +335,8 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const batch.setResourceTexture(ru::Texture::TaaHistory, nullptr); batch.setResourceTexture(ru::Texture::TaaVelocity, nullptr); batch.setResourceTexture(ru::Texture::TaaNext, nullptr); + + // Reset jitter sequence + batch.setProjectionJitterSequence(nullptr, 0); }); } - -void JitterSampleConfig::setIndex(int current) { - _index = (current) % JitterSample::SEQUENCE_LENGTH; - emit dirty(); -} - -void JitterSampleConfig::setState(int state) { - _state = (state) % 3; - switch (_state) { - case 0: { - none(); - break; - } - case 1: { - pause(); - break; - } - case 2: - default: { - play(); - break; - } - } - emit dirty(); -} - -int JitterSampleConfig::cycleStopPauseRun() { - setState((_state + 1) % 3); - return _state; -} - -int JitterSampleConfig::prev() { - setIndex(_index - 1); - return _index; -} - -int JitterSampleConfig::next() { - setIndex(_index + 1); - return _index; -} - -int JitterSampleConfig::none() { - _state = 0; - stop = true; - freeze = false; - setIndex(-1); - return _state; -} - -int JitterSampleConfig::pause() { - _state = 1; - stop = false; - freeze = true; - setIndex(0); - return _state; -} - - -int JitterSampleConfig::play() { - _state = 2; - stop = false; - freeze = false; - setIndex(0); - return _state; -} - -JitterSample::SampleSequence::SampleSequence(){ - // Halton sequence (2,3) - - for (int i = 0; i < SEQUENCE_LENGTH; i++) { - offsets[i] = glm::vec2(halton::evaluate<2>(i), halton::evaluate<3>(i)); - offsets[i] -= vec2(0.5f); - } - offsets[SEQUENCE_LENGTH] = glm::vec2(0.0f); -} - -void JitterSample::configure(const Config& config) { - _freeze = 
config.stop || config.freeze; - if (config.freeze) { - auto pausedIndex = config.getIndex(); - if (_sampleSequence.currentIndex != pausedIndex) { - _sampleSequence.currentIndex = pausedIndex; - } - } else if (config.stop) { - _sampleSequence.currentIndex = -1; - } else { - _sampleSequence.currentIndex = config.getIndex(); - } - _scale = config.scale; -} - -void JitterSample::run(const render::RenderContextPointer& renderContext, Output& jitter) { - auto& current = _sampleSequence.currentIndex; - if (!_freeze) { - if (current >= 0) { - current = (current + 1) % SEQUENCE_LENGTH; - } else { - current = -1; - } - } - - if (current >= 0) { - jitter = _sampleSequence.offsets[current]; - } else { - jitter = glm::vec2(0.0f); - } -} - -#endif diff --git a/libraries/render-utils/src/AntialiasingEffect.h b/libraries/render-utils/src/AntialiasingEffect.h index 8273959c14..b99e96a31c 100644 --- a/libraries/render-utils/src/AntialiasingEffect.h +++ b/libraries/render-utils/src/AntialiasingEffect.h @@ -18,85 +18,128 @@ #include "render/DrawTask.h" #include "DeferredFrameTransform.h" -#include "VelocityBufferPass.h" +#include "DeferredFramebuffer.h" +#include "SurfaceGeometryPass.h" - -class JitterSampleConfig : public render::Job::Config { +class AntialiasingSetupConfig : public render::Job::Config { Q_OBJECT - Q_PROPERTY(float scale MEMBER scale NOTIFY dirty) - Q_PROPERTY(bool freeze MEMBER freeze NOTIFY dirty) - Q_PROPERTY(bool stop MEMBER stop NOTIFY dirty) - Q_PROPERTY(int index READ getIndex NOTIFY dirty) - Q_PROPERTY(int state READ getState WRITE setState NOTIFY dirty) + Q_PROPERTY(float scale MEMBER scale NOTIFY dirty) + Q_PROPERTY(bool freeze MEMBER freeze NOTIFY dirty) + Q_PROPERTY(bool stop MEMBER stop NOTIFY dirty) + Q_PROPERTY(int index READ getIndex NOTIFY dirty) + Q_PROPERTY(State state READ getState WRITE setState NOTIFY dirty) + Q_PROPERTY(Mode mode READ getAAMode WRITE setAAMode NOTIFY dirty) + public: - JitterSampleConfig() : render::Job::Config(true) {} + AntialiasingSetupConfig() : render::Job::Config(true) {} - float scale{ 0.5f }; - bool stop{ false }; - bool freeze{ false }; + /*@jsdoc + *Antialiasing modes. + * + * + * + * + * + * + * + * + * + *
+ *     <tr><th>Value</th><th>Name</th><th>Description</th></tr>
+ *     <tr><td><code>0</code></td><td>NONE</td><td>Antialiasing is disabled.</td></tr>
+ *     <tr><td><code>1</code></td><td>TAA</td><td>Temporal Antialiasing.</td></tr>
+ *     <tr><td><code>2</code></td><td>FXAA</td><td>FXAA.</td></tr>
+ *     <tr><td><code>3</code></td><td>MODE_COUNT</td><td>Indicates number of antialiasing modes</td></tr>
+ * @typedef {number} AntialiasingMode + */ + enum class Mode { + NONE = 0, + TAA, + FXAA, + MODE_COUNT + }; + Q_ENUM(Mode) // Stored as signed int. - void setIndex(int current); - void setState(int state); + /*@jsdoc + *TAA Antialiasing state. + * + * + * + * + * + * + * + * + * + *
+ *     <tr><th>Value</th><th>Name</th><th>Description</th></tr>
+ *     <tr><td><code>0</code></td><td>NONE</td><td>TAA is disabled.</td></tr>
+ *     <tr><td><code>1</code></td><td>PAUSE</td><td>TAA jitter is paused.</td></tr>
+ *     <tr><td><code>2</code></td><td>PLAY</td><td>TAA jitter is playing.</td></tr>
+ *     <tr><td><code>3</code></td><td>STATE_COUNT</td><td>Indicates number of antialiasing states</td></tr>
+ * @typedef {number} AntialiasingState + */ + enum class State + { + NONE = 0, + PAUSE, + PLAY, + STATE_COUNT + }; + Q_ENUM(State) + + float scale { 0.75f }; + bool stop { false }; + bool freeze { false }; + Mode mode { Mode::TAA }; public slots: - int cycleStopPauseRun(); int prev(); int next(); - int none(); - int pause(); - int play(); + State none(); + State pause(); + State play(); int getIndex() const { return _index; } - int getState() const { return _state; } + void setIndex(int current); + + State getState() const { return _state; } + void setState(State state); + + Mode getAAMode() const { return mode; } + void setAAMode(Mode mode); + signals: void dirty(); private: - int _state{ 0 }; - int _index{ 0 }; + State _state { State::PLAY }; + int _index { 0 }; }; - -class JitterSample { +class AntialiasingSetup { public: - enum { - SEQUENCE_LENGTH = 64 - }; + using Config = AntialiasingSetupConfig; + using Output = AntialiasingSetupConfig::Mode; + using JobModel = render::Job::ModelO; - using Config = JitterSampleConfig; - using Output = glm::vec2; - using JobModel = render::Job::ModelO; + AntialiasingSetup(); void configure(const Config& config); - void run(const render::RenderContextPointer& renderContext, Output& jitter); + void run(const render::RenderContextPointer& renderContext, Output& output); private: - struct SampleSequence { - SampleSequence(); - - glm::vec2 offsets[SEQUENCE_LENGTH + 1]; - int sequenceLength{ SEQUENCE_LENGTH }; - int currentIndex{ 0 }; - }; - - SampleSequence _sampleSequence; - float _scale{ 1.0 }; - bool _freeze{ false }; + std::vector _sampleSequence; + float _scale { 1.0f }; + int _freezedSampleIndex { 0 }; + bool _isStopped { false }; + bool _isFrozen { false }; + AntialiasingSetupConfig::Mode _mode{ AntialiasingSetupConfig::Mode::TAA }; }; class AntialiasingConfig : public render::Job::Config { Q_OBJECT - Q_PROPERTY(int mode READ getAAMode WRITE setAAMode NOTIFY dirty) Q_PROPERTY(float blend MEMBER blend NOTIFY dirty) Q_PROPERTY(float sharpen MEMBER sharpen NOTIFY dirty) Q_PROPERTY(float covarianceGamma MEMBER covarianceGamma NOTIFY dirty) Q_PROPERTY(bool constrainColor MEMBER constrainColor NOTIFY dirty) Q_PROPERTY(bool feedbackColor MEMBER feedbackColor NOTIFY dirty) + Q_PROPERTY(bool bicubicHistoryFetch MEMBER bicubicHistoryFetch NOTIFY dirty) Q_PROPERTY(bool debug MEMBER debug NOTIFY dirty) Q_PROPERTY(float debugX MEMBER debugX NOTIFY dirty) @@ -111,52 +154,26 @@ class AntialiasingConfig : public render::Job::Config { public: AntialiasingConfig() : render::Job::Config(true) {} - /*@jsdoc - *Antialiasing modes. - * - * - * - * - * - * - * - * - * - *
- *     <tr><th>Value</th><th>Name</th><th>Description</th></tr>
- *     <tr><td><code>0</code></td><td>NONE</td><td>Antialiasing is disabled.</td></tr>
- *     <tr><td><code>1</code></td><td>TAA</td><td>Temporal Antialiasing.</td></tr>
- *     <tr><td><code>2</code></td><td>FXAA</td><td>FXAA.</td></tr>
- *     <tr><td><code>3</code></td><td>MODE_COUNT</td><td>Inducates number of antialiasing modes</td></tr>
- * @typedef {number} AntialiasingMode - */ - enum Mode { - NONE = 0, - TAA, - FXAA, - MODE_COUNT - }; - Q_ENUM(Mode) // Stored as signed int. - - void setAAMode(int mode); - int getAAMode() const { return _mode; } - void setDebugFXAA(bool debug) { debugFXAAX = (debug ? 0.0f : 1.0f); emit dirty();} bool debugFXAA() const { return (debugFXAAX == 0.0f ? true : false); } - int _mode{ TAA }; // '_' prefix but not private? + float blend { 0.2f }; + float sharpen { 0.05f }; - float blend{ 0.25f }; - float sharpen{ 0.05f }; + bool constrainColor { true }; + float covarianceGamma { 1.15f }; + bool feedbackColor { false }; + bool bicubicHistoryFetch { true }; - bool constrainColor{ true }; - float covarianceGamma{ 0.65f }; - bool feedbackColor{ false }; - - float debugX{ 0.0f }; - float debugFXAAX{ 1.0f }; - float debugShowVelocityThreshold{ 1.0f }; - glm::vec2 debugCursorTexcoord{ 0.5f, 0.5f }; - float debugOrbZoom{ 2.0f }; + float debugX { 0.0f }; + float debugFXAAX { 1.0f }; + float debugShowVelocityThreshold { 1.0f }; + glm::vec2 debugCursorTexcoord { 0.5f, 0.5f }; + float debugOrbZoom { 2.0f }; bool debug { false }; bool showCursorPixel { false }; - bool showClosestFragment{ false }; + bool showClosestFragment { false }; signals: void dirty(); @@ -165,19 +182,15 @@ signals: #define SET_BIT(bitfield, bitIndex, value) bitfield = ((bitfield) & ~(1 << (bitIndex))) | ((value) << (bitIndex)) #define GET_BIT(bitfield, bitIndex) ((bitfield) & (1 << (bitIndex))) -#define ANTIALIASING_USE_TAA 1 - -#if ANTIALIASING_USE_TAA - struct TAAParams { - float nope{ 0.0f }; - float blend{ 0.15f }; - float covarianceGamma{ 1.0f }; - float debugShowVelocityThreshold{ 1.0f }; + float nope { 0.0f }; + float blend { 0.15f }; + float covarianceGamma { 0.9f }; + float debugShowVelocityThreshold { 1.0f }; - glm::ivec4 flags{ 0 }; - glm::vec4 pixelInfo{ 0.5f, 0.5f, 2.0f, 0.0f }; - glm::vec4 regionInfo{ 0.0f, 0.0f, 1.0f, 0.0f }; + glm::ivec4 flags { 0 }; + glm::vec4 pixelInfo { 0.5f, 0.5f, 2.0f, 0.0f }; + glm::vec4 regionInfo { 0.0f, 0.0f, 1.0f, 0.0f }; void setConstrainColor(bool enabled) { SET_BIT(flags.y, 1, enabled); } bool isConstrainColor() const { return (bool)GET_BIT(flags.y, 1); } @@ -185,6 +198,12 @@ struct TAAParams { void setFeedbackColor(bool enabled) { SET_BIT(flags.y, 4, enabled); } bool isFeedbackColor() const { return (bool)GET_BIT(flags.y, 4); } + void setBicubicHistoryFetch(bool enabled) { SET_BIT(flags.y, 0, enabled); } + bool isBicubicHistoryFetch() const { return (bool)GET_BIT(flags.y, 0); } + + void setSharpenedOutput(bool enabled) { SET_BIT(flags.y, 2, enabled); } + bool isSharpenedOutput() const { return (bool)GET_BIT(flags.y, 2); } + void setDebug(bool enabled) { SET_BIT(flags.x, 0, enabled); } bool isDebug() const { return (bool) GET_BIT(flags.x, 0); } @@ -199,71 +218,52 @@ struct TAAParams { void setShowClosestFragment(bool enabled) { SET_BIT(flags.x, 3, enabled); } + bool isFXAAEnabled() const { return regionInfo.z == 0.0f; } }; using TAAParamsBuffer = gpu::StructBuffer; class Antialiasing { public: - using Inputs = render::VaryingSet4 < DeferredFrameTransformPointer, gpu::FramebufferPointer, LinearDepthFramebufferPointer, VelocityFramebufferPointer > ; + using Inputs = render::VaryingSet4; + using Outputs = gpu::TexturePointer; using Config = AntialiasingConfig; - using JobModel = render::Job::ModelI; + using JobModel = render::Job::ModelIO; Antialiasing(bool isSharpenEnabled = true); ~Antialiasing(); void configure(const Config& config); - void run(const render::RenderContextPointer& 
renderContext, const Inputs& inputs); + void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs); - static gpu::PipelinePointer& getAntialiasingPipeline(); - static gpu::PipelinePointer& getBlendPipeline(); - static gpu::PipelinePointer& getDebugBlendPipeline(); + static const gpu::PipelinePointer& getAntialiasingPipeline(); + static const gpu::PipelinePointer& getIntensityPipeline(); + static const gpu::PipelinePointer& getBlendPipeline(); + static const gpu::PipelinePointer& getDebugBlendPipeline(); private: + struct AntialiasingBuffer { + gpu::FramebufferSwapChainPointer _swapChain; + gpu::TexturePointer _textures[2]; - gpu::FramebufferSwapChainPointer _antialiasingBuffers; - gpu::TexturePointer _antialiasingTextures[2]; + void clear() { + _swapChain.reset(); + _textures[0].reset(); + _textures[1].reset(); + } + }; + AntialiasingBuffer _antialiasingBuffers; + gpu::FramebufferPointer _intensityFramebuffer; + gpu::TexturePointer _intensityTexture; gpu::BufferPointer _blendParamsBuffer; + static gpu::PipelinePointer _antialiasingPipeline; + static gpu::PipelinePointer _intensityPipeline; static gpu::PipelinePointer _blendPipeline; static gpu::PipelinePointer _debugBlendPipeline; TAAParamsBuffer _params; - AntialiasingConfig::Mode _mode{ AntialiasingConfig::TAA }; - float _sharpen{ 0.15f }; - bool _isSharpenEnabled{ true }; + float _sharpen { 0.15f }; + bool _isSharpenEnabled { true }; + float _debugFXAAX { 0.0f }; }; - -#else // User setting for antialias mode will probably be broken. -class AntiAliasingConfig : public render::Job::Config { // Not to be confused with AntialiasingConfig... - Q_OBJECT - Q_PROPERTY(bool enabled MEMBER enabled) -public: - AntiAliasingConfig() : render::Job::Config(true) {} -}; - -class Antialiasing { -public: - using Config = AntiAliasingConfig; - using JobModel = render::Job::ModelI; - - Antialiasing(); - ~Antialiasing(); - void configure(const Config& config) {} - void run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer); - - static gpu::PipelinePointer& getAntialiasingPipeline(); - static gpu::PipelinePointer& getBlendPipeline(); - -private: - gpu::FramebufferPointer _antialiasingBuffer; - - gpu::TexturePointer _antialiasingTexture; - gpu::BufferPointer _paramsBuffer; - - static gpu::PipelinePointer _antialiasingPipeline; - static gpu::PipelinePointer _blendPipeline; - int _geometryId { 0 }; -}; -#endif - #endif // hifi_AntialiasingEffect_h diff --git a/libraries/render-utils/src/BackgroundStage.cpp b/libraries/render-utils/src/BackgroundStage.cpp index f3f287bdac..ca09b7598b 100644 --- a/libraries/render-utils/src/BackgroundStage.cpp +++ b/libraries/render-utils/src/BackgroundStage.cpp @@ -42,6 +42,7 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext, auto args = renderContext->args; gpu::doInBatch("DrawBackgroundStage::run", args->_context, [&](gpu::Batch& batch) { + PROFILE_RANGE_BATCH(batch, "Background"); args->_batch = &batch; batch.enableSkybox(true); @@ -49,16 +50,11 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext, batch.setViewportTransform(args->_viewport); batch.setStateScissorRect(args->_viewport); - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - - batch.setProjectionTransform(projMat); - batch.setViewTransform(viewMat); + bool forward = args->_renderMethod == 
render::Args::RenderMethod::FORWARD; + batch.setProjectionJitterEnabled(!forward); // If we're using forward rendering, we need to calculate haze - if (args->_renderMethod == render::Args::RenderMethod::FORWARD) { + if (forward) { const auto& hazeStage = args->_scene->getStage(); if (hazeStage && hazeFrame->_elements.size() > 0) { const auto& hazePointer = hazeStage->getElement(hazeFrame->_elements.front()); @@ -68,7 +64,7 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext, } } - skybox->render(batch, args->getViewFrustum(), args->_renderMethod == render::Args::RenderMethod::FORWARD); + skybox->render(batch, args->getViewFrustum(), forward, _transformSlot); }); args->_batch = nullptr; } diff --git a/libraries/render-utils/src/BackgroundStage.h b/libraries/render-utils/src/BackgroundStage.h index 4da8fbf9fb..dedd086eb9 100644 --- a/libraries/render-utils/src/BackgroundStage.h +++ b/libraries/render-utils/src/BackgroundStage.h @@ -33,9 +33,12 @@ public: using Inputs = render::VaryingSet3; using JobModel = render::Job::ModelI; - DrawBackgroundStage() {} + DrawBackgroundStage(uint transformSlot) : _transformSlot(transformSlot) {} void run(const render::RenderContextPointer& renderContext, const Inputs& inputs); + +private: + uint _transformSlot; }; #endif diff --git a/libraries/render-utils/src/BloomEffect.cpp b/libraries/render-utils/src/BloomEffect.cpp index 763f12cf0f..367a277553 100644 --- a/libraries/render-utils/src/BloomEffect.cpp +++ b/libraries/render-utils/src/BloomEffect.cpp @@ -17,6 +17,7 @@ #include #include "render-utils/ShaderConstants.h" +#include "StencilMaskPass.h" #define BLOOM_BLUR_LEVEL_COUNT 3 @@ -27,7 +28,9 @@ gpu::PipelinePointer DebugBloom::_pipeline; BloomThreshold::BloomThreshold(unsigned int downsamplingFactor) { assert(downsamplingFactor > 0); - _parameters.edit()._sampleCount = downsamplingFactor; + auto& params = _parameters.edit(); + params._sampleCount = downsamplingFactor; + params._offset = (1.0f - downsamplingFactor) * 0.5f; } void BloomThreshold::configure(const Config& config) {} @@ -56,11 +59,6 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons auto inputBuffer = inputFrameBuffer->getRenderBuffer(0); auto bufferSize = gpu::Vec2u(inputBuffer->getDimensions()); - const auto downSamplingFactor = _parameters.get()._sampleCount; - - // Downsample resolution - bufferSize.x /= downSamplingFactor; - bufferSize.y /= downSamplingFactor; if (!_outputBuffer || _outputBuffer->getSize() != bufferSize) { auto colorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(inputBuffer->getTexelFormat(), bufferSize.x, bufferSize.y, @@ -68,6 +66,7 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons _outputBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("BloomThreshold")); _outputBuffer->setRenderBuffer(0, colorTexture); + _outputBuffer->setStencilBuffer(inputFrameBuffer->getDepthStencilBuffer(), inputFrameBuffer->getDepthStencilBufferFormat()); _parameters.edit()._deltaUV = { 1.0f / bufferSize.x, 1.0f / bufferSize.y }; } diff --git a/libraries/render-utils/src/BloomThreshold.shared.slh b/libraries/render-utils/src/BloomThreshold.shared.slh index 5ad490a1ca..e1bcae11ea 100644 --- a/libraries/render-utils/src/BloomThreshold.shared.slh +++ b/libraries/render-utils/src/BloomThreshold.shared.slh @@ -8,8 +8,10 @@ struct Parameters { BT_VEC2 _deltaUV; + float _offset; float _threshold; int _sampleCount; + float _padding[3]; }; // <@if 1@> diff --git 
a/libraries/render-utils/src/BloomThreshold.slf b/libraries/render-utils/src/BloomThreshold.slf index bbf863994f..d8e4153198 100644 --- a/libraries/render-utils/src/BloomThreshold.slf +++ b/libraries/render-utils/src/BloomThreshold.slf @@ -5,6 +5,7 @@ // // Created by Olivier Prat on 09/26/2017 // Copyright 2017 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -17,11 +18,10 @@ LAYOUT_STD140(binding=RENDER_UTILS_BUFFER_BLOOM_PARAMS) uniform parametersBuffer Parameters parameters; }; -layout(location=0) in vec2 varTexCoord0; layout(location=0) out vec4 outFragColor; void main(void) { - vec2 startUv = varTexCoord0; + vec2 startUv = (vec2(gl_FragCoord.xy) + vec2(parameters._offset)) * parameters._deltaUV; vec4 maskedColor = vec4(0,0,0,0); for (int y=0 ; y 1e-4 ? velColor : vec4(0.0f, 0.0f, 1.0f, 0.0f);" + "}" +}; + +static const std::string DEFAULT_ANTIALIASING_INTENSITY_SHADER{ + "vec4 getFragmentColor() {" + " return vec4(texture(debugTexture0, uv).rrr, 1.0);" " }" }; @@ -254,7 +262,7 @@ DebugDeferredBuffer::StandardPipelines DebugDeferredBuffer::_pipelines; DebugDeferredBuffer::CustomPipelines DebugDeferredBuffer::_customPipelines; #include // TODO REMOVE: Temporary until UI -DebugDeferredBuffer::DebugDeferredBuffer() { +DebugDeferredBuffer::DebugDeferredBuffer(uint transformSlot) : _transformSlot(transformSlot) { // TODO REMOVE: Temporary until UI static const auto DESKTOP_PATH = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation); static const auto CUSTOM_FILE = DESKTOP_PATH.toStdString() + "/custom.slh"; @@ -328,6 +336,8 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Mode mode, const std::strin return DEFAULT_HALF_NORMAL_SHADER; case VelocityMode: return DEFAULT_VELOCITY_SHADER; + case AntialiasingIntensityMode: + return DEFAULT_ANTIALIASING_INTENSITY_SHADER; case CustomMode: return getFileContent(customFile, DEFAULT_CUSTOM_SHADER); default: @@ -404,9 +414,9 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I auto& linearDepthTarget = inputs.get1(); auto& surfaceGeometryFramebuffer = inputs.get2(); auto& ambientOcclusionFramebuffer = inputs.get3(); - auto& velocityFramebuffer = inputs.get4(); - auto& frameTransform = inputs.get5(); - auto& shadowFrame = inputs.get6(); + auto& frameTransform = inputs.get4(); + auto& shadowFrame = inputs.get5(); + const auto& antialiasingIntensityTexture = inputs.get6(); gpu::doInBatch("DebugDeferredBuffer::run", args->_context, [&](gpu::Batch& batch) { batch.enableStereo(false); @@ -415,12 +425,7 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I const auto geometryBuffer = DependencyManager::get(); const auto textureCache = DependencyManager::get(); - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - batch.setProjectionTransform(projMat); - batch.setViewTransform(viewMat, true); + batch.setSavedViewProjectionTransform(_transformSlot); batch.setModelTransform(Transform()); using Textures = render_utils::slot::texture::Texture; @@ -438,8 +443,8 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I batch.setResourceTexture(Textures::DeferredDepth, deferredFramebuffer->getPrimaryDepthTexture()); batch.setResourceTexture(Textures::DeferredLighting, deferredFramebuffer->getLightingTexture()); } - 
if (velocityFramebuffer && _mode == VelocityMode) { - batch.setResourceTexture(Textures::DebugTexture0, velocityFramebuffer->getVelocityTexture()); + if (_mode == VelocityMode) { + batch.setResourceTexture(Textures::DebugTexture0, deferredFramebuffer->getDeferredVelocityTexture()); } if (!shadowFrame->_objects.empty()) { @@ -475,6 +480,10 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I batch.setResourceTexture(Textures::DebugTexture0, ambientOcclusionFramebuffer->getNormalTexture()); } } + if (antialiasingIntensityTexture && _mode == AntialiasingIntensityMode) { + batch.setResourceTexture(Textures::DebugTexture0, antialiasingIntensityTexture); + } + const glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f); const glm::vec2 bottomLeft(_size.x, _size.y); const glm::vec2 topRight(_size.z, _size.w); diff --git a/libraries/render-utils/src/DebugDeferredBuffer.h b/libraries/render-utils/src/DebugDeferredBuffer.h index fd49bd2826..a93c8f9021 100644 --- a/libraries/render-utils/src/DebugDeferredBuffer.h +++ b/libraries/render-utils/src/DebugDeferredBuffer.h @@ -4,6 +4,7 @@ // // Created by Clement on 12/3/15. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -19,7 +20,6 @@ #include "DeferredFramebuffer.h" #include "SurfaceGeometryPass.h" #include "AmbientOcclusionEffect.h" -#include "VelocityBufferPass.h" #include "LightStage.h" @@ -44,13 +44,13 @@ public: LinearDepthFramebufferPointer, SurfaceGeometryFramebufferPointer, AmbientOcclusionFramebufferPointer, - VelocityFramebufferPointer, DeferredFrameTransformPointer, - LightStage::ShadowFramePointer>; + LightStage::ShadowFramePointer, + gpu::TexturePointer>; using Config = DebugDeferredBufferConfig; using JobModel = render::Job::ModelI; - DebugDeferredBuffer(); + DebugDeferredBuffer(uint transformSlot); ~DebugDeferredBuffer(); void configure(const Config& config); @@ -92,6 +92,7 @@ protected: AmbientOcclusionBlurredMode, AmbientOcclusionNormalMode, VelocityMode, + AntialiasingIntensityMode, CustomMode, // Needs to stay last NumModes, @@ -100,6 +101,7 @@ protected: private: Mode _mode{ Off }; glm::vec4 _size; + uint _transformSlot; #include "debug_deferred_buffer_shared.slh" diff --git a/libraries/render-utils/src/DeferredBufferRead.slh b/libraries/render-utils/src/DeferredBufferRead.slh index 868b93ff91..8d30bd6b18 100644 --- a/libraries/render-utils/src/DeferredBufferRead.slh +++ b/libraries/render-utils/src/DeferredBufferRead.slh @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 5/4/16. // Copyright 2013 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
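DebugDeferredBuffer now takes the transform slot in its constructor and swaps the separate velocity framebuffer input for the frame transform, shadow frame and TAA intensity texture. The RenderDeferredTask wiring is outside this excerpt; with these signatures it would plausibly be assembled along these lines (variable names are assumptions, not taken from the patch):

    // Hypothetical wiring sketch.
    const auto debugInputs = DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget,
            surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, deferredFrameTransform,
            shadowFrame, antialiasingIntensityTexture).asVarying();
    task.addJob<DebugDeferredBuffer>("DebugDeferredBuffer", debugInputs, mainViewTransformSlot);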
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -45,6 +46,7 @@ struct DeferredFragment { vec3 fresnel; float roughness; int mode; + int side; float scattering; float depthVal; }; @@ -58,6 +60,9 @@ vec3 getFresnelF0(float metallic, vec3 metalF0) { } <@endif@> +<@include DeferredTransform.slh@> +<$declareDeferredFrameTransform()$> + DeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) { vec4 normalVal; vec4 diffuseVal; @@ -82,6 +87,8 @@ DeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) { frag.scattering = float(frag.mode == FRAG_MODE_SCATTERING) * specularVal.x; frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz); + frag.side = getStereoSideFromUV(texcoord.x); + return frag; } @@ -109,18 +116,14 @@ DeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) { frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz); + frag.side = getStereoSideFromUV(texcoord.x); + return frag; } - -<@include DeferredTransform.slh@> -<$declareDeferredFrameTransform()$> - -vec4 unpackDeferredPosition(float depthValue, vec2 texcoord) { +vec4 unpackDeferredPosition(int side, float depthValue, vec2 texcoord) { float check = float(isStereo()); - float check2 = check * float(texcoord.x > 0.5); - texcoord.x -= check2 * 0.5; - int side = int(check2); + texcoord.x -= check * 0.5 * float(side); texcoord.x *= 1.0 + check; return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0); @@ -129,7 +132,7 @@ vec4 unpackDeferredPosition(float depthValue, vec2 texcoord) { // This method to unpack position is fastesst vec4 unpackDeferredPositionFromZdb(vec2 texcoord) { float Zdb = texture(depthMap, texcoord).x; - return unpackDeferredPosition(Zdb, texcoord); + return unpackDeferredPosition(getStereoSideFromUV(texcoord.x), Zdb, texcoord); } vec4 unpackDeferredPositionFromZeye(vec2 texcoord) { @@ -144,13 +147,13 @@ vec4 unpackDeferredPositionFromZeye(vec2 texcoord) { return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0); } -DeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) { +DeferredFragment unpackDeferredFragment(vec2 texcoord) { float depthValue = texture(depthMap, texcoord).r; DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord); frag.depthVal = depthValue; - frag.position = unpackDeferredPosition(frag.depthVal, texcoord); + frag.position = unpackDeferredPosition(frag.side, frag.depthVal, texcoord); return frag; } diff --git a/libraries/render-utils/src/DeferredBufferWrite.slh b/libraries/render-utils/src/DeferredBufferWrite.slh index de3d0a3087..ff965d9eb9 100644 --- a/libraries/render-utils/src/DeferredBufferWrite.slh +++ b/libraries/render-utils/src/DeferredBufferWrite.slh @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 1/12/15. // Copyright 2013 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
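The reworked unpackDeferredPosition above takes the stereo side explicitly and undoes the side-by-side eye packing with two operations on texcoord.x. A standalone worked example of that remap (illustration only, independent of the shader source):

    #include <cstdio>

    // Mirrors the arithmetic in unpackDeferredPosition: shift the right eye back, then
    // stretch the half-width coordinate to the full [0, 1) range.
    static float remapStereoU(float u, bool isStereo, int side) {
        float check = isStereo ? 1.0f : 0.0f;
        u -= check * 0.5f * float(side);
        u *= 1.0f + check;
        return u;
    }

    int main() {
        printf("%.2f\n", remapStereoU(0.75f, true, 1));   // centre of the right eye -> 0.50
        printf("%.2f\n", remapStereoU(0.25f, true, 0));   // centre of the left eye  -> 0.50
        printf("%.2f\n", remapStereoU(0.25f, false, 0));  // mono is left untouched  -> 0.25
        return 0;
    }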
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -12,12 +13,13 @@ <@def DEFERRED_BUFFER_WRITE_SLH@> <@include DeferredBuffer.slh@> +<@include DeferredBufferWrite_shared.slh@> - -layout(location=0) out vec4 _fragColor0; // albedo / metallic -layout(location=1) out vec4 _fragColor1; // Normal -layout(location=2) out vec4 _fragColor2; // scattering / emissive / occlusion -layout(location=3) out vec4 _fragColor3; // emissive +layout(location = DEFERRED_COLOR_SLOT) out vec4 _albedoMetallic; // albedo / metallic +layout(location = DEFERRED_NORMAL_SLOT) out vec4 _normalRoughness; // normal / roughness +layout(location = DEFERRED_SPECULAR_SLOT) out vec4 _scatteringEmissiveOcclusion; // scattering / emissive / occlusion +layout(location = DEFERRED_VELOCITY_SLOT) out vec4 _velocity; // velocity +layout(location = DEFERRED_LIGHTING_SLOT) out vec4 _lighting; // emissive // the alpha threshold const float alphaThreshold = 0.5; @@ -25,51 +27,67 @@ float evalOpaqueFinalAlpha(float alpha, float mapAlpha) { return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold)); } +<@include VelocityWrite.slh@> <@include DefaultMaterials.slh@> <@include LightingModel.slh@> -void packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) { +void packDeferredFragment(vec4 prevPositionCS, vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) { if (alpha < 1.0) { discard; } float check = float(scattering > 0.0); - _fragColor0 = vec4(albedo, mix(packShadedMetallic(metallic), packScatteringMetallic(metallic), check)); - _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0)); - _fragColor2 = vec4(mix(emissive, vec3(scattering), check), occlusion); - _fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0); + _albedoMetallic = vec4(albedo, mix(packShadedMetallic(metallic), packScatteringMetallic(metallic), check)); + _normalRoughness = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0)); + _scatteringEmissiveOcclusion = vec4(mix(emissive, vec3(scattering), check), occlusion); + _velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0); + _lighting = vec4(isEmissiveEnabled() * emissive, 1.0); } -void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 lightmap) { +void packDeferredFragmentLightmap(vec4 prevPositionCS, vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 lightmap) { if (alpha < 1.0) { discard; } - _fragColor0 = vec4(albedo, packLightmappedMetallic(metallic)); - _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0)); - _fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0); - _fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0); + _albedoMetallic = vec4(albedo, packLightmappedMetallic(metallic)); + _normalRoughness = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0)); + _scatteringEmissiveOcclusion = vec4(isLightmapEnabled() * lightmap, 1.0); + _velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0); + _lighting = vec4(isLightmapEnabled() * lightmap * albedo, 1.0); } -void packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) { +void packDeferredFragmentUnlit(vec4 prevPositionCS, vec3 normal, float alpha, vec3 color) { // to reduce texel flickering for floating point error we discard when alpha is "almost one" if (alpha < 0.999999) { discard; } - _fragColor0 = vec4(color, packUnlit()); - 
_fragColor1 = vec4(packNormal(normal), 1.0); - _fragColor2 = vec4(vec3(0.0), 1.0); - _fragColor3 = vec4(color, 1.0); + _albedoMetallic = vec4(color, packUnlit()); + _normalRoughness = vec4(packNormal(normal), 1.0); + _scatteringEmissiveOcclusion = vec4(vec3(0.0), 1.0); + _velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0); + _lighting = vec4(color, 1.0); } -void packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, float roughness) { +void packDeferredFragmentTranslucent(vec4 prevPositionCS, vec3 normal, float alpha, vec3 albedo, float roughness) { if (alpha <= 0.0) { discard; } - _fragColor0 = vec4(albedo.rgb, alpha); - _fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0)); - _fragColor2 = vec4(vec3(0.0), 1.0); - _fragColor3 = vec4(0.0); + _albedoMetallic = vec4(albedo.rgb, alpha); + _normalRoughness = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0)); + _scatteringEmissiveOcclusion = vec4(vec3(0.0), 1.0); + _velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0); + _lighting = vec4(0.0); +} + +void packDeferredFragmentTranslucentUnlit(vec4 prevPositionCS, vec3 normal, float alpha, vec3 color) { + if (alpha <= 0.0) { + discard; + } + _albedoMetallic = vec4(color, alpha); + _normalRoughness = vec4(packNormal(normal), 1.0); + _scatteringEmissiveOcclusion = vec4(vec3(0.0), 1.0); + _velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0); + _lighting = vec4(color, 1.0); } <@endif@> diff --git a/libraries/render-utils/src/DeferredBufferWrite_shared.slh b/libraries/render-utils/src/DeferredBufferWrite_shared.slh new file mode 100644 index 0000000000..f26bc3ac49 --- /dev/null +++ b/libraries/render-utils/src/DeferredBufferWrite_shared.slh @@ -0,0 +1,12 @@ +// glsl / C++ compatible source as interface for DeferredBuffer layout + +#define DEFERRED_COLOR_SLOT 0 +#define DEFERRED_NORMAL_SLOT 1 +#define DEFERRED_SPECULAR_SLOT 2 +#define DEFERRED_VELOCITY_SLOT 3 +#define DEFERRED_LIGHTING_SLOT 4 + + // <@if 1@> + // Trigger Scribe include + // <@endif@> +// diff --git a/libraries/render-utils/src/DeferredFrameTransform.cpp b/libraries/render-utils/src/DeferredFrameTransform.cpp index 21d5b120d6..5e6826ae26 100644 --- a/libraries/render-utils/src/DeferredFrameTransform.cpp +++ b/libraries/render-utils/src/DeferredFrameTransform.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau 6/3/2016. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
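Every packDeferredFragment* variant now writes a velocity sample derived from prevPositionCS into the new DEFERRED_VELOCITY_SLOT attachment. The actual packVelocity helper comes from VelocityWrite.slh, which this patch includes but does not show; conceptually it reduces to the screen-space delta between the current and previous clip-space positions, roughly as in this hypothetical sketch:

    #include <glm/glm.hpp>

    // Hypothetical approximation of a packVelocity-style helper, for illustration only.
    glm::vec2 packVelocitySketch(const glm::vec4& prevPositionCS, const glm::vec4& currPositionCS) {
        glm::vec2 prevNDC = glm::vec2(prevPositionCS) / prevPositionCS.w;  // perspective divide
        glm::vec2 currNDC = glm::vec2(currPositionCS) / currPositionCS.w;
        return (currNDC - prevNDC) * 0.5f;  // 0.5 converts the NDC delta to UV units
    }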
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -18,73 +19,51 @@ DeferredFrameTransform::DeferredFrameTransform() { _frameTransformBuffer = gpu::BufferView(std::make_shared(sizeof(FrameTransform), (const gpu::Byte*) &frameTransform)); } -void DeferredFrameTransform::update(RenderArgs* args, glm::vec2 jitter) { +void DeferredFrameTransform::update(RenderArgs* args) { // Update the depth info with near and far (same for stereo) auto nearZ = args->getViewFrustum().getNearClip(); auto farZ = args->getViewFrustum().getFarClip(); auto& frameTransformBuffer = _frameTransformBuffer.edit(); - frameTransformBuffer.depthInfo = glm::vec4(nearZ*farZ, farZ - nearZ, -farZ, 0.0f); + frameTransformBuffer.infos.depthInfo = glm::vec4(nearZ * farZ, farZ - nearZ, -farZ, 0.0f); + frameTransformBuffer.infos.pixelInfo = args->_viewport; - frameTransformBuffer.pixelInfo = args->_viewport; - - //_parametersBuffer.edit()._ditheringInfo.y += 0.25f; - - Transform cameraTransform; - args->getViewFrustum().evalViewTransform(cameraTransform); - cameraTransform.getMatrix(frameTransformBuffer.invView); - cameraTransform.getInverseMatrix(frameTransformBuffer.view); - - args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.projectionMono); - - // There may be some sort of mismatch here if the viewport size isn't the same as the frame buffer size as - // jitter is normalized by frame buffer size in TransformCamera. But we should be safe. - jitter.x /= args->_viewport.z; - jitter.y /= args->_viewport.w; + args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.infos.projectionMono); // Running in stereo ? bool isStereo = args->isStereo(); if (!isStereo) { - frameTransformBuffer.projectionUnjittered[0] = frameTransformBuffer.projectionMono; - frameTransformBuffer.invProjectionUnjittered[0] = glm::inverse(frameTransformBuffer.projectionUnjittered[0]); - - frameTransformBuffer.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f); - frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f); - - frameTransformBuffer.projection[0] = frameTransformBuffer.projectionUnjittered[0]; - frameTransformBuffer.projection[0][2][0] += jitter.x; - frameTransformBuffer.projection[0][2][1] += jitter.y; - frameTransformBuffer.invProjection[0] = glm::inverse(frameTransformBuffer.projection[0]); + frameTransformBuffer.infos.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f); + frameTransformBuffer.infos.invPixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f); } else { - - mat4 projMats[2]; - mat4 eyeViews[2]; - args->_context->getStereoProjections(projMats); - args->_context->getStereoViews(eyeViews); - - jitter.x *= 2.0f; - - for (int i = 0; i < 2; i++) { - // Compose the mono Eye space to Stereo clip space Projection Matrix - auto sideViewMat = projMats[i] * eyeViews[i]; - frameTransformBuffer.projectionUnjittered[i] = sideViewMat; - frameTransformBuffer.invProjectionUnjittered[i] = glm::inverse(sideViewMat); - - frameTransformBuffer.projection[i] = frameTransformBuffer.projectionUnjittered[i]; - frameTransformBuffer.projection[i][2][0] += jitter.x; - frameTransformBuffer.projection[i][2][1] += jitter.y; - frameTransformBuffer.invProjection[i] = glm::inverse(frameTransformBuffer.projection[i]); - } - - frameTransformBuffer.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f); - frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / 
(float)(args->_viewport.z >> 1), 1.0f / args->_viewport.w, 0.0f, 0.0f); + frameTransformBuffer.infos.pixelInfo.z *= 0.5f; + frameTransformBuffer.infos.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f); + frameTransformBuffer.infos.invPixelInfo = glm::vec4(2.0f / (float)(args->_viewport.z), 1.0f / args->_viewport.w, 0.0f, 0.0f); } } -void GenerateDeferredFrameTransform::run(const render::RenderContextPointer& renderContext, const Input& jitter, Output& frameTransform) { +void GenerateDeferredFrameTransform::run(const render::RenderContextPointer& renderContext, Output& frameTransform) { if (!frameTransform) { frameTransform = std::make_shared(); } - frameTransform->update(renderContext->args, jitter); + + RenderArgs* args = renderContext->args; + frameTransform->update(args); + + gpu::doInBatch("GenerateDeferredFrameTransform::run", args->_context, [&](gpu::Batch& batch) { + args->_batch = &batch; + + glm::mat4 projMat; + Transform viewMat; + args->getViewFrustum().evalProjectionMatrix(projMat); + args->getViewFrustum().evalViewTransform(viewMat); + batch.setProjectionTransform(projMat); + batch.setViewTransform(viewMat); + // This is the main view / projection transform that will be reused later on + batch.saveViewProjectionTransform(_transformSlot); + // Copy it to the deferred transform for the lighting pass + batch.copySavedViewProjectionTransformToBuffer(_transformSlot, frameTransform->getFrameTransformBuffer()._buffer, + sizeof(DeferredFrameTransform::DeferredFrameInfo)); + }); } diff --git a/libraries/render-utils/src/DeferredFrameTransform.h b/libraries/render-utils/src/DeferredFrameTransform.h index f7700cb2dc..8b8ce376f9 100644 --- a/libraries/render-utils/src/DeferredFrameTransform.h +++ b/libraries/render-utils/src/DeferredFrameTransform.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau 6/3/2016. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
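The stereo branch of DeferredFrameTransform::update above halves pixelInfo.z and stores per-eye texel sizes, so invPixelInfo.x becomes 2 / viewport.z, i.e. one over the width of a single eye. A small standalone check of that arithmetic:

    #include <cstdio>
    #include <glm/glm.hpp>

    int main() {
        glm::ivec4 viewport(0, 0, 2560, 800);              // side-by-side stereo target
        float eyeWidth = viewport.z * 0.5f;                // 1280 pixels per eye
        glm::vec2 invPixel(1.0f / eyeWidth, 1.0f / viewport.w);
        printf("invPixelInfo = (%g, %g)\n", invPixel.x, invPixel.y);  // (0.00078125, 0.00125)
        return 0;
    }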
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -20,68 +21,44 @@ // DeferredFrameTransform is a helper class gathering in one place the needed camera transform // and frame resolution needed for all the deferred rendering passes taking advantage of the Deferred buffers class DeferredFrameTransform { + friend class GenerateDeferredFrameTransform; public: using UniformBufferView = gpu::BufferView; DeferredFrameTransform(); - void update(RenderArgs* args, glm::vec2 jitter); + void update(RenderArgs* args); UniformBufferView getFrameTransformBuffer() const { return _frameTransformBuffer; } protected: - // Class describing the uniform buffer with the transform info common to the AO shaders // It s changing every frame - class FrameTransform { +#include "DeferredTransform_shared.slh" + + class FrameTransform : public _DeferredFrameTransform { public: - // Pixel info is { viewport width height} - glm::vec4 pixelInfo; - glm::vec4 invpixelInfo; - // Depth info is { n.f, f - n, -f} - glm::vec4 depthInfo; - // Stereo info is { isStereoFrame, halfWidth } - glm::vec4 stereoInfo{ 0.0 }; - // Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space - glm::mat4 projection[2]; - // Inverse proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space - glm::mat4 invProjection[2]; - // THe mono projection for sure - glm::mat4 projectionMono; - // Inv View matrix from eye space (mono) to world space - glm::mat4 invView; - // View matrix from world space to eye space (mono) - glm::mat4 view; - // Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space without jittering - glm::mat4 projectionUnjittered[2]; - // Inverse proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space without jittering - glm::mat4 invProjectionUnjittered[2]; - - FrameTransform() {} + FrameTransform() { infos.stereoInfo = glm::vec4(0.0f); } }; - UniformBufferView _frameTransformBuffer; - + UniformBufferView _frameTransformBuffer; }; using DeferredFrameTransformPointer = std::shared_ptr; - - class GenerateDeferredFrameTransform { public: - - using Input = glm::vec2; using Output = DeferredFrameTransformPointer; - using JobModel = render::Job::ModelIO; + using JobModel = render::Job::ModelO; - GenerateDeferredFrameTransform() {} + GenerateDeferredFrameTransform(uint transformSlot) : _transformSlot(transformSlot) {} - void run(const render::RenderContextPointer& renderContext, const Input& jitter, Output& frameTransform); + void run(const render::RenderContextPointer& renderContext, Output& frameTransform); private: + uint _transformSlot; }; #endif // hifi_DeferredFrameTransform_h diff --git a/libraries/render-utils/src/DeferredFramebuffer.cpp b/libraries/render-utils/src/DeferredFramebuffer.cpp index 1906375654..e1104d2cbb 100644 --- a/libraries/render-utils/src/DeferredFramebuffer.cpp +++ b/libraries/render-utils/src/DeferredFramebuffer.cpp @@ -4,12 +4,18 @@ // // Created by Sam Gateau 7/11/2016. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
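GenerateDeferredFrameTransform now needs to be told which slot to save the main view/projection into, just like DrawBackgroundStage and DebugDeferredBuffer earlier in this patch. The task construction itself is not part of this excerpt, but with these constructors the jobs would presumably share one slot index, e.g.:

    // Hypothetical construction sketch; the slot value and variable names are assumptions.
    const uint mainViewTransformSlot = 0;
    const auto deferredFrameTransform =
            task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform", mainViewTransformSlot);
    task.addJob<DrawBackgroundStage>("DrawBackgroundDeferred", backgroundInputs, mainViewTransformSlot);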
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // #include "DeferredFramebuffer.h" +#include "DeferredBufferWrite_shared.slh" + +#include "gpu/Batch.h" +#include "gpu/Context.h" + DeferredFramebuffer::DeferredFramebuffer() { } @@ -36,8 +42,10 @@ void DeferredFramebuffer::updatePrimaryDepth(const gpu::TexturePointer& depthBuf _deferredColorTexture.reset(); _deferredNormalTexture.reset(); _deferredSpecularTexture.reset(); + _deferredVelocityTexture.reset(); _lightingTexture.reset(); _lightingFramebuffer.reset(); + _lightingWithVelocityFramebuffer.reset(); } } @@ -46,8 +54,9 @@ void DeferredFramebuffer::allocate() { _deferredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("deferred")); _deferredFramebufferDepthColor = gpu::FramebufferPointer(gpu::Framebuffer::create("deferredDepthColor")); - auto colorFormat = gpu::Element::COLOR_SRGBA_32; - auto linearFormat = gpu::Element::COLOR_RGBA_32; + const auto colorFormat = gpu::Element::COLOR_SRGBA_32; + const auto linearFormat = gpu::Element::COLOR_RGBA_32; + const auto halfFormat = gpu::Element(gpu::VEC2, gpu::HALF, gpu::XY); auto width = _frameSize.x; auto height = _frameSize.y; @@ -56,10 +65,12 @@ void DeferredFramebuffer::allocate() { _deferredColorTexture = gpu::Texture::createRenderBuffer(colorFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler); _deferredNormalTexture = gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler); _deferredSpecularTexture = gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler); + _deferredVelocityTexture = gpu::Texture::createRenderBuffer(halfFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler); - _deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture); - _deferredFramebuffer->setRenderBuffer(1, _deferredNormalTexture); - _deferredFramebuffer->setRenderBuffer(2, _deferredSpecularTexture); + _deferredFramebuffer->setRenderBuffer(DEFERRED_COLOR_SLOT, _deferredColorTexture); + _deferredFramebuffer->setRenderBuffer(DEFERRED_NORMAL_SLOT, _deferredNormalTexture); + _deferredFramebuffer->setRenderBuffer(DEFERRED_SPECULAR_SLOT, _deferredSpecularTexture); + _deferredFramebuffer->setRenderBuffer(DEFERRED_VELOCITY_SLOT, _deferredVelocityTexture); _deferredFramebufferDepthColor->setRenderBuffer(0, _deferredColorTexture); @@ -80,8 +91,12 @@ void DeferredFramebuffer::allocate() { _lightingFramebuffer->setRenderBuffer(0, _lightingTexture); _lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat); - _deferredFramebuffer->setRenderBuffer(3, _lightingTexture); + _lightingWithVelocityFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("lighting_velocity")); + _lightingWithVelocityFramebuffer->setRenderBuffer(0, _lightingTexture); + _lightingWithVelocityFramebuffer->setRenderBuffer(1, _deferredVelocityTexture); + _lightingWithVelocityFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat); + _deferredFramebuffer->setRenderBuffer(DEFERRED_LIGHTING_SLOT, _lightingTexture); } @@ -127,6 +142,13 @@ gpu::TexturePointer DeferredFramebuffer::getDeferredSpecularTexture() { return _deferredSpecularTexture; } +gpu::TexturePointer DeferredFramebuffer::getDeferredVelocityTexture() { + if (!_deferredVelocityTexture) { + allocate(); + } + return _deferredVelocityTexture; +} + gpu::FramebufferPointer DeferredFramebuffer::getLightingFramebuffer() { if (!_lightingFramebuffer) { allocate(); @@ -134,6 +156,13 @@ 
gpu::FramebufferPointer DeferredFramebuffer::getLightingFramebuffer() { return _lightingFramebuffer; } +gpu::FramebufferPointer DeferredFramebuffer::getLightingWithVelocityFramebuffer() { + if (!_lightingWithVelocityFramebuffer) { + allocate(); + } + return _lightingWithVelocityFramebuffer; +} + gpu::TexturePointer DeferredFramebuffer::getLightingTexture() { if (!_lightingTexture) { allocate(); diff --git a/libraries/render-utils/src/DeferredFramebuffer.h b/libraries/render-utils/src/DeferredFramebuffer.h index 6002bf6494..272ba42e44 100644 --- a/libraries/render-utils/src/DeferredFramebuffer.h +++ b/libraries/render-utils/src/DeferredFramebuffer.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau 7/11/2016. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -15,10 +16,10 @@ #include "gpu/Resource.h" #include "gpu/Framebuffer.h" - // DeferredFramebuffer is a helper class gathering in one place the GBuffer (Framebuffer) and lighting framebuffer class DeferredFramebuffer { public: + DeferredFramebuffer(); gpu::FramebufferPointer getDeferredFramebuffer(); @@ -27,8 +28,10 @@ public: gpu::TexturePointer getDeferredColorTexture(); gpu::TexturePointer getDeferredNormalTexture(); gpu::TexturePointer getDeferredSpecularTexture(); + gpu::TexturePointer getDeferredVelocityTexture(); gpu::FramebufferPointer getLightingFramebuffer(); + gpu::FramebufferPointer getLightingWithVelocityFramebuffer(); gpu::TexturePointer getLightingTexture(); // Update the depth buffer which will drive the allocation of all the other resources according to its size. @@ -47,13 +50,15 @@ protected: gpu::TexturePointer _deferredColorTexture; gpu::TexturePointer _deferredNormalTexture; gpu::TexturePointer _deferredSpecularTexture; + gpu::TexturePointer _deferredVelocityTexture; gpu::TexturePointer _lightingTexture; gpu::FramebufferPointer _lightingFramebuffer; + gpu::FramebufferPointer _lightingWithVelocityFramebuffer; glm::ivec2 _frameSize; }; using DeferredFramebufferPointer = std::shared_ptr; -#endif // hifi_DeferredFramebuffer_h \ No newline at end of file +#endif // hifi_DeferredFramebuffer_h diff --git a/libraries/render-utils/src/DeferredLightingEffect.cpp b/libraries/render-utils/src/DeferredLightingEffect.cpp index f2f6639f88..e9d2a7f6e5 100644 --- a/libraries/render-utils/src/DeferredLightingEffect.cpp +++ b/libraries/render-utils/src/DeferredLightingEffect.cpp @@ -300,8 +300,9 @@ void PrepareDeferred::run(const RenderContextPointer& renderContext, const Input // Clear Color, Depth and Stencil for deferred buffer batch.clearFramebuffer( - gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_COLOR1 | gpu::Framebuffer::BUFFER_COLOR2 | gpu::Framebuffer::BUFFER_COLOR3 | - gpu::Framebuffer::BUFFER_DEPTH | + gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_COLOR1 | + gpu::Framebuffer::BUFFER_COLOR2 | gpu::Framebuffer::BUFFER_COLOR3 | + gpu::Framebuffer::BUFFER_COLOR4 | gpu::Framebuffer::BUFFER_DEPTH | gpu::Framebuffer::BUFFER_STENCIL, vec4(vec3(0), 0), 1.0, 0, true); @@ -506,7 +507,7 @@ void RenderDeferredLocals::run(const render::RenderContextPointer& renderContext } } -void RenderDeferredCleanup::run(const render::RenderContextPointer& renderContext) { +void RenderDeferredCleanup::run(const render::RenderContextPointer& renderContext, const DeferredFramebufferPointer& deferredFramebuffer) { auto args = renderContext->args; auto& batch = 
(*args->_batch); { @@ -531,6 +532,8 @@ void RenderDeferredCleanup::run(const render::RenderContextPointer& renderContex batch.setUniformBuffer(ru::Buffer::LightClusterGrid, nullptr); batch.setUniformBuffer(ru::Buffer::LightClusterContent, nullptr); + // Restore the lighting with velocity framebuffer so that following stages, like drawing the background, can get motion vectors. + batch.setFramebuffer(deferredFramebuffer->getLightingWithVelocityFramebuffer()); } } @@ -571,7 +574,7 @@ void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs lightsJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, surfaceGeometryFramebuffer, lightClusters); - cleanupJob.run(renderContext); + cleanupJob.run(renderContext, deferredFramebuffer); _gpuTimer->end(batch); }); diff --git a/libraries/render-utils/src/DeferredLightingEffect.h b/libraries/render-utils/src/DeferredLightingEffect.h index 73c43c52a3..70574211d5 100644 --- a/libraries/render-utils/src/DeferredLightingEffect.h +++ b/libraries/render-utils/src/DeferredLightingEffect.h @@ -124,8 +124,8 @@ public: class RenderDeferredCleanup { public: using JobModel = render::Job::Model; - - void run(const render::RenderContextPointer& renderContext); + + void run(const render::RenderContextPointer& renderContext, const DeferredFramebufferPointer& deferredFramebuffer); }; using RenderDeferredConfig = render::GPUJobConfig; diff --git a/libraries/render-utils/src/DeferredTransform.slh b/libraries/render-utils/src/DeferredTransform.slh index 93a3e61c51..19c6f67973 100644 --- a/libraries/render-utils/src/DeferredTransform.slh +++ b/libraries/render-utils/src/DeferredTransform.slh @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 6/2/16. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -16,109 +17,103 @@ <@func declareDeferredFrameTransform()@> -struct CameraCorrection { - mat4 _correction; - mat4 _correctionInverse; - - mat4 _prevView; - mat4 _prevViewInverse; -}; - -LAYOUT(binding=GPU_BUFFER_CAMERA_CORRECTION) uniform cameraCorrectionBuffer { - CameraCorrection cameraCorrection; -}; +<@include DeferredTransform_shared.slh@> -struct DeferredFrameTransform { - vec4 _pixelInfo; - vec4 _invPixelInfo; - vec4 _depthInfo; - vec4 _stereoInfo; - mat4 _projection[2]; - mat4 _invProjection[2]; - mat4 _projectionMono; - mat4 _viewInverse; - mat4 _view; - mat4 _projectionUnJittered[2]; - mat4 _invProjectionUnJittered[2]; -}; +#define DeferredFrameTransform _DeferredFrameTransform +#define TransformCamera _TransformCamera -LAYOUT(binding=RENDER_UTILS_BUFFER_DEFERRED_FRAME_TRANSFORM) uniform deferredFrameTransformBuffer { +LAYOUT_STD140(binding=RENDER_UTILS_BUFFER_DEFERRED_FRAME_TRANSFORM) uniform deferredFrameTransformBuffer { DeferredFrameTransform frameTransform; }; vec2 getWidthHeight(int resolutionLevel) { - return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel); + return vec2(ivec2(frameTransform.infos.pixelInfo.zw) >> resolutionLevel); } vec2 getInvWidthHeight() { - return frameTransform._invPixelInfo.xy; + return frameTransform.infos.invPixelInfo.xy; +} + +mat4 getProjection(int side) { + return frameTransform.cameras[side]._projection; +} + +mat4 getProjectionInverse(int side) { + return frameTransform.cameras[side]._projectionInverse; } float getProjScaleEye() { - return frameTransform._projection[0][1][1]; + return getProjection(0)[1][1]; } float getProjScale(int resolutionLevel) { - return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5; -} -mat4 getProjection(int side) { - return frameTransform._projection[side]; + return getWidthHeight(resolutionLevel).y * getProjScaleEye() * 0.5; } + mat4 getProjectionMono() { - return frameTransform._projectionMono; -} -mat4 getUnjitteredProjection(int side) { - return frameTransform._projectionUnJittered[side]; -} -mat4 getUnjitteredInvProjection(int side) { - return frameTransform._invProjectionUnJittered[side]; + return frameTransform.infos.projectionMono; } // positive near distance of the projection float getProjectionNear() { - float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2]; - float planeD = frameTransform._projection[0][3][2]; + mat4 projection = getProjection(0); + float planeC = projection[2][3] + projection[2][2]; + float planeD = projection[3][2]; return planeD / planeC; } // positive far distance of the projection float getPosLinearDepthFar() { - return -frameTransform._depthInfo.z; + return -frameTransform.infos.depthInfo.z; } -mat4 getViewInverse() { - return frameTransform._viewInverse * cameraCorrection._correctionInverse; +mat4 getViewInverse(int side) { + return frameTransform.cameras[side]._viewInverse; } -mat4 getView() { - return cameraCorrection._correction * frameTransform._view; +mat4 getView(int side) { + return frameTransform.cameras[side]._view; } -mat4 getPreviousView() { - return cameraCorrection._prevView; +mat4 getPreviousView(int side) { + return frameTransform.cameras[side]._previousView; } -mat4 getPreviousViewInverse() { - return cameraCorrection._prevViewInverse; -} - -DeferredFrameTransform getDeferredFrameTransform() { - DeferredFrameTransform result = frameTransform; - result._view = getView(); - result._viewInverse = 
getViewInverse(); - return result; +mat4 getPreviousViewInverse(int side) { + return frameTransform.cameras[side]._previousViewInverse; } bool isStereo() { - return frameTransform._stereoInfo.x > 0.0f; + return frameTransform.infos.stereoInfo.x > 0.0f; } float getStereoSideWidth(int resolutionLevel) { - return float(int(frameTransform._stereoInfo.y) >> resolutionLevel); + return float(int(frameTransform.infos.stereoInfo.y) >> resolutionLevel); } float getStereoSideHeight(int resolutionLevel) { - return float(int(frameTransform._pixelInfo.w) >> resolutionLevel); + return float(int(frameTransform.infos.pixelInfo.w) >> resolutionLevel); +} + +vec2 getSideImageSize(int resolutionLevel) { + return vec2(float(int(frameTransform.infos.stereoInfo.y) >> resolutionLevel), float(int(frameTransform.infos.pixelInfo.w) >> resolutionLevel)); +} + +int getStereoSideFromPixel(int xPos, int resolutionLevel) { + int sideWidth = int(getStereoSideWidth(resolutionLevel)); + return int(xPos >= sideWidth && isStereo()); +} + +int getStereoSideFromPixel(int xPos) { + return getStereoSideFromPixel(xPos, 0); +} + +int getStereoSideFromFragCoord() { + return getStereoSideFromPixel(int(gl_FragCoord.x), 0); +} + +int getStereoSideFromUV(float uPos) { + return int(uPos >= 0.5 && isStereo()); } vec2 getStereoSideSize(int resolutionLevel) { @@ -134,17 +129,16 @@ ivec4 getStereoSideInfo(int xPos, int resolutionLevel) { return getStereoSideInfoFromWidth(xPos, sideWidth); } - int getStereoSide(ivec4 sideInfo) { return sideInfo.x; } float evalZeyeFromZdb(float depth) { - return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z); + return frameTransform.infos.depthInfo.x / (depth * frameTransform.infos.depthInfo.y + frameTransform.infos.depthInfo.z); } float evalZdbFromZeye(float Zeye) { - return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y); + return (frameTransform.infos.depthInfo.x - Zeye * frameTransform.infos.depthInfo.z) / (Zeye * frameTransform.infos.depthInfo.y); } vec3 evalEyeNormal(vec3 C) { @@ -155,15 +149,7 @@ vec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) { // compute the view space position using the depth vec3 clipPos; clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0; - vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0); - return eyePos.xyz / eyePos.w; -} - -vec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) { - // compute the view space position using the depth - vec3 clipPos; - clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0; - vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0); + vec4 eyePos = getProjectionInverse(side) * vec4(clipPos.xyz, 1.0); return eyePos.xyz / eyePos.w; } diff --git a/libraries/render-utils/src/DeferredTransform_shared.slh b/libraries/render-utils/src/DeferredTransform_shared.slh new file mode 100644 index 0000000000..2c3441ffed --- /dev/null +++ b/libraries/render-utils/src/DeferredTransform_shared.slh @@ -0,0 +1,33 @@ +// glsl / C++ compatible source as interface for DeferredFrameTransform layout +#ifdef __cplusplus +# define DFT_VEC4 glm::vec4 +# define DFT_MAT4 glm::mat4 +#include "gpu/TransformCamera_shared.slh" +#else +# define DFT_VEC4 vec4 +# define DFT_MAT4 mat4 +<@include gpu/TransformCamera_shared.slh@> +#endif + +struct DeferredFrameInfo { + // Pixel info is { viewport width height} + DFT_VEC4 pixelInfo; + DFT_VEC4 invPixelInfo; + // Depth info is { n.f, f - n, -f} + 
DFT_VEC4 depthInfo; + // Stereo info is { isStereoFrame, halfWidth } + DFT_VEC4 stereoInfo; + // The mono projection for sure + DFT_MAT4 projectionMono; +}; + +struct _DeferredFrameTransform { + DeferredFrameInfo infos; + // The camera transforms for the two eyes (or only first one if mono, of course) + _TransformCamera cameras[2]; +}; + + // <@if 1@> + // Trigger Scribe include + // <@endif@> +// diff --git a/libraries/render-utils/src/GeometryCache.cpp b/libraries/render-utils/src/GeometryCache.cpp index 808bae7b9d..38fe0764e4 100644 --- a/libraries/render-utils/src/GeometryCache.cpp +++ b/libraries/render-utils/src/GeometryCache.cpp @@ -4,6 +4,7 @@ // // Created by Andrzej Kapolka on 6/21/13. // Copyright 2013 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -1677,7 +1678,7 @@ void GeometryCache::useSimpleDrawPipeline(gpu::Batch& batch, bool noBlend) { // enable decal blend state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA); - PrepareStencil::testMask(*state); + PrepareStencil::testMaskResetNoAA(*state); _standardDrawPipeline = gpu::Pipeline::create(program, state); @@ -1709,7 +1710,7 @@ void GeometryCache::useGridPipeline(gpu::Batch& batch, GridBuffer gridBuffer, bo gpu::StatePointer state = std::make_shared(); state->setDepthTest(true, !std::get<0>(key), gpu::LESS_EQUAL); if (std::get<0>(key)) { - PrepareStencil::testMask(*state); + PrepareStencil::testMaskResetNoAA(*state); } else { PrepareStencil::testMaskDrawShape(*state); } @@ -1816,7 +1817,6 @@ gpu::PipelinePointer GeometryCache::getWebBrowserProgram(bool transparent, bool gpu::StatePointer state = std::make_shared(); state->setDepthTest(true, !transparent, gpu::LESS_EQUAL); - // FIXME: do we need a testMaskDrawNoAA? PrepareStencil::testMaskDrawShapeNoAA(*state); state->setBlendFunction(transparent, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA, @@ -1897,7 +1897,7 @@ gpu::PipelinePointer GeometryCache::getSimplePipeline(bool textured, bool transp gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE); if (config.isAntiAliased()) { - config.isTransparent() ? PrepareStencil::testMask(*state) : PrepareStencil::testMaskDrawShape(*state); + config.isTransparent() ? 
PrepareStencil::testMaskResetNoAA(*state) : PrepareStencil::testMaskDrawShape(*state); } else { PrepareStencil::testMaskDrawShapeNoAA(*state); } diff --git a/libraries/render-utils/src/Haze.slf b/libraries/render-utils/src/Haze.slf index e7c3459f4a..57f55d32a1 100644 --- a/libraries/render-utils/src/Haze.slf +++ b/libraries/render-utils/src/Haze.slf @@ -26,7 +26,7 @@ LAYOUT(binding=RENDER_UTILS_TEXTURE_HAZE_LINEAR_DEPTH) uniform sampler2D linearDepthMap; <@endif@> -vec4 unpackPositionFromZeye(vec2 texcoord) { +vec4 unpackPositionFromZeyeAndGetSide(vec2 texcoord, out int side) { <@if not HIFI_USE_BACKGROUND@> float Zeye = -texture(linearDepthMap, texcoord).x; <@else@> @@ -36,7 +36,7 @@ vec4 unpackPositionFromZeye(vec2 texcoord) { float check = float(isStereo()); float check2 = check * float(texcoord.x > 0.5); texcoord.x -= check2 * 0.5; - int side = int(check2); + side = int(check2); texcoord.x *= 1.0 + check; return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0); @@ -46,8 +46,9 @@ layout(location=0) in vec2 varTexCoord0; layout(location=0) out vec4 outFragColor; void main(void) { - vec4 fragPositionES = unpackPositionFromZeye(varTexCoord0); - mat4 viewInverse = getViewInverse(); + int side; + vec4 fragPositionES = unpackPositionFromZeyeAndGetSide(varTexCoord0, side); + mat4 viewInverse = getViewInverse(side); <@if HIFI_USE_BACKGROUND@> // We choose an arbitrary large number > BLEND_DISTANCE in Haze.slh diff --git a/libraries/render-utils/src/HighlightEffect.cpp b/libraries/render-utils/src/HighlightEffect.cpp index 755d0a60be..f513dd0fbf 100644 --- a/libraries/render-utils/src/HighlightEffect.cpp +++ b/libraries/render-utils/src/HighlightEffect.cpp @@ -124,7 +124,8 @@ gpu::PipelinePointer DrawHighlightMask::_stencilMaskPipeline; gpu::PipelinePointer DrawHighlightMask::_stencilMaskFillPipeline; DrawHighlightMask::DrawHighlightMask(unsigned int highlightIndex, render::ShapePlumberPointer shapePlumber, - HighlightSharedParametersPointer parameters) : _highlightPassIndex(highlightIndex), _shapePlumber(shapePlumber), _sharedParameters(parameters) {} + HighlightSharedParametersPointer parameters, uint transformSlot) : + _highlightPassIndex(highlightIndex), _shapePlumber(shapePlumber), _sharedParameters(parameters), _transformSlot(transformSlot) {} void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) { assert(renderContext->args); @@ -177,8 +178,6 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c batch.clearDepthStencilFramebuffer(1.0f, 0); }); - const auto jitter = inputs.get2(); - render::ItemBounds itemBounds; gpu::doInBatch("DrawHighlightMask::run", args->_context, [&](gpu::Batch& batch) { @@ -190,9 +189,8 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c args->getViewFrustum().evalProjectionMatrix(projMat); args->getViewFrustum().evalViewTransform(viewMat); batch.setViewportTransform(args->_viewport); - batch.setProjectionTransform(projMat); - batch.setProjectionJitter(jitter.x, jitter.y); - batch.setViewTransform(viewMat); + batch.setProjectionJitterEnabled(true); + batch.setSavedViewProjectionTransform(_transformSlot); sortAndRenderZPassShapes(_shapePlumber, renderContext, inShapes, itemBounds); }); @@ -209,6 +207,11 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c } gpu::doInBatch("DrawHighlightMask::run::end", args->_context, [&](gpu::Batch& batch) { + // Setup camera, projection and viewport for all items + 
batch.setViewportTransform(args->_viewport); + batch.setProjectionJitterEnabled(true); + batch.setSavedViewProjectionTransform(_transformSlot); + // Draw stencil mask with object bounding boxes auto stencilPipeline = highlight._style.isFilled() ? _stencilMaskFillPipeline : _stencilMaskPipeline; batch.setPipeline(stencilPipeline); @@ -269,7 +272,6 @@ void DrawHighlight::run(const render::RenderContextPointer& renderContext, const shaderParameters._size.y = size; } - auto primaryFramebuffer = inputs.get4(); gpu::doInBatch("DrawHighlight::run", args->_context, [&](gpu::Batch& batch) { batch.enableStereo(false); batch.setFramebuffer(destinationFrameBuffer); @@ -285,9 +287,6 @@ void DrawHighlight::run(const render::RenderContextPointer& renderContext, const batch.setResourceTexture(ru::Texture::HighlightSceneDepth, sceneDepthBuffer->getPrimaryDepthTexture()); batch.setResourceTexture(ru::Texture::HighlightDepth, highlightedDepthTexture); batch.draw(gpu::TRIANGLE_STRIP, 4); - - // Reset the framebuffer for overlay drawing - batch.setFramebuffer(primaryFramebuffer); }); } } @@ -313,7 +312,7 @@ const gpu::PipelinePointer& DrawHighlight::getPipeline(const render::HighlightSt gpu::PipelinePointer DebugHighlight::_depthPipeline; -DebugHighlight::DebugHighlight() { +DebugHighlight::DebugHighlight(uint transformSlot) : _transformSlot(transformSlot) { _geometryDepthId = DependencyManager::get()->allocateID(); } @@ -336,22 +335,15 @@ void DebugHighlight::run(const render::RenderContextPointer& renderContext, cons assert(renderContext->args); assert(renderContext->args->hasViewFrustum()); RenderArgs* args = renderContext->args; - const auto jitter = input.get2(); - auto primaryFramebuffer = input.get3(); gpu::doInBatch("DebugHighlight::run", args->_context, [&](gpu::Batch& batch) { batch.setViewportTransform(args->_viewport); batch.setFramebuffer(highlightResources->getColorFramebuffer()); const auto geometryBuffer = DependencyManager::get(); - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - batch.setProjectionTransform(projMat); - batch.setProjectionJitter(jitter.x, jitter.y); - batch.setViewTransform(viewMat); + batch.setProjectionJitterEnabled(true); + batch.setSavedViewProjectionTransform(_transformSlot); batch.setModelTransform(Transform()); const glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f); @@ -363,9 +355,6 @@ void DebugHighlight::run(const render::RenderContextPointer& renderContext, cons geometryBuffer->renderQuad(batch, bottomLeft, topRight, color, _geometryDepthId); batch.setResourceTexture(0, nullptr); - - // Reset the framebuffer for overlay drawing - batch.setFramebuffer(primaryFramebuffer); }); } } @@ -467,13 +456,12 @@ void DrawHighlightTask::configure(const Config& config) { } -void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs) { +void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, uint transformSlot) { const auto items = inputs.getN(0).get(); const auto& outlines = items[RenderFetchCullSortTask::OUTLINE]; const auto sceneFrameBuffer = inputs.getN(1); const auto primaryFramebuffer = inputs.getN(2); const auto deferredFrameTransform = inputs.getN(3); - const auto jitter = inputs.getN(4); // Prepare the ShapePipeline static ShapePlumberPointer shapePlumber = std::make_shared(); @@ -513,8 +501,8 @@ void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, ren stream << 
"HighlightMask" << i; name = stream.str(); } - const auto drawMaskInputs = DrawHighlightMask::Inputs(sortedBounds, highlightResources, jitter).asVarying(); - const auto highlightedRect = task.addJob(name, drawMaskInputs, i, shapePlumber, sharedParameters); + const auto drawMaskInputs = DrawHighlightMask::Inputs(sortedBounds, highlightResources).asVarying(); + const auto highlightedRect = task.addJob(name, drawMaskInputs, i, shapePlumber, sharedParameters, transformSlot); if (i == 0) { highlight0Rect = highlightedRect; } @@ -525,7 +513,7 @@ void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, ren stream << "HighlightEffect" << i; name = stream.str(); } - const auto drawHighlightInputs = DrawHighlight::Inputs(deferredFrameTransform, highlightResources, sceneFrameBuffer, highlightedRect, primaryFramebuffer).asVarying(); + const auto drawHighlightInputs = DrawHighlight::Inputs(deferredFrameTransform, highlightResources, sceneFrameBuffer, highlightedRect).asVarying(); task.addJob(name, drawHighlightInputs, i, sharedParameters); } @@ -534,8 +522,8 @@ void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, ren task.addJob("HighlightCleanup", cleanupInput); // Debug highlight - const auto debugInputs = DebugHighlight::Inputs(highlightResources, const_cast(highlight0Rect), jitter, primaryFramebuffer).asVarying(); - task.addJob("HighlightDebug", debugInputs); + const auto debugInputs = DebugHighlight::Inputs(highlightResources, const_cast(highlight0Rect)).asVarying(); + task.addJob("HighlightDebug", debugInputs, transformSlot); } const render::Varying DrawHighlightTask::addSelectItemJobs(JobModel& task, const render::Varying& selectionName, diff --git a/libraries/render-utils/src/HighlightEffect.h b/libraries/render-utils/src/HighlightEffect.h index ee9bcf4267..e87b1f765b 100644 --- a/libraries/render-utils/src/HighlightEffect.h +++ b/libraries/render-utils/src/HighlightEffect.h @@ -114,11 +114,11 @@ private: class DrawHighlightMask { public: - using Inputs = render::VaryingSet3; + using Inputs = render::VaryingSet2; using Outputs = glm::ivec4; using JobModel = render::Job::ModelIO; - DrawHighlightMask(unsigned int highlightIndex, render::ShapePlumberPointer shapePlumber, HighlightSharedParametersPointer parameters); + DrawHighlightMask(unsigned int highlightIndex, render::ShapePlumberPointer shapePlumber, HighlightSharedParametersPointer parameters, uint transformSlot); void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs); @@ -128,6 +128,7 @@ protected: HighlightSharedParametersPointer _sharedParameters; gpu::BufferPointer _boundsBuffer; gpu::StructBuffer _outlineWidth; + uint _transformSlot { 0 }; static gpu::PipelinePointer _stencilMaskPipeline; static gpu::PipelinePointer _stencilMaskFillPipeline; @@ -136,7 +137,7 @@ protected: class DrawHighlight { public: - using Inputs = render::VaryingSet5; + using Inputs = render::VaryingSet4; using Config = render::Job::Config; using JobModel = render::Job::ModelI; @@ -174,11 +175,11 @@ signals: class DebugHighlight { public: - using Inputs = render::VaryingSet4; + using Inputs = render::VaryingSet2; using Config = DebugHighlightConfig; using JobModel = render::Job::ModelI; - DebugHighlight(); + DebugHighlight(uint transformSlot); ~DebugHighlight(); void configure(const Config& config); @@ -187,8 +188,9 @@ public: private: static gpu::PipelinePointer _depthPipeline; - int _geometryDepthId{ 0 }; - bool _isDisplayEnabled{ false }; + int _geometryDepthId { 0 }; + bool 
_isDisplayEnabled { false }; + uint _transformSlot { 0 }; static gpu::PipelinePointer& getDepthPipeline(); static void initializePipelines(); @@ -197,14 +199,13 @@ private: class DrawHighlightTask { public: - using Inputs = render::VaryingSet5; - using Config = render::Task::Config; + using Inputs = render::VaryingSet4; using Config = render::Task::Config; using JobModel = render::Task::ModelI; DrawHighlightTask(); void configure(const Config& config); - void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs); + void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, uint transformSlot); private: static const render::Varying addSelectItemJobs(JobModel& task, const render::Varying& selectionName, const RenderFetchCullSortTask::BucketList& items); diff --git a/libraries/render-utils/src/Highlight_aabox.slv b/libraries/render-utils/src/Highlight_aabox.slv index 65b98355ae..6d7e5d5bc2 100644 --- a/libraries/render-utils/src/Highlight_aabox.slv +++ b/libraries/render-utils/src/Highlight_aabox.slv @@ -1,20 +1,19 @@ <@include gpu/Config.slh@> <$VERSION_HEADER$> -// Generated on <$_SCRIBE_DATE$> -// +// <$_SCRIBE_FILENAME$> +// Generated on <$_SCRIBE_DATE$> // Draw and transform the fed vertex position with the standard MVP stack // and offset the vertices by a certain amount in the vertex direction // // Created by Olivier Prat on 11/02/2017 // Copyright 2017 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -<@include gpu/ShaderConstants.h@> <@include gpu/Transform.slh@> - <$declareStandardTransform()$> struct ItemBound { @@ -107,5 +106,5 @@ void main(void) { vec4 offsetPosition; <$transformModelToMonoClipPos(cam, obj, pos, offsetPosition)$> gl_Position.xy += normalize(offsetPosition.xy-gl_Position.xy) * _parameters.outlineWidth * gl_Position.w; - <$transformStereoClipsSpace(cam, gl_Position)$> + <$transformStereoClipSpace(gl_Position)$> } diff --git a/libraries/render-utils/src/LightClusters.cpp b/libraries/render-utils/src/LightClusters.cpp index f0ec35238f..ec7974a580 100644 --- a/libraries/render-utils/src/LightClusters.cpp +++ b/libraries/render-utils/src/LightClusters.cpp @@ -3,6 +3,7 @@ // // Created by Sam Gateau on 9/7/2016. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -649,13 +650,7 @@ void DebugLightClusters::run(const render::RenderContextPointer& renderContext, // Assign the camera transform batch.setViewportTransform(args->_viewport); - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - batch.setProjectionTransform(projMat); - batch.setViewTransform(viewMat, true); - + batch.setSavedViewProjectionTransform(_transformSlot); // Then the actual ClusterGrid attributes batch.setModelTransform(Transform()); @@ -667,8 +662,6 @@ void DebugLightClusters::run(const render::RenderContextPointer& renderContext, batch.setUniformBuffer(ru::Buffer::LightClusterGrid, lightClusters->_clusterGridBuffer); batch.setUniformBuffer(ru::Buffer::LightClusterContent, lightClusters->_clusterContentBuffer); - - if (doDrawClusterFromDepth) { batch.setPipeline(getDrawClusterFromDepthPipeline()); batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, deferredTransform->getFrameTransformBuffer()); diff --git a/libraries/render-utils/src/LightClusters.h b/libraries/render-utils/src/LightClusters.h index 94e1e37ae3..4aa0e73309 100644 --- a/libraries/render-utils/src/LightClusters.h +++ b/libraries/render-utils/src/LightClusters.h @@ -217,7 +217,7 @@ public: using Config = DebugLightClustersConfig; using JobModel = render::Job::ModelI; - DebugLightClusters() {} + DebugLightClusters(uint transformSlot) : _transformSlot(transformSlot) {} void configure(const Config& config); @@ -228,6 +228,7 @@ protected: static gpu::PipelinePointer _drawClusterFromDepth; static gpu::PipelinePointer _drawClusterContent; gpu::BufferPointer _gridBuffer; + uint _transformSlot; bool doDrawGrid { false }; bool doDrawClusterFromDepth { false }; diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index 0ea4f14767..1f8cb17841 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -192,7 +192,11 @@ void ModelMeshPartPayload::bindTransform(gpu::Batch& batch, const Transform& tra if (_clusterBuffer) { batch.setUniformBuffer(graphics::slot::buffer::Skinning, _clusterBuffer); } - batch.setModelTransform(transform); + + batch.setModelTransform(transform, _previousRenderTransform); + if (renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || renderMode == Args::RenderMode::MIRROR_RENDER_MODE) { + _previousRenderTransform = transform; + } } void ModelMeshPartPayload::drawCall(gpu::Batch& batch) const { diff --git a/libraries/render-utils/src/MeshPartPayload.h b/libraries/render-utils/src/MeshPartPayload.h index ecc9e997f0..752da66df0 100644 --- a/libraries/render-utils/src/MeshPartPayload.h +++ b/libraries/render-utils/src/MeshPartPayload.h @@ -71,6 +71,9 @@ public: void setBlendshapeBuffer(const std::unordered_map& blendshapeBuffers, const QVector& blendedMeshSizes); +protected: + mutable Transform _previousRenderTransform; + private: void initCache(const ModelPointer& model, int shapeID); diff --git a/libraries/render-utils/src/RenderCommonTask.cpp b/libraries/render-utils/src/RenderCommonTask.cpp index ba0460417c..6cf171d632 100644 --- a/libraries/render-utils/src/RenderCommonTask.cpp +++ b/libraries/render-utils/src/RenderCommonTask.cpp @@ -34,18 +34,19 @@ namespace gr { using RenderArgsPointer = std::shared_ptr; using namespace render; -extern void initForwardPipelines(ShapePlumber& plumber); extern void 
initMirrorPipelines(ShapePlumber& plumber, gpu::StatePointer state, const render::ShapePipeline::BatchSetter& batchSetter, const render::ShapePipeline::ItemSetter& itemSetter, bool forward); void BeginGPURangeTimer::run(const render::RenderContextPointer& renderContext, gpu::RangeTimerPointer& timer) { timer = _gpuTimer; gpu::doInBatch("BeginGPURangeTimer", renderContext->args->_context, [&](gpu::Batch& batch) { _gpuTimer->begin(batch); + batch.pushProfileRange(timer->name().c_str()); }); } void EndGPURangeTimer::run(const render::RenderContextPointer& renderContext, const gpu::RangeTimerPointer& timer) { gpu::doInBatch("EndGPURangeTimer", renderContext->args->_context, [&](gpu::Batch& batch) { + batch.popProfileRange(); timer->end(batch); }); @@ -53,14 +54,11 @@ void EndGPURangeTimer::run(const render::RenderContextPointer& renderContext, co config->setGPUBatchRunTime(timer->getGPUAverage(), timer->getBatchAverage()); } -render::ShapePlumberPointer DrawLayered3D::_shapePlumber = std::make_shared(); - -DrawLayered3D::DrawLayered3D(bool opaque) : - _opaquePass(opaque) { - static std::once_flag once; - std::call_once(once, [] { - initForwardPipelines(*_shapePlumber); - }); +DrawLayered3D::DrawLayered3D(const render::ShapePlumberPointer& shapePlumber, bool opaque, bool jitter, uint transformSlot) : + _shapePlumber(shapePlumber), + _transformSlot(transformSlot), + _opaquePass(opaque), + _isJitterEnabled(jitter) { } void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs& inputs) { @@ -70,9 +68,9 @@ void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs& auto config = std::static_pointer_cast(renderContext->jobConfig); const auto& inItems = inputs.get0(); - const auto& lightingModel = inputs.get1(); - const auto& hazeFrame = inputs.get2(); - const auto jitter = inputs.get3(); + const auto& frameTransform = inputs.get1(); + const auto& lightingModel = inputs.get2(); + const auto& hazeFrame = inputs.get3(); config->setNumDrawn((int)inItems.size()); emit config->numDrawnChanged(); @@ -92,7 +90,7 @@ void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs& if (_opaquePass) { gpu::doInBatch("DrawLayered3D::run::clear", args->_context, [&](gpu::Batch& batch) { batch.enableStereo(false); - batch.clearFramebuffer(gpu::Framebuffer::BUFFER_DEPTH, glm::vec4(), 1.f, 0, false); + batch.clearDepthFramebuffer(true, false); }); } @@ -101,22 +99,18 @@ void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs& // Render the items gpu::doInBatch("DrawLayered3D::main", args->_context, [&](gpu::Batch& batch) { + PROFILE_RANGE_BATCH(batch, "DrawLayered3D::main"); args->_batch = &batch; batch.setViewportTransform(args->_viewport); batch.setStateScissorRect(args->_viewport); - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - - batch.setProjectionTransform(projMat); - batch.setProjectionJitter(jitter.x, jitter.y); - batch.setViewTransform(viewMat); + batch.setProjectionJitterEnabled(_isJitterEnabled); + batch.setSavedViewProjectionTransform(_transformSlot); // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); + batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, frameTransform->getFrameTransformBuffer()); if (haze) { 
batch.setUniformBuffer(graphics::slot::buffer::Buffer::HazeParams, haze->getHazeParametersBuffer()); @@ -288,7 +282,7 @@ void ResolveFramebuffer::run(const render::RenderContextPointer& renderContext, class SetupMirrorTask { public: using Input = RenderMirrorTask::Inputs; - using Outputs = render::VaryingSet4; + using Outputs = render::VaryingSet3; using JobModel = render::Job::ModelIO; SetupMirrorTask(size_t mirrorIndex, size_t depth) : _mirrorIndex(mirrorIndex), _depth(depth) {} @@ -336,7 +330,6 @@ public: outputs.edit0() = mirror; outputs.edit1() = inputFramebuffer; outputs.edit2() = _cachedArgsPointer; - outputs.edit3() = inputs.get2(); } protected: @@ -352,13 +345,13 @@ public: using Inputs = SetupMirrorTask::Outputs; using JobModel = render::Job::ModelI; - DrawMirrorTask() { + DrawMirrorTask(uint transformSlot) : _transformSlot(transformSlot) { static std::once_flag once; std::call_once(once, [this] { auto state = std::make_shared(); state->setCullMode(gpu::State::CULL_BACK); state->setDepthTest(true, true, gpu::LESS_EQUAL); - PrepareStencil::testMaskDrawShape(*state); + PrepareStencil::testMaskDrawShapeNoAA(*state); initMirrorPipelines(*_forwardPipelines, state, FadeEffect::getBatchSetter(), FadeEffect::getItemUniformSetter(), true); initMirrorPipelines(*_deferredPipelines, state, FadeEffect::getBatchSetter(), FadeEffect::getItemUniformSetter(), false); @@ -370,7 +363,6 @@ public: auto mirror = inputs.get0(); auto framebuffer = inputs.get1(); auto cachedArgs = inputs.get2(); - auto jitter = inputs.get3(); if (cachedArgs) { args->_renderMode = cachedArgs->_renderMode; @@ -389,14 +381,7 @@ public: batch.setViewportTransform(args->_viewport); batch.setStateScissorRect(args->_viewport); - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - - batch.setProjectionTransform(projMat); - batch.setProjectionJitter(jitter.x, jitter.y); - batch.setViewTransform(viewMat); + batch.setSavedViewProjectionTransform(_transformSlot); batch.setResourceTexture(gr::Texture::MaterialMirror, args->_blitFramebuffer->getRenderBuffer(0)); @@ -417,19 +402,55 @@ public: private: static ShapePlumberPointer _forwardPipelines; static ShapePlumberPointer _deferredPipelines; + + uint _transformSlot; }; ShapePlumberPointer DrawMirrorTask::_forwardPipelines = std::make_shared(); ShapePlumberPointer DrawMirrorTask::_deferredPipelines = std::make_shared(); - void RenderMirrorTask::build(JobModel& task, const render::Varying& inputs, render::Varying& output, size_t mirrorIndex, render::CullFunctor cullFunctor, size_t depth) { +void RenderMirrorTask::build(JobModel& task, const render::Varying& inputs, render::Varying& output, size_t mirrorIndex, render::CullFunctor cullFunctor, + uint transformOffset,size_t depth) { size_t nextDepth = depth + 1; const auto setupOutput = task.addJob("SetupMirror" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth), inputs, mirrorIndex, nextDepth); - task.addJob("RenderMirrorView" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth), cullFunctor, render::ItemKey::TAG_BITS_1, render::ItemKey::TAG_BITS_1, nextDepth); + // Our primary view starts at transformOffset 0, and the secondary camera starts at transformOffset 2 + // Our primary mirror views thus start after the secondary camera, at transformOffset 4, and the secondary + // camera mirror views start after all of the primary camera mirror views, at 4 + NUM_MAIN_MIRROR_SLOTS + static uint NUM_MAIN_MIRROR_SLOTS = 0; + 
static std::once_flag once; + std::call_once(once, [] { + for (size_t mirrorDepth = 0; mirrorDepth < MAX_MIRROR_DEPTH; mirrorDepth++) { + NUM_MAIN_MIRROR_SLOTS += pow(MAX_MIRRORS_PER_LEVEL, mirrorDepth + 1); + } + NUM_MAIN_MIRROR_SLOTS *= 2; + }); - task.addJob("DrawMirrorTask" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth), setupOutput); - } + uint mirrorOffset; + if (transformOffset == RenderViewTask::TransformOffset::MAIN_VIEW) { + mirrorOffset = RenderViewTask::TransformOffset::FIRST_MIRROR_VIEW - 2; + } else if (transformOffset == RenderViewTask::TransformOffset::SECONDARY_VIEW) { + mirrorOffset = RenderViewTask::TransformOffset::FIRST_MIRROR_VIEW + NUM_MAIN_MIRROR_SLOTS - 2; + } else { + mirrorOffset = transformOffset; + } + + // To calculate our transformSlot, we take the transformSlot of our parent and add numSubSlots (the number of slots + // taken up by a sub-tree starting at this depth) per preceding mirrorIndex + uint numSubSlots = 0; + for (size_t mirrorDepth = depth; mirrorDepth < MAX_MIRROR_DEPTH; mirrorDepth++) { + numSubSlots += pow(MAX_MIRRORS_PER_LEVEL, mirrorDepth + 1 - nextDepth); + } + numSubSlots *= 2; + + mirrorOffset += 2 + numSubSlots * (uint)mirrorIndex; + + task.addJob("RenderMirrorView" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth), cullFunctor, render::ItemKey::TAG_BITS_1, + render::ItemKey::TAG_BITS_1, (RenderViewTask::TransformOffset) mirrorOffset, nextDepth); + + task.addJob("DrawMirrorTask" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth), setupOutput, + render::RenderEngine::TS_MAIN_VIEW + transformOffset); +} void RenderSimulateTask::run(const render::RenderContextPointer& renderContext, const Inputs& inputs) { auto args = renderContext->args; diff --git a/libraries/render-utils/src/RenderCommonTask.h b/libraries/render-utils/src/RenderCommonTask.h index 5d6395aceb..41bf66cdba 100644 --- a/libraries/render-utils/src/RenderCommonTask.h +++ b/libraries/render-utils/src/RenderCommonTask.h @@ -18,6 +18,7 @@ #include "LightStage.h" #include "HazeStage.h" #include "LightingModel.h" +#include "DeferredFrameTransform.h" class BeginGPURangeTimer { public: @@ -66,19 +67,21 @@ protected: class DrawLayered3D { public: - using Inputs = render::VaryingSet4; + using Inputs = render::VaryingSet4; using Config = DrawLayered3DConfig; using JobModel = render::Job::ModelI; - DrawLayered3D(bool opaque); + DrawLayered3D(const render::ShapePlumberPointer& shapePlumber, bool opaque, bool jitter, uint transformSlot); void configure(const Config& config) { _maxDrawn = config.maxDrawn; } void run(const render::RenderContextPointer& renderContext, const Inputs& inputs); protected: - static render::ShapePlumberPointer _shapePlumber; + render::ShapePlumberPointer _shapePlumber; int _maxDrawn; // initialized by Config + uint _transformSlot; bool _opaquePass { true }; + bool _isJitterEnabled { false }; }; class Blit { @@ -159,13 +162,14 @@ protected: class RenderMirrorTask { public: - using Inputs = render::VaryingSet3; + using Inputs = render::VaryingSet2; using JobModel = render::Task::ModelI; RenderMirrorTask() {} - void build(JobModel& task, const render::Varying& inputs, render::Varying& output, size_t mirrorIndex, render::CullFunctor cullFunctor, size_t depth); + void build(JobModel& task, const render::Varying& inputs, render::Varying& output, size_t mirrorIndex, render::CullFunctor cullFunctor, uint transformOffset, size_t depth); + // NOTE: if these change, must also change Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT static const 
size_t MAX_MIRROR_DEPTH { 3 }; static const size_t MAX_MIRRORS_PER_LEVEL { 3 }; }; diff --git a/libraries/render-utils/src/RenderDeferredTask.cpp b/libraries/render-utils/src/RenderDeferredTask.cpp index 261658039c..296fc22c40 100644 --- a/libraries/render-utils/src/RenderDeferredTask.cpp +++ b/libraries/render-utils/src/RenderDeferredTask.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -40,7 +41,6 @@ #include "DeferredFramebuffer.h" #include "DeferredLightingEffect.h" #include "SurfaceGeometryPass.h" -#include "VelocityBufferPass.h" #include "FramebufferCache.h" #include "TextureCache.h" #include "ZoneRenderer.h" @@ -61,6 +61,7 @@ using namespace render; extern void initDeferredPipelines(render::ShapePlumber& plumber, const render::ShapePipeline::BatchSetter& batchSetter, const render::ShapePipeline::ItemSetter& itemSetter); +extern void initForwardPipelines(render::ShapePlumber& plumber); namespace ru { using render_utils::slot::texture::Texture; @@ -75,25 +76,17 @@ namespace gr { class RenderDeferredTaskDebug { public: - using ExtraBuffers = render::VaryingSet6; + using ExtraBuffers = render::VaryingSet5; using Input = render::VaryingSet9; + LightingModel, Antialiasing::Outputs>; using JobModel = render::Task::ModelI; - RenderDeferredTaskDebug(); - - void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs); -private: + void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, uint mainViewTransformSlot); }; - -RenderDeferredTask::RenderDeferredTask() -{ -} - void RenderDeferredTask::configure(const Config& config) { // Propagate resolution scale to sub jobs who need it auto preparePrimaryBufferConfig = config.getConfig("PreparePrimaryBufferDeferred"); @@ -101,16 +94,18 @@ void RenderDeferredTask::configure(const Config& config) { preparePrimaryBufferConfig->setResolutionScale(config.resolutionScale); } -void RenderDeferredTask::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, size_t depth) { +void RenderDeferredTask::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint transformOffset, size_t depth) { // Prepare the ShapePipelines - static ShapePlumberPointer shapePlumber = std::make_shared(); - static std::once_flag once; - std::call_once(once, [] { - initDeferredPipelines(*shapePlumber, FadeEffect::getBatchSetter(), FadeEffect::getItemUniformSetter()); - }); + ShapePlumberPointer shapePlumberDeferred = std::make_shared(); + initDeferredPipelines(*shapePlumberDeferred, FadeEffect::getBatchSetter(), FadeEffect::getItemUniformSetter()); + ShapePlumberPointer shapePlumberForward = std::make_shared(); + initForwardPipelines(*shapePlumberForward); + + uint backgroundViewTransformSlot = render::RenderEngine::TS_BACKGROUND_VIEW + transformOffset; + uint mainViewTransformSlot = render::RenderEngine::TS_MAIN_VIEW + transformOffset; const auto& inputs = input.get(); - + // Separate the fetched items const auto& fetchedItems = inputs.get0(); @@ -146,13 +141,13 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren // Shadow Stage Frame const auto shadowFrame = shadowTaskOutputs[1]; - const auto jitter = task.addJob("JitterCam"); + const auto antialiasingMode = task.addJob("AntialiasingSetup"); // GPU jobs: Start preparing the primary, deferred and lighting buffer const auto scaledPrimaryFramebuffer = task.addJob("PreparePrimaryBufferDeferred"); // Prepare 
deferred, generate the shared Deferred Frame Transform. Only valid with the scaled frame buffer - const auto deferredFrameTransform = task.addJob("DeferredFrameTransform", jitter); + const auto deferredFrameTransform = task.addJob("DeferredFrameTransform", mainViewTransformSlot); const auto prepareDeferredInputs = PrepareDeferred::Inputs(scaledPrimaryFramebuffer, lightingModel).asVarying(); const auto prepareDeferredOutputs = task.addJob("PrepareDeferred", prepareDeferredInputs); @@ -169,13 +164,13 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren task.addJob("PrepareStencil", scaledPrimaryFramebuffer); // Render opaque objects in DeferredBuffer - const auto opaqueInputs = DrawStateSortDeferred::Inputs(opaques, lightingModel, jitter).asVarying(); - task.addJob("DrawOpaqueDeferred", opaqueInputs, shapePlumber); + const auto opaqueInputs = DrawStateSortDeferred::Inputs(opaques, lightingModel, deferredFrameTransform).asVarying(); + task.addJob("DrawOpaqueDeferred", opaqueInputs, shapePlumberDeferred, mainViewTransformSlot); if (depth < RenderMirrorTask::MAX_MIRROR_DEPTH) { - const auto mirrorInputs = RenderMirrorTask::Inputs(mirrors, mainTargetFramebuffer, jitter).asVarying(); + const auto mirrorInputs = RenderMirrorTask::Inputs(mirrors, mainTargetFramebuffer).asVarying(); for (size_t i = 0; i < RenderMirrorTask::MAX_MIRRORS_PER_LEVEL; i++) { - task.addJob("RenderMirrorTask" + std::to_string(i) + "Depth" + std::to_string(depth), mirrorInputs, i, cullFunctor, depth); + task.addJob("RenderMirrorTask" + std::to_string(i) + "Depth" + std::to_string(depth), mirrorInputs, i, cullFunctor, transformOffset, depth); } } @@ -203,11 +198,6 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren const auto ambientOcclusionFramebuffer = ambientOcclusionOutputs.getN(0); const auto ambientOcclusionUniforms = ambientOcclusionOutputs.getN(1); - // Velocity - const auto velocityBufferInputs = VelocityBufferPass::Inputs(deferredFrameTransform, deferredFramebuffer).asVarying(); - const auto velocityBufferOutputs = task.addJob("VelocityBuffer", velocityBufferInputs); - const auto velocityBuffer = velocityBufferOutputs.getN(0); - // Light Clustering // Create the cluster grid of lights, cpu job for now const auto lightClusteringPassInputs = LightClusteringPass::Input(deferredFrameTransform, lightingModel, lightFrame, linearDepthTarget).asVarying(); @@ -220,28 +210,28 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren // Similar to light stage, background stage has been filled by several potential render items and resolved for the frame in this job const auto backgroundInputs = DrawBackgroundStage::Inputs(lightingModel, backgroundFrame, hazeFrame).asVarying(); - task.addJob("DrawBackgroundDeferred", backgroundInputs); + task.addJob("DrawBackgroundDeferred", backgroundInputs, backgroundViewTransformSlot); const auto drawHazeInputs = render::Varying(DrawHaze::Inputs(hazeFrame, lightingFramebuffer, linearDepthTarget, deferredFrameTransform, lightingModel, lightFrame)); task.addJob("DrawHazeDeferred", drawHazeInputs, depth > 0); // Render transparent objects forward in LightingBuffer - const auto transparentsInputs = RenderTransparentDeferred::Inputs(transparents, hazeFrame, lightFrame, lightingModel, lightClusters, shadowFrame, jitter).asVarying(); - task.addJob("DrawTransparentDeferred", transparentsInputs, shapePlumber); + const auto transparentsInputs = RenderTransparentDeferred::Inputs(transparents, hazeFrame, lightFrame, 
lightingModel, lightClusters, shadowFrame, deferredFrameTransform).asVarying(); + task.addJob("DrawTransparentDeferred", transparentsInputs, shapePlumberDeferred, mainViewTransformSlot); // Highlight - const auto outlineInputs = DrawHighlightTask::Inputs(items, deferredFramebuffer, lightingFramebuffer, deferredFrameTransform, jitter).asVarying(); - task.addJob("DrawHighlight", outlineInputs); + const auto outlineInputs = DrawHighlightTask::Inputs(items, deferredFramebuffer, lightingFramebuffer, deferredFrameTransform).asVarying(); + task.addJob("DrawHighlight", outlineInputs, mainViewTransformSlot); // Layered Over (in front) - const auto inFrontOpaquesInputs = DrawLayered3D::Inputs(inFrontOpaque, lightingModel, hazeFrame, jitter).asVarying(); - const auto inFrontTransparentsInputs = DrawLayered3D::Inputs(inFrontTransparent, lightingModel, hazeFrame, jitter).asVarying(); - task.addJob("DrawInFrontOpaque", inFrontOpaquesInputs, true); - task.addJob("DrawInFrontTransparent", inFrontTransparentsInputs, false); + const auto inFrontOpaquesInputs = DrawLayered3D::Inputs(inFrontOpaque, deferredFrameTransform, lightingModel, hazeFrame).asVarying(); + task.addJob("DrawInFrontOpaque", inFrontOpaquesInputs, shapePlumberForward, true, true, mainViewTransformSlot); + const auto inFrontTransparentsInputs = DrawLayered3D::Inputs(inFrontTransparent, deferredFrameTransform, lightingModel, hazeFrame).asVarying(); + task.addJob("DrawInFrontTransparent", inFrontTransparentsInputs, shapePlumberForward, false, true, mainViewTransformSlot); // AA job before bloom to limit flickering - const auto antialiasingInputs = Antialiasing::Inputs(deferredFrameTransform, lightingFramebuffer, linearDepthTarget, velocityBuffer).asVarying(); - task.addJob("Antialiasing", antialiasingInputs); + const auto antialiasingInputs = Antialiasing::Inputs(deferredFrameTransform, deferredFramebuffer, linearDepthTarget, antialiasingMode).asVarying(); + const auto antialiasingIntensityTexture = task.addJob("Antialiasing", antialiasingInputs); // Add bloom const auto bloomInputs = BloomEffect::Inputs(deferredFrameTransform, lightingFramebuffer, bloomFrame, lightingModel).asVarying(); @@ -254,22 +244,19 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren const auto toneMappedBuffer = task.addJob("ToneMapping", toneMappingInputs); // Debugging task is happening in the "over" layer after tone mapping and just before HUD - if (depth == 0) { // Debug the bounds of the rendered items, still look at the zbuffer - const auto extraDebugBuffers = RenderDeferredTaskDebug::ExtraBuffers(linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, ambientOcclusionUniforms, scatteringResource, velocityBuffer); + if (depth == 0) { // Debug the bounds of the rendered items, still look at the zbuffer + const auto extraDebugBuffers = RenderDeferredTaskDebug::ExtraBuffers(linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, ambientOcclusionUniforms, scatteringResource); const auto debugInputs = RenderDeferredTaskDebug::Input(fetchedItems, shadowTaskOutputs, lightingStageInputs, lightClusters, prepareDeferredOutputs, extraDebugBuffers, - deferredFrameTransform, jitter, lightingModel).asVarying(); - task.addJob("DebugRenderDeferredTask", debugInputs); + deferredFrameTransform, lightingModel, antialiasingIntensityTexture).asVarying(); + task.addJob("DebugRenderDeferredTask", debugInputs, mainViewTransformSlot); } // HUD Layer - const auto renderHUDLayerInputs = 
RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying(); - task.addJob("RenderHUDLayer", renderHUDLayerInputs); + const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame, deferredFrameTransform).asVarying(); + task.addJob("RenderHUDLayer", renderHUDLayerInputs, shapePlumberForward, mainViewTransformSlot); } -RenderDeferredTaskDebug::RenderDeferredTaskDebug() { -} - -void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input, render::Varying& outputs) { +void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input, render::Varying& outputs, uint mainViewTransformSlot) { const auto& inputs = input.get(); @@ -291,14 +278,13 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input // RenderShadowTask out const auto& shadowOut = inputs.get1(); - - const auto& renderShadowTaskOut = shadowOut[0]; - const auto& shadowFrame = shadowOut[1]; + const auto& renderShadowTaskOut = shadowOut[0]; + const auto& shadowFrame = shadowOut[1]; // Extract the Lighting Stages Current frame ( and zones) const auto lightingStageInputs = inputs.get2(); // Fetch the current frame stacks from all the stages - const auto stageCurrentFrames = lightingStageInputs.get0(); + const auto stageCurrentFrames = lightingStageInputs[0]; const auto lightFrame = stageCurrentFrames[0]; const auto backgroundFrame = stageCurrentFrames[1]; const auto hazeFrame = stageCurrentFrames[2]; @@ -321,33 +307,30 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input const auto& ambientOcclusionFramebuffer = extraDeferredBuffer[2]; const auto& ambientOcclusionUniforms = extraDeferredBuffer[3]; const auto& scatteringResource = extraDeferredBuffer[4]; - const auto& velocityBuffer = extraDeferredBuffer[5]; // GenerateDeferredFrameTransform out const auto& deferredFrameTransform = inputs[6]; - // Jitter out - const auto& jitter = inputs[7]; - // Lighting Model out - const auto& lightingModel = inputs[8]; - + const auto& lightingModel = inputs[7]; + // Antialiasing out + const auto& antialiasingIntensityTexture = inputs[8]; // Light Cluster Grid Debuging job { const auto debugLightClustersInputs = DebugLightClusters::Inputs(deferredFrameTransform, lightingModel, linearDepthTarget, lightClusters).asVarying(); - task.addJob("DebugLightClusters", debugLightClustersInputs); + task.addJob("DebugLightClusters", debugLightClustersInputs, mainViewTransformSlot); } { // Debug the bounds of the rendered items, still look at the zbuffer - task.addJob("DrawMetaBounds", metas); - task.addJob("DrawOpaqueBounds", opaques); - task.addJob("DrawTransparentBounds", transparents); + task.addJob("DrawMetaBounds", metas, mainViewTransformSlot); + task.addJob("DrawOpaqueBounds", opaques, mainViewTransformSlot); + task.addJob("DrawTransparentBounds", transparents, mainViewTransformSlot); - task.addJob("DrawLightBounds", lights); - task.addJob("DrawZones", zones); + task.addJob("DrawLightBounds", lights, mainViewTransformSlot); + task.addJob("DrawZones", zones, mainViewTransformSlot); const auto frustums = task.addJob("ExtractFrustums", shadowFrame); const auto viewFrustum = frustums.getN(ExtractFrustums::VIEW_FRUSTUM); task.addJob("DrawViewFrustum", viewFrustum, glm::vec3(0.0f, 1.0f, 0.0f)); @@ -379,25 +362,25 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input const auto selectedItems = task.addJob("TransparentSelection", 
selectItemInput, selectionBaseName); // Render.getConfig("RenderMainView.DrawSelectionBounds").enabled = true - task.addJob("DrawSelectionBounds", selectedItems); + task.addJob("DrawSelectionBounds", selectedItems, mainViewTransformSlot); } { // Debug the bounds of the layered objects, still look at the zbuffer - task.addJob("DrawInFrontOpaqueBounds", inFrontOpaque); - task.addJob("DrawInFrontTransparentBounds", inFrontTransparent); + task.addJob("DrawInFrontOpaqueBounds", inFrontOpaque, mainViewTransformSlot); + task.addJob("DrawInFrontTransparentBounds", inFrontTransparent, mainViewTransformSlot); } { // Debug the bounds of the layered objects, still look at the zbuffer - task.addJob("DrawHUDOpaqueBounds", hudOpaque); - task.addJob("DrawHUDTransparentBounds", hudTransparent); + task.addJob("DrawHUDOpaqueBounds", hudOpaque, mainViewTransformSlot); + task.addJob("DrawHUDTransparentBounds", hudTransparent, mainViewTransformSlot); } // Debugging stages { // Debugging Deferred buffer job - const auto debugFramebuffers = DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, velocityBuffer, deferredFrameTransform, shadowFrame).asVarying(); - task.addJob("DebugDeferredBuffer", debugFramebuffers); + const auto debugFramebuffers = DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, deferredFrameTransform, shadowFrame, antialiasingIntensityTexture).asVarying(); + task.addJob("DebugDeferredBuffer", debugFramebuffers, mainViewTransformSlot); const auto debugSubsurfaceScatteringInputs = DebugSubsurfaceScattering::Inputs(deferredFrameTransform, deferredFramebuffer, lightingModel, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, scatteringResource).asVarying(); @@ -408,8 +391,8 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input // Scene Octree Debugging job { - task.addJob("DrawSceneOctree", spatialSelection); - task.addJob("DrawItemSelection", spatialSelection); + task.addJob("DrawSceneOctree", spatialSelection, mainViewTransformSlot); + task.addJob("DrawItemSelection", spatialSelection, mainViewTransformSlot); } // Status icon rendering job @@ -419,10 +402,10 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input static std::once_flag once; std::call_once(once, [] { auto iconMapPath = PathUtils::resourcesPath() + "icons/statusIconAtlas.svg"; - statusIconMap = DependencyManager::get()->getImageTexture(iconMapPath, image::TextureUsage::STRICT_TEXTURE); + statusIconMap = + DependencyManager::get()->getImageTexture(iconMapPath, image::TextureUsage::STRICT_TEXTURE); }); - const auto drawStatusInputs = DrawStatus::Input(opaques, jitter).asVarying(); - task.addJob("DrawStatus", drawStatusInputs, DrawStatus(statusIconMap)); + task.addJob("DrawStatus", opaques, DrawStatus(statusIconMap, mainViewTransformSlot)); } const auto debugZoneInputs = DebugZoneLighting::Inputs(deferredFrameTransform, lightFrame, backgroundFrame).asVarying(); @@ -481,7 +464,7 @@ void RenderTransparentDeferred::run(const RenderContextPointer& renderContext, c const auto& lightingModel = inputs.get3(); const auto& lightClusters = inputs.get4(); // Not needed yet: const auto& shadowFrame = inputs.get5(); - const auto jitter = inputs.get6(); + const auto& deferredFrameTransform = inputs.get6(); auto deferredLightingEffect = DependencyManager::get(); RenderArgs* args = renderContext->args; @@ -493,18 +476,13 @@ void 
RenderTransparentDeferred::run(const RenderContextPointer& renderContext, c batch.setViewportTransform(args->_viewport); batch.setStateScissorRect(args->_viewport); - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - - batch.setProjectionTransform(projMat); - batch.setProjectionJitter(jitter.x, jitter.y); - batch.setViewTransform(viewMat); + batch.setProjectionJitterEnabled(true); + batch.setSavedViewProjectionTransform(_transformSlot); // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); + batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, deferredFrameTransform->getFrameTransformBuffer()); // Set the light deferredLightingEffect->setupKeyLightBatch(args, batch, *lightFrame); @@ -548,7 +526,7 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const const auto& inItems = inputs.get0(); const auto& lightingModel = inputs.get1(); - const auto jitter = inputs.get2(); + const auto deferredFrameTransform = inputs.get2(); RenderArgs* args = renderContext->args; @@ -559,18 +537,13 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const batch.setViewportTransform(args->_viewport); batch.setStateScissorRect(args->_viewport); - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - - batch.setProjectionTransform(projMat); - batch.setProjectionJitter(jitter.x, jitter.y); - batch.setViewTransform(viewMat); + batch.setProjectionJitterEnabled(true); + batch.setSavedViewProjectionTransform(_transformSlot); // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); + batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, deferredFrameTransform->getFrameTransformBuffer()); // From the lighting model define a global shapeKey ORED with individiual keys ShapeKey::Builder keyBuilder; diff --git a/libraries/render-utils/src/RenderDeferredTask.h b/libraries/render-utils/src/RenderDeferredTask.h index 5fc9580981..5ecca97306 100644 --- a/libraries/render-utils/src/RenderDeferredTask.h +++ b/libraries/render-utils/src/RenderDeferredTask.h @@ -3,7 +3,8 @@ // render-utils/src/ // // Created by Sam Gateau on 5/29/15. -// Copyright 20154 High Fidelity, Inc. +// Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
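(Editorial sketch, not from the patch itself.) The two run() methods above show the pattern this change applies to every draw job: instead of evaluating projection and view matrices from the ViewFrustum inside each batch, a job stores the index of a view/projection pair that was saved into the batch stream earlier in the frame and binds it with batch.setSavedViewProjectionTransform(); jitter is toggled with batch.setProjectionJitterEnabled(). A minimal job following that pattern might look like the sketch below; the class name is illustrative and the item inputs are omitted, only the batch calls and the constructor shape come from the patch.

    class DrawSomethingDeferred {
    public:
        DrawSomethingDeferred(render::ShapePlumberPointer shapePlumber, uint transformSlot) :
            _shapePlumber(shapePlumber), _transformSlot(transformSlot) {}

        void run(const render::RenderContextPointer& renderContext) {
            RenderArgs* args = renderContext->args;
            gpu::doInBatch("DrawSomethingDeferred::run", args->_context, [&](gpu::Batch& batch) {
                batch.setViewportTransform(args->_viewport);
                batch.setStateScissorRect(args->_viewport);
                // Bind the pre-recorded view/projection pair for this view instead of
                // computing matrices here; the transform stage applies jitter when enabled.
                batch.setProjectionJitterEnabled(true);
                batch.setSavedViewProjectionTransform(_transformSlot);
                // ... bind the lighting model / frame transform buffers and draw the items ...
            });
        }

    private:
        render::ShapePlumberPointer _shapePlumber;
        uint _transformSlot;
    };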
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -43,18 +44,19 @@ protected: class RenderTransparentDeferred { public: - using Inputs = render::VaryingSet7; + using Inputs = render::VaryingSet7; using Config = RenderTransparentDeferredConfig; using JobModel = render::Job::ModelI; - RenderTransparentDeferred(render::ShapePlumberPointer shapePlumber) - : _shapePlumber{ shapePlumber } {} + RenderTransparentDeferred(render::ShapePlumberPointer shapePlumber, uint transformSlot) + : _shapePlumber(shapePlumber), _transformSlot(transformSlot) {} void configure(const Config& config) { _maxDrawn = config.maxDrawn; } void run(const render::RenderContextPointer& renderContext, const Inputs& inputs); protected: render::ShapePlumberPointer _shapePlumber; + uint _transformSlot; int _maxDrawn; // initialized by Config }; @@ -83,13 +85,13 @@ protected: class DrawStateSortDeferred { public: - using Inputs = render::VaryingSet3; + using Inputs = render::VaryingSet3; using Config = DrawStateSortConfig; using JobModel = render::Job::ModelI; - DrawStateSortDeferred(render::ShapePlumberPointer shapePlumber) - : _shapePlumber{ shapePlumber } { + DrawStateSortDeferred(render::ShapePlumberPointer shapePlumber, uint transformSlot) + : _shapePlumber(shapePlumber), _transformSlot(transformSlot) { } void configure(const Config& config) { @@ -100,6 +102,7 @@ public: protected: render::ShapePlumberPointer _shapePlumber; + uint _transformSlot; int _maxDrawn; // initialized by Config bool _stateSort; }; @@ -141,12 +144,8 @@ public: using Config = RenderDeferredTaskConfig; using JobModel = render::Task::ModelI; - RenderDeferredTask(); - void configure(const Config& config); - void build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, size_t depth); - -private: + void build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint transformOffset, size_t depth); }; diff --git a/libraries/render-utils/src/RenderForwardTask.cpp b/libraries/render-utils/src/RenderForwardTask.cpp index 85ea0facbe..40581aaea1 100644 --- a/libraries/render-utils/src/RenderForwardTask.cpp +++ b/libraries/render-utils/src/RenderForwardTask.cpp @@ -68,7 +68,7 @@ void RenderForwardTask::configure(const Config& config) { preparePrimaryBufferConfig->setResolutionScale(config.resolutionScale); } -void RenderForwardTask::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, size_t depth) { +void RenderForwardTask::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint transformOffset, size_t depth) { task.addJob("SetRenderMethodTask", render::Args::FORWARD); // Prepare the ShapePipelines @@ -78,6 +78,9 @@ void RenderForwardTask::build(JobModel& task, const render::Varying& input, rend initForwardPipelines(*shapePlumber); }); + uint backgroundViewTransformSlot = render::RenderEngine::TS_BACKGROUND_VIEW + transformOffset; + uint mainViewTransformSlot = render::RenderEngine::TS_MAIN_VIEW + transformOffset; + // Unpack inputs const auto& inputs = input.get(); @@ -115,7 +118,7 @@ void RenderForwardTask::build(JobModel& task, const render::Varying& input, rend const auto scaledPrimaryFramebuffer = task.addJob("PreparePrimaryBufferForward"); // Prepare deferred, generate the shared Deferred Frame Transform. 
Only valid with the scaled frame buffer - const auto deferredFrameTransform = task.addJob("DeferredFrameTransform"); + const auto deferredFrameTransform = task.addJob("DeferredFrameTransform", mainViewTransformSlot); // Prepare Forward Framebuffer pass const auto prepareForwardInputs = PrepareForward::Inputs(scaledPrimaryFramebuffer, lightFrame).asVarying(); @@ -131,38 +134,37 @@ void RenderForwardTask::build(JobModel& task, const render::Varying& input, rend // Draw opaques forward const auto opaqueInputs = DrawForward::Inputs(opaques, lightingModel, hazeFrame).asVarying(); - task.addJob("DrawOpaques", opaqueInputs, shapePlumber, true); + task.addJob("DrawOpaques", opaqueInputs, shapePlumber, true, mainViewTransformSlot); - const auto nullJitter = Varying(glm::vec2(0.0f, 0.0f)); #ifndef Q_OS_ANDROID if (depth < RenderMirrorTask::MAX_MIRROR_DEPTH) { - const auto mirrorInputs = RenderMirrorTask::Inputs(mirrors, scaledPrimaryFramebuffer, nullJitter).asVarying(); + const auto mirrorInputs = RenderMirrorTask::Inputs(mirrors, scaledPrimaryFramebuffer).asVarying(); for (size_t i = 0; i < RenderMirrorTask::MAX_MIRRORS_PER_LEVEL; i++) { - task.addJob("RenderMirrorTask" + std::to_string(i) + "Depth" + std::to_string(depth), mirrorInputs, i, cullFunctor, depth); + task.addJob("RenderMirrorTask" + std::to_string(i) + "Depth" + std::to_string(depth), mirrorInputs, i, cullFunctor, transformOffset, depth); } } #endif // Similar to light stage, background stage has been filled by several potential render items and resolved for the frame in this job const auto backgroundInputs = DrawBackgroundStage::Inputs(lightingModel, backgroundFrame, hazeFrame).asVarying(); - task.addJob("DrawBackgroundForward", backgroundInputs); + task.addJob("DrawBackgroundForward", backgroundInputs, backgroundViewTransformSlot); // Draw transparent objects forward const auto transparentInputs = DrawForward::Inputs(transparents, lightingModel, hazeFrame).asVarying(); - task.addJob("DrawTransparents", transparentInputs, shapePlumber, false); + task.addJob("DrawTransparents", transparentInputs, shapePlumber, false, mainViewTransformSlot); // Layered - const auto inFrontOpaquesInputs = DrawLayered3D::Inputs(inFrontOpaque, lightingModel, hazeFrame, nullJitter).asVarying(); - const auto inFrontTransparentsInputs = DrawLayered3D::Inputs(inFrontTransparent, lightingModel, hazeFrame, nullJitter).asVarying(); - task.addJob("DrawInFrontOpaque", inFrontOpaquesInputs, true); - task.addJob("DrawInFrontTransparent", inFrontTransparentsInputs, false); + const auto inFrontOpaquesInputs = DrawLayered3D::Inputs(inFrontOpaque, deferredFrameTransform, lightingModel, hazeFrame).asVarying(); + const auto inFrontTransparentsInputs = DrawLayered3D::Inputs(inFrontTransparent, deferredFrameTransform, lightingModel, hazeFrame).asVarying(); + task.addJob("DrawInFrontOpaque", inFrontOpaquesInputs, shapePlumber, true, false, mainViewTransformSlot); + task.addJob("DrawInFrontTransparent", inFrontTransparentsInputs, shapePlumber, false, false, mainViewTransformSlot); if (depth == 0) { // Debug the bounds of the rendered items, still look at the zbuffer - task.addJob("DrawMetaBounds", metas); - task.addJob("DrawBounds", opaques); - task.addJob("DrawTransparentBounds", transparents); + task.addJob("DrawMetaBounds", metas, mainViewTransformSlot); + task.addJob("DrawBounds", opaques, mainViewTransformSlot); + task.addJob("DrawTransparentBounds", transparents, mainViewTransformSlot); - task.addJob("DrawZones", zones); + task.addJob("DrawZones", zones, 
mainViewTransformSlot); const auto debugZoneInputs = DebugZoneLighting::Inputs(deferredFrameTransform, lightFrame, backgroundFrame).asVarying(); task.addJob("DrawZoneStack", debugZoneInputs); } @@ -177,8 +179,8 @@ void RenderForwardTask::build(JobModel& task, const render::Varying& input, rend const auto toneMappingInputs = ToneMapAndResample::Input(resolvedFramebuffer, destFramebuffer, tonemappingFrame).asVarying(); const auto toneMappedBuffer = task.addJob("ToneMapping", toneMappingInputs); // HUD Layer - const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying(); - task.addJob("RenderHUDLayer", renderHUDLayerInputs); + const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame, deferredFrameTransform).asVarying(); + task.addJob("RenderHUDLayer", renderHUDLayerInputs, shapePlumber, mainViewTransformSlot); } gpu::FramebufferPointer PreparePrimaryFramebufferMSAA::createFramebuffer(const char* name, const glm::uvec2& frameSize, int numSamples) { @@ -280,12 +282,7 @@ void DrawForward::run(const RenderContextPointer& renderContext, const Inputs& i args->_batch = &batch; // Setup projection - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - batch.setProjectionTransform(projMat); - batch.setViewTransform(viewMat); + batch.setSavedViewProjectionTransform(_transformSlot); batch.setModelTransform(Transform()); // Setup lighting model for all items; diff --git a/libraries/render-utils/src/RenderForwardTask.h b/libraries/render-utils/src/RenderForwardTask.h index de3a6dd205..ef7b6f5a92 100644 --- a/libraries/render-utils/src/RenderForwardTask.h +++ b/libraries/render-utils/src/RenderForwardTask.h @@ -4,6 +4,7 @@ // // Created by Zach Pomerantz on 12/13/2016. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -36,7 +37,7 @@ public: RenderForwardTask() {} void configure(const Config& config); - void build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, size_t depth); + void build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint transformOffset, size_t depth); }; @@ -93,13 +94,15 @@ public: using Inputs = render::VaryingSet3; using JobModel = render::Job::ModelI; - DrawForward(const render::ShapePlumberPointer& shapePlumber, bool opaquePass) : _shapePlumber(shapePlumber), _opaquePass(opaquePass) {} + DrawForward(const render::ShapePlumberPointer& shapePlumber, bool opaquePass, uint transformSlot) : + _shapePlumber(shapePlumber), _opaquePass(opaquePass), _transformSlot(transformSlot) {} void run(const render::RenderContextPointer& renderContext, const Inputs& inputs); private: render::ShapePlumberPointer _shapePlumber; bool _opaquePass; + uint _transformSlot; }; #endif // hifi_RenderForwardTask_h diff --git a/libraries/render-utils/src/RenderHUDLayerTask.cpp b/libraries/render-utils/src/RenderHUDLayerTask.cpp index 8fee3d57bc..93736ada1c 100644 --- a/libraries/render-utils/src/RenderHUDLayerTask.cpp +++ b/libraries/render-utils/src/RenderHUDLayerTask.cpp @@ -1,6 +1,7 @@ // // Created by Sam Gateau on 2019/06/14 // Copyright 2013-2019 High Fidelity, Inc. 
+// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -24,12 +25,8 @@ void CompositeHUD::run(const RenderContextPointer& renderContext, const gpu::Fra // Grab the HUD texture #if !defined(DISABLE_QML) gpu::doInBatch("CompositeHUD", renderContext->args->_context, [&](gpu::Batch& batch) { - glm::mat4 projMat; - Transform viewMat; - renderContext->args->getViewFrustum().evalProjectionMatrix(projMat); - renderContext->args->getViewFrustum().evalViewTransform(viewMat); - batch.setProjectionTransform(projMat); - batch.setViewTransform(viewMat, true); + PROFILE_RANGE_BATCH(batch, "HUD"); + batch.setSavedViewProjectionTransform(_transformSlot); if (inputs) { batch.setFramebuffer(inputs); } @@ -40,7 +37,8 @@ void CompositeHUD::run(const RenderContextPointer& renderContext, const gpu::Fra #endif } -void RenderHUDLayerTask::build(JobModel& task, const render::Varying& input, render::Varying& output) { +void RenderHUDLayerTask::build(JobModel& task, const render::Varying& input, render::Varying& output, + render::ShapePlumberPointer shapePlumber, uint transformSlot) { const auto& inputs = input.get(); const auto& primaryFramebuffer = inputs[0]; @@ -48,14 +46,15 @@ void RenderHUDLayerTask::build(JobModel& task, const render::Varying& input, ren const auto& hudOpaque = inputs[2]; const auto& hudTransparent = inputs[3]; const auto& hazeFrame = inputs[4]; + const auto& deferredFrameTransform = inputs[5]; // Composite the HUD and HUD overlays - task.addJob("HUD", primaryFramebuffer); + task.addJob("HUD", primaryFramebuffer, transformSlot); // And HUD Layer objects const auto nullJitter = Varying(glm::vec2(0.0f, 0.0f)); - const auto hudOpaquesInputs = DrawLayered3D::Inputs(hudOpaque, lightingModel, hazeFrame, nullJitter).asVarying(); - const auto hudTransparentsInputs = DrawLayered3D::Inputs(hudTransparent, lightingModel, hazeFrame, nullJitter).asVarying(); - task.addJob("DrawHUDOpaque", hudOpaquesInputs, true); - task.addJob("DrawHUDTransparent", hudTransparentsInputs, false); + const auto hudOpaquesInputs = DrawLayered3D::Inputs(hudOpaque, deferredFrameTransform, lightingModel, hazeFrame).asVarying(); + const auto hudTransparentsInputs = DrawLayered3D::Inputs(hudTransparent, deferredFrameTransform, lightingModel, hazeFrame).asVarying(); + task.addJob("DrawHUDOpaque", hudOpaquesInputs, shapePlumber, true, false, transformSlot); + task.addJob("DrawHUDTransparent", hudTransparentsInputs, shapePlumber, false, false, transformSlot); } diff --git a/libraries/render-utils/src/RenderHUDLayerTask.h b/libraries/render-utils/src/RenderHUDLayerTask.h index c30b0498a8..de7bb63670 100644 --- a/libraries/render-utils/src/RenderHUDLayerTask.h +++ b/libraries/render-utils/src/RenderHUDLayerTask.h @@ -1,6 +1,7 @@ // // Created by Sam Gateau on 2019/06/14 // Copyright 2013-2019 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
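(Editorial note.) RenderHUDLayerTask::build now receives the shape plumber and a saved-transform slot, and its Input set grows to six entries so the layered HUD jobs can bind the DeferredFrameTransform buffer instead of a per-job jitter vector. A call site, matching the one in RenderDeferredTask above, looks roughly like this; the addJob template argument is restored by assumption, since angle-bracketed text was lost in this copy of the patch:

    const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque,
                                                                hudTransparent, hazeFrame, deferredFrameTransform).asVarying();
    task.addJob<RenderHUDLayerTask>("RenderHUDLayer", renderHUDLayerInputs, shapePlumberForward, mainViewTransformSlot);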
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -11,6 +12,7 @@ #include "LightingModel.h" #include "HazeStage.h" +#include "DeferredFrameTransform.h" class CompositeHUD { public: @@ -19,16 +21,21 @@ public: //using Inputs = gpu::FramebufferPointer; using JobModel = render::Job::ModelI; + CompositeHUD(uint transformSlot) : _transformSlot(transformSlot) {} + void run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& inputs); + +private: + uint _transformSlot; }; class RenderHUDLayerTask { public: // Framebuffer where to draw, lighting model, opaque items, transparent items - using Input = render::VaryingSet5; + using Input = render::VaryingSet6; using JobModel = render::Task::ModelI; - void build(JobModel& task, const render::Varying& input, render::Varying& output); + void build(JobModel& task, const render::Varying& input, render::Varying& output, render::ShapePlumberPointer shapePlumber, uint transformSlot); }; #endif // hifi_RenderHUDLayerTask_h \ No newline at end of file diff --git a/libraries/render-utils/src/RenderPipelines.cpp b/libraries/render-utils/src/RenderPipelines.cpp index e7253f6adc..f491d127c9 100644 --- a/libraries/render-utils/src/RenderPipelines.cpp +++ b/libraries/render-utils/src/RenderPipelines.cpp @@ -311,7 +311,7 @@ void addPlumberPipeline(ShapePlumber& plumber, bool isWireframed = (i & 2); for (int cullFaceMode = graphics::MaterialKey::CullFaceMode::CULL_NONE; cullFaceMode < graphics::MaterialKey::CullFaceMode::NUM_CULL_FACE_MODES; cullFaceMode++) { auto state = std::make_shared(*baseState); - key.isTranslucent() ? PrepareStencil::testMask(*state) : PrepareStencil::testMaskDrawShape(*state); + key.isTranslucent() ? PrepareStencil::testMaskResetNoAA(*state) : PrepareStencil::testMaskDrawShape(*state); // Depth test depends on transparency state->setDepthTest(true, !key.isTranslucent(), gpu::LESS_EQUAL); diff --git a/libraries/render-utils/src/RenderViewTask.cpp b/libraries/render-utils/src/RenderViewTask.cpp index 0748006fb6..78afce5ced 100644 --- a/libraries/render-utils/src/RenderViewTask.cpp +++ b/libraries/render-utils/src/RenderViewTask.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 5/25/2017. // Copyright 2017 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -16,7 +17,8 @@ #include "RenderDeferredTask.h" #include "RenderForwardTask.h" -void RenderShadowsAndDeferredTask::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint8_t tagBits, uint8_t tagMask, size_t depth) { +void RenderShadowsAndDeferredTask::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint8_t tagBits, + uint8_t tagMask, uint8_t transformOffset, size_t depth) { task.addJob("SetRenderMethodTask", render::Args::DEFERRED); const auto items = input.getN(0); @@ -29,16 +31,18 @@ void RenderShadowsAndDeferredTask::build(JobModel& task, const render::Varying& const auto shadowTaskOut = task.addJob("RenderShadowTask", shadowTaskIn, cullFunctor, tagBits, tagMask); const auto renderDeferredInput = RenderDeferredTask::Input(items, lightingModel, lightingStageFramesAndZones, shadowTaskOut).asVarying(); - task.addJob("RenderDeferredTask", renderDeferredInput, cullFunctor, depth); + task.addJob("RenderDeferredTask", renderDeferredInput, cullFunctor, transformOffset, depth); } -void DeferredForwardSwitchJob::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint8_t tagBits, uint8_t tagMask, size_t depth) { - task.addBranch("RenderShadowsAndDeferredTask", 0, input, cullFunctor, tagBits, tagMask, depth); +void DeferredForwardSwitchJob::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint8_t tagBits, + uint8_t tagMask, uint8_t transformOffset, size_t depth) { + task.addBranch("RenderShadowsAndDeferredTask", 0, input, cullFunctor, tagBits, tagMask, transformOffset, depth); - task.addBranch("RenderForwardTask", 1, input, cullFunctor, depth); + task.addBranch("RenderForwardTask", 1, input, cullFunctor, transformOffset, depth); } -void RenderViewTask::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint8_t tagBits, uint8_t tagMask, size_t depth) { +void RenderViewTask::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, uint8_t tagBits, uint8_t tagMask, + TransformOffset transformOffset, size_t depth) { const auto items = task.addJob("FetchCullSort", cullFunctor, tagBits, tagMask); if (depth == 0 && tagBits == render::ItemKey::TAG_BITS_0) { @@ -56,9 +60,9 @@ void RenderViewTask::build(JobModel& task, const render::Varying& input, render: #ifndef Q_OS_ANDROID const auto deferredForwardIn = DeferredForwardSwitchJob::Input(items, lightingModel, lightingStageFramesAndZones).asVarying(); - task.addJob("DeferredForwardSwitch", deferredForwardIn, cullFunctor, tagBits, tagMask, depth); + task.addJob("DeferredForwardSwitch", deferredForwardIn, cullFunctor, tagBits, tagMask, transformOffset, depth); #else const auto renderInput = RenderForwardTask::Input(items, lightingModel, lightingStageFramesAndZones).asVarying(); - task.addJob("RenderForwardTask", renderInput); + task.addJob("RenderForwardTask", renderInput, cullFunctor, transformOffset, depth); #endif } diff --git a/libraries/render-utils/src/RenderViewTask.h b/libraries/render-utils/src/RenderViewTask.h index 139d00125e..956af8d3df 100644 --- a/libraries/render-utils/src/RenderViewTask.h +++ b/libraries/render-utils/src/RenderViewTask.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 5/25/2017. 
// Copyright 2017 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -24,8 +25,8 @@ public: RenderShadowsAndDeferredTask() {} - void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, uint8_t tagBits, uint8_t tagMask, size_t depth); - + void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, + uint8_t tagBits, uint8_t tagMask, uint8_t transformOffset, size_t depth); }; class DeferredForwardSwitchJob { @@ -36,8 +37,8 @@ public: DeferredForwardSwitchJob() {} void configure(const render::SwitchConfig& config) {} - void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, uint8_t tagBits, uint8_t tagMask, size_t depth); - + void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, + uint8_t tagBits, uint8_t tagMask, uint8_t transformOffset, size_t depth); }; class RenderViewTask { @@ -47,8 +48,15 @@ public: RenderViewTask() {} - void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, uint8_t tagBits = 0x00, uint8_t tagMask = 0x00, size_t depth = 0); + // each view uses 1 transform for the main view, and one for the background, so these need to be increments of 2 + enum TransformOffset: uint8_t { + MAIN_VIEW = 0, + SECONDARY_VIEW = 2, + FIRST_MIRROR_VIEW = 4 + }; + void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, + uint8_t tagBits = 0x00, uint8_t tagMask = 0x00, TransformOffset transformOffset = TransformOffset::MAIN_VIEW, size_t depth = 0); }; diff --git a/libraries/render-utils/src/StencilMaskPass.cpp b/libraries/render-utils/src/StencilMaskPass.cpp index 59dbd1c0dc..7329168d72 100644 --- a/libraries/render-utils/src/StencilMaskPass.cpp +++ b/libraries/render-utils/src/StencilMaskPass.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 5/31/17. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -126,6 +127,12 @@ void PrepareStencil::testMask(gpu::State& state) { gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP)); } +// Pass if this area has NOT been marked as MASK or anything containing MASK and reset NO_AA if it passes +void PrepareStencil::testMaskResetNoAA(gpu::State& state) { + state.setStencilTest(true, STENCIL_NO_AA, gpu::State::StencilTest(STENCIL_MASK, STENCIL_MASK, gpu::NOT_EQUAL, + gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_REPLACE)); +} + // Pass if this area has NOT been marked as NO_AA or anything containing NO_AA void PrepareStencil::testNoAA(gpu::State& state) { state.setStencilTest(true, 0x00, gpu::State::StencilTest(STENCIL_NO_AA, STENCIL_NO_AA, gpu::NOT_EQUAL, diff --git a/libraries/render-utils/src/StencilMaskPass.h b/libraries/render-utils/src/StencilMaskPass.h index ed1487d4b0..d0dec8de9e 100644 --- a/libraries/render-utils/src/StencilMaskPass.h +++ b/libraries/render-utils/src/StencilMaskPass.h @@ -3,7 +3,8 @@ // render-utils/src/ // // Created by Sam Gateau on 5/31/17. -// Copyright 20154 High Fidelity, Inc. 
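(Editorial sketch.) The TransformOffset enum above encodes the rule stated in its comment: every view records two saved transforms, one main-view slot and one background slot, so offsets advance in steps of two. A hypothetical secondary-view call site would pass SECONDARY_VIEW so that its two slots do not collide with the primary view's; the job name, tag bits and restored template argument below are illustrative only.

    task.addJob<RenderViewTask>("RenderSecondView", cullFunctor, secondViewTagBits, secondViewTagMask,
                                RenderViewTask::TransformOffset::SECONDARY_VIEW);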
+// Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -52,6 +53,7 @@ public: static void drawMask(gpu::State& state); static void drawBackground(gpu::State& state); static void testMask(gpu::State& state); + static void testMaskResetNoAA(gpu::State& state); static void testNoAA(gpu::State& state); static void testBackground(gpu::State& state); static void testShape(gpu::State& state); diff --git a/libraries/render-utils/src/VelocityBufferPass.cpp b/libraries/render-utils/src/VelocityBufferPass.cpp deleted file mode 100644 index 36735a7832..0000000000 --- a/libraries/render-utils/src/VelocityBufferPass.cpp +++ /dev/null @@ -1,164 +0,0 @@ -// -// VelocityBufferPass.cpp -// libraries/render-utils/src/ -// -// Created by Sam Gateau 8/15/2017. -// Copyright 2017 High Fidelity, Inc. -// -// Distributed under the Apache License, Version 2.0. -// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html -// -#include "VelocityBufferPass.h" - -#include - -#include -#include - -#include "StencilMaskPass.h" -#include "render-utils/ShaderConstants.h" - -namespace ru { - using render_utils::slot::texture::Texture; - using render_utils::slot::buffer::Buffer; -} - -VelocityFramebuffer::VelocityFramebuffer() { -} - - -void VelocityFramebuffer::updatePrimaryDepth(const gpu::TexturePointer& depthBuffer) { - //If the depth buffer or size changed, we need to delete our FBOs - bool reset = false; - if ((_primaryDepthTexture != depthBuffer)) { - _primaryDepthTexture = depthBuffer; - reset = true; - } - if (_primaryDepthTexture) { - auto newFrameSize = glm::ivec2(_primaryDepthTexture->getDimensions()); - if (_frameSize != newFrameSize) { - _frameSize = newFrameSize; - _halfFrameSize = newFrameSize >> 1; - - reset = true; - } - } - - if (reset) { - clear(); - } -} - -void VelocityFramebuffer::clear() { - _velocityFramebuffer.reset(); - _velocityTexture.reset(); -} - -void VelocityFramebuffer::allocate() { - - auto width = _frameSize.x; - auto height = _frameSize.y; - - // For Velocity Buffer: - _velocityTexture = gpu::Texture::createRenderBuffer(gpu::Element(gpu::VEC2, gpu::HALF, gpu::RGB), width, height, gpu::Texture::SINGLE_MIP, - gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR)); - _velocityFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("velocity")); - _velocityFramebuffer->setRenderBuffer(0, _velocityTexture); - _velocityFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, _primaryDepthTexture->getTexelFormat()); -} - -gpu::FramebufferPointer VelocityFramebuffer::getVelocityFramebuffer() { - if (!_velocityFramebuffer) { - allocate(); - } - return _velocityFramebuffer; -} - -gpu::TexturePointer VelocityFramebuffer::getVelocityTexture() { - if (!_velocityTexture) { - allocate(); - } - return _velocityTexture; -} - -gpu::PipelinePointer VelocityBufferPass::_cameraMotionPipeline; - -void VelocityBufferPass::configure(const Config& config) { -} - -void VelocityBufferPass::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) { - assert(renderContext->args); - assert(renderContext->args->hasViewFrustum()); - - RenderArgs* args = renderContext->args; - - const auto& frameTransform = inputs.get0(); - const auto& deferredFramebuffer = inputs.get1(); - - if (!_gpuTimer) { - _gpuTimer = std::make_shared < gpu::RangeTimer>(__FUNCTION__); - } - - if (!_velocityFramebuffer) 
{ - _velocityFramebuffer = std::make_shared(); - } - _velocityFramebuffer->updatePrimaryDepth(deferredFramebuffer->getPrimaryDepthTexture()); - - auto depthBuffer = deferredFramebuffer->getPrimaryDepthTexture(); - - auto velocityFBO = _velocityFramebuffer->getVelocityFramebuffer(); - auto velocityTexture = _velocityFramebuffer->getVelocityTexture(); - - outputs.edit0() = _velocityFramebuffer; - outputs.edit1() = velocityFBO; - outputs.edit2() = velocityTexture; - - auto cameraMotionPipeline = getCameraMotionPipeline(); - - auto fullViewport = args->_viewport; - - gpu::doInBatch("VelocityBufferPass::run", args->_context, [=](gpu::Batch& batch) { - _gpuTimer->begin(batch); - batch.enableStereo(false); - - batch.setViewportTransform(fullViewport); - batch.setProjectionTransform(glm::mat4()); - batch.resetViewTransform(); - batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(_velocityFramebuffer->getDepthFrameSize(), fullViewport)); - - batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, frameTransform->getFrameTransformBuffer()); - - // Velocity buffer camera motion - batch.setFramebuffer(velocityFBO); - batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0f, 0.0f, 0.0f, 0.0f)); - batch.setPipeline(cameraMotionPipeline); - batch.setResourceTexture(ru::Texture::TaaDepth, depthBuffer); - batch.draw(gpu::TRIANGLE_STRIP, 4); - - _gpuTimer->end(batch); - }); - - auto config = std::static_pointer_cast(renderContext->jobConfig); - config->setGPUBatchRunTime(_gpuTimer->getGPUAverage(), _gpuTimer->getBatchAverage()); -} - - -const gpu::PipelinePointer& VelocityBufferPass::getCameraMotionPipeline() { - if (!_cameraMotionPipeline) { - gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::velocityBuffer_cameraMotion); - gpu::StatePointer state = std::make_shared(); - - // Stencil test the curvature pass for objects pixels only, not the background - // PrepareStencil::testShape(*state); - - state->setColorWriteMask(true, true, false, false); - - // Good to go add the brand new pipeline - _cameraMotionPipeline = gpu::Pipeline::create(program, state); - } - - return _cameraMotionPipeline; -} - - - diff --git a/libraries/render-utils/src/VelocityBufferPass.h b/libraries/render-utils/src/VelocityBufferPass.h deleted file mode 100644 index a94b772ec8..0000000000 --- a/libraries/render-utils/src/VelocityBufferPass.h +++ /dev/null @@ -1,89 +0,0 @@ -// -// VelocityBufferPass.h -// libraries/render-utils/src/ -// -// Created by Sam Gateau 8/15/2017. -// Copyright 2017 High Fidelity, Inc. -// -// Distributed under the Apache License, Version 2.0. -// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html -// - -#ifndef hifi_VelocityBufferPass_h -#define hifi_VelocityBufferPass_h - -#include "SurfaceGeometryPass.h" - - -// VelocityFramebuffer is a helper class gathering in one place theframebuffers and targets describing the surface geometry linear depth -// from a z buffer -class VelocityFramebuffer { -public: - VelocityFramebuffer(); - - gpu::FramebufferPointer getVelocityFramebuffer(); - gpu::TexturePointer getVelocityTexture(); - - // Update the depth buffer which will drive the allocation of all the other resources according to its size. 
- void updatePrimaryDepth(const gpu::TexturePointer& depthBuffer); - - gpu::TexturePointer getPrimaryDepthTexture(); - const glm::ivec2& getDepthFrameSize() const { return _frameSize; } - - void setResolutionLevel(int level); - int getResolutionLevel() const { return _resolutionLevel; } - -protected: - void clear(); - void allocate(); - - gpu::TexturePointer _primaryDepthTexture; - - gpu::FramebufferPointer _velocityFramebuffer; - gpu::TexturePointer _velocityTexture; - - glm::ivec2 _frameSize; - glm::ivec2 _halfFrameSize; - int _resolutionLevel{ 0 }; -}; - -using VelocityFramebufferPointer = std::shared_ptr; - -class VelocityBufferPassConfig : public render::GPUJobConfig { - Q_OBJECT - Q_PROPERTY(float depthThreshold MEMBER depthThreshold NOTIFY dirty) - -public: - VelocityBufferPassConfig() : render::GPUJobConfig(true) {} - - float depthThreshold{ 5.0f }; - -signals: - void dirty(); -}; - -class VelocityBufferPass { -public: - using Inputs = render::VaryingSet2; - using Outputs = render::VaryingSet3; - using Config = VelocityBufferPassConfig; - using JobModel = render::Job::ModelIO; - - VelocityBufferPass() {} - - void configure(const Config& config); - void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs); - -private: - typedef gpu::BufferView UniformBufferView; - - VelocityFramebufferPointer _velocityFramebuffer; - - static const gpu::PipelinePointer& getCameraMotionPipeline(); - static gpu::PipelinePointer _cameraMotionPipeline; - - gpu::RangeTimerPointer _gpuTimer; -}; - - -#endif // hifi_VelocityBufferPass_h diff --git a/libraries/render-utils/src/VelocityWrite.slh b/libraries/render-utils/src/VelocityWrite.slh new file mode 100644 index 0000000000..79f52bb687 --- /dev/null +++ b/libraries/render-utils/src/VelocityWrite.slh @@ -0,0 +1,34 @@ + +<@if not VELOCITY_WRITE_SLH@> +<@def VELOCITY_WRITE_SLH@> + +<@include gpu/Transform.slh@> +<$declareStandardCameraTransform()$> + +vec2 getEyeTexcoordPos() { + // No need to add 0.5 as, by default, frag coords are pixel centered at (0.5, 0.5) + vec2 texCoordPos = gl_FragCoord.xy; + texCoordPos *= cam_getInvWidthHeight(); + texCoordPos.x -= cam_getStereoSide(); + return texCoordPos; +} + +vec2 packVelocity(vec4 prevPositionCS) { + vec2 uv = getEyeTexcoordPos(); + vec2 prevUV = (prevPositionCS.xy / prevPositionCS.w) * 0.5 + 0.5; + vec2 deltaUV = uv - prevUV; + // Velocity should be computed without any jitter inside. 
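+    // (Editorial note, not part of the original patch.) deltaUV is the per-pixel screen-space motion
+    // vector: the current fragment UV minus the previous frame's position reprojected into UV space.
+    // Both positions come from unjittered transforms, presumably so the temporal AA resolve is not
+    // polluted by the per-frame jitter offset; writing velocity here, from the material shaders,
+    // is what allows the dedicated VelocityBufferPass deleted above to go away.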
+    return deltaUV;
+}
+
+<@endif@>
diff --git a/libraries/render-utils/src/ZoneRenderer.cpp b/libraries/render-utils/src/ZoneRenderer.cpp
index 5d958d8a84..762de84ba2 100644
--- a/libraries/render-utils/src/ZoneRenderer.cpp
+++ b/libraries/render-utils/src/ZoneRenderer.cpp
@@ -103,7 +103,7 @@ const gpu::PipelinePointer& DebugZoneLighting::getKeyLightPipeline() {
         gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::zone_drawKeyLight);
         gpu::StatePointer state = std::make_shared<gpu::State>();

-        PrepareStencil::testMask(*state);
+        PrepareStencil::testMaskResetNoAA(*state);
         state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA);
         _keyLightPipeline = gpu::Pipeline::create(program, state);
     }
@@ -115,7 +115,7 @@ const gpu::PipelinePointer& DebugZoneLighting::getAmbientPipeline() {
         gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::zone_drawAmbient);
         gpu::StatePointer state = std::make_shared<gpu::State>();

-        PrepareStencil::testMask(*state);
+        PrepareStencil::testMaskResetNoAA(*state);
         state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA);
         _ambientPipeline = gpu::Pipeline::create(program, state);
     }
@@ -127,7 +127,7 @@ const gpu::PipelinePointer& DebugZoneLighting::getBackgroundPipeline() {
         gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::zone_drawSkybox);
         gpu::StatePointer state = std::make_shared<gpu::State>();

-        PrepareStencil::testMask(*state);
+        PrepareStencil::testMaskResetNoAA(*state);
         state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA);
         _backgroundPipeline = gpu::Pipeline::create(program, state);
     }
diff --git a/libraries/render-utils/src/aa_blend.slf b/libraries/render-utils/src/aa_blend.slf
new file mode 100644
index 0000000000..c42c6373cc
--- /dev/null
+++ b/libraries/render-utils/src/aa_blend.slf
@@ -0,0 +1,53 @@
+<@include gpu/Config.slh@>
+<$VERSION_HEADER$>
+// <$_SCRIBE_FILENAME$>
+// Generated on <$_SCRIBE_DATE$>
+//
+// Created by Raffi Bedikian on 8/30/15
+// Copyright 2015 High Fidelity, Inc.
+// Copyright 2024 Overte e.V.
+//
+// Distributed under the Apache License, Version 2.0.
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +<@include gpu/ShaderConstants.h@> + +layout(location=0) in vec2 varTexCoord0; +layout(location=0) out vec4 outFragColor; + +layout(binding=0) uniform sampler2D colorTexture; + +struct aaBlendParams { + vec4 sharpenIntensity; +}; + +layout(binding=0) uniform aaBlendParamsBuffer { + aaBlendParams params; +}; + +void main(void) { + if (params.sharpenIntensity.x > 0.0) { + vec4 pixels[9]; + vec4 sharpenedPixel; + pixels[0] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(-1,-1), 0); + pixels[1] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(0,-1), 0); + pixels[2] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(1,-1), 0); + + pixels[3] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(-1,0), 0); + pixels[4] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy), 0); + pixels[5] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(1,0), 0); + + pixels[6] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(-1,1), 0); + pixels[7] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(0,1), 0); + pixels[8] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(1,1), 0); + + sharpenedPixel = pixels[4] * 6.8 - (pixels[1] + pixels[3] + pixels[5] + pixels[7]) - (pixels[0] + pixels[2] + pixels[6] + pixels[8]) *0.7; + + vec4 minColor = max(vec4(0), pixels[4] - vec4(0.5)); + vec4 maxColor = pixels[4] + vec4(0.5); + outFragColor = clamp(pixels[4] + sharpenedPixel * params.sharpenIntensity.x, minColor, maxColor); + } else { + outFragColor = texelFetch(colorTexture, ivec2(gl_FragCoord.xy), 0); + } +} diff --git a/libraries/render-utils/src/deferred_light_limited.slv b/libraries/render-utils/src/deferred_light_limited.slv index 0126d54664..f99a43771b 100644 --- a/libraries/render-utils/src/deferred_light_limited.slv +++ b/libraries/render-utils/src/deferred_light_limited.slv @@ -7,6 +7,7 @@ // // Created by Sam Gateau on 6/16/16. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -54,8 +55,7 @@ void main(void) { #ifdef GPU_TRANSFORM_IS_STEREO #ifdef GPU_TRANSFORM_STEREO_SPLIT_SCREEN - TransformCamera cam = getTransformCamera(); - <$transformStereoClipsSpace(cam, pos)$> + <$transformStereoClipSpace(pos)$> #endif #endif diff --git a/libraries/render-utils/src/directional_skybox_light.slf b/libraries/render-utils/src/directional_skybox_light.slf index 20026283be..c02c29681c 100644 --- a/libraries/render-utils/src/directional_skybox_light.slf +++ b/libraries/render-utils/src/directional_skybox_light.slf @@ -5,6 +5,7 @@ // // Created by Sam Gateau on 5/8/2015. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
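(Editorial note on aa_blend.slf above.) The sharpening branch applies a 3x3 high-pass kernel whose weights cancel exactly: 6.8 - 4(1.0) - 4(0.7) = 0, so sharpenedPixel carries only local contrast and flat regions are left untouched once it is scaled by sharpenIntensity.x and added back. The result is clamped to pixels[4] +/- 0.5 (and floored at zero) to bound ringing on high-contrast edges, and with sharpenIntensity.x == 0.0 the shader reduces to a plain copy of the input texel.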
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -30,12 +31,11 @@ layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec4 _texCoord01; layout(location=0) out vec4 _fragColor; void main(void) { - DeferredFrameTransform deferredTransform = getDeferredFrameTransform(); - DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0); + DeferredFragment frag = unpackDeferredFragment(_texCoord0); <@if HIFI_USE_SHADOW@> vec4 viewPos = vec4(frag.position.xyz, 1.0); - vec4 worldPos = getViewInverse() * viewPos; + vec4 worldPos = getViewInverse(frag.side) * viewPos; Light shadowLight = getKeyLight(); vec3 worldLightDirection = getLightDirection(shadowLight); float shadowAttenuation = evalShadowAttenuation(worldLightDirection, worldPos, -viewPos.z, frag.normal); @@ -55,7 +55,7 @@ void main(void) { <@if HIFI_USE_AMBIENT@> vec3 color = evalAmbientSphereGlobalColor( - getViewInverse(), + getViewInverse(frag.side), shadowAttenuation, frag.obscurance, frag.position.xyz, @@ -69,7 +69,7 @@ void main(void) { lowNormalCurvature); <@else@> vec3 color = evalSkyboxGlobalColor( - getViewInverse(), + getViewInverse(frag.side), shadowAttenuation, frag.obscurance, frag.position.xyz, diff --git a/libraries/render-utils/src/drawWorkloadProxy.slf b/libraries/render-utils/src/drawWorkloadProxy.slf index f0bd9d474c..9a05498441 100644 --- a/libraries/render-utils/src/drawWorkloadProxy.slf +++ b/libraries/render-utils/src/drawWorkloadProxy.slf @@ -6,6 +6,7 @@ // // Created by Sam Gateau on 6/29/15. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -16,6 +17,7 @@ layout(location=0) in vec4 varColor; layout(location=1) in vec3 varTexcoord; layout(location=2) in vec3 varEyePos; +layout(location=3) in vec4 _prevPositionCS; void main(void) { if (varColor.w > 0.0) { @@ -28,6 +30,7 @@ void main(void) { } packDeferredFragmentUnlit( + _prevPositionCS, vec3(0.0, 1.0, 0.0), 1.0, varColor.rgb); diff --git a/libraries/render-utils/src/drawWorkloadProxy.slv b/libraries/render-utils/src/drawWorkloadProxy.slv index e485f14a93..c74d42e8cc 100644 --- a/libraries/render-utils/src/drawWorkloadProxy.slv +++ b/libraries/render-utils/src/drawWorkloadProxy.slv @@ -7,6 +7,7 @@ // // Created by Sam Gateau on 6/29/2015. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -22,6 +23,20 @@ layout(location=0) out vec4 varColor; layout(location=1) out vec3 varTexcoord; layout(location=2) out vec3 varEyePos; +layout(location=3) out vec4 _prevPositionCS; + +vec4 getPosition(WorkloadProxy proxy, vec4 spriteVert, vec4 proxyPosEye) { + vec3 dirZ = -normalize(proxyPosEye.xyz); + vec3 dirX = normalize(cross(vec3(0.0, 1.0, 0.0), dirZ)); + vec3 dirY = vec3(0.0, 1.0, 0.0); + // Workaround for Nvidia driver bug + vec4 pos = vec4(1.0, 1.0, 1.0, 1.0); + pos.x = proxyPosEye.x + proxy.sphere.w * ( dirX.x * spriteVert.x + dirY.x * spriteVert.y + dirZ.x * spriteVert.z); + pos.y = proxyPosEye.y + proxy.sphere.w * ( dirX.y * spriteVert.x + dirY.y * spriteVert.y + dirZ.y * spriteVert.z); + pos.z = proxyPosEye.z + proxy.sphere.w * ( dirX.z * spriteVert.x + dirY.z * spriteVert.y + dirZ.z * spriteVert.z); + return pos; + //return vec4(proxyPosEye.xyz + proxy.sphere.w * (dirX * spriteVert.x + dirY * spriteVert.y /* + dirZ * spriteVert.z*/), 1.0); +} void main(void) { const vec4 UNIT_SPRITE[3] = vec4[3]( @@ -44,22 +59,17 @@ void main(void) { TransformCamera cam = getTransformCamera(); TransformObject obj = getTransformObject(); vec4 proxyPosEye; - <$transformModelToEyePos(cam, obj, proxyPosWorld, proxyPosEye)$> + vec4 prevProxyPosEye; + <$transformModelToEyePosAndPrevEyePos(cam, obj, proxyPosWorld, proxyPosEye, prevProxyPosEye)$> // Define the billboarded space - vec3 dirZ = -normalize(proxyPosEye.xyz); - vec3 dirX = normalize(cross(vec3(0.0, 1.0, 0.0), dirZ)); - vec3 dirY = vec3(0.0, 1.0, 0.0); + vec4 pos = getPosition(proxy, spriteVert, proxyPosEye); + vec4 prevPos = getPosition(proxy, spriteVert, prevProxyPosEye); - //vec4 pos = vec4(proxyPosEye.xyz + proxy.sphere.w * ( dirX * spriteVert.x + dirY * spriteVert.y + dirZ * spriteVert.z), 1.0); - //Nvidia driver workaround - vec4 pos = vec4(1.0, 1.0, 1.0, 1.0); - pos.x = proxyPosEye.x + proxy.sphere.w * ( dirX.x * spriteVert.x + dirY.x * spriteVert.y + dirZ.x * spriteVert.z); - pos.y = proxyPosEye.y + proxy.sphere.w * ( dirX.y * spriteVert.x + dirY.y * spriteVert.y + dirZ.y * spriteVert.z); - pos.z = proxyPosEye.z + proxy.sphere.w * ( dirX.z * spriteVert.x + dirY.z * spriteVert.y + dirZ.z * spriteVert.z); varEyePos = pos.xyz; varTexcoord = spriteVert.xyz; <$transformEyeToClipPos(cam, pos, gl_Position)$> + <$transformPrevEyeToPrevClipPos(cam, prevPos, _prevPositionCS)$>; // Convert region to color int region = floatBitsToInt(proxy.region.x); diff --git a/libraries/render-utils/src/drawWorkloadView.slf b/libraries/render-utils/src/drawWorkloadView.slf index b638824204..fb9c430490 100644 --- a/libraries/render-utils/src/drawWorkloadView.slf +++ b/libraries/render-utils/src/drawWorkloadView.slf @@ -7,6 +7,7 @@ // // Created by Sam Gateau on 6/29/15. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -17,6 +18,7 @@ layout(location=0) in vec4 varColor; layout(location=1) in vec3 varTexcoord; layout(location=2) in vec3 varEyePos; +layout(location=3) in vec4 _prevPositionCS; void main(void) { if (varColor.w > 0.0) { @@ -29,6 +31,7 @@ void main(void) { } packDeferredFragmentUnlit( + _prevPositionCS, vec3(0.0, 1.0, 0.0), 1.0, varColor.rgb); diff --git a/libraries/render-utils/src/drawWorkloadView.slv b/libraries/render-utils/src/drawWorkloadView.slv index 2fdf3d773e..6320d9ea83 100644 --- a/libraries/render-utils/src/drawWorkloadView.slv +++ b/libraries/render-utils/src/drawWorkloadView.slv @@ -7,6 +7,7 @@ // // Created by Sam Gateau on 6/29/2015. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -21,6 +22,7 @@ layout(location=0) out vec4 varColor; layout(location=1) out vec3 varTexcoord; layout(location=2) out vec3 varEyePos; +layout(location=3) out vec4 _prevPositionCS; const int NUM_VERTICES_PER_SEGMENT = 2; const int NUM_SEGMENT_PER_VIEW_REGION = 65; @@ -36,6 +38,12 @@ LAYOUT_STD140(binding=0) uniform DrawMeshBuffer { DrawMesh _drawMeshBuffer; }; +vec4 getPosition(int regionID, int segmentVertexID, vec4 posEye, vec3 tanEye) { + vec3 lateralDir = normalize(cross(vec3(0.0, 0.0, 1.0), normalize(tanEye))); + posEye.xyz += (0.005 * abs(posEye.z) * float(regionID + 1)) * (-1.0 + 2.0 * float(segmentVertexID)) * lateralDir; + return posEye; +} + void main(void) { int viewID = gl_VertexID / NUM_VERTICES_PER_VIEW; int viewVertexID = gl_VertexID - viewID * NUM_VERTICES_PER_VIEW; @@ -51,8 +59,6 @@ void main(void) { vec4 spriteVert = vec4(segment.y, 0.0, segment.x, 1.0); vec3 spriteTan = vec3(segment.x, 0.0, -segment.y); - vec3 lateralDir = vec3(0.0, -1.0 + 2.0 * float(segmentVertexID), 0.0); - WorkloadView view = getWorkloadView(viewID); vec4 region = view.regions[regionID]; vec4 proxyPosWorld = vec4(region.xyz, 1.0); @@ -74,15 +80,19 @@ void main(void) { TransformCamera cam = getTransformCamera(); TransformObject obj = getTransformObject(); vec4 posEye; - <$transformModelToEyePos(cam, obj, pos, posEye)$> + vec4 prevPosEye; + <$transformModelToEyePosAndPrevEyePos(cam, obj, pos, posEye, prevPosEye)$> vec3 tanEye; + vec3 prevTanEye; <$transformModelToEyeDir(cam, obj, originSpaceTan, tanEye)$> + <$transformModelToPrevEyeDir(cam, obj, originSpaceTan, prevTanEye)$> - lateralDir = normalize(cross(vec3(0.0, 0.0, 1.0), normalize(tanEye))); - posEye.xyz += (0.005 * abs(posEye.z) * float(regionID + 1)) * (-1.0 + 2.0 * float(segmentVertexID)) * lateralDir; + posEye = getPosition(regionID, segmentVertexID, posEye, tanEye); + prevPosEye = getPosition(regionID, segmentVertexID, prevPosEye, prevTanEye); varEyePos = posEye.xyz; <$transformEyeToClipPos(cam, posEye, gl_Position)$> + <$transformPrevEyeToPrevClipPos(cam, prevPosEye, _prevPositionCS)$>; varTexcoord = spriteVert.xyz; diff --git a/libraries/render-utils/src/grid.slf b/libraries/render-utils/src/grid.slf index 8d54dfef4a..fa510b9204 100644 --- a/libraries/render-utils/src/grid.slf +++ b/libraries/render-utils/src/grid.slf @@ -5,6 +5,7 @@ // // Created by Zach Pomerantz on 2/16/2016. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -15,6 +16,7 @@ <@if not HIFI_USE_FORWARD@> <@include DeferredBufferWrite.slh@> + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS; <@else@> layout(location=0) out vec4 _fragColor0; <@endif@> @@ -43,9 +45,9 @@ void main(void) { <@if not HIFI_USE_FORWARD@> vec3 NORMAL = vec3(1.0, 0.0, 0.0); <@if not HIFI_USE_TRANSLUCENT@> - packDeferredFragmentUnlit(NORMAL, 1.0, varColor.rgb); + packDeferredFragmentUnlit(_prevPositionCS, NORMAL, 1.0, varColor.rgb); <@else@> - packDeferredFragmentTranslucent(NORMAL, varColor.a, varColor.rgb, DEFAULT_ROUGHNESS); + packDeferredFragmentTranslucent(_prevPositionCS, NORMAL, varColor.a, varColor.rgb, DEFAULT_ROUGHNESS); <@endif@> <@else@> _fragColor0 = varColor; diff --git a/libraries/render-utils/src/grid.slv b/libraries/render-utils/src/grid.slv new file mode 100644 index 0000000000..cd6143fa50 --- /dev/null +++ b/libraries/render-utils/src/grid.slv @@ -0,0 +1,36 @@ +<@include gpu/Config.slh@> +<$VERSION_HEADER$> +// <$_SCRIBE_FILENAME$> +// Generated on <$_SCRIBE_DATE$> +// +// Created by HifiExperiments on 7/24/2020 +// Copyright 2024 Overte e.V. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +<@include gpu/Inputs.slh@> +<@include gpu/Color.slh@> +<@include render-utils/ShaderConstants.h@> + +<@include gpu/Transform.slh@> +<$declareStandardTransform()$> + +layout(location=GPU_ATTR_POSITION) out vec3 varPosition; +layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS; +layout(location=GPU_ATTR_NORMAL) out vec3 varNormal; +layout(location=GPU_ATTR_TEXCOORD0) out vec2 varTexCoord0; +layout(location=GPU_ATTR_COLOR) out vec4 varColor; + +void main(void) { + varTexCoord0 = inTexCoord0.st; + varColor = color_sRGBAToLinear(inColor); + + // standard transform + TransformCamera cam = getTransformCamera(); + TransformObject obj = getTransformObject(); + <$transformModelToClipPosAndPrevClipPos(cam, obj, inPosition, gl_Position, _prevPositionCS)$> + <$transformModelToWorldDir(cam, obj, inNormal.xyz, varNormal)$> + varPosition = inPosition.xyz; +} diff --git a/libraries/render-utils/src/lightClusters_drawClusterContent.slf b/libraries/render-utils/src/lightClusters_drawClusterContent.slf index 80013bc3cc..18602954b8 100644 --- a/libraries/render-utils/src/lightClusters_drawClusterContent.slf +++ b/libraries/render-utils/src/lightClusters_drawClusterContent.slf @@ -5,6 +5,7 @@ // // Created by Sam Gateau on 9/8/2016. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -33,7 +34,7 @@ void main(void) { vec2 texCoord = varTexCoord0.st; vec4 fragEyePos = unpackDeferredPositionFromZdb(texCoord); - vec4 fragWorldPos = getViewInverse() * fragEyePos; + vec4 fragWorldPos = getViewInverse(getStereoSideFromUV(texCoord.x)) * fragEyePos; // From frag world pos find the cluster vec4 clusterEyePos = frustumGrid_worldToEye(fragWorldPos); diff --git a/libraries/render-utils/src/lightClusters_drawClusterFromDepth.slf b/libraries/render-utils/src/lightClusters_drawClusterFromDepth.slf index 0e3f8a5ea5..31a8eac667 100644 --- a/libraries/render-utils/src/lightClusters_drawClusterFromDepth.slf +++ b/libraries/render-utils/src/lightClusters_drawClusterFromDepth.slf @@ -5,6 +5,7 @@ // // Created by Sam Gateau on 9/8/2016. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -30,7 +31,7 @@ void main(void) { vec2 texCoord = varTexCoord0.st; vec4 fragEyePos = unpackDeferredPositionFromZdb(texCoord); - vec4 fragWorldPos = getViewInverse() * fragEyePos; + vec4 fragWorldPos = getViewInverse(getStereoSideFromUV(texCoord.x)) * fragEyePos; // From frag world pos find the cluster vec4 clusterEyePos = frustumGrid_worldToEye(fragWorldPos); diff --git a/libraries/render-utils/src/local_lights_drawOutline.slf b/libraries/render-utils/src/local_lights_drawOutline.slf index a2b4cc1d10..1b8773dc63 100644 --- a/libraries/render-utils/src/local_lights_drawOutline.slf +++ b/libraries/render-utils/src/local_lights_drawOutline.slf @@ -7,6 +7,7 @@ // // Created by Sam Gateau on 9/6/2016. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -54,7 +55,7 @@ void main(void) { // Frag pos in world - mat4 invViewMat = getViewInverse(); + mat4 invViewMat = getViewInverse(frag.side); vec4 fragPos = invViewMat * fragPosition; <$fetchClusterInfo(fragPos)$>; diff --git a/libraries/render-utils/src/local_lights_shading.slf b/libraries/render-utils/src/local_lights_shading.slf index 538bdacc99..166005b7f0 100644 --- a/libraries/render-utils/src/local_lights_shading.slf +++ b/libraries/render-utils/src/local_lights_shading.slf @@ -7,6 +7,7 @@ // // Created by Sam Gateau on 9/6/2016. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -32,8 +33,7 @@ void main(void) { // Grab the fragment data from the uv vec2 texCoord = _texCoord0.st; - DeferredFrameTransform deferredTransform = getDeferredFrameTransform(); - DeferredFragment frag = unpackDeferredFragment(deferredTransform, texCoord); + DeferredFragment frag = unpackDeferredFragment(texCoord); vec4 fragPosition = frag.position; if (frag.mode == FRAG_MODE_UNLIT) { @@ -41,7 +41,7 @@ void main(void) { } // Frag pos in world - mat4 invViewMat = getViewInverse(); + mat4 invViewMat = getViewInverse(frag.side); vec4 fragWorldPos = invViewMat * fragPosition; <$fetchClusterInfo(fragWorldPos)$>; diff --git a/libraries/render-utils/src/model.slf b/libraries/render-utils/src/model.slf index 48e483edf7..a2f15df8a4 100644 --- a/libraries/render-utils/src/model.slf +++ b/libraries/render-utils/src/model.slf @@ -47,19 +47,12 @@ <@if not HIFI_USE_SHADOW@> <@if HIFI_USE_MTOON@> - <@include DefaultMaterials.slh@> <@include GlobalLight.slh@> <$declareEvalGlobalLightingAlphaBlendedMToon()$> <@include gpu/Transform.slh@> <$declareStandardCameraTransform()$> - - <@if HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> - layout(location=0) out vec4 _fragColor0; - <@else@> - <@include DeferredBufferWrite.slh@> - <@endif@> <@elif HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> <@include DefaultMaterials.slh@> <@include GlobalLight.slh@> @@ -75,7 +68,9 @@ <@endif@> <@include gpu/Transform.slh@> <$declareStandardCameraTransform()$> + <@endif@> + <@if HIFI_USE_FORWARD@> layout(location=0) out vec4 _fragColor0; <@else@> <@include DeferredBufferWrite.slh@> @@ -111,6 +106,7 @@ layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec4 _texCoord01; <@if not HIFI_USE_MTOON@> layout(location=RENDER_UTILS_ATTR_COLOR) in vec4 _color; <@endif@> + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS; <@if HIFI_USE_NORMALMAP@> layout(location=RENDER_UTILS_ATTR_TANGENT_WS) in vec3 _tangentWS; <@endif@> @@ -169,14 +165,22 @@ void main(void) { <@if HIFI_USE_FORWARD@> _fragColor0 = vec4(mirrorColor, 1.0); <@else@> - packDeferredFragmentUnlit(vec3(1.0, 0.0, 0.0), 1.0, mirrorColor); + // Mirrors have AA disabled + packDeferredFragmentUnlit(vec4(0.0, 0.0, 0.0, 1.0), vec3(1.0, 0.0, 0.0), 1.0, mirrorColor); <@endif@> <@elif HIFI_USE_SHADOW@> _fragColor0 = vec4(1.0); - <@elif HIFI_USE_TRANSLUCENT or HIFI_USE_FORWARD@> + <@elif HIFI_USE_FORWARD@> _fragColor0 = vec4(albedo * isUnlitEnabled(), opacity); + <@elif HIFI_USE_TRANSLUCENT@> + packDeferredFragmentTranslucentUnlit( + _prevPositionCS, + evalFrontOrBackFaceNormal(normalize(_normalWS)), + opacity, + albedo * isUnlitEnabled()); <@else@> packDeferredFragmentUnlit( + _prevPositionCS, evalFrontOrBackFaceNormal(normalize(_normalWS)), opacity, albedo * isUnlitEnabled()); @@ -231,14 +235,25 @@ void main(void) { shade, shadingShift, getMaterialShadingToony(mat), getMaterialMatcap(mat), getMaterialParametricRim(mat), getMaterialParametricRimFresnelPower(mat), getMaterialParametricRimLift(mat), rimTex, getMaterialRimLightingMix(mat), matKey), opacity); - <@if HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> + <@if HIFI_USE_FORWARD@> _fragColor0 = isUnlitEnabled() * vec4(color.rgb <@if HIFI_USE_FADE@> + fadeEmissive <@endif@> , color.a); + <@elif HIFI_USE_TRANSLUCENT@> + packDeferredFragmentTranslucent( + _prevPositionCS, + fragNormalWS, + color.a, + color.rgb + <@if HIFI_USE_FADE@> + + fadeEmissive + <@endif@> + , DEFAULT_ROUGHNESS); <@else@> packDeferredFragmentUnlit( + _prevPositionCS, 
fragNormalWS, 1.0, color.rgb @@ -368,6 +383,7 @@ void main(void) { <@if not HIFI_USE_TRANSLUCENT@> <@if not HIFI_USE_LIGHTMAP@> packDeferredFragment( + _prevPositionCS, fragNormalWS, opacity, albedo, @@ -382,6 +398,7 @@ void main(void) { scattering); <@else@> packDeferredFragmentLightmap( + _prevPositionCS, fragNormalWS, evalOpaqueFinalAlpha(getMaterialOpacity(mat), opacity), albedo, @@ -411,7 +428,7 @@ void main(void) { vec4(0), vec4(0), opacity); } - _fragColor0 = vec4(evalGlobalLightingAlphaBlended( + vec4 outColor = vec4(evalGlobalLightingAlphaBlended( cam._viewInverse, 1.0, occlusion, @@ -427,8 +444,9 @@ void main(void) { , surfaceWS, opacity, localLighting.rgb), opacity); + packDeferredFragmentTranslucent(_prevPositionCS, fragNormalWS, outColor.a, outColor.rgb, roughness); <@else@> - _fragColor0 = vec4(evalLightmappedColor( + vec4 outColor = vec4(evalLightmappedColor( cam._viewInverse, 1.0, DEFAULT_OCCLUSION, @@ -436,6 +454,7 @@ void main(void) { albedo, lightmap), opacity); + packDeferredFragmentLightmap(_prevPositionCS, fragNormalWS, outColor.a, outColor.rgb, roughness, metallic, outColor.rgb); <@endif@> <@endif@> <@endif@> diff --git a/libraries/render-utils/src/model.slv b/libraries/render-utils/src/model.slv index 848acfc331..dc4bcde7fe 100644 --- a/libraries/render-utils/src/model.slv +++ b/libraries/render-utils/src/model.slv @@ -51,6 +51,7 @@ layout(location=RENDER_UTILS_ATTR_TEXCOORD01) out vec4 _texCoord01; <@if not HIFI_USE_MTOON@> layout(location=RENDER_UTILS_ATTR_COLOR) out vec4 _color; <@endif@> + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS; <@if HIFI_USE_NORMALMAP@> layout(location=RENDER_UTILS_ATTR_TANGENT_WS) out vec3 _tangentWS; <@endif@> @@ -76,7 +77,7 @@ void main(void) { TransformCamera cam = getTransformCamera(); TransformObject obj = getTransformObject(); <@if not HIFI_USE_SHADOW@> - <$transformModelToWorldAndEyeAndClipPos(cam, obj, positionMS, _positionWS, _positionES, gl_Position)$> + <$transformModelToWorldEyeClipPosAndPrevClipPos(cam, obj, positionMS, _positionWS, _positionES, gl_Position, _prevPositionCS)$> <$transformModelToWorldDir(cam, obj, normalMS, _normalWS)$> <@else@> <$transformModelToClipPos(cam, obj, positionMS, gl_Position)$> diff --git a/libraries/render-utils/src/parabola.slf b/libraries/render-utils/src/parabola.slf index f19f82ec59..dbf7c85404 100644 --- a/libraries/render-utils/src/parabola.slf +++ b/libraries/render-utils/src/parabola.slf @@ -5,25 +5,27 @@ // // Created by Sam Gondelman on 7/18/2018 // Copyright 2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
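Reviewer note: model.slv now emits _prevPositionCS alongside the current clip position, and every packDeferredFragment* variant in model.slf consumes it, presumably so opaque, lightmapped and translucent paths can all feed the velocity buffer used by TAA. The actual packing lives in DeferredBufferWrite.slh, which is not part of this diff; the sketch below only shows the standard way a motion vector falls out of the two clip-space positions, under an illustrative sketch_ name:

```glsl
// Illustrative sketch, not the engine's DeferredBufferWrite.slh code:
// a UV-space motion vector from current and previous clip-space positions.
vec2 sketch_motionVector(vec4 currPositionCS, vec4 prevPositionCS) {
    // Perspective divide to NDC, then remap the XY delta into [0, 1] UV units.
    vec2 currUV = 0.5 * currPositionCS.xy / currPositionCS.w + 0.5;
    vec2 prevUV = 0.5 * prevPositionCS.xy / prevPositionCS.w + 0.5;
    return currUV - prevUV;
}
```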
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // +layout(location=0) in vec4 _color; + <@if not HIFI_USE_FORWARD@> <@include DeferredBufferWrite.slh@> + layout(location=1) in vec4 _prevPositionCS; <@else@> layout(location=0) out vec4 _fragColor0; <@endif@> -layout(location=0) in vec4 _color; - void main(void) { <@if not HIFI_USE_FORWARD@> <@if not HIFI_USE_TRANSLUCENT@> - packDeferredFragmentUnlit(vec3(1.0, 0.0, 0.0), 1.0, _color.rgb); + packDeferredFragmentUnlit(_prevPositionCS, vec3(1.0, 0.0, 0.0), 1.0, _color.rgb); <@else@> - packDeferredFragmentTranslucent(vec3(1.0, 0.0, 0.0), _color.a, _color.rgb, DEFAULT_ROUGHNESS); + packDeferredFragmentTranslucent(_prevPositionCS, vec3(1.0, 0.0, 0.0), _color.a, _color.rgb, DEFAULT_ROUGHNESS); <@endif@> <@else@> _fragColor0 = _color; diff --git a/libraries/render-utils/src/parabola.slv b/libraries/render-utils/src/parabola.slv index 6032452d1d..4dade8f091 100644 --- a/libraries/render-utils/src/parabola.slv +++ b/libraries/render-utils/src/parabola.slv @@ -4,6 +4,7 @@ // // Created by Sam Gondelman on 7/18/2018 // Copyright 2018 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -27,6 +28,9 @@ LAYOUT_STD140(binding=0) uniform parabolaData { }; layout(location=0) out vec4 _color; +<@if not HIFI_USE_FORWARD@> + layout(location=1) out vec4 _prevPositionCS; +<@endif@> void main(void) { _color = _parabolaData.color; @@ -51,5 +55,9 @@ void main(void) { pos += 0.5 * _parabolaData.width * normal * (-1.0 + 2.0 * float(gl_VertexID % 2 == 0)); - <$transformModelToClipPos(cam, obj, pos, gl_Position)$> + <@if HIFI_USE_FORWARD@> + <$transformModelToClipPos(cam, obj, pos, gl_Position)$> + <@else@> + <$transformModelToClipPosAndPrevClipPos(cam, obj, pos, gl_Position, _prevPositionCS)$> + <@endif@> } diff --git a/libraries/render-utils/src/render-utils/ShaderConstants.h b/libraries/render-utils/src/render-utils/ShaderConstants.h index 19eb4dd249..e1b78c9cb5 100644 --- a/libraries/render-utils/src/render-utils/ShaderConstants.h +++ b/libraries/render-utils/src/render-utils/ShaderConstants.h @@ -1,6 +1,7 @@ // // // Created by Bradley Austin Davis on 2015-02-04 +// Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. +// // Based on fragment shader code from // https://github.com/paulhoux/Cinder-Samples/blob/master/TextRendering/include/text/Text.cpp // Distributed under the Apache License, Version 2.0. 
@@ -21,7 +24,9 @@ <@include gpu/Transform.slh@> <$declareStandardCameraTransform()$> +<@endif@> +<@if HIFI_USE_FORWARD@> layout(location=0) out vec4 _fragColor0; <@else@> <@include DeferredBufferWrite.slh@> @@ -36,6 +41,9 @@ layout(location=RENDER_UTILS_ATTR_POSITION_MS) in vec2 _positionMS; <@if HIFI_USE_TRANSLUCENT or HIFI_USE_FORWARD@> layout(location=RENDER_UTILS_ATTR_POSITION_ES) in vec4 _positionES; <@endif@> +<@if not HIFI_USE_FORWARD@> + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS; +<@endif@> layout(location=RENDER_UTILS_ATTR_NORMAL_WS) in vec3 _normalWS; layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec4 _texCoord01; #define _texCoord0 _texCoord01.xy @@ -63,27 +71,35 @@ void main() { color.a = 1.0; <@endif@> + vec3 normal = normalize(_normalWS); + <@if HIFI_USE_UNLIT@> - <@if HIFI_USE_TRANSLUCENT or HIFI_USE_FORWARD@> - _fragColor0 = vec4(color.rgb * isUnlitEnabled(), color.a); + vec4 outColor = vec4(color.rgb * isUnlitEnabled(), color.a); + + <@if HIFI_USE_FORWARD@> + _fragColor0 = outColor; + <@elif HIFI_USE_TRANSLUCENT@> + packDeferredFragmentTranslucent(_prevPositionCS, normal, outColor.a, outColor.rgb, DEFAULT_ROUGHNESS); <@else@> packDeferredFragmentUnlit( - normalize(_normalWS), - color.a, - color.rgb); + _prevPositionCS, + normal, + outColor.a, + outColor.rgb); <@endif@> <@else@> <@if HIFI_USE_TRANSLUCENT or HIFI_USE_FORWARD@> TransformCamera cam = getTransformCamera(); vec3 fragPosition = _positionES.xyz; + vec4 outColor; <@if HIFI_USE_TRANSLUCENT@> - _fragColor0 = vec4(evalGlobalLightingAlphaBlended( + outColor = vec4(evalGlobalLightingAlphaBlended( cam._viewInverse, 1.0, DEFAULT_OCCLUSION, fragPosition, - normalize(_normalWS), + normal, color.rgb, DEFAULT_FRESNEL, DEFAULT_METALLIC, @@ -91,21 +107,28 @@ void main() { DEFAULT_ROUGHNESS, color.a), color.a); <@else@> - _fragColor0 = vec4(evalSkyboxGlobalColor( + outColor = vec4(evalSkyboxGlobalColor( cam._viewInverse, 1.0, DEFAULT_OCCLUSION, fragPosition, - normalize(_normalWS), + normal, color.rgb, DEFAULT_FRESNEL, DEFAULT_METALLIC, DEFAULT_ROUGHNESS), color.a); <@endif@> + + <@if HIFI_USE_FORWARD@> + _fragColor0 = outColor; + <@else@> + packDeferredFragmentTranslucent(_prevPositionCS, normal, outColor.a, outColor.rgb, DEFAULT_ROUGHNESS); + <@endif@> <@else@> packDeferredFragment( - normalize(_normalWS), + _prevPositionCS, + normal, color.a, color.rgb, DEFAULT_ROUGHNESS, diff --git a/libraries/render-utils/src/sdf_text3D.slh b/libraries/render-utils/src/sdf_text3D.slh index 5471415e1c..9b7d54f3da 100644 --- a/libraries/render-utils/src/sdf_text3D.slh +++ b/libraries/render-utils/src/sdf_text3D.slh @@ -6,6 +6,7 @@ // // Created by Sam Gondelman on 3/15/19 // Copyright 2019 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -38,6 +39,7 @@ LAYOUT(binding=0) uniform textParamsBuffer { #define TAA_TEXTURE_LOD_BIAS -3.0 const float interiorCutoff = 0.5; +const float smoothStrength = 4.0; const float taaBias = pow(2.0, TAA_TEXTURE_LOD_BIAS); // MSDF logic from: https://github.com/Chlumsky/msdfgen?tab=readme-ov-file#using-a-multi-channel-distance-field @@ -100,18 +102,16 @@ vec4 evalSDFSuperSampled(vec2 texCoord, vec2 positionMS, vec4 glyphBounds) { return vec4(0.0); } - vec2 dxTexCoord = dFdx(texCoord) * 0.5 * taaBias; - vec2 dyTexCoord = dFdy(texCoord) * 0.5 * taaBias; + vec4 color = evalSDFColor(texCoord, glyphBounds); - // Perform 4x supersampling for anisotropic filtering - vec4 color; - color = evalSDFColor(texCoord, glyphBounds); - color += evalSDFColor(texCoord + dxTexCoord, glyphBounds); - color += evalSDFColor(texCoord + dyTexCoord, glyphBounds); - color += evalSDFColor(texCoord + dxTexCoord + dyTexCoord, glyphBounds); - color *= 0.25; + // Rely on TAA for anti-aliasing but smooth transition when minification + // to help filtering + float uvFootprint = length(fwidth(texCoord) * smoothStrength); + float smoothStart = max(0.0, 0.5 - uvFootprint); + float smoothEnd = min(1.0, 0.5 + uvFootprint); + float alpha = max(smoothstep(smoothStart, smoothEnd, color.a), step(interiorCutoff, color.a)); - return vec4(color.rgb, step(interiorCutoff, color.a)); + return vec4(color.rgb, alpha); } <@endfunc@> diff --git a/libraries/render-utils/src/sdf_text3D.slv b/libraries/render-utils/src/sdf_text3D.slv index db13e170e9..d7b2abd7bc 100644 --- a/libraries/render-utils/src/sdf_text3D.slv +++ b/libraries/render-utils/src/sdf_text3D.slv @@ -5,6 +5,8 @@ // vertex shader // // Created by Brad Davis on 10/14/13. +// Copyright 2013 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -23,6 +25,9 @@ layout(location=RENDER_UTILS_ATTR_POSITION_MS) out vec2 _positionMS; <@if HIFI_USE_TRANSLUCENT or HIFI_USE_FORWARD@> layout(location=RENDER_UTILS_ATTR_POSITION_ES) out vec4 _positionES; <@endif@> +<@if not HIFI_USE_FORWARD@> + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS; +<@endif@> layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS; layout(location=RENDER_UTILS_ATTR_TEXCOORD01) out vec4 _texCoord01; layout(location=RENDER_UTILS_ATTR_FADE1) flat out vec4 _glyphBounds; // we're reusing the fade texcoord locations here @@ -43,10 +48,12 @@ void main() { TransformCamera cam = getTransformCamera(); TransformObject obj = getTransformObject(); -<@if HIFI_USE_TRANSLUCENT or HIFI_USE_FORWARD@> +<@if HIFI_USE_FORWARD@> <$transformModelToEyeAndClipPos(cam, obj, position, _positionES, gl_Position)$> +<@elif HIFI_USE_TRANSLUCENT@> + <$transformModelToEyeClipPosAndPrevClipPos(cam, obj, position, _positionES, gl_Position, _prevPositionCS)$> <@else@> - <$transformModelToClipPos(cam, obj, position, gl_Position)$> + <$transformModelToClipPosAndPrevClipPos(cam, obj, position, gl_Position, _prevPositionCS)$> <@endif@> const vec3 normal = vec3(0, 0, 1); diff --git a/libraries/render-utils/src/simple.slf b/libraries/render-utils/src/simple.slf index fabe85cb4f..72c8870a74 100644 --- a/libraries/render-utils/src/simple.slf +++ b/libraries/render-utils/src/simple.slf @@ -5,6 +5,7 @@ // // Created by Andrzej Kapolka on 9/15/14. // Copyright 2014 High Fidelity, Inc. 
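Reviewer note on the sdf_text3D change above: the 4x supersampled SDF fetch is replaced by a single fetch plus a screen-space smoothstep whose width follows the texcoord footprint (fwidth), leaving the remaining edge anti-aliasing to TAA. The same logic, pulled out as a standalone fragment-shader sketch; the sketch_ name and the explicit parameters are mine (in the shader interiorCutoff is 0.5 and smoothStrength is 4.0):

```glsl
// Sketch: distance-field coverage that widens with the pixel's UV footprint,
// so minified text fades smoothly instead of shimmering.
float sketch_sdfCoverage(float sdfAlpha, vec2 texCoord,
                         float interiorCutoff, float smoothStrength) {
    float uvFootprint = length(fwidth(texCoord) * smoothStrength);
    float smoothStart = max(0.0, interiorCutoff - uvFootprint);
    float smoothEnd = min(1.0, interiorCutoff + uvFootprint);
    // Fully interior texels stay opaque regardless of the footprint.
    return max(smoothstep(smoothStart, smoothEnd, sdfAlpha), step(interiorCutoff, sdfAlpha));
}
```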
+// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -21,26 +22,21 @@ <$declareStandardCameraTransform()$> <@else@> <@include LightingModel.slh@> + <@endif@> <@endif@> +<@if HIFI_USE_FORWARD@> layout(location=0) out vec4 _fragColor0; +<@else@> + <@include DeferredBufferWrite.slh@> <@endif@> <@if not HIFI_USE_UNLIT@> + <@include GlobalLight.slh@> <@if HIFI_USE_TRANSLUCENT@> - <@include GlobalLight.slh@> <$declareEvalGlobalLightingAlphaBlended()$> <@elif HIFI_USE_FORWARD@> - <@include GlobalLight.slh@> <$declareEvalSkyboxGlobalColor(_SCRIBE_NULL, HIFI_USE_FORWARD)$> - <@else@> - <@include DeferredBufferWrite.slh@> - <@endif@> -<@else@> - <@if not HIFI_USE_FORWARD@> - <@if not HIFI_USE_TRANSLUCENT@> - <@include DeferredBufferWrite.slh@> - <@endif@> <@endif@> <@endif@> @@ -49,11 +45,14 @@ <$declareFadeFragmentInstanced()$> <@endif@> -<@if not HIFI_USE_UNLIT@> - <@if HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> +<@if HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> + <@if not HIFI_USE_UNLIT@> layout(location=RENDER_UTILS_ATTR_POSITION_ES) in vec4 _positionES; <@endif@> <@endif@> +<@if not HIFI_USE_FORWARD@> + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS; +<@endif@> <@if HIFI_USE_FADE@> layout(location=RENDER_UTILS_ATTR_POSITION_WS) in vec4 _positionWS; <@endif@> @@ -90,12 +89,13 @@ void main(void) { <@if not HIFI_USE_UNLIT@> <@if HIFI_USE_TRANSLUCENT@> - _fragColor0 = vec4(evalGlobalLightingAlphaBlended( + vec3 normal = evalFrontOrBackFaceNormal(normalize(_normalWS)); + vec4 color = vec4(evalGlobalLightingAlphaBlended( cam._viewInverse, 1.0, DEFAULT_OCCLUSION, fragPosition, - evalFrontOrBackFaceNormal(normalize(_normalWS)), + normal, texel.rgb, fresnel, metallic, @@ -106,6 +106,12 @@ void main(void) { , DEFAULT_ROUGHNESS, texel.a), texel.a); + + <@if HIFI_USE_FORWARD@> + _fragColor0 = color; + <@else@> + packDeferredFragmentTranslucent(_prevPositionCS, normal, color.a, color.rgb, DEFAULT_ROUGHNESS); + <@endif@> <@elif HIFI_USE_FORWARD@> _fragColor0 = vec4(evalSkyboxGlobalColor( cam._viewInverse, @@ -120,6 +126,7 @@ void main(void) { texel.a); <@else@> packDeferredFragment( + _prevPositionCS, evalFrontOrBackFaceNormal(normalize(_normalWS)), 1.0, texel.rgb, @@ -134,21 +141,25 @@ void main(void) { DEFAULT_SCATTERING); <@endif@> <@else@> - <@if HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> - _fragColor0 = isUnlitEnabled() * vec4(texel.rgb + vec3 color = isUnlitEnabled() * (texel.rgb <@if HIFI_USE_FADE@> + fadeEmissive <@endif@> - , texel.a); + ); + <@if HIFI_USE_FORWARD@> + _fragColor0 = vec4(color, texel.a); + <@elif HIFI_USE_TRANSLUCENT@> + packDeferredFragmentTranslucentUnlit( + _prevPositionCS, + evalFrontOrBackFaceNormal(normalize(_normalWS)), + texel.a, + color); <@else@> packDeferredFragmentUnlit( + _prevPositionCS, evalFrontOrBackFaceNormal(normalize(_normalWS)), 1.0, - texel.rgb - <@if HIFI_USE_FADE@> - + fadeEmissive - <@endif@> - ); + color); <@endif@> <@endif@> } diff --git a/libraries/render-utils/src/simple.slv b/libraries/render-utils/src/simple.slv index e47af5d23f..50dfc558de 100644 --- a/libraries/render-utils/src/simple.slv +++ b/libraries/render-utils/src/simple.slv @@ -5,6 +5,7 @@ // // Created by Andrzej Kapolka on 9/15/14. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -24,11 +25,14 @@ layout(location=RENDER_UTILS_ATTR_POSITION_WS) out vec4 _positionWS; <@endif@> -<@if not HIFI_USE_UNLIT@> - <@if HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> +<@if HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> + <@if not HIFI_USE_UNLIT@> layout(location=RENDER_UTILS_ATTR_POSITION_ES) out vec4 _positionES; <@endif@> <@endif@> +<@if not HIFI_USE_FORWARD@> + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS; +<@endif@> layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS; layout(location=RENDER_UTILS_ATTR_COLOR) out vec4 _color; layout(location=RENDER_UTILS_ATTR_TEXCOORD01) out vec4 _texCoord01; @@ -39,12 +43,13 @@ void main(void) { TransformCamera cam = getTransformCamera(); TransformObject obj = getTransformObject(); -<@if not HIFI_USE_UNLIT@> - <@if HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> - <$transformModelToEyeAndClipPos(cam, obj, inPosition, _positionES, gl_Position)$> - <@else@> - <$transformModelToClipPos(cam, obj, inPosition, gl_Position)$> +<@if HIFI_USE_FORWARD or HIFI_USE_TRANSLUCENT@> + <@if not HIFI_USE_UNLIT@> + <$transformModelToEyeWorldAlignedPos(cam, obj, inPosition, _positionES)$> <@endif@> +<@endif@> +<@if not HIFI_USE_FORWARD@> + <$transformModelToClipPosAndPrevClipPos(cam, obj, inPosition, gl_Position, _prevPositionCS)$> <@else@> <$transformModelToClipPos(cam, obj, inPosition, gl_Position)$> <@endif@> diff --git a/libraries/render-utils/src/simple_procedural.slf b/libraries/render-utils/src/simple_procedural.slf index cc8edbb415..9f9636e34a 100644 --- a/libraries/render-utils/src/simple_procedural.slf +++ b/libraries/render-utils/src/simple_procedural.slf @@ -5,20 +5,19 @@ // // Created by Andrzej Kapolka on 9/15/14. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -<@if not HIFI_USE_TRANSLUCENT@> - <@include DeferredBufferWrite.slh@> -<@else@> +<@include DeferredBufferWrite.slh@> + +<@if HIFI_USE_TRANSLUCENT@> <@include DefaultMaterials.slh@> <@include GlobalLight.slh@> <$declareEvalGlobalLightingAlphaBlended()$> - - layout(location=0) out vec4 _fragColor0; <@endif@> <@include gpu/Transform.slh@> @@ -29,6 +28,7 @@ layout(location=RENDER_UTILS_ATTR_POSITION_MS) in vec4 _positionMS; layout(location=RENDER_UTILS_ATTR_POSITION_ES) in vec4 _positionES; +layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS; layout(location=RENDER_UTILS_ATTR_NORMAL_MS) in vec3 _normalMS; layout(location=RENDER_UTILS_ATTR_NORMAL_WS) in vec3 _normalWS; layout(location=RENDER_UTILS_ATTR_COLOR) in vec4 _color; @@ -147,6 +147,7 @@ void main(void) { <@if not HIFI_USE_TRANSLUCENT@> if (emissiveAmount > 0.0) { packDeferredFragmentLightmap( + _prevPositionCS, normal, 1.0, diffuse, @@ -155,6 +156,7 @@ void main(void) { emissive); } else { packDeferredFragment( + _prevPositionCS, normal, 1.0, diffuse, @@ -165,10 +167,11 @@ void main(void) { scattering); } <@else@> + vec4 color; if (emissiveAmount > 0.0) { - _fragColor0 = vec4(diffuse, alpha); + color = vec4(diffuse, alpha); } else { - _fragColor0 = vec4(evalGlobalLightingAlphaBlended( + color = vec4(evalGlobalLightingAlphaBlended( cam._viewInverse, 1.0, occlusion, @@ -181,5 +184,6 @@ void main(void) { roughness, alpha), alpha); } + packDeferredFragmentTranslucent(_prevPositionCS, normal, color.a, color.rgb, roughness); <@endif@> } diff --git a/libraries/render-utils/src/simple_procedural.slv b/libraries/render-utils/src/simple_procedural.slv index 70bce451d3..69b7f14e6d 100644 --- a/libraries/render-utils/src/simple_procedural.slv +++ b/libraries/render-utils/src/simple_procedural.slv @@ -5,6 +5,7 @@ // // Created by Andrzej Kapolka on 9/15/14. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -29,6 +30,7 @@ layout(location=RENDER_UTILS_ATTR_POSITION_MS) out vec4 _positionMS; layout(location=RENDER_UTILS_ATTR_POSITION_ES) out vec4 _positionES; +layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS; layout(location=RENDER_UTILS_ATTR_NORMAL_MS) out vec3 _normalMS; layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS; layout(location=RENDER_UTILS_ATTR_COLOR) out vec4 _color; @@ -84,6 +86,6 @@ void main(void) { TransformCamera cam = getTransformCamera(); TransformObject obj = getTransformObject(); - <$transformModelToEyeAndClipPos(cam, obj, positionMS, _positionES, gl_Position)$> + <$transformModelToEyeClipPosAndPrevClipPos(cam, obj, positionMS, _positionES, gl_Position, _prevPositionCS)$> <$transformModelToWorldDir(cam, obj, normalMS, _normalWS)$> } \ No newline at end of file diff --git a/libraries/render-utils/src/surfaceGeometry_makeCurvature.slf b/libraries/render-utils/src/surfaceGeometry_makeCurvature.slf index dd9b98b5e5..d5eebefec0 100644 --- a/libraries/render-utils/src/surfaceGeometry_makeCurvature.slf +++ b/libraries/render-utils/src/surfaceGeometry_makeCurvature.slf @@ -6,6 +6,7 @@ // // Created by Sam Gateau on 6/3/16. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -108,7 +109,6 @@ void main(void) { // The position of the pixel fragment in Eye space then in world space vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, texcoordPos); - // vec3 worldPos = (frameTransform._viewInverse * vec4(eyePos, 1.0)).xyz; /* if (texcoordPos.y > 0.5) { outFragColor = vec4(fract(10.0 * worldPos.xyz), 1.0); @@ -138,9 +138,10 @@ void main(void) { // Eval px, py, pz world positions of the basis centered on the world pos of the fragment float axeLength = nearPlaneScale; - vec3 ax = (frameTransform._view[0].xyz * axeLength); - vec3 ay = (frameTransform._view[1].xyz * axeLength); - vec3 az = (frameTransform._view[2].xyz * axeLength); + mat4 view = getView(stereoSide.x); + vec3 ax = (view[0].xyz * axeLength); + vec3 ay = (view[1].xyz * axeLength); + vec3 az = (view[2].xyz * axeLength); vec4 px = vec4(eyePos + ax, 0.0); vec4 py = vec4(eyePos + ay, 0.0); @@ -184,7 +185,7 @@ void main(void) { vec2 nclipPos = (texcoordPos - 0.5) * 2.0; - //vec4 clipPos = frameTransform._projection[stereoSide.x] * vec4(eyePos, 1.0); + //vec4 clipPos = getProjection(stereoSide.x) * vec4(eyePos, 1.0); vec4 clipPos = getProjectionMono() * vec4(eyePos, 1.0); nclipPos = clipPos.xy / clipPos.w; diff --git a/libraries/render-utils/src/taa.slf b/libraries/render-utils/src/taa.slf index 25320179f5..cd40c6b46f 100644 --- a/libraries/render-utils/src/taa.slf +++ b/libraries/render-utils/src/taa.slf @@ -1,12 +1,11 @@ <@include gpu/Config.slh@> <$VERSION_HEADER$> -// Generated on <$_SCRIBE_DATE$> -// -// taa.frag -// fragment shader +// <$_SCRIBE_FILENAME$> +// Generated on <$_SCRIBE_DATE$> // // Created by Sam Gateau on 8/14/2017 // Copyright 2017 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -28,18 +27,24 @@ void main() { return; } + vec4 sourceColor; + vec4 historyColor; + vec2 fragVel = taa_fetchVelocityMapBest(fragUV).xy; + bool needsTAA = taa_fetchSourceAndHistory(fragUV, fragVel, sourceColor, historyColor); + vec4 nextColor = sourceColor; + if (needsTAA) { + // clamp history to neighbourhood of current sample + historyColor.rgb = mix(historyColor.rgb, taa_evalConstrainColor(sourceColor.rgb, fragUV, fragVel, historyColor.rgb).rgb, float(taa_constrainColor())); - vec3 sourceColor; - vec3 historyColor; - vec2 prevFragUV = taa_fetchSourceAndHistory(fragUV, fragVel, sourceColor, historyColor); + if (taa_feedbackColor()) { + nextColor = taa_evalFeedbackColor(sourceColor, historyColor, params.blend); + } else { + nextColor = mix(historyColor, sourceColor, params.blend); + } - vec3 nextColor = sourceColor; + nextColor.rgb = taa_resolveColor(mix(sourceColor.rgb, nextColor.rgb, nextColor.a)); + } - // clamp history to neighbourhood of current sample - historyColor = mix(historyColor, taa_evalConstrainColor(sourceColor, fragUV, fragVel, historyColor), float(taa_constrainColor())); - - nextColor = mix(mix(historyColor, sourceColor, params.blend), taa_evalFeedbackColor(sourceColor, historyColor, params.blend), float(taa_feedbackColor())); - - outFragColor = vec4(taa_resolveColor(nextColor), 1.0); + outFragColor = nextColor; } diff --git a/libraries/render-utils/src/taa.slh b/libraries/render-utils/src/taa.slh index ed9162516e..4df30094df 100644 --- a/libraries/render-utils/src/taa.slh +++ b/libraries/render-utils/src/taa.slh @@ -5,6 +5,7 @@ // // Created by Sam Gateau on 8/17/2017 // Copyright 2017 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
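Reviewer note on the taa.slf rewrite: the per-pixel anti-aliasing intensity now rides in the alpha channel of the source and history colors, so the final resolve lerps from the raw source toward the temporally blended result by that intensity, and pixels with zero velocity skip the history fetch entirely. The blend still happens in the luminance-weighted "rapid tonemap" space set up by taa_transformColor/taa_resolveColor in taa.slh below; a compact sketch of that pair under sketch_ names standing in for the real helpers:

```glsl
// Sketch of the luminance-weighted rapid tonemap / inverse pair the TAA blend
// runs inside (the real taa_transformColor / taa_resolveColor in taa.slh also
// round-trip through YCoCg, which is omitted here).
vec3 sketch_taaTonemap(vec3 c) {
    float lum = dot(vec3(0.3, 0.5, 0.2), c);
    return c / (1.0 + lum);
}

vec3 sketch_taaInverseTonemap(vec3 c) {
    float lum = dot(vec3(0.3, 0.5, 0.2), c);
    return c / (1.0 - lum);
}

// Final resolve as in the rewritten main(): lerp the raw source toward the
// temporally blended color by the per-pixel AA intensity carried in alpha.
vec3 sketch_taaResolve(vec4 sourceColor, vec4 blendedColor) {
    return sketch_taaInverseTonemap(mix(sourceColor.rgb, blendedColor.rgb, blendedColor.a));
}
```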
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -21,6 +22,7 @@ LAYOUT(binding=RENDER_UTILS_TEXTURE_TAA_SOURCE) uniform sampler2D sourceMap; LAYOUT(binding=RENDER_UTILS_TEXTURE_TAA_VELOCITY) uniform sampler2D velocityMap; LAYOUT(binding=RENDER_UTILS_TEXTURE_TAA_DEPTH) uniform sampler2D depthMap; LAYOUT(binding=RENDER_UTILS_TEXTURE_TAA_NEXT) uniform sampler2D nextMap; +LAYOUT(binding=RENDER_UTILS_TEXTURE_TAA_INTENSITY) uniform sampler2D intensityMap; struct TAAParams { @@ -51,10 +53,18 @@ bool taa_showClosestFragment() { return GET_BIT(params.flags.x, 3); } +bool taa_bicubicHistoryFetch() { + return GET_BIT(params.flags.y, 0); +} + bool taa_constrainColor() { return GET_BIT(params.flags.y, 1); } +bool taa_sharpenOutput() { + return GET_BIT(params.flags.y, 2); +} + bool taa_feedbackColor() { return GET_BIT(params.flags.y, 4); } @@ -76,12 +86,23 @@ vec2 taa_getRegionFXAA() { } #define USE_YCOCG 1 -vec4 taa_fetchColor(sampler2D map, vec2 uv) { - vec4 c = texture(map, uv); +vec2 taa_getImageSize() { + vec2 imageSize = getWidthHeight(0); + imageSize.x *= 1.0 + float(isStereo()); + return imageSize; +} + +vec2 taa_getTexelSize() { + vec2 texelSize = getInvWidthHeight(); + texelSize.x *= 1.0 - 0.5 * float(isStereo()); + return texelSize; +} + +vec4 taa_transformColor(vec4 c) { // Apply rapid pseudo tonemapping as TAA is applied to a tonemapped image, using luminance as weight, as proposed in // https://de45xmedrsdbp.cloudfront.net/Resources/files/TemporalAA_small-59732822.pdf - float lum = dot(vec3(0.3,0.5,0.2),c.rgb); - c.rgb = c.rgb / (1.0+lum); + float lum = dot(vec3(0.3, 0.5, 0.2), c.rgb); + c.rgb = c.rgb / (1.0 + lum); #if USE_YCOCG return vec4(color_LinearToYCoCg(c.rgb), c.a); #else @@ -89,23 +110,76 @@ vec4 taa_fetchColor(sampler2D map, vec2 uv) { #endif } +vec4 taa_fetchColor(sampler2D map, vec2 uv) { + vec4 c = texture(map, uv); + return taa_transformColor(c); +} + vec3 taa_resolveColor(vec3 color) { #if USE_YCOCG color = max(vec3(0), color_YCoCgToUnclampedLinear(color)); #endif // Apply rapid inverse tonemapping, using luminance as weight, as proposed in // https://de45xmedrsdbp.cloudfront.net/Resources/files/TemporalAA_small-59732822.pdf - float lum = dot(vec3(0.3,0.5,0.2),color.rgb); - color = color / (1.0-lum); + float lum = dot(vec3(0.3, 0.5, 0.2),color.rgb); + color = color / (1.0 - lum); return color; } vec4 taa_fetchSourceMap(vec2 uv) { - return taa_fetchColor(sourceMap, uv); + vec4 source = taa_fetchColor(sourceMap, uv); + // Store AA intensity in alpha + source.a = texture(intensityMap, uv).r; + return source; } vec4 taa_fetchHistoryMap(vec2 uv) { - return taa_fetchColor(historyMap, uv); + vec4 result; + + if (taa_bicubicHistoryFetch()) { + // Perform a Catmull-Rom interpolation for a (possibly) sharper result. 
+ // Optimized 9-tap fetch based on http://vec3.ca/bicubic-filtering-in-fewer-taps/ + vec2 samplePixelPos = uv * taa_getImageSize() - 0.5; + vec2 samplePixelIntPos = floor(samplePixelPos); + + vec2 samplePixelFracPos = samplePixelPos - samplePixelIntPos; + vec2 samplePixelFracPos2 = samplePixelFracPos * samplePixelFracPos; + + // Catmull-Rom interpolation weights + vec2 w0 = samplePixelFracPos * (samplePixelFracPos * (1.0 - 0.5*samplePixelFracPos) - 0.5); + vec2 w1 = 1.0 + samplePixelFracPos2 * (1.5*samplePixelFracPos - 2.5); + vec2 w2 = samplePixelFracPos * (samplePixelFracPos * (2.0 - 1.5*samplePixelFracPos) + 0.5); + vec2 w3 = samplePixelFracPos2 * (0.5 * samplePixelFracPos - 0.5); + + vec2 w12 = w1 + w2; + vec2 offset12 = w2 / (w1 + w2); + + vec2 sampleUV0 = samplePixelIntPos - vec2(1.0); + vec2 sampleUV3 = samplePixelIntPos + vec2(2.0); + vec2 sampleUV12 = samplePixelIntPos + offset12; + + vec2 texelSize = taa_getTexelSize(); + sampleUV0 = (sampleUV0 + 0.5) * texelSize; + sampleUV3 = (sampleUV3 + 0.5) * texelSize; + sampleUV12 = (sampleUV12 + 0.5) * texelSize; + + result = texture(historyMap, vec2(sampleUV0.x, sampleUV0.y)) * w0.x * w0.y; + result += texture(historyMap, vec2(sampleUV12.x, sampleUV0.y)) * w12.x * w0.y; + result += texture(historyMap, vec2(sampleUV3.x, sampleUV0.y)) * w3.x * w0.y; + + result += texture(historyMap, vec2(sampleUV0.x, sampleUV12.y)) * w0.x * w12.y; + result += texture(historyMap, vec2(sampleUV12.x, sampleUV12.y)) * w12.x * w12.y; + result += texture(historyMap, vec2(sampleUV3.x, sampleUV12.y)) * w3.x * w12.y; + + result += texture(historyMap, vec2(sampleUV0.x, sampleUV3.y)) * w0.x * w3.y; + result += texture(historyMap, vec2(sampleUV12.x, sampleUV3.y)) * w12.x * w3.y; + result += texture(historyMap, vec2(sampleUV3.x, sampleUV3.y)) * w3.x * w3.y; + + result.a = clamp(result.a, 0.0, 1.0); + } else { + result = texture(historyMap, uv); + } + return taa_transformColor(result); } vec4 taa_fetchNextMap(vec2 uv) { @@ -123,35 +197,24 @@ float taa_fetchDepth(vec2 uv) { #define ZCMP_GT(a, b) float(a > b) -vec2 taa_getImageSize() { - vec2 imageSize = getWidthHeight(0); - imageSize.x *= 1.0 + float(isStereo()); - return imageSize; -} - -vec2 taa_getTexelSize() { - vec2 texelSize = getInvWidthHeight(); - texelSize.x *= 1.0 - 0.5 * float(isStereo()); - return texelSize; -} - vec3 taa_findClosestFragment3x3(vec2 uv) { - vec2 dd = abs(taa_getTexelSize()); + vec2 dd = taa_getTexelSize(); vec2 du = vec2(dd.x, 0.0); vec2 dv = vec2(0.0, dd.y); + vec2 dm = vec2(-dd.x, dd.y); - vec3 dtl = vec3(-1, -1, taa_fetchDepth(uv - dv - du)); + vec3 dtl = vec3(-1, -1, taa_fetchDepth(uv - dd)); vec3 dtc = vec3( 0, -1, taa_fetchDepth(uv - dv)); - vec3 dtr = vec3( 1, -1, taa_fetchDepth(uv - dv + du)); + vec3 dtr = vec3( 1, -1, taa_fetchDepth(uv - dm)); vec3 dml = vec3(-1, 0, taa_fetchDepth(uv - du)); vec3 dmc = vec3( 0, 0, taa_fetchDepth(uv)); vec3 dmr = vec3( 1, 0, taa_fetchDepth(uv + du)); - vec3 dbl = vec3(-1, 1, taa_fetchDepth(uv + dv - du)); + vec3 dbl = vec3(-1, 1, taa_fetchDepth(uv + dm)); vec3 dbc = vec3( 0, 1, taa_fetchDepth(uv + dv)); - vec3 dbr = vec3( 1, 1, taa_fetchDepth(uv + dv + du)); + vec3 dbr = vec3( 1, 1, taa_fetchDepth(uv + dd)); vec3 dmin = dtl; dmin = mix(dmin, dtc, ZCMP_GT(dmin.z, dtc.z)); @@ -169,21 +232,22 @@ vec3 taa_findClosestFragment3x3(vec2 uv) } vec2 taa_fetchVelocityMapBest(vec2 uv) { - vec2 dd = abs(taa_getTexelSize()); + vec2 dd = taa_getTexelSize(); vec2 du = vec2(dd.x, 0.0); vec2 dv = vec2(0.0, dd.y); + vec2 dm = vec2(dd.x, -dd.y); - vec2 dtl = 
taa_fetchVelocityMap(uv - dv - du); + vec2 dtl = taa_fetchVelocityMap(uv - dd); vec2 dtc = taa_fetchVelocityMap(uv - dv); - vec2 dtr = taa_fetchVelocityMap(uv - dv + du); + vec2 dtr = taa_fetchVelocityMap(uv - dm); vec2 dml = taa_fetchVelocityMap(uv - du); vec2 dmc = taa_fetchVelocityMap(uv); vec2 dmr = taa_fetchVelocityMap(uv + du); - vec2 dbl = taa_fetchVelocityMap(uv + dv - du); + vec2 dbl = taa_fetchVelocityMap(uv + dm); vec2 dbc = taa_fetchVelocityMap(uv + dv); - vec2 dbr = taa_fetchVelocityMap(uv + dv + du); + vec2 dbr = taa_fetchVelocityMap(uv + dd); vec3 best = vec3(dtl, dot(dtl, dtl)); @@ -223,8 +287,8 @@ vec2 taa_fromFragUVToEyeUVAndSide(vec2 fragUV, out int stereoSide) { vec2 taa_fromEyeUVToFragUV(vec2 eyeUV, int stereoSide) { vec2 fragUV = eyeUV; float check = float(isStereo()); + fragUV.x += check * float(stereoSide); fragUV.x *= 1.0 - 0.5 * check; - fragUV.x += check * float(stereoSide) * 0.5; return fragUV; } @@ -235,14 +299,22 @@ vec2 taa_computePrevFragAndEyeUV(vec2 fragUV, vec2 fragVelocity, out vec2 prevEy return taa_fromEyeUVToFragUV(prevEyeUV, stereoSide); } -vec2 taa_fetchSourceAndHistory(vec2 fragUV, vec2 fragVelocity, out vec3 sourceColor, out vec3 historyColor) { - vec2 prevEyeUV; - vec2 prevFragUV = taa_computePrevFragAndEyeUV(fragUV, fragVelocity, prevEyeUV); - sourceColor = taa_fetchSourceMap(fragUV).xyz; +bool taa_fetchSourceAndHistory(vec2 fragUV, vec2 fragVelocity, out vec4 sourceColor, out vec4 historyColor) { + sourceColor.rgb = texture(sourceMap, fragUV).rgb; + // Store AA intensity in alpha + sourceColor.a = texture(intensityMap, fragUV).r; - historyColor = mix(sourceColor, taa_fetchHistoryMap(prevFragUV).xyz, float(!(any(lessThan(prevEyeUV, vec2(0.0))) || any(greaterThan(prevEyeUV, vec2(1.0)))))); - - return prevFragUV; + // If velocity is 0 then don't fetch history, just return the source. 
This means there is no jitter on this pixel + if (any(notEqual(fragVelocity, vec2(0.0)))) { + vec2 prevEyeUV; + vec2 prevFragUV = taa_computePrevFragAndEyeUV(fragUV, fragVelocity, prevEyeUV); + sourceColor = taa_transformColor(sourceColor); + historyColor = mix(sourceColor, taa_fetchHistoryMap(prevFragUV), float(!(any(lessThan(prevEyeUV, vec2(0.0))) || any(greaterThan(prevEyeUV, vec2(1.0)))))); + return true; + } else { + historyColor = sourceColor; + return false; + } } float Luminance(vec3 rgb) { @@ -253,12 +325,13 @@ float Luminance(vec3 rgb) { mat3 taa_evalNeighbourColorVariance(vec3 sourceColor, vec2 fragUV, vec2 fragVelocity) { vec2 texelSize = taa_getTexelSize(); - - vec2 du = vec2(texelSize.x, 0.0); - vec2 dv = vec2(0.0, texelSize.y); + vec2 dd = texelSize; + vec2 du = vec2(dd.x, 0.0); + vec2 dv = vec2(0.0, dd.y); + vec2 dm = vec2(dd.x, -dd.y); - vec3 sampleColor = taa_fetchSourceMap(fragUV - dv - du).rgb; + vec3 sampleColor = taa_fetchSourceMap(fragUV - dd).rgb; vec3 sumSamples = sampleColor; vec3 sumSamples2 = sampleColor * sampleColor; @@ -266,7 +339,7 @@ mat3 taa_evalNeighbourColorVariance(vec3 sourceColor, vec2 fragUV, vec2 fragVelo sumSamples += sampleColor; sumSamples2 += sampleColor * sampleColor; - sampleColor = taa_fetchSourceMap(fragUV - dv + du).rgb; + sampleColor = taa_fetchSourceMap(fragUV - dm).rgb; sumSamples += sampleColor; sumSamples2 += sampleColor * sampleColor; @@ -282,27 +355,27 @@ mat3 taa_evalNeighbourColorVariance(vec3 sourceColor, vec2 fragUV, vec2 fragVelo sumSamples += sampleColor; sumSamples2 += sampleColor * sampleColor; - sampleColor = taa_fetchSourceMap(fragUV + dv - du).rgb; + sampleColor = taa_fetchSourceMap(fragUV + dm).rgb; sumSamples += sampleColor; sumSamples2 += sampleColor * sampleColor; sampleColor = taa_fetchSourceMap(fragUV + dv).rgb; sumSamples += sampleColor; sumSamples2 += sampleColor * sampleColor; - - sampleColor = taa_fetchSourceMap(fragUV + dv + du).rgb; + + sampleColor = taa_fetchSourceMap(fragUV + dd).rgb; sumSamples += sampleColor; sumSamples2 += sampleColor * sampleColor; - + vec3 mu = sumSamples / vec3(9.0); vec3 sigma = sqrt(max(sumSamples2 / vec3(9.0) - mu * mu, vec3(0.0))); - - float gamma = params.covarianceGamma; - vec3 cmin = mu - gamma * sigma; - vec3 cmax = mu + gamma * sigma; - return mat3(cmin, cmax, mu); + sigma *= params.covarianceGamma; + vec3 cmin = mu - sigma; + vec3 cmax = mu + sigma; + + return mat3(cmin, cmax, mu); } mat3 taa_evalNeighbourColorRegion(vec3 sourceColor, vec2 fragUV, vec2 fragVelocity, float fragZe) { @@ -310,73 +383,73 @@ mat3 taa_evalNeighbourColorRegion(vec3 sourceColor, vec2 fragUV, vec2 fragVeloci vec2 texelSize = taa_getTexelSize(); vec3 cmin, cmax, cavg; - #if MINMAX_3X3_ROUNDED - vec2 du = vec2(texelSize.x, 0.0); - vec2 dv = vec2(0.0, texelSize.y); +#if MINMAX_3X3_ROUNDED + vec2 du = vec2(texelSize.x, 0.0); + vec2 dv = vec2(0.0, texelSize.y); - vec3 ctl = taa_fetchSourceMap(fragUV - dv - du).rgb; - vec3 ctc = taa_fetchSourceMap(fragUV - dv).rgb; - vec3 ctr = taa_fetchSourceMap(fragUV - dv + du).rgb; - vec3 cml = taa_fetchSourceMap(fragUV - du).rgb; - vec3 cmc = sourceColor; //taa_fetchSourceMap(fragUV).rgb; // could resuse the same osurce sample isn't it ? 
- vec3 cmr = taa_fetchSourceMap(fragUV + du).rgb; - vec3 cbl = taa_fetchSourceMap(fragUV + dv - du).rgb; - vec3 cbc = taa_fetchSourceMap(fragUV + dv).rgb; - vec3 cbr = taa_fetchSourceMap(fragUV + dv + du).rgb; + vec3 ctl = taa_fetchSourceMap(fragUV - dv - du).rgb; + vec3 ctc = taa_fetchSourceMap(fragUV - dv).rgb; + vec3 ctr = taa_fetchSourceMap(fragUV - dv + du).rgb; + vec3 cml = taa_fetchSourceMap(fragUV - du).rgb; + vec3 cmc = sourceColor; //taa_fetchSourceMap(fragUV).rgb; // could resuse the same osurce sample isn't it ? + vec3 cmr = taa_fetchSourceMap(fragUV + du).rgb; + vec3 cbl = taa_fetchSourceMap(fragUV + dv - du).rgb; + vec3 cbc = taa_fetchSourceMap(fragUV + dv).rgb; + vec3 cbr = taa_fetchSourceMap(fragUV + dv + du).rgb; - cmin = min(ctl, min(ctc, min(ctr, min(cml, min(cmc, min(cmr, min(cbl, min(cbc, cbr)))))))); - cmax = max(ctl, max(ctc, max(ctr, max(cml, max(cmc, max(cmr, max(cbl, max(cbc, cbr)))))))); + cmin = min(ctl, min(ctc, min(ctr, min(cml, min(cmc, min(cmr, min(cbl, min(cbc, cbr)))))))); + cmax = max(ctl, max(ctc, max(ctr, max(cml, max(cmc, max(cmr, max(cbl, max(cbc, cbr)))))))); - #if MINMAX_3X3_ROUNDED || USE_YCOCG || USE_CLIPPING - cavg = (ctl + ctc + ctr + cml + cmc + cmr + cbl + cbc + cbr) / 9.0; - #elif - cavg = (cmin + cmax ) * 0.5; - #endif - - #if MINMAX_3X3_ROUNDED - vec3 cmin5 = min(ctc, min(cml, min(cmc, min(cmr, cbc)))); - vec3 cmax5 = max(ctc, max(cml, max(cmc, max(cmr, cbc)))); - vec3 cavg5 = (ctc + cml + cmc + cmr + cbc) / 5.0; - cmin = 0.5 * (cmin + cmin5); - cmax = 0.5 * (cmax + cmax5); - cavg = 0.5 * (cavg + cavg5); - #endif - #else - const float _SubpixelThreshold = 0.5; - const float _GatherBase = 0.5; - const float _GatherSubpixelMotion = 0.1666; - - vec2 texel_vel = fragVelocity * imageSize; - float texel_vel_mag = length(texel_vel) * -fragZe; - float k_subpixel_motion = clamp(_SubpixelThreshold / (0.0001 + texel_vel_mag), 0.0, 1.0); - float k_min_max_support = _GatherBase + _GatherSubpixelMotion * k_subpixel_motion; - - vec2 ss_offset01 = k_min_max_support * vec2(-texelSize.x, texelSize.y); - vec2 ss_offset11 = k_min_max_support * vec2(texelSize.x, texelSize.y); - vec3 c00 = taa_fetchSourceMap(fragUV - ss_offset11).rgb; - vec3 c10 = taa_fetchSourceMap(fragUV - ss_offset01).rgb; - vec3 c01 = taa_fetchSourceMap(fragUV + ss_offset01).rgb; - vec3 c11 = taa_fetchSourceMap(fragUV + ss_offset11).rgb; - - cmin = min(c00, min(c10, min(c01, c11))); - cmax = max(c00, max(c10, max(c01, c11))); + #if MINMAX_3X3_ROUNDED || USE_YCOCG || USE_CLIPPING + cavg = (ctl + ctc + ctr + cml + cmc + cmr + cbl + cbc + cbr) / 9.0; + #elif cavg = (cmin + cmax ) * 0.5; - - #if USE_YCOCG || USE_CLIPPING - cavg = (c00 + c10 + c01 + c11) / 4.0; - #elif - cavg = (cmin + cmax ) * 0.5; - #endif #endif - // shrink chroma min-max - #if USE_YCOCG - vec2 chroma_extent = vec2(0.25 * 0.5 * (cmax.r - cmin.r)); - vec2 chroma_center = sourceColor.gb; - cmin.yz = chroma_center - chroma_extent; - cmax.yz = chroma_center + chroma_extent; - cavg.yz = chroma_center; + #if MINMAX_3X3_ROUNDED + vec3 cmin5 = min(ctc, min(cml, min(cmc, min(cmr, cbc)))); + vec3 cmax5 = max(ctc, max(cml, max(cmc, max(cmr, cbc)))); + vec3 cavg5 = (ctc + cml + cmc + cmr + cbc) / 5.0; + cmin = 0.5 * (cmin + cmin5); + cmax = 0.5 * (cmax + cmax5); + cavg = 0.5 * (cavg + cavg5); #endif +#else + const float _SubpixelThreshold = 0.5; + const float _GatherBase = 0.5; + const float _GatherSubpixelMotion = 0.1666; + + vec2 texel_vel = fragVelocity * imageSize; + float texel_vel_mag = length(texel_vel) * -fragZe; + float 
k_subpixel_motion = clamp(_SubpixelThreshold / (0.0001 + texel_vel_mag), 0.0, 1.0); + float k_min_max_support = _GatherBase + _GatherSubpixelMotion * k_subpixel_motion; + + vec2 ss_offset01 = k_min_max_support * vec2(-texelSize.x, texelSize.y); + vec2 ss_offset11 = k_min_max_support * vec2(texelSize.x, texelSize.y); + vec3 c00 = taa_fetchSourceMap(fragUV - ss_offset11).rgb; + vec3 c10 = taa_fetchSourceMap(fragUV - ss_offset01).rgb; + vec3 c01 = taa_fetchSourceMap(fragUV + ss_offset01).rgb; + vec3 c11 = taa_fetchSourceMap(fragUV + ss_offset11).rgb; + + cmin = min(c00, min(c10, min(c01, c11))); + cmax = max(c00, max(c10, max(c01, c11))); + cavg = (cmin + cmax ) * 0.5; + + #if USE_YCOCG || USE_CLIPPING + cavg = (c00 + c10 + c01 + c11) / 4.0; + #elif + cavg = (cmin + cmax ) * 0.5; + #endif +#endif + + // shrink chroma min-max +#if USE_YCOCG + vec2 chroma_extent = vec2(0.25 * 0.5 * (cmax.r - cmin.r)); + vec2 chroma_center = sourceColor.gb; + cmin.yz = chroma_center - chroma_extent; + cmax.yz = chroma_center + chroma_extent; + cavg.yz = chroma_center; +#endif return mat3(cmin, cmax, cavg); } @@ -412,24 +485,23 @@ vec3 taa_evalConstrainColor(vec3 sourceColor, vec2 sourceUV, vec2 sourceVel, vec return taa_clampColor(colorMinMaxAvg[0], colorMinMaxAvg[1], sourceColor, candidateColor); } -vec3 taa_evalFeedbackColor(vec3 sourceColor, vec3 historyColor, float blendFactor) { +vec4 taa_evalFeedbackColor(vec4 sourceColor, vec4 historyColor, float blendFactor) { const float _FeedbackMin = 0.1; const float _FeedbackMax = 0.9; // feedback weight from unbiased luminance diff (t.lottes) - #if USE_YCOCG - float lum0 = sourceColor.r; - float lum1 = historyColor.r; - #else - float lum0 = Luminance(sourceColor.rgb); - float lum1 = Luminance(historyColor.rgb); - #endif +#if USE_YCOCG + float lum0 = sourceColor.r; + float lum1 = historyColor.r; +#else + float lum0 = Luminance(sourceColor.rgb); + float lum1 = Luminance(historyColor.rgb); +#endif float unbiased_diff = abs(lum0 - lum1) / max(lum0, max(lum1, 0.2)); float unbiased_weight = 1.0 - unbiased_diff; float unbiased_weight_sqr = unbiased_weight * unbiased_weight; float k_feedback = mix(_FeedbackMin, _FeedbackMax, unbiased_weight_sqr); - - vec3 nextColor = mix(historyColor, sourceColor, k_feedback * blendFactor).xyz; + vec4 nextColor = mix(historyColor, sourceColor, k_feedback * blendFactor); return nextColor; } @@ -447,7 +519,7 @@ vec3 taa_getVelocityColorAboveThreshold(float velocityPixLength) { vec3 taa_evalFXAA(vec2 fragUV) { - // vec2 texelSize = getInvWidthHeight(); + // vec2 texelSize = getInvWidthHeight(); vec2 texelSize = taa_getTexelSize(); // filter width limit for dependent "two-tap" texture samples diff --git a/libraries/render-utils/src/taa_blend.slf b/libraries/render-utils/src/taa_blend.slf index 0999b2482f..fa0d94dd78 100644 --- a/libraries/render-utils/src/taa_blend.slf +++ b/libraries/render-utils/src/taa_blend.slf @@ -7,6 +7,7 @@ // // Created by Sam Gateau on 8/17/2017 // Copyright 2017 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
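Reviewer note: taa_evalFeedbackColor keeps the Lottes-style unbiased-luminance feedback, now operating on vec4 so the intensity alpha is carried through the mix as well. The weight itself is unchanged; isolated here as a sketch (the sketch_ name and the explicit min/max parameters are mine, the shader hardcodes 0.1 and 0.9):

```glsl
// Sketch of the history feedback weight: the more source and history
// luminance disagree, the less history is trusted, which limits ghosting.
float sketch_taaFeedbackWeight(float lumSource, float lumHistory,
                               float blendFactor, float feedbackMin, float feedbackMax) {
    float unbiasedDiff = abs(lumSource - lumHistory) / max(lumSource, max(lumHistory, 0.2));
    float unbiasedWeight = 1.0 - unbiasedDiff;
    float kFeedback = mix(feedbackMin, feedbackMax, unbiasedWeight * unbiasedWeight);
    return kFeedback * blendFactor;
}
```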
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -21,7 +22,6 @@ void main(void) { vec3 nextColor = texture(nextMap, varTexCoord0).xyz; outFragColor = vec4(nextColor, 1.0); - // Pixel being shaded vec3 sourceColor = texture(sourceMap, varTexCoord0).xyz; diff --git a/libraries/render-utils/src/velocityBuffer_cameraMotion.slf b/libraries/render-utils/src/velocityBuffer_cameraMotion.slf deleted file mode 100644 index 0ec63a7b1d..0000000000 --- a/libraries/render-utils/src/velocityBuffer_cameraMotion.slf +++ /dev/null @@ -1,43 +0,0 @@ -<@include gpu/Config.slh@> -<$VERSION_HEADER$> -// Generated on <$_SCRIBE_DATE$> -// -// velocityBuffer_cameraMotion.frag -// -// Created by Sam Gateau on 6/3/16. -// Copyright 2016 High Fidelity, Inc. -// -// Distributed under the Apache License, Version 2.0. -// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html -// - -<@include DeferredTransform.slh@> -<$declareDeferredFrameTransform()$> - -layout(location=0) in vec2 varTexCoord0; -layout(location=0) out vec4 outFragColor; - -LAYOUT(binding=RENDER_UTILS_TEXTURE_TAA_DEPTH) uniform sampler2D depthMap; - - -void main(void) { - // Pixel being shaded - ivec2 pixelPos; - vec2 texcoordPos; - ivec4 stereoSide; - ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide); - - float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x; - - // The position of the pixel fragment in Eye space then in world space - vec3 eyePos = evalUnjitteredEyePositionFromZdb(stereoSide.x, Zdb, texcoordPos); - vec3 worldPos = (getViewInverse() * vec4(eyePos, 1.0)).xyz; - - vec3 prevEyePos = (getPreviousView() * vec4(worldPos, 1.0)).xyz; - vec4 prevClipPos = (getUnjitteredProjection(stereoSide.x) * vec4(prevEyePos, 1.0)); - vec2 prevUV = 0.5 * (prevClipPos.xy / prevClipPos.w) + vec2(0.5); - - //vec2 imageSize = getWidthHeight(0); - vec2 imageSize = vec2(1.0, 1.0); - outFragColor = vec4( ((texcoordPos - prevUV) * imageSize), 0.0, 0.0); -} diff --git a/libraries/render-utils/src/web_browser.slf b/libraries/render-utils/src/web_browser.slf index f746916d3d..4ba7422d24 100644 --- a/libraries/render-utils/src/web_browser.slf +++ b/libraries/render-utils/src/web_browser.slf @@ -5,6 +5,7 @@ // // Created by Anthony Thibault on 7/25/16. // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -35,6 +36,7 @@ LAYOUT(binding=0) uniform sampler2D webTexture; layout(location=RENDER_UTILS_ATTR_POSITION_ES) in vec4 _positionES; <@else@> layout(location=RENDER_UTILS_ATTR_NORMAL_WS) in vec3 _normalWS; + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS; <@endif@> layout(location=RENDER_UTILS_ATTR_COLOR) in vec4 _color; layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec4 _texCoord01; @@ -69,6 +71,6 @@ void main(void) { } } <@else@> - packDeferredFragmentUnlit(normalize(_normalWS), 1.0, texel.rgb); + packDeferredFragmentUnlit(_prevPositionCS, normalize(_normalWS), 1.0, texel.rgb); <@endif@> } diff --git a/libraries/render-utils/src/web_browser.slv b/libraries/render-utils/src/web_browser.slv index 07b4d7d3d7..ee6305ab75 100644 --- a/libraries/render-utils/src/web_browser.slv +++ b/libraries/render-utils/src/web_browser.slv @@ -5,6 +5,7 @@ // // Created by Andrzej Kapolka on 9/15/14. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. 
// // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -22,6 +23,7 @@ layout(location=RENDER_UTILS_ATTR_POSITION_ES) out vec4 _positionES; <@else@> layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS; + layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS; <@endif@> layout(location=RENDER_UTILS_ATTR_COLOR) out vec4 _color; layout(location=RENDER_UTILS_ATTR_TEXCOORD01) out vec4 _texCoord01; @@ -33,9 +35,9 @@ void main(void) { TransformCamera cam = getTransformCamera(); TransformObject obj = getTransformObject(); <@if HIFI_USE_FORWARD@> - <$transformModelToWorldAndEyeAndClipPos(cam, obj, inPosition, _positionWS, _positionES, gl_Position)$> + <$transformModelToWorldEyeAndClipPos(cam, obj, inPosition, _positionWS, _positionES, gl_Position)$> <@else@> - <$transformModelToClipPos(cam, obj, inPosition, gl_Position)$> + <$transformModelToClipPosAndPrevClipPos(cam, obj, inPosition, gl_Position, _prevPositionCS)$> <$transformModelToWorldDir(cam, obj, inNormal.xyz, _normalWS)$> <@endif@> } \ No newline at end of file diff --git a/libraries/render-utils/src/zone_drawAmbient.slf b/libraries/render-utils/src/zone_drawAmbient.slf index 07eab08a35..64c91de362 100644 --- a/libraries/render-utils/src/zone_drawAmbient.slf +++ b/libraries/render-utils/src/zone_drawAmbient.slf @@ -30,7 +30,7 @@ void main(void) { vec3 spherePos = normalize(vec3(sphereUV, sqrt(1.0 - sphereR2))); - vec3 fragNormal = vec3(getViewInverse() * vec4(spherePos, 0.0)); + vec3 fragNormal = vec3(getViewInverse(getStereoSideFromFragCoord()) * vec4(spherePos, 0.0)); LightAmbient lightAmbient = getLightAmbient(); diff --git a/libraries/render-utils/src/zone_drawKeyLight.slf b/libraries/render-utils/src/zone_drawKeyLight.slf index 7174914ed8..0cd6bbdbb1 100644 --- a/libraries/render-utils/src/zone_drawKeyLight.slf +++ b/libraries/render-utils/src/zone_drawKeyLight.slf @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 5/16/17. // Copyright 2017 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -33,8 +34,9 @@ void main(void) { const float INOUT_RATIO = 0.4; const float SUN_THRESHOLD = 0.99; + mat4 viewInverse = getViewInverse(getStereoSideFromFragCoord()); vec3 outSpherePos = normalize(vec3(sphereUV, -sqrt(1.0 - sphereR2))); - vec3 outNormal = vec3(getViewInverse() * vec4(outSpherePos, 0.0)); + vec3 outNormal = vec3(viewInverse * vec4(outSpherePos, 0.0)); float val = step(SUN_THRESHOLD, dot(-lightDirection, outNormal)); @@ -43,7 +45,7 @@ void main(void) { if (sphereR2 < INOUT_RATIO * INOUT_RATIO * SCOPE_RADIUS2) { vec2 inSphereUV = sphereUV / INOUT_RATIO; vec3 inSpherePos = normalize(vec3(inSphereUV, sqrt(1.0 - dot(inSphereUV.xy, inSphereUV.xy)))); - vec3 inNormal = vec3(getViewInverse() * vec4(inSpherePos, 0.0)); + vec3 inNormal = vec3(viewInverse * vec4(inSpherePos, 0.0)); vec3 marbleColor = max(lightIrradiance * vec3(dot(-lightDirection, inNormal)), vec3(0.01)); color += marbleColor; diff --git a/libraries/render-utils/src/zone_drawSkybox.slf b/libraries/render-utils/src/zone_drawSkybox.slf index 743b48d0bf..cd9d478ba7 100644 --- a/libraries/render-utils/src/zone_drawSkybox.slf +++ b/libraries/render-utils/src/zone_drawSkybox.slf @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 5/16/17. // Copyright 2017 High Fidelity, Inc. +// Copyright 2024 Overte e.V. 
// // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -30,7 +31,7 @@ void main(void) { vec3 spherePos = normalize(vec3(sphereUV, -sqrt(1.0 - sphereR2))); - vec3 direction = vec3(getViewInverse() * vec4(spherePos, 0.0)); + vec3 direction = vec3(getViewInverse(getStereoSideFromFragCoord()) * vec4(spherePos, 0.0)); vec3 color = skybox.color.rgb; diff --git a/libraries/render/src/render/DrawSceneOctree.cpp b/libraries/render/src/render/DrawSceneOctree.cpp index 1b52de1429..9c2f802ca9 100644 --- a/libraries/render/src/render/DrawSceneOctree.cpp +++ b/libraries/render/src/render/DrawSceneOctree.cpp @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 1/25/16. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -29,7 +30,7 @@ gpu::PipelinePointer DrawSceneOctree::_drawLODReticlePipeline; gpu::PipelinePointer DrawSceneOctree::_drawItemBoundPipeline; gpu::Stream::FormatPointer DrawSceneOctree::_cellBoundsFormat = std::make_shared(); -DrawSceneOctree::DrawSceneOctree() { +DrawSceneOctree::DrawSceneOctree(uint transformSlot) : _transformSlot(transformSlot) { std::once_flag once; std::call_once(once, [] { _cellBoundsFormat->setAttribute(0, 0, gpu::Element(gpu::VEC4, gpu::INT32, gpu::XYZW), 0, gpu::Stream::PER_INSTANCE); @@ -69,6 +70,7 @@ const gpu::PipelinePointer DrawSceneOctree::getDrawLODReticlePipeline() { void DrawSceneOctree::configure(const Config& config) { _showVisibleCells = config.showVisibleCells; _showEmptyCells = config.showEmptyCells; + _showLODReticle = config.showLODReticle; } void DrawSceneOctree::run(const RenderContextPointer& renderContext, const ItemSpatialTree::ItemSelection& inSelection) { @@ -81,53 +83,50 @@ void DrawSceneOctree::run(const RenderContextPointer& renderContext, const ItemS std::static_pointer_cast(renderContext->jobConfig)->numFreeCells = (int)scene->getSpatialTree().getNumFreeCells(); gpu::doInBatch("DrawSceneOctree::run", args->_context, [&](gpu::Batch& batch) { - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); batch.setViewportTransform(args->_viewport); + batch.setSavedViewProjectionTransform(_transformSlot); - batch.setProjectionTransform(projMat); - batch.setViewTransform(viewMat, true); - batch.setModelTransform(Transform()); + if (_showEmptyCells || _showVisibleCells) { + batch.setModelTransform(Transform()); - // bind the one gpu::Pipeline we need - batch.setPipeline(getDrawCellBoundsPipeline()); - batch.setInputFormat(_cellBoundsFormat); + // bind the one gpu::Pipeline we need + batch.setPipeline(getDrawCellBoundsPipeline()); + batch.setInputFormat(_cellBoundsFormat); - std::vector cellBounds; - auto drawCellBounds = [this, &cellBounds, &scene](const std::vector& cells) { - cellBounds.reserve(cellBounds.size() + cells.size()); - for (const auto& cellID : cells) { - auto cell = scene->getSpatialTree().getConcreteCell(cellID); - auto cellLoc = cell.getlocation(); - glm::ivec4 cellLocation(cellLoc.pos.x, cellLoc.pos.y, cellLoc.pos.z, cellLoc.depth); + std::vector cellBounds; + auto drawCellBounds = [this, &cellBounds, &scene](const std::vector& cells) { + cellBounds.reserve(cellBounds.size() + cells.size()); + for (const auto& cellID : cells) { + auto cell = scene->getSpatialTree().getConcreteCell(cellID); + 
auto cellLoc = cell.getlocation(); + glm::ivec4 cellLocation(cellLoc.pos.x, cellLoc.pos.y, cellLoc.pos.z, cellLoc.depth); - bool empty = cell.isBrickEmpty() || !cell.hasBrick(); - if (empty) { - if (!_showEmptyCells) { + bool empty = cell.isBrickEmpty() || !cell.hasBrick(); + if (empty) { + if (!_showEmptyCells) { + continue; + } + cellLocation.w *= -1.0; + } else if (!_showVisibleCells) { continue; } - cellLocation.w *= -1.0; - } else if (!empty && !_showVisibleCells) { - continue; + cellBounds.push_back(cellLocation); } - cellBounds.push_back(cellLocation); - } - }; + }; - drawCellBounds(inSelection.cellSelection.insideCells); - drawCellBounds(inSelection.cellSelection.partialCells); - auto size = cellBounds.size() * sizeof(ivec4); - if (size > _cellBoundsBuffer->getSize()) { - _cellBoundsBuffer->resize(size); + drawCellBounds(inSelection.cellSelection.insideCells); + drawCellBounds(inSelection.cellSelection.partialCells); + auto size = cellBounds.size() * sizeof(ivec4); + if (size > _cellBoundsBuffer->getSize()) { + _cellBoundsBuffer->resize(size); + } + _cellBoundsBuffer->setSubData(0, cellBounds); + batch.setInputBuffer(0, _cellBoundsBuffer, 0, sizeof(ivec4)); + batch.drawInstanced((uint32_t)cellBounds.size(), gpu::LINES, 24); } - _cellBoundsBuffer->setSubData(0, cellBounds); - batch.setInputBuffer(0, _cellBoundsBuffer, 0, sizeof(ivec4)); - batch.drawInstanced((uint32_t)cellBounds.size(), gpu::LINES, 24); // Draw the LOD Reticle - { + if (_showLODReticle) { float angle = glm::degrees(getPerspectiveAccuracyHalfAngle(args->_sizeScale, args->_boundaryLevelAdjust)); Transform crosshairModel; crosshairModel.setTranslation(glm::vec3(0.0, 0.0, -1000.0)); @@ -187,14 +186,8 @@ void DrawItemSelection::run(const RenderContextPointer& renderContext, const Ite } gpu::doInBatch("DrawItemSelection::run", args->_context, [&](gpu::Batch& batch) { - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); batch.setViewportTransform(args->_viewport); - - batch.setProjectionTransform(projMat); - batch.setViewTransform(viewMat, true); + batch.setSavedViewProjectionTransform(_transformSlot); batch.setModelTransform(Transform()); // bind the one gpu::Pipeline we need diff --git a/libraries/render/src/render/DrawSceneOctree.h b/libraries/render/src/render/DrawSceneOctree.h index d6359264f2..eb000b4b89 100644 --- a/libraries/render/src/render/DrawSceneOctree.h +++ b/libraries/render/src/render/DrawSceneOctree.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 1/25/16. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -22,6 +23,7 @@ namespace render { Q_OBJECT Q_PROPERTY(bool showVisibleCells READ getShowVisibleCells WRITE setShowVisibleCells NOTIFY dirty()) Q_PROPERTY(bool showEmptyCells READ getShowEmptyCells WRITE setShowEmptyCells NOTIFY dirty()) + Q_PROPERTY(bool showLODReticle READ getShowLODReticle WRITE setShowLODReticle NOTIFY dirty()) Q_PROPERTY(int numAllocatedCells READ getNumAllocatedCells) Q_PROPERTY(int numFreeCells READ getNumFreeCells) @@ -35,15 +37,18 @@ namespace render { int getNumAllocatedCells() const { return numAllocatedCells; } int getNumFreeCells() const { return numFreeCells; } - bool showVisibleCells{ true }; - bool showEmptyCells{ false }; + bool showVisibleCells { false }; + bool showEmptyCells { false }; + bool showLODReticle { false }; bool getShowVisibleCells() { return showVisibleCells; } bool getShowEmptyCells() { return showEmptyCells; } + bool getShowLODReticle() { return showLODReticle; } public slots: void setShowVisibleCells(bool show) { showVisibleCells = show; emit dirty(); } void setShowEmptyCells(bool show) { showEmptyCells = show; emit dirty(); } + void setShowLODReticle(bool show) { showLODReticle = show; emit dirty(); } signals: void dirty(); @@ -56,18 +61,23 @@ namespace render { static gpu::Stream::FormatPointer _cellBoundsFormat; gpu::BufferPointer _cellBoundsBuffer { std::make_shared() }; - bool _showVisibleCells; // initialized by Config - bool _showEmptyCells; // initialized by Config + // initialized by Config + bool _showVisibleCells; + bool _showEmptyCells; + bool _showLODReticle; public: using Config = DrawSceneOctreeConfig; using JobModel = Job::ModelI; - DrawSceneOctree(); + DrawSceneOctree(uint transformSlot); void configure(const Config& config); void run(const RenderContextPointer& renderContext, const ItemSpatialTree::ItemSelection& selection); + private: + uint _transformSlot; + static const gpu::PipelinePointer getDrawCellBoundsPipeline(); static const gpu::PipelinePointer getDrawLODReticlePipeline(); static const gpu::PipelinePointer getDrawItemBoundPipeline(); @@ -120,11 +130,14 @@ namespace render { using Config = DrawItemSelectionConfig; using JobModel = Job::ModelI; - DrawItemSelection() {} + DrawItemSelection(uint transformSlot) : _transformSlot(transformSlot) {} void configure(const Config& config); void run(const RenderContextPointer& renderContext, const ItemSpatialTree::ItemSelection& selection); + private: + uint _transformSlot; + static const gpu::PipelinePointer getDrawItemBoundPipeline(); }; } diff --git a/libraries/render/src/render/DrawStatus.cpp b/libraries/render/src/render/DrawStatus.cpp index d722197205..eadffe6439 100644 --- a/libraries/render/src/render/DrawStatus.cpp +++ b/libraries/render/src/render/DrawStatus.cpp @@ -89,7 +89,7 @@ void DrawStatus::configure(const Config& config) { _showFade = config.showFade; } -void DrawStatus::run(const RenderContextPointer& renderContext, const Input& input) { +void DrawStatus::run(const RenderContextPointer& renderContext, const Input& inItems) { assert(renderContext->args); assert(renderContext->args->hasViewFrustum()); RenderArgs* args = renderContext->args; @@ -97,9 +97,6 @@ void DrawStatus::run(const RenderContextPointer& renderContext, const Input& inp const int NUM_STATUS_VEC4_PER_ITEM = 2; const int VEC4_LENGTH = 4; - const auto& inItems = input.get0(); - const auto jitter = input.get1(); - // First thing, we collect the bound and the status for all the items we want to render int 
nbItems = 0; render::ItemBounds itemBounds; @@ -213,15 +210,8 @@ void DrawStatus::run(const RenderContextPointer& renderContext, const Input& inp // Alright, something to render let's do it gpu::doInBatch("DrawStatus::run", args->_context, [&](gpu::Batch& batch) { - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); batch.setViewportTransform(args->_viewport); - - batch.setProjectionTransform(projMat); - batch.setProjectionJitter(jitter.x, jitter.y); - batch.setViewTransform(viewMat, true); + batch.setSavedViewProjectionTransform(_transformSlot); batch.setModelTransform(Transform()); // bind the one gpu::Pipeline we need diff --git a/libraries/render/src/render/DrawStatus.h b/libraries/render/src/render/DrawStatus.h index 82bd50d479..1fb460e1da 100644 --- a/libraries/render/src/render/DrawStatus.h +++ b/libraries/render/src/render/DrawStatus.h @@ -43,11 +43,11 @@ namespace render { class DrawStatus { public: using Config = DrawStatusConfig; - using Input = VaryingSet2; + using Input = ItemBounds; using JobModel = Job::ModelI; DrawStatus() {} - DrawStatus(const gpu::TexturePointer statusIconMap) { setStatusIconMap(statusIconMap); } + DrawStatus(const gpu::TexturePointer statusIconMap, uint transformSlot) : _transformSlot(transformSlot) { setStatusIconMap(statusIconMap); } void configure(const Config& config); void run(const RenderContextPointer& renderContext, const Input& input); @@ -70,6 +70,8 @@ namespace render { gpu::BufferPointer _boundsBuffer; gpu::BufferPointer _instanceBuffer; gpu::TexturePointer _statusIconMap; + + uint _transformSlot; }; } diff --git a/libraries/render/src/render/DrawTask.cpp b/libraries/render/src/render/DrawTask.cpp index 3684f790e3..a1a2da1d2f 100644 --- a/libraries/render/src/render/DrawTask.cpp +++ b/libraries/render/src/render/DrawTask.cpp @@ -3,7 +3,8 @@ // render/src/render // // Created by Sam Gateau on 5/21/15. -// Copyright 20154 High Fidelity, Inc. +// Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -196,12 +197,7 @@ void DrawBounds::run(const RenderContextPointer& renderContext, args->_batch = &batch; // Setup projection - glm::mat4 projMat; - Transform viewMat; - args->getViewFrustum().evalProjectionMatrix(projMat); - args->getViewFrustum().evalViewTransform(viewMat); - batch.setProjectionTransform(projMat); - batch.setViewTransform(viewMat); + batch.setSavedViewProjectionTransform(_transformSlot); batch.setModelTransform(Transform()); // Bind program diff --git a/libraries/render/src/render/DrawTask.h b/libraries/render/src/render/DrawTask.h index 5ac5edd5f9..5311ad58d2 100644 --- a/libraries/render/src/render/DrawTask.h +++ b/libraries/render/src/render/DrawTask.h @@ -3,7 +3,8 @@ // render/src/render // // Created by Sam Gateau on 5/21/15. -// Copyright 20154 High Fidelity, Inc. +// Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -58,6 +59,8 @@ public: using Inputs = render::ItemBounds; using JobModel = render::Job::ModelI; + DrawBounds(uint transformSlot) : _transformSlot(transformSlot) {} + void configure(const Config& configuration) {} void run(const render::RenderContextPointer& renderContext, const Inputs& items); @@ -67,6 +70,7 @@ private: static gpu::PipelinePointer _boundsPipeline; gpu::BufferPointer _drawBuffer; gpu::BufferPointer _paramsBuffer; + uint _transformSlot; }; class DrawQuadVolumeConfig : public render::JobConfig { diff --git a/libraries/render/src/render/Engine.h b/libraries/render/src/render/Engine.h index f1d59727d3..e769297bbf 100644 --- a/libraries/render/src/render/Engine.h +++ b/libraries/render/src/render/Engine.h @@ -4,6 +4,7 @@ // // Created by Sam Gateau on 3/3/15. // Copyright 2014 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -85,6 +86,11 @@ namespace render { class RenderEngine : public Engine { public: + enum TransformSlot : uint8_t { + TS_MAIN_VIEW = 0, + TS_BACKGROUND_VIEW + }; + RenderEngine(); ~RenderEngine() = default; diff --git a/libraries/render/src/render/ShapePipeline.cpp b/libraries/render/src/render/ShapePipeline.cpp index 4d1682de9a..bd00c360eb 100644 --- a/libraries/render/src/render/ShapePipeline.cpp +++ b/libraries/render/src/render/ShapePipeline.cpp @@ -4,6 +4,7 @@ // // Created by Zach Pomerantz on 12/31/15. // Copyright 2015 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -101,6 +102,7 @@ void ShapePlumber::addPipeline(const Filter& filter, const gpu::ShaderPointer& p locations->lightBufferUnit = reflection.validUniformBuffer(graphics::slot::buffer::Light); locations->lightAmbientBufferUnit = reflection.validUniformBuffer(graphics::slot::buffer::AmbientLight); locations->lightAmbientMapUnit = reflection.validTexture(graphics::slot::texture::Skybox); + locations->deferredFrameTransformBufferUnit = reflection.validUniformBuffer(render_utils::slot::buffer::DeferredFrameTransform); locations->fadeMaskTextureUnit = reflection.validTexture(render_utils::slot::texture::FadeMask); locations->fadeParameterBufferUnit = reflection.validUniformBuffer(render_utils::slot::buffer::FadeParameters); locations->fadeObjectParameterBufferUnit = reflection.validUniformBuffer(render_utils::slot::buffer::FadeObjectParameters); diff --git a/libraries/render/src/render/ShapePipeline.h b/libraries/render/src/render/ShapePipeline.h index fd8b729ffa..87a1d3c834 100644 --- a/libraries/render/src/render/ShapePipeline.h +++ b/libraries/render/src/render/ShapePipeline.h @@ -288,6 +288,7 @@ public: bool lightBufferUnit{ false }; bool lightAmbientBufferUnit{ false }; bool lightAmbientMapUnit{ false }; + bool deferredFrameTransformBufferUnit{ false }; bool fadeMaskTextureUnit{ false }; bool fadeParameterBufferUnit{ false }; bool fadeObjectParameterBufferUnit{ false }; diff --git a/libraries/render/src/render/drawItemStatus.slv b/libraries/render/src/render/drawItemStatus.slv index 7aac26fe2e..166b1f7894 100644 --- a/libraries/render/src/render/drawItemStatus.slv +++ b/libraries/render/src/render/drawItemStatus.slv @@ -7,6 +7,7 @@ // // Created by Sam Gateau on 6/30/2015. // Copyright 2015 High Fidelity, Inc. 
+// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -128,8 +129,7 @@ void main(void) { // Final position in pixel space vec2 quadPixelPos = offset.xy + quadPos.xy * 0.5 * iconScale; - vec4 viewport; - <$transformCameraViewport(cam, viewport)$>; + vec4 viewport = cam._viewport; vec2 pixelToClip = vec2(2.0 / viewport.z, 2.0 / viewport.w); gl_Position = anchorPoint + (anchorPoint.w * vec4(quadPixelPos * pixelToClip, 0.0, 0.0)); diff --git a/libraries/shared/CMakeLists.txt b/libraries/shared/CMakeLists.txt index 3c08c9a1bc..bc54bcc034 100644 --- a/libraries/shared/CMakeLists.txt +++ b/libraries/shared/CMakeLists.txt @@ -1,12 +1,11 @@ # Copyright 2013-2020, High Fidelity, Inc. -# Copyright 2021-2023 Overte e.V. +# Copyright 2021-2025 Overte e.V. # SPDX-License-Identifier: Apache-2.0 set(TARGET_NAME shared) include_directories("${QT_DIR}/include/QtCore/${QT_VERSION}/QtCore" "${QT_DIR}/include/QtCore/${QT_VERSION}") -# TODO: there isn't really a good reason to have Script linked here - let's get what is requiring it out (RegisteredMetaTypes.cpp) setup_hifi_library(Gui Network) if (WIN32) diff --git a/libraries/shared/src/AACube.h b/libraries/shared/src/AACube.h index 66b29e3185..27c424cadb 100644 --- a/libraries/shared/src/AACube.h +++ b/libraries/shared/src/AACube.h @@ -20,6 +20,7 @@ #include #include "BoxBase.h" +#include "SerDes.h" class AABox; class Extents; @@ -80,6 +81,10 @@ private: glm::vec3 _corner; float _scale; + + friend DataSerializer& operator<<(DataSerializer &ser, const AACube &cube); + friend DataDeserializer& operator>>(DataDeserializer &des, AACube &cube); + }; inline bool operator==(const AACube& a, const AACube& b) { @@ -99,5 +104,16 @@ inline QDebug operator<<(QDebug debug, const AACube& cube) { return debug; } +inline DataSerializer& operator<<(DataSerializer &ser, const AACube &cube) { + ser << cube._corner; + ser << cube._scale; + return ser; +} + +inline DataDeserializer& operator>>(DataDeserializer &des, AACube &cube) { + des >> cube._corner; + des >> cube._scale; + return des; +} #endif // hifi_AACube_h diff --git a/libraries/shared/src/BlendshapeConstants.h b/libraries/shared/src/BlendshapeConstants.h index 596e7df4ee..b741059146 100644 --- a/libraries/shared/src/BlendshapeConstants.h +++ b/libraries/shared/src/BlendshapeConstants.h @@ -122,6 +122,25 @@ struct BlendshapeOffsetUnpacked { float positionOffsetX, positionOffsetY, positionOffsetZ; float normalOffsetX, normalOffsetY, normalOffsetZ; float tangentOffsetX, tangentOffsetY, tangentOffsetZ; + + /** + * @brief Set all components of all the offsets to zero + * + * @note glm::vec3 is not trivially copyable, so it's not correct to clear it with memset. + */ + void clear() { + positionOffsetX = 0.0f; + positionOffsetY = 0.0f; + positionOffsetZ = 0.0f; + + normalOffsetX = 0.0f; + normalOffsetY = 0.0f; + normalOffsetZ = 0.0f; + + tangentOffsetX = 0.0f; + tangentOffsetY = 0.0f; + tangentOffsetZ = 0.0f; + } }; using BlendshapeOffset = BlendshapeOffsetPacked; diff --git a/libraries/shared/src/PickFilter.h b/libraries/shared/src/PickFilter.h index 1cc1a8b0b5..acf0c70eab 100644 --- a/libraries/shared/src/PickFilter.h +++ b/libraries/shared/src/PickFilter.h @@ -1,6 +1,7 @@ // -// Created by Sam Gondelman on 12/7/18. +// Created by Sam Gondelman on December 7th, 2018. // Copyright 2018 High Fidelity, Inc. +// Copyright 2025 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -26,7 +27,7 @@ public: * PICK_DOMAIN_ENTITIES1Include domain entities when intersecting. * PICK_AVATAR_ENTITIES2Include avatar entities when intersecting. * PICK_LOCAL_ENTITIES4Include local entities when intersecting. - * PICK_AVATATRS8Include avatars when intersecting. + * PICK_AVATARS8Include avatars when intersecting. * PICK_HUD16Include the HUD surface when intersecting in HMD mode. * PICK_INCLUDE_VISIBLE32Include visible objects when intersecting. * PICK_INCLUDE_INVISIBLE64Include invisible objects when intersecting. diff --git a/libraries/shared/src/SerDes.cpp b/libraries/shared/src/SerDes.cpp new file mode 100644 index 0000000000..ad32d7014f --- /dev/null +++ b/libraries/shared/src/SerDes.cpp @@ -0,0 +1,85 @@ +// +// SerDes.h +// +// +// Created by Dale Glass on 5/6/2022 +// Copyright 2024 Overte e.V. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include + +#include "SerDes.h" +const int DataSerializer::DEFAULT_SIZE; +const char DataSerializer::PADDING_CHAR; + + +static void dumpHex(QDebug &debug, const char*buf, size_t len) { + QString literal; + QString hex; + + for(size_t i=0;i(c), 16 ); + if ( hnum.length() == 1 ) { + hnum.prepend("0"); + } + + hex.append(hnum + " "); + + if ( literal.length() == 16 || (i+1 == len) ) { + while( literal.length() < 16 ) { + literal.append(" "); + hex.append(" "); + } + + debug << literal << " " << hex << "\n"; + literal.clear(); + hex.clear(); + } + } +} + + +QDebug operator<<(QDebug debug, const DataSerializer &ser) { + debug << "{ capacity =" << ser.capacity() << "; length = " << ser.length() << "; pos = " << ser.pos() << "}"; + debug << "\n"; + + dumpHex(debug, ser.buffer(), ser.length()); + return debug; +} + + +QDebug operator<<(QDebug debug, const DataDeserializer &des) { + debug << "{ length = " << des.length() << "; pos = " << des.pos() << "}"; + debug << "\n"; + + + dumpHex(debug, des.buffer(), des.length()); + return debug; +} + + +void DataSerializer::changeAllocation(size_t new_size) { + while ( _capacity < new_size) { + _capacity *= 2; + } + + char *new_buf = new char[_capacity]; + assert( *new_buf ); + + memcpy(new_buf, _store, _length); + char *prev_buf = _store; + _store = new_buf; + + delete []prev_buf; +} diff --git a/libraries/shared/src/SerDes.h b/libraries/shared/src/SerDes.h new file mode 100644 index 0000000000..f80d09a60a --- /dev/null +++ b/libraries/shared/src/SerDes.h @@ -0,0 +1,953 @@ +// +// SerDes.h +// +// +// Created by Dale Glass on 5/6/2022 +// Copyright 2024 Overte e.V. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#pragma once +#include +#include +#include +#include +#include + +/** + * @brief Data serializer + * + * When encoding, this class takes in data and encodes it into a buffer. No attempt is made to store version numbers, lengths, + * or any other metadata. It's entirely up to the user to use the class in such a way that the process can be + * correctly reversed if variable-length or optional fields are used. + * + * It can operate both on an internal, dynamically-allocated buffer, or an externally provided, fixed-size one. + * If an external store is used, the class will refuse to add data once capacity is reached and set the overflow flag. 
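 + * As a minimal illustrative sketch (the buffer name and size below are arbitrary, not part of the API), writing into an external, fixed-size store and then checking for overflow could look like this: + * + * @code {.cpp} + * char packet[64]; + * DataSerializer ser(packet, sizeof(packet)); + * ser << (uint8_t)1;  // e.g. a protocol version + * if (ser.isOverflow()) { + *     // the fixed buffer was too small; the write past the limit was refused + * } + * @endcode + *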
+ * When decoding, this class operates on a fixed size buffer. If an attempt to read past the end is made, the read fails, + * and the overflow flag is set. + * + * The class was written for the maximum simplicity possible and inline friendliness. + * + * Example of encoding: + * + * @code {.cpp} + * uint8_t version = 1; + * uint16_t count = 1; + * glm::vec3 pos{1.5, 2.0, 9.0}; + * + * Serializer ser; + * ser << version; + * ser << count; + * ser << pos; + * + * // Serialized data is in ser.buffer(), ser.length() long. + * @endcode + * + * This object should be modified directly to add support for any primitive and common datatypes in the code. To support serializing/deserializing + * classes and structures, implement a `operator<<` and `operator>>` functions for that object, eg: + * + * @code {.cpp} + * DataSerializer &operator<<(DataSerializer &ser, const Object &o) { + * ser << o._borderColor; + * ser << o._maxAnisotropy; + * ser << o._filter; + * return ser; + * } + * @endcode + * + */ +class DataSerializer { + public: + + /** + * @brief RAII tracker of advance position + * + * When a custom operator<< is implemented for DataSserializer, + * this class allows to easily keep track of how much data has been added and + * adjust the parent's lastAdvance() count on this class' destruction. + * + * @code {.cpp} + * DataSerializer &operator<<(DataSerializer &ser, const Object &o) { + * DataSerializer::SizeTracker tracker(ser); + * + * ser << o._borderColor; + * ser << o._maxAnisotropy; + * ser << o._filter; + * return ser; + * } + * @endcode + */ + class SizeTracker { + public: + SizeTracker(DataSerializer &parent) : _parent(parent) { + _start_pos = _parent.pos(); + } + + ~SizeTracker() { + size_t cur_pos = _parent.pos(); + + if ( cur_pos >= _start_pos ) { + _parent._lastAdvance = cur_pos - _start_pos; + } else { + _parent._lastAdvance = 0; + } + } + + private: + DataSerializer &_parent; + size_t _start_pos = 0; + }; + + /** + * @brief Default size for a dynamically allocated buffer. + * + * Since this is mostly intended to be used for networking, we default to the largest probable MTU here. + */ + static const int DEFAULT_SIZE = 1500; + + /** + * @brief Character to use for padding. + * + * Padding should be ignored, so it doesn't matter what we go with here, but it can be useful to set it + * to something that would be distinctive in a dump. + */ + static const char PADDING_CHAR = (char)0xAA; + + /** + * @brief Construct a dynamically allocated serializer + * + * If constructed this way, an internal buffer will be dynamically allocated and grown as needed. + * + * The buffer is SerDes::DEFAULT_SIZE bytes by default, and doubles in size every time the limit is reached. + */ + DataSerializer() { + _capacity = DEFAULT_SIZE; + _pos = 0; + _length = 0; + _store = new char[_capacity]; + } + + /** + * @brief Construct a statically allocated serializer + * + * If constructed this way, the external buffer will be used to store data. The class will refuse to + * keep adding data if the maximum length is reached, write a critical message to the log, and set + * the overflow flag. 
+ * + * The flag can be read with isOverflow() + * + * @param externalStore External data store + * @param storeLength Length of the data store + */ + DataSerializer(char *externalStore, size_t storeLength) { + _capacity = storeLength; + _length = 0; + _pos = 0; + _storeIsExternal = true; + _store = externalStore; + } + + /** + * @brief Construct a statically allocated serializer + * + * If constructed this way, the external buffer will be used to store data. The class will refuse to + * keep adding data if the maximum length is reached, and set the overflow flag. + * + * The flag can be read with isOverflow() + * + * @param externalStore External data store + * @param storeLength Length of the data store + */ + DataSerializer(uint8_t *externalStore, size_t storeLength) : DataSerializer((char*)externalStore, storeLength) { + + } + + DataSerializer(const DataSerializer &) = delete; + DataSerializer &operator=(const DataSerializer &) = delete; + + + + ~DataSerializer() { + if (!_storeIsExternal) { + delete[] _store; + } + } + + /** + * @brief Adds padding to the output + * + * The bytes will be set to SerDes::PADDING_CHAR, which is a constant in the source code. + * Since padding isn't supposed to be read, it can be any value and is intended to + * be set to something that can be easily recognized in a dump. + * + * @param bytes Number of bytes to add + */ + void addPadding(size_t bytes) { + if (!extendBy(bytes, "padding")) { + return; + } + + // Fill padding with something recognizable. Will keep valgrind happier. + memset(&_store[_pos], PADDING_CHAR, bytes); + _pos += bytes; + } + + /** + * @brief Add an uint8_t to the output + * + * @param c Character to add + * @return SerDes& This object + */ + DataSerializer &operator<<(uint8_t c) { + return *this << int8_t(c); + } + + /** + * @brief Add an int8_t to the output + * + * @param c Character to add + * @return SerDes& This object + */ + DataSerializer &operator<<(int8_t c) { + if (!extendBy(1, "int8_t")) { + return *this; + } + + _store[_pos++] = c; + return *this; + } + + + /////////////////////////////////////////////////////////// + + /** + * @brief Add an uint16_t to the output + * + * @param val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(uint16_t val) { + return *this << int16_t(val); + } + + /** + * @brief Add an int16_t to the output + * + * @param val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(int16_t val) { + if (!extendBy(sizeof(val), "int16_t")) { + return *this; + } + + memcpy(&_store[_pos], (char*)&val, sizeof(val)); + _pos += sizeof(val); + return *this; + } + + + + /////////////////////////////////////////////////////////// + + /** + * @brief Add an uint32_t to the output + * + * @param val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(uint32_t val) { + return *this << int32_t(val); + } + + /** + * @brief Add an int32_t to the output + * + * @param val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(int32_t val) { + if (!extendBy(sizeof(val), "int32_t")) { + return *this; + } + + memcpy(&_store[_pos], (char*)&val, sizeof(val)); + _pos += sizeof(val); + return *this; + } + + /////////////////////////////////////////////////////////// + + /** + * @brief Add an uint64_t to the output + * + * @param val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(uint64_t val) { + return *this << int64_t(val); + } + + /** + * @brief Add an int64_t to the output + * + * @param 
val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(int64_t val) { + if (!extendBy(sizeof(val), "int64_t")) { + return *this; + } + + memcpy(&_store[_pos], (char*)&val, sizeof(val)); + _pos += sizeof(val); + return *this; + } + + /////////////////////////////////////////////////////////// + + /** + * @brief Add an float to the output + * + * @param val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(float val) { + if (extendBy(sizeof(val), "float")) { + memcpy(&_store[_pos], (char*)&val, sizeof(val)); + _pos += sizeof(val); + } + return *this; + } + + + /////////////////////////////////////////////////////////// + + + /** + * @brief Add an glm::vec3 to the output + * + * @param val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(glm::vec3 val) { + size_t sz = sizeof(val.x); + if (extendBy(sz*3, "glm::vec3")) { + memcpy(&_store[_pos ], (char*)&val.x, sz); + memcpy(&_store[_pos + sz ], (char*)&val.y, sz); + memcpy(&_store[_pos + sz*2], (char*)&val.z, sz); + + _pos += sz*3; + } + return *this; + } + + /////////////////////////////////////////////////////////// + + /** + * @brief Add a glm::vec4 to the output + * + * @param val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(glm::vec4 val) { + size_t sz = sizeof(val.x); + if (extendBy(sz*4, "glm::vec4")) { + memcpy(&_store[_pos ], (char*)&val.x, sz); + memcpy(&_store[_pos + sz ], (char*)&val.y, sz); + memcpy(&_store[_pos + sz*2], (char*)&val.z, sz); + memcpy(&_store[_pos + sz*3], (char*)&val.w, sz); + + _pos += sz*4; + } + return *this; + } + + /////////////////////////////////////////////////////////// + + /** + * @brief Add a glm::ivec2 to the output + * + * @param val Value to add + * @return SerDes& This object + */ + DataSerializer &operator<<(glm::ivec2 val) { + size_t sz = sizeof(val.x); + if (extendBy(sz*2, "glm::ivec2")) { + memcpy(&_store[_pos ], (char*)&val.x, sz); + memcpy(&_store[_pos + sz ], (char*)&val.y, sz); + + _pos += sz*2; + } + return *this; + } + + + /////////////////////////////////////////////////////////// + + /** + * @brief Write a null-terminated string into the buffer + * + * The `\0` at the end of the string is also written. + * + * @param val Value to write + * @return SerDes& This object + */ + DataSerializer &operator<<(const char *val) { + size_t len = strlen(val)+1; + if (extendBy(len, "string")) { + memcpy(&_store[_pos], val, len); + _pos += len; + } + return *this; + } + + /** + * @brief Write a QString into the buffer + * + * The string is encoded in UTF-8 and the `\0` at the end of the string is also written. + * + * @param val Value to write + * @return SerDes& This object + */ + DataSerializer &operator<<(const QString &val) { + return *this << val.toUtf8().constData(); + } + + + /////////////////////////////////////////////////////////// + + /** + * @brief Pointer to the start of the internal buffer. + * + * The allocated amount can be found with capacity(). + * + * The end of the stored data can be found with length(). + * + * @return Pointer to buffer + */ + char *buffer() const { return _store; } + + /** + * @brief Current position in the buffer. Starts at 0. + * + * @return size_t + */ + size_t pos() const { return _pos; } + + /** + * @brief Last position that was written to in the buffer. Starts at 0. + * + * @return size_t + */ + size_t length() const { return _length; } + + /** + * @brief Current capacity of the buffer. 
+ * + * If the buffer is dynamically allocated, it can grow. + * + * If the buffer is static, this is a fixed limit. + * + * @return size_t + */ + size_t capacity() const { return _capacity; } + + /** + * @brief Whether there's any data in the buffer + * + * @return true Something has been written + * @return false The buffer is empty + */ + bool isEmpty() const { return _length == 0; } + + /** + * @brief The buffer size limit has been reached + * + * This can only return true for a statically allocated buffer. + * + * @return true Limit reached + * @return false There is still room + */ + bool isOverflow() const { return _overflow; } + + /** + * @brief Reset the serializer to the start, clear overflow bit. + * + */ + void rewind() { _pos = 0; _overflow = false; _lastAdvance = 0; } + + + /** + * @brief Size of the last advance + * + * This can be used to get how many bytes were added in the last operation. + * It is reset on rewind() + * + * @return size_t + */ + size_t lastAdvance() const { return _lastAdvance; } + + /** + * @brief Dump the contents of this object into QDebug + * + * This produces a dump of the internal state, and an ASCII/hex dump of + * the contents, for debugging. + * + * @param debug Qt QDebug stream + * @param ds This object + * @return QDebug + */ + friend QDebug operator<<(QDebug debug, const DataSerializer &ds); + + private: + bool extendBy(size_t bytes, const QString &type_name) { + //qDebug() << "Extend by" << bytes; + + if ( _capacity < _length + bytes) { + if ( _storeIsExternal ) { + qCritical() << "Serializer trying to write past end of output buffer of" << _capacity << "bytes. Error writing" << bytes << "bytes for" << type_name << " from position " << _pos << ", length " << _length; + _overflow = true; + return false; + } + + changeAllocation(_length + bytes); + } + + _length += bytes; + _lastAdvance = bytes; + return true; + } + + // This is split up here to try to make the class as inline-friendly as possible. + void changeAllocation(size_t new_size); + + char *_store; + bool _storeIsExternal = false; + bool _overflow = false; + size_t _capacity = 0; + size_t _length = 0; + size_t _pos = 0; + size_t _lastAdvance = 0; +}; + +/** + * @brief Data deserializer + * + * This class operates on a fixed size buffer. If an attempt to read past the end is made, the read fails, + * and the overflow flag is set. + * + * The class was written for the maximum simplicity possible and inline friendliness. + * + * Example of decoding: + * + * @code {.cpp} + * // Incoming data has been placed in: + * // char buffer[1024]; + * + * uint8_t version; + * uint16_t count; + * glm::vec3 pos; + * + * DataDeserializer des(buffer, sizeof(buffer)); + * des >> version; + * des >> count; + * des >> pos; + * @endcode + * + * This object should be modified directly to add support for any primitive and common datatypes in the code. To support deserializing + * classes and structures, implement an `operator>>` function for that object, eg: + * + * @code {.cpp} + * DataDeserializer &operator>>(DataDeserializer &des, Object &o) { + * des >> o._borderColor; + * des >> o._maxAnisotropy; + * des >> o._filter; + * return des; + * } + * @endcode + * + */ +class DataDeserializer { + public: + + /** + * @brief RAII tracker of advance position + * + * When a custom operator>> is implemented for DataDeserializer, + * this class allows to easily keep track of how much data has been added and + * adjust the parent's lastAdvance() count on this class' destruction. 
+ * + * @code {.cpp} + * DataDeserializer &operator>>(Deserializer &des, Object &o) { + * DataDeserializer::SizeTracker tracker(des); + * + * des >> o._borderColor; + * des >> o._maxAnisotropy; + * des >> o._filter; + * return des; + * } + * @endcode + */ + class SizeTracker { + public: + SizeTracker(DataDeserializer &parent) : _parent(parent) { + _start_pos = _parent.pos(); + } + + ~SizeTracker() { + size_t cur_pos = _parent.pos(); + + if ( cur_pos >= _start_pos ) { + _parent._lastAdvance = cur_pos - _start_pos; + } else { + _parent._lastAdvance = 0; + } + } + + private: + DataDeserializer &_parent; + size_t _start_pos = 0; + }; + + /** + * @brief Construct a Deserializer + * * + * @param externalStore External data store + * @param storeLength Length of the data store + */ + DataDeserializer(const char *externalStore, size_t storeLength) { + _length = storeLength; + _pos = 0; + _store = externalStore; + _lastAdvance = 0; + } + + /** + * @brief Construct a Deserializer + * + * @param externalStore External data store + * @param storeLength Length of the data store + */ + DataDeserializer(const uint8_t *externalStore, size_t storeLength) : DataDeserializer((const char*)externalStore, storeLength) { + + } + + /** + * @brief Construct a new Deserializer reading data from a Serializer + * + * This is a convenience function for testing. + * + * @param serializer Serializer with data + */ + DataDeserializer(const DataSerializer &serializer) : DataDeserializer(serializer.buffer(), serializer.length()) { + + } + + /** + * @brief Skips padding in the input + * + * @param bytes Number of bytes to skip + */ + void skipPadding(size_t bytes) { + if (!canAdvanceBy(bytes, "padding")) { + return; + } + + _pos += bytes; + _lastAdvance = bytes; + } + + + /** + * @brief Read an uint8_t from the buffer + * + * @param c Character to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(uint8_t &c) { + return *this >> reinterpret_cast(c); + } + + /** + * @brief Read an int8_t from the buffer + * + * @param c Character to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(int8_t &c) { + if ( canAdvanceBy(1, "int8_t") ) { + c = _store[_pos++]; + _lastAdvance = sizeof(c); + } + + return *this; + } + + /////////////////////////////////////////////////////////// + + /** + * @brief Read an uint16_t from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(uint16_t &val) { + return *this >> reinterpret_cast(val); + } + + /** + * @brief Read an int16_t from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(int16_t &val) { + if ( canAdvanceBy(sizeof(val), "int16_t") ) { + memcpy((char*)&val, &_store[_pos], sizeof(val)); + _pos += sizeof(val); + _lastAdvance = sizeof(val); + } + + return *this; + } + + /////////////////////////////////////////////////////////// + + /** + * @brief Read an uint32_t from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(uint32_t &val) { + return *this >> reinterpret_cast(val); + } + + /** + * @brief Read an int32_t from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(int32_t &val) { + if ( canAdvanceBy(sizeof(val), "int32_t") ) { + memcpy((char*)&val, &_store[_pos], sizeof(val)); + _pos += sizeof(val); + _lastAdvance = sizeof(val); + } + return *this; + } + + 
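 +    // Note: multi-byte values are copied with memcpy and are therefore read back in host byte order; +    // no endianness conversion is performed here or on the DataSerializer write side. +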
/////////////////////////////////////////////////////////// + + /** + * @brief Read an uint64_t from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(uint64_t &val) { + return *this >> reinterpret_cast(val); + } + + /** + * @brief Read an int64_t from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(int64_t &val) { + if ( canAdvanceBy(sizeof(val), "int64_t") ) { + memcpy((char*)&val, &_store[_pos], sizeof(val)); + _pos += sizeof(val); + _lastAdvance = sizeof(val); + } + return *this; + } + + /////////////////////////////////////////////////////////// + + + /** + * @brief Read an float from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(float &val) { + if ( canAdvanceBy(sizeof(val), "float") ) { + memcpy((char*)&val, &_store[_pos], sizeof(val)); + _pos += sizeof(val); + _lastAdvance = sizeof(val); + } + return *this; + } + + /////////////////////////////////////////////////////////// + + + + + /** + * @brief Read a glm::vec3 from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(glm::vec3 &val) { + size_t sz = sizeof(val.x); + + if ( canAdvanceBy(sz*3, "glm::vec3") ) { + memcpy((char*)&val.x, &_store[_pos ], sz); + memcpy((char*)&val.y, &_store[_pos + sz ], sz); + memcpy((char*)&val.z, &_store[_pos + sz*2], sz); + + _pos += sz*3; + _lastAdvance = sz * 3; + } + + return *this; + } + + /////////////////////////////////////////////////////////// + + + /** + * @brief Read a glm::vec4 from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(glm::vec4 &val) { + size_t sz = sizeof(val.x); + + if ( canAdvanceBy(sz*4, "glm::vec4")) { + memcpy((char*)&val.x, &_store[_pos ], sz); + memcpy((char*)&val.y, &_store[_pos + sz ], sz); + memcpy((char*)&val.z, &_store[_pos + sz*2], sz); + memcpy((char*)&val.w, &_store[_pos + sz*3], sz); + + _pos += sz*4; + _lastAdvance = sz*4; + } + return *this; + } + + /////////////////////////////////////////////////////////// + + + /** + * @brief Read a glm::ivec2 from the buffer + * + * @param val Value to read + * @return SerDes& This object + */ + DataDeserializer &operator>>(glm::ivec2 &val) { + size_t sz = sizeof(val.x); + + if ( canAdvanceBy(sz*2, "glm::ivec2") ) { + memcpy((char*)&val.x, &_store[_pos ], sz); + memcpy((char*)&val.y, &_store[_pos + sz ], sz); + + _pos += sz*2; + _lastAdvance = sz * 2; + } + + return *this; + } + + /////////////////////////////////////////////////////////// + + /** + * @brief Pointer to the start of the internal buffer. + * + * The allocated amount can be found with capacity(). + * + * The end of the stored data can be found with length(). + * + * @return Pointer to buffer + */ + const char *buffer() const { return _store; } + + /** + * @brief Current position in the buffer. Starts at 0. + * + * @return size_t + */ + size_t pos() const { return _pos; } + + /** + * @brief Last position that was written to in the buffer. Starts at 0. + * + * @return size_t + */ + size_t length() const { return _length; } + + /** + * @brief Whether there's any data in the buffer + * + * @return true Something has been written + * @return false The buffer is empty + */ + bool isEmpty() const { return _length == 0; } + + /** + * @brief The buffer size limit has been reached + * + * This can only return true for a statically allocated buffer. 
+ * + * @return true Limit reached + * @return false There is still room + */ + bool isOverflow() const { return _overflow; } + + /** + * @brief Reset the serializer to the start, clear overflow bit. + * + */ + void rewind() { _pos = 0; _overflow = false; _lastAdvance = 0; } + + /** + * @brief Size of the last advance + * + * This can be used to get how many bytes were added in the last operation. + * It is reset on rewind() + * + * @return size_t + */ + size_t lastAdvance() const { return _lastAdvance; } + + /** + * @brief Dump the contents of this object into QDebug + * + * This produces a dump of the internal state, and an ASCII/hex dump of + * the contents, for debugging. + * + * @param debug Qt QDebug stream + * @param ds This object + * @return QDebug + */ + friend QDebug operator<<(QDebug debug, const DataDeserializer &ds); + + private: + bool canAdvanceBy(size_t bytes, const QString &type_name) { + //qDebug() << "Checking advance by" << bytes; + + if ( _length < _pos + bytes) { + qCritical() << "Deserializer trying to read past end of input buffer of" << _length << "bytes, reading" << bytes << "bytes for" << type_name << "from position " << _pos; + _overflow = true; + return false; + } + + return true; + } + + const char *_store; + bool _overflow = false; + size_t _length = 0; + size_t _pos = 0; + size_t _lastAdvance = 0; +}; diff --git a/scripts/communityScripts/armored-chat/armored_chat.js b/scripts/communityScripts/armored-chat/armored_chat.js index 779dc3ff54..ae46f4d8f3 100644 --- a/scripts/communityScripts/armored-chat/armored_chat.js +++ b/scripts/communityScripts/armored-chat/armored_chat.js @@ -1,7 +1,7 @@ // // armored_chat.js // -// Created by Armored Dragon, 2024. +// Created by Armored Dragon, May 17th, 2024. // Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. @@ -47,6 +47,7 @@ icon: Script.resolvePath("./img/icon_white.png"), activeIcon: Script.resolvePath("./img/icon_black.png"), text: "CHAT", + sortOrder: 8, isActive: appIsVisible, }); diff --git a/scripts/developer/utilities/render/lod.qml b/scripts/developer/utilities/render/lod.qml index 6497fb967e..3d4cafdd38 100644 --- a/scripts/developer/utilities/render/lod.qml +++ b/scripts/developer/utilities/render/lod.qml @@ -22,8 +22,7 @@ Item { anchors.fill:parent Component.onCompleted: { - Render.getConfig("RenderMainView.DrawSceneOctree").showVisibleCells = false - Render.getConfig("RenderMainView.DrawSceneOctree").showEmptyCells = false + Render.getConfig("RenderMainView.DrawSceneOctree").enabled = true } Component.onDestruction: { @@ -38,9 +37,9 @@ Item { HifiControls.CheckBox { boxSize: 20 - text: "Show LOD Reticule" - checked: Render.getConfig("RenderMainView.DrawSceneOctree").enabled - onCheckedChanged: { Render.getConfig("RenderMainView.DrawSceneOctree").enabled = checked } + text: "Show LOD Reticle" + checked: Render.getConfig("RenderMainView.DrawSceneOctree").showLODReticle + onCheckedChanged: { Render.getConfig("RenderMainView.DrawSceneOctree").showLODReticle = checked } } RichSlider { diff --git a/scripts/developer/utilities/render/luci/Antialiasing.qml b/scripts/developer/utilities/render/luci/Antialiasing.qml index 2a52dfed46..7c174d53c5 100644 --- a/scripts/developer/utilities/render/luci/Antialiasing.qml +++ b/scripts/developer/utilities/render/luci/Antialiasing.qml @@ -3,6 +3,7 @@ // // Created by Sam Gateau on 8/14/2017 // Copyright 2016 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. 
// See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html @@ -38,7 +39,7 @@ Column{ Prop.PropEnum { label: "Deferred AA Method" - object: Render.getConfig("RenderMainView.Antialiasing") + object: Render.getConfig("RenderMainView.AntialiasingSetup") property: "mode" enums: [ "Off", @@ -49,20 +50,20 @@ Column{ Prop.PropEnum { id: jitter label: "Jitter" - object: Render.getConfig("RenderMainView.JitterCam") + object: Render.getConfig("RenderMainView.AntialiasingSetup") property: "state" enums: [ "Off", - "On", "Paused", + "On", ] } Separator {} Prop.PropScalar { - visible: (Render.getConfig("RenderMainView.JitterCam").state == 2) + visible: (Render.getConfig("RenderMainView.AntialiasingSetup").state == 1) label: "Sample Index" - object: Render.getConfig("RenderMainView.JitterCam") + object: Render.getConfig("RenderMainView.AntialiasingSetup") property: "index" // min: -1 // max: 32 @@ -70,18 +71,27 @@ Column{ integral: true } Row { - visible: (Render.getConfig("RenderMainView.JitterCam").state == 2) + visible: (Render.getConfig("RenderMainView.AntialiasingSetup").state == 1) spacing: 10 HifiControls.Button { text: "<" - onClicked: { Render.getConfig("RenderMainView.JitterCam").prev(); } + onClicked: { Render.getConfig("RenderMainView.AntialiasingSetup").prev(); } } HifiControls.Button { text: ">" - onClicked: { Render.getConfig("RenderMainView.JitterCam").next(); } + onClicked: { Render.getConfig("RenderMainView.AntialiasingSetup").next(); } } } + ConfigSlider { + label: qsTr("Jitter scale") + integral: false + config: Render.getConfig("RenderMainView.AntialiasingSetup") + property: "scale" + max: 2.0 + min: 0.25 + height: 38 + } Separator {} Prop.PropBool { label: "Constrain color" @@ -100,7 +110,12 @@ Column{ label: "Feedback history color" object: Render.getConfig("RenderMainView.Antialiasing") property: "feedbackColor" - } + } + Prop.PropBool { + label: "History bicubic fetch" + object: Render.getConfig("RenderMainView.Antialiasing") + property: "bicubicHistoryFetch" + } Prop.PropScalar { label: "Source blend" object: Render.getConfig("RenderMainView.Antialiasing") diff --git a/scripts/developer/utilities/render/luci/Framebuffer.qml b/scripts/developer/utilities/render/luci/Framebuffer.qml index b7a992c589..7847d2523e 100644 --- a/scripts/developer/utilities/render/luci/Framebuffer.qml +++ b/scripts/developer/utilities/render/luci/Framebuffer.qml @@ -3,6 +3,7 @@ // // Created by Sam Gateau on 4/18/2019 // Copyright 2019 High Fidelity, Inc. +// Copyright 2024 Overte e.V. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html @@ -61,6 +62,7 @@ Column { "Ambient Occlusion Blurred", "Ambient Occlusion Normal", "Velocity", + "Antialiasing Intensity", "Custom", ] diff --git a/scripts/system/create/edit.js b/scripts/system/create/edit.js index 82cab1c76c..24df1c9e0f 100644 --- a/scripts/system/create/edit.js +++ b/scripts/system/create/edit.js @@ -261,6 +261,8 @@ visible: false }); + var savedClippingEnabled = false; + function adjustPositionPerBoundingBox(position, direction, registration, dimensions, orientation) { // Adjust the position such that the bounding box (registration, dimensions and orientation) lies behind the original // position in the given direction. 
@@ -1195,6 +1197,7 @@ selectionDisplay.disableTriggerMapping(); tablet.landscape = false; Controller.disableMapping(CONTROLLER_MAPPING_NAME); + Render.cameraClippingEnabled = savedClippingEnabled; } else { if (shouldUseEditTabletApp()) { tablet.loadQMLSource(Script.resolvePath("qml/Edit.qml"), true); @@ -1212,6 +1215,8 @@ print("starting tablet in landscape mode"); tablet.landscape = true; Controller.enableMapping(CONTROLLER_MAPPING_NAME); + savedClippingEnabled = Render.cameraClippingEnabled; + Render.cameraClippingEnabled = false; // Not sure what the following was meant to accomplish, but it currently causes // everybody else to think that Interface has lost focus overall. fogbugzid:558 // Window.setFocus(); diff --git a/scripts/system/places/icons/portalFX.png b/scripts/system/places/icons/portalFX.png new file mode 100644 index 0000000000..6c781c824b Binary files /dev/null and b/scripts/system/places/icons/portalFX.png differ diff --git a/scripts/system/places/places.css b/scripts/system/places/places.css index 37eac2d002..684139a5b8 100644 --- a/scripts/system/places/places.css +++ b/scripts/system/places/places.css @@ -3,7 +3,7 @@ // places.css // // Created by Alezia Kurdis, January 1st, 2022. -// Copyright 2022 Overte e.V. +// Copyright 2022-2025 Overte e.V. // // css for the ui of the Places application. // @@ -750,19 +750,20 @@ font.domain-nbrUser_small { color: #cccccc; padding: 10px; text-align: justify; - text-justify: inter-word; + text-justify: inter-word; } #placeDetail-visitBtn { background: #0000ff; background-image: linear-gradient(to bottom, #0000ff, #000020); border: 0px; - border-radius: 10px; - font-weight: 800; + border-radius: 6px; + font-weight: 700; color: #ffffff; - font-size: 20px; - padding: 3px 22px 3px 22px; + font-size: 14px; + padding: 2px 22px 2px 22px; text-decoration: none; + width: 90%; } #placeDetail-visitBtn:hover { @@ -774,7 +775,57 @@ font.domain-nbrUser_small { #placeDetail-visitBtn-container { width: 100%; text-align: left; - margin-bottom: 40px; + margin-bottom: 8px; +} + +#placeDetail-rezPortalBtn { + background: #0000ff; + background-image: linear-gradient(to bottom, #0000ff, #000020); + border: 0px; + border-radius: 6px; + font-weight: 700; + color: #ffffff; + font-size: 14px; + padding: 2px 22px 2px 22px; + text-decoration: none; + width: 90%; +} + +#placeDetail-rezPortalBtn:hover { + background: #057eff; + background-image: linear-gradient(to bottom, #057eff, #00090f); + text-decoration: none; +} + +#placeDetail-rezPortalBtn-container { + width: 100%; + text-align: left; + margin-bottom: 8px; +} + +#placeDetail-copyPlaceURLBtn { + background: #0000ff; + background-image: linear-gradient(to bottom, #0000ff, #000020); + border: 0px; + border-radius: 6px; + font-weight: 700; + color: #ffffff; + font-size: 14px; + padding: 2px 22px 2px 22px; + text-decoration: none; + width: 90%; +} + +#placeDetail-copyPlaceURLBtn:hover { + background: #057eff; + background-image: linear-gradient(to bottom, #057eff, #00090f); + text-decoration: none; +} + +#placeDetail-copyPlaceURLBtn-container { + width: 100%; + text-align: left; + margin-bottom: 8px; } #placeDetail-placedata { @@ -804,7 +855,7 @@ font.domain-nbrUser_small { #placeDetail-users { font-size: 30px; - font-weight: 600; + font-weight: 600; } #placeDetail-capacity { diff --git a/scripts/system/places/places.html b/scripts/system/places/places.html index fda67f4066..6b7727c2d4 100644 --- a/scripts/system/places/places.html +++ b/scripts/system/places/places.html @@ -4,7 +4,7 @@ // places.html // // 
Created by Alezia Kurdis, January 1st, 2022. -// Copyright 2022 Overte e.V. +// Copyright 2022-2025 Overte e.V. // // html for the ui of the Places application. // @@ -107,7 +107,7 @@
×
- +
@@ -118,6 +118,8 @@
+
+
DOMAIN: @@ -502,6 +504,28 @@ } + function rezPortal(name, address, placeID) { + var portalOrder = { + "channel": channel, + "action": "REQUEST_PORTAL", + "name": name, + "address": address, + "placeID": placeID + }; + EventBridge.emitWebEvent(JSON.stringify(portalOrder)); + + } + + function copyPlaceURL(address) { + var portalOrder = { + "channel": channel, + "action": "COPY_URL", + "address": address + }; + EventBridge.emitWebEvent(JSON.stringify(portalOrder)); + + } + function goHome() { var message = { "channel": channel, @@ -751,12 +775,17 @@ } } - document.getElementById("placeDetail-image").src = ""; + var pictureUrl = ""; if (placeDetail.thumbnail === "") { - document.getElementById("placeDetail-image").src = "icons/placeholder_" + placeDetail.metaverseRegion + ".jpg"; + pictureUrl = "icons/placeholder_" + placeDetail.metaverseRegion + ".jpg"; } else { - document.getElementById("placeDetail-image").src = placeDetail.thumbnail; + pictureUrl = placeDetail.thumbnail; } + document.getElementById("placeDetail-image").style.backgroundImage = "url(" + pictureUrl + ")"; + document.getElementById("placeDetail-image").style.backgroundRepeat = "no-repeat"; + document.getElementById("placeDetail-image").style.backgroundPosition = "center center"; + document.getElementById("placeDetail-image").style.backgroundSize = "cover"; + document.getElementById("placeDetail-placeName").innerHTML = placeDetail.name; document.getElementById("placeDetail-managers").innerHTML = "By
    " + placeDetail.managers; document.getElementById("placeDetail-description").innerHTML = placeDetail.description; @@ -766,6 +795,8 @@ placeUrl = "hifi://" + placeDetail.address; } document.getElementById("placeDetail-visitBtn-container").innerHTML = ""; + document.getElementById("placeDetail-rezPortalBtn-container").innerHTML = ""; + document.getElementById("placeDetail-copyPlaceURLBtn-container").innerHTML = ""; document.getElementById("placeDetail-maturity").innerHTML = placeDetail.maturity.toUpperCase(); document.getElementById("placeDetail-maturity").className = placeDetail.maturity + "FilterOn placeMaturity"; document.getElementById("placeDetail-domain").innerHTML = placeDetail.domain.toUpperCase(); diff --git a/scripts/system/places/places.js b/scripts/system/places/places.js index fa22d536b7..5aa8d282b1 100644 --- a/scripts/system/places/places.js +++ b/scripts/system/places/places.js @@ -3,7 +3,7 @@ // places.js // // Created by Alezia Kurdis, January 1st, 2022. -// Copyright 2022-2023 Overte e.V. +// Copyright 2022-2025 Overte e.V. // // Generate an explore app based on the differents source of placename data. // @@ -36,6 +36,12 @@ var APP_ICON_ACTIVE = ROOT + "icons/appicon_a.png"; var appStatus = false; var channel = "com.overte.places"; + + var portalChannelName = "com.overte.places.portalRezzer"; + var MAX_DISTANCE_TO_CONSIDER_PORTAL = 100.0; //in meters + var PORTAL_DURATION_MILLISEC = 45000; //45 sec + var rezzerPortalCount = 0; + var MAX_REZZED_PORTAL = 15; var tablet = Tablet.getTablet("com.highfidelity.interface.tablet.system"); @@ -92,6 +98,24 @@ Window.location = messageObj.address; } + } else if (messageObj.action === "REQUEST_PORTAL" && (n - timestamp) > INTERCALL_DELAY) { + d = new Date(); + timestamp = d.getTime(); + var portalPosition = Vec3.sum(MyAvatar.feetPosition, Vec3.multiplyQbyV(MyAvatar.orientation, {"x": 0.0, "y": 0.0, "z": -2.0})); + var requestToSend = { + "action": "REZ_PORTAL", + "position": portalPosition, + "url": messageObj.address, + "name": messageObj.name, + "placeID": messageObj.placeID + }; + Messages.sendMessage(portalChannelName, JSON.stringify(requestToSend), false); + + } else if (messageObj.action === "COPY_URL" && (n - timestamp) > INTERCALL_DELAY) { + d = new Date(); + timestamp = d.getTime(); + Window.copyToClipboard(messageObj.address); + Window.displayAnnouncement("Place URL copied."); } else if (messageObj.action === "GO_HOME" && (n - timestamp) > INTERCALL_DELAY) { d = new Date(); timestamp = d.getTime(); @@ -284,8 +308,8 @@ region = "local"; order = "A"; fetch = true; - pinned = false; - currentFound = true; + pinned = false; + currentFound = true; } else { region = "federation"; order = "F"; @@ -555,6 +579,57 @@ } //####### END of seed random library ################ + function onMessageReceived(paramChannel, paramMessage, paramSender, paramLocalOnly) { + if (paramChannel === portalChannelName) { + var instruction = JSON.parse(paramMessage); + if (instruction.action === "REZ_PORTAL") { + generatePortal(instruction.position, instruction.url, instruction.name, instruction.placeID); + } + } + } + + function generatePortal(position, url, name, placeID) { + if (rezzerPortalCount <= MAX_REZZED_PORTAL) { + var TOLERANCE_FACTOR = 1.1; + if (Vec3.distance(MyAvatar.position, position) < MAX_DISTANCE_TO_CONSIDER_PORTAL) { + var height = MyAvatar.userHeight * MyAvatar.scale * TOLERANCE_FACTOR; + + var portalPosition = Vec3.sum(position, {"x": 0.0, "y": height/2, "z": 0.0}); + var dimensions = {"x": height * 0.618, "y": height, "z": height 
* 0.618}; + var userdata = { + "url": url, + "name": name, + "placeID": placeID + }; + + var portalID = Entities.addEntity({ + "position": portalPosition, + "dimensions": dimensions, + "type": "Shape", + "shape": "Sphere", + "name": "Portal to " + name, + "canCastShadow": false, + "collisionless": true, + "userData": JSON.stringify(userdata), + "script": ROOT + "portal.js", + "visible": "false", + "grab": { + "grabbable": false + } + }, "local"); + rezzerPortalCount = rezzerPortalCount + 1; + + Script.setTimeout(function () { + Entities.deleteEntity(portalID); + rezzerPortalCount = rezzerPortalCount - 1; + if (rezzerPortalCount < 0) { + rezzerPortalCount = 0; + } + }, PORTAL_DURATION_MILLISEC); + } + } + } + function cleanup() { if (appStatus) { @@ -562,9 +637,15 @@ tablet.webEventReceived.disconnect(onAppWebEventReceived); } + Messages.messageReceived.disconnect(onMessageReceived); + Messages.unsubscribe(portalChannelName); + tablet.screenChanged.disconnect(onScreenChanged); tablet.removeButton(button); } + Messages.subscribe(portalChannelName); + Messages.messageReceived.connect(onMessageReceived); + Script.scriptEnding.connect(cleanup); }()); diff --git a/scripts/system/places/portal.js b/scripts/system/places/portal.js new file mode 100644 index 0000000000..c77fbc648d --- /dev/null +++ b/scripts/system/places/portal.js @@ -0,0 +1,201 @@ +// +// portal.js +// +// Created by Alezia Kurdis, January 14th, 2025. +// Copyright 2025, Overte e.V. +// +// 3D portal for Places app. portal spawner. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +(function(){ + + var ROOT = Script.resolvePath('').split("portal.js")[0]; + var portalURL = ""; + var portalName = ""; + var TP_SOUND = SoundCache.getSound(ROOT + "sounds/teleportSound.mp3"); + + this.preload = function(entityID) { + + var properties = Entities.getEntityProperties(entityID, ["userData", "dimensions"]); + var userDataObj = JSON.parse(properties.userData); + portalURL = userDataObj.url; + portalName = userDataObj.name; + var portalColor = getColorFromPlaceID(userDataObj.placeID); + + var textLocalPosition = {"x": 0.0, "y": (properties.dimensions.y / 2) * 1.2, "z": 0.0}; + var scale = textLocalPosition.y/1.2; + var textID = Entities.addEntity({ + "type": "Text", + "parentID": entityID, + "localPosition": textLocalPosition, + "dimensions": { + "x": 1 * scale, + "y": 0.15 * scale, + "z": 0.01 + }, + "name": portalName, + "text": portalName, + "textColor": portalColor.light, + "lineHeight": 0.10 * scale, + "backgroundAlpha": 0.0, + "unlit": true, + "alignment": "center", + "verticalAlignment": "center", + "canCastShadow": false, + "billboardMode": "yaw", + "grab": { + "grabbable": false + } + },"local"); + + var fxID = Entities.addEntity({ + "type": "ParticleEffect", + "parentID": entityID, + "localPosition": { + "x": 0.0, + "y": 0.0, + "z": 0.0 + }, + "name": "PORTAL_FX", + "dimensions": { + "x": 5.2 * scale, + "y": 5.2 * scale, + "z": 5.2 * scale + }, + "grab": { + "grabbable": false + }, + "shapeType": "ellipsoid", + "color": portalColor.light, + "alpha": 0.1, + "textures": ROOT + "icons/portalFX.png", + "maxParticles": 600, + "lifespan": 0.6, + "emitRate": 1000, + "emitSpeed": -1 * scale, + "speedSpread": 0 * scale, + "emitOrientation": { + "x": 0, + "y": 0, + "z": 0, + "w": 1 + }, + "emitDimensions": { + "x": 1.28 * scale, + "y": 2 * scale, + "z": 1.28 * scale + }, + "polarFinish": 3.1415927410125732, + "emitAcceleration": { + "x": 0, + 
"y": 0, + "z": 0 + }, + "particleRadius": 0.4000000059604645 * scale, + "radiusSpread": 0.30000001192092896 * scale, + "radiusStart": 1 * scale, + "radiusFinish": 0 * scale, + "colorStart": portalColor.saturated, + "colorFinish": { + "red": 255, + "green": 255, + "blue": 255 + }, + "alphaSpread": 0.019999999552965164, + "alphaStart": 0, + "alphaFinish": 0.20000000298023224, + "emitterShouldTrail": true, + "particleSpin": 1.5700000524520874, + "spinSpread": 2.9700000286102295, + "spinStart": 0, + "spinFinish": 0 + },"local"); + + var loopSoundID = Entities.addEntity({ + "type": "Sound", + "parentID": entityID, + "localPosition": {"x": 0.0, "y": 0.0, "z": 0.0}, + "name": "PORTAL SOUND", + "soundURL": ROOT + "sounds/portalSound.mp3", + "volume": 0.15, + "loop": true, + "positional": true, + "localOnly": true + },"local"); + + } + + this.enterEntity = function(entityID) { + var injectorOptions = { + "position": MyAvatar.position, + "volume": 0.3, + "loop": false, + "localOnly": true + }; + var injector = Audio.playSound(TP_SOUND, injectorOptions); + + var timer = Script.setTimeout(function () { + Window.location = portalURL; + Entities.deleteEntity(entityID); + }, 1000); + + }; + + function getColorFromPlaceID(placeID) { + var idIntegerConstant = getStringScore(placeID); + var hue = (idIntegerConstant%360)/360; + var color = hslToRgb(hue, 1, 0.5); + var colorLight = hslToRgb(hue, 1, 0.75); + return { + "saturated": {"red": color[0], "green": color[1], "blue": color[2]}, + "light": {"red": colorLight[0], "green": colorLight[1], "blue": colorLight[2]}, + }; + } + + function getStringScore(str) { + var score = 0; + for (var j = 0; j < str.length; j++){ + score += str.charCodeAt(j); + } + return score; + } + + /* + * Converts an HSL color value to RGB. Conversion formula + * adapted from http://en.wikipedia.org/wiki/HSL_color_space. + * Assumes h, s, and l are contained in the set [0, 1] and + * returns r, g, and b in the set [0, 255]. + * + * @param {number} h The hue + * @param {number} s The saturation + * @param {number} l The lightness + * @return {Array} The RGB representation + */ + function hslToRgb(h, s, l){ + var r, g, b; + + if(s == 0){ + r = g = b = l; // achromatic + }else{ + var hue2rgb = function hue2rgb(p, q, t){ + if(t < 0) t += 1; + if(t > 1) t -= 1; + if(t < 1/6) return p + (q - p) * 6 * t; + if(t < 1/2) return q; + if(t < 2/3) return p + (q - p) * (2/3 - t) * 6; + return p; + } + + var q = l < 0.5 ? 
+            var p = 2 * l - q;
+            r = hue2rgb(p, q, h + 1/3);
+            g = hue2rgb(p, q, h);
+            b = hue2rgb(p, q, h - 1/3);
+        }
+
+        return [Math.round(r * 255), Math.round(g * 255), Math.round(b * 255)];
+    }
+
+})
diff --git a/scripts/system/places/sounds/portalSound.mp3 b/scripts/system/places/sounds/portalSound.mp3
new file mode 100644
index 0000000000..5e7f5a9bd0
Binary files /dev/null and b/scripts/system/places/sounds/portalSound.mp3 differ
diff --git a/scripts/system/places/sounds/teleportSound.mp3 b/scripts/system/places/sounds/teleportSound.mp3
new file mode 100644
index 0000000000..e5000e55b5
Binary files /dev/null and b/scripts/system/places/sounds/teleportSound.mp3 differ
diff --git a/tests-manual/gpu/src/TestWindow.cpp b/tests-manual/gpu/src/TestWindow.cpp
index f667f20f2b..af8c6af243 100644
--- a/tests-manual/gpu/src/TestWindow.cpp
+++ b/tests-manual/gpu/src/TestWindow.cpp
@@ -28,7 +28,7 @@ extern void initDeferredPipelines(render::ShapePlumber& plumber, const render::S
 extern void initStencilPipeline(gpu::PipelinePointer& pipeline);
 #endif
 
-TestWindow::TestWindow() {
+TestWindow::TestWindow() : _generateDeferredFrameTransform(render::RenderEngine::TS_MAIN_VIEW) {
     setSurfaceType(QSurface::OpenGLSurface);
 
@@ -98,7 +98,7 @@ void TestWindow::beginFrame() {
     _preparePrimaryFramebuffer.run(_renderContext, primaryFramebuffer);
 
     DeferredFrameTransformPointer frameTransform;
-    _generateDeferredFrameTransform.run(_renderContext, glm::vec2(0.0f, 0.0f), frameTransform);
+    _generateDeferredFrameTransform.run(_renderContext, frameTransform);
 
     LightingModelPointer lightingModel;
     _generateLightingModel.run(_renderContext, lightingModel);
diff --git a/tests/ktx/src/KtxTests.cpp b/tests/ktx/src/KtxTests.cpp
index 25ab15f5db..9b104a7992 100644
--- a/tests/ktx/src/KtxTests.cpp
+++ b/tests/ktx/src/KtxTests.cpp
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include "SerDes.h"
 
 QTEST_GUILESS_MAIN(KtxTests)
 
@@ -31,6 +32,19 @@ QString getRootPath() {
     return result;
 }
 
+#if 0
+ktx::Byte* serializeSPH(ktx::Byte* data, const gpu::IrradianceKTXPayload &payload) const {
+    *(ktx::IrradianceKTXPayload::Version*)data = IrradianceKTXPayload::CURRENT_VERSION;
+    data += sizeof(ktx::IrradianceKTXPayload::Version);
+
+    memcpy(data, &payload._irradianceSH, sizeof(ktx::SphericalHarmonics));
+    data += sizeof(SphericalHarmonics);
+
+    return data + PADDING;
+}
+#endif
+
+
 void KtxTests::initTestCase() {
 }
 
@@ -147,6 +161,14 @@ void KtxTests::testKtxSerialization() {
     testTexture->setKtxBacking(TEST_IMAGE_KTX.fileName().toStdString());
 }
 
+
+void KtxTests::testKtxNewSerializationSphericalHarmonics() {
+    DataSerializer ser;
+
+
+}
+
+
 #if 0
 static const QString TEST_FOLDER { "H:/ktx_cacheold" };
diff --git a/tests/ktx/src/KtxTests.h b/tests/ktx/src/KtxTests.h
index 5627dc313d..c59fc17ccc 100644
--- a/tests/ktx/src/KtxTests.h
+++ b/tests/ktx/src/KtxTests.h
@@ -16,6 +16,7 @@ private slots:
     void testKtxEvalFunctions();
     void testKhronosCompressionFunctions();
     void testKtxSerialization();
+    void testKtxNewSerializationSphericalHarmonics();
 };
 
diff --git a/tests/shared/src/SerializerTests.cpp b/tests/shared/src/SerializerTests.cpp
new file mode 100644
index 0000000000..ae0198f573
--- /dev/null
+++ b/tests/shared/src/SerializerTests.cpp
@@ -0,0 +1,232 @@
+//
+//  SerializerTests.cpp
+//
+//  Copyright 2022 Dale Glass
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+
+#include "SerializerTests.h"
+#include
+#include
+#include
+
+QTEST_GUILESS_MAIN(SerializerTests)
+
+
+void SerializerTests::initTestCase() {
+}
+
+void SerializerTests::testCreate() {
+    DataSerializer s;
+    QCOMPARE(s.length(), 0);
+    QCOMPARE(s.capacity(), DataSerializer::DEFAULT_SIZE);
+    QCOMPARE(s.isEmpty(), true);
+
+
+    DataDeserializer d(s);
+    QCOMPARE(d.length(), 0);
+}
+
+void SerializerTests::testAdd() {
+    DataSerializer s;
+    s << (qint8)1;
+    QCOMPARE(s.length(), 1);
+    QCOMPARE(s.isEmpty(), false);
+
+    s << (quint8)-1;
+    QCOMPARE(s.length(), 2);
+
+    s << (qint16)0xaabb;
+    QCOMPARE(s.length(), 4);
+
+    s << (quint16)-18000;
+    QCOMPARE(s.length(), 6);
+
+    s << (qint32)0xCCDDEEFF;
+    QCOMPARE(s.length(), 10);
+
+    s << (quint32)-1818000000;
+    QCOMPARE(s.length(), 14);
+
+    s << "Hello, world!";
+    QCOMPARE(s.length(), 28);
+
+    glm::vec3 v{1.f,2.f,3.f};
+    s << v;
+    QCOMPARE(s.length(), 40);
+
+    s << 1.2345f;
+    QCOMPARE(s.length(), 44);
+
+
+    qDebug() << s;
+}
+
+void SerializerTests::testAddAndRead() {
+    DataSerializer s;
+    glm::vec3 v3_a{1.f, 3.1415f, 2.71828f};
+    glm::vec3 v3_b;
+    glm::vec4 v4_a{3.1415f, 2.71828f, 1.4142f, 1.6180f};
+    glm::vec4 v4_b;
+    glm::ivec2 iv2_a{10, 24};
+    glm::ivec2 iv2_b;
+    float f_a = 1.2345f;
+    float f_b;
+
+    s << (qint8)1;
+    s << (qint16)0xaabb;
+    s << (qint32)0xccddeeff;
+    s << v3_a;
+    s << v4_a;
+    s << iv2_a;
+    s << f_a;
+
+    qint8 i8;
+    qint16 i16;
+    qint32 i32;
+
+    DataDeserializer d(s);
+
+    d >> i8;
+    d >> i16;
+    d >> i32;
+    d >> v3_b;
+    d >> v4_b;
+    d >> iv2_b;
+    d >> f_b;
+
+    qDebug() << d;
+
+    QCOMPARE(i8, (qint8)1);
+    QCOMPARE(i16, (qint16)0xaabb);
+    QCOMPARE(i32, (qint32)0xccddeeff);
+    QCOMPARE(v3_a, v3_b);
+    QCOMPARE(v4_a, v4_b);
+    QCOMPARE(iv2_a, iv2_b);
+    QCOMPARE(f_a, f_b);
+}
+
+void SerializerTests::testReadPastEnd() {
+    DataSerializer s;
+    qint8 i8;
+    qint16 i16;
+    s << (qint8)1;
+
+    DataDeserializer d(s);
+    d >> i8;
+    QCOMPARE(d.pos(), 1);
+
+    d.rewind();
+    d >> i16;
+    QCOMPARE(d.pos(), 0);
+}
+
+void SerializerTests::testWritePastEnd() {
+    qint8 i8 = 255;
+    qint16 i16 = 65535;
+
+
+    char buf[16];
+
+
+    // 1 byte buffer, we can write 1 byte
+    memset(buf, 0, sizeof(buf));
+    DataSerializer s1(buf, 1);
+    s1 << i8;
+    QCOMPARE(s1.pos(), 1);
+    QCOMPARE(s1.isOverflow(), false);
+    QCOMPARE(buf[0], i8);
+
+    // 1 byte buffer, we can't write 2 bytes
+    memset(buf, 0, sizeof(buf));
+    DataSerializer s2(buf, 1);
+    s2 << i16;
+    QCOMPARE(s2.pos(), 0);
+    QCOMPARE(s2.isOverflow(), true);
+    QCOMPARE(buf[0], 0); // We didn't write
+    QCOMPARE(buf[1], 0);
+}
+
+
+
+
+void SerializerTests::benchmarkEncodingDynamicAlloc() {
+    QBENCHMARK {
+        DataSerializer s;
+        glm::vec3 v3_a{1.f, 3.1415f, 2.71828f};
+        glm::vec3 v3_b;
+        glm::vec4 v4_a{3.1415f, 2.71828f, 1.4142f, 1.6180f};
+        glm::vec4 v4_b;
+        glm::ivec2 iv2_a{10, 24};
+        glm::ivec2 iv2_b;
+
+        s << (qint8)1;
+        s << (qint16)0xaabb;
+        s << (qint32)0xccddeeff;
+        s << v3_a;
+        s << v4_a;
+        s << iv2_a;
+    }
+}
+
+void SerializerTests::benchmarkEncodingStaticAlloc() {
+    char buf[1024];
+
+    QBENCHMARK {
+        DataSerializer s(buf, sizeof(buf));
+        glm::vec3 v3_a{1.f, 3.1415f, 2.71828f};
+        glm::vec3 v3_b;
+        glm::vec4 v4_a{3.1415f, 2.71828f, 1.4142f, 1.6180f};
+        glm::vec4 v4_b;
+        glm::ivec2 iv2_a{10, 24};
+        glm::ivec2 iv2_b;
+
+        s << (qint8)1;
+        s << (qint16)0xaabb;
+        s << (qint32)0xccddeeff;
+        s << v3_a;
+        s << v4_a;
+        s << iv2_a;
+    }
+}
+
+
+void SerializerTests::benchmarkDecoding() {
+    DataSerializer s;
+    qint8 q8 = 1;
+    qint16 q16 = 0xaabb;
+    qint32 q32 = 0xccddeeff;
+
+    glm::vec3 v3_a{1.f, 3.1415f, 2.71828f};
+    glm::vec3 v3_b;
+    glm::vec4 v4_a{3.1415f, 2.71828f, 1.4142f, 1.6180f};
+    glm::vec4 v4_b;
+    glm::ivec2 iv2_a{10, 24};
+    glm::ivec2 iv2_b;
+
+    s << q8;
+    s << q16;
+    s << q32;
+    s << v3_a;
+    s << v4_a;
+    s << iv2_a;
+
+
+    QBENCHMARK {
+        DataDeserializer d(s);
+        d >> q8;
+        d >> q16;
+        d >> q32;
+        d >> v3_a;
+        d >> v4_a;
+        d >> iv2_a;
+    }
+}
+
+
+void SerializerTests::cleanupTestCase() {
+}
+
diff --git a/tests/shared/src/SerializerTests.h b/tests/shared/src/SerializerTests.h
new file mode 100644
index 0000000000..55da84c41a
--- /dev/null
+++ b/tests/shared/src/SerializerTests.h
@@ -0,0 +1,33 @@
+//
+//  SerializerTests.h
+//
+//  Copyright 2015 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef overte_SerializerTests_h
+#define overte_SerializerTests_h
+
+#include
+#include
+
+class SerializerTests : public QObject {
+    Q_OBJECT
+private slots:
+    void initTestCase();
+    void testCreate();
+    void testAdd();
+    void testAddAndRead();
+    void testReadPastEnd();
+    void testWritePastEnd();
+    void benchmarkEncodingDynamicAlloc();
+    void benchmarkEncodingStaticAlloc();
+    void benchmarkDecoding();
+    void cleanupTestCase();
+private:
+
+};
+
+#endif // overte_SerializerTests_h
diff --git a/tools/gpu-frame-player/src/RenderThread.cpp b/tools/gpu-frame-player/src/RenderThread.cpp
index de39dacdea..fda551dfa4 100644
--- a/tools/gpu-frame-player/src/RenderThread.cpp
+++ b/tools/gpu-frame-player/src/RenderThread.cpp
@@ -1,6 +1,7 @@
 //
 //  Created by Bradley Austin Davis on 2018/10/21
 //  Copyright 2014 High Fidelity, Inc.
+//  Copyright 2024 Overte e.V.
 //
 //  Distributed under the Apache License, Version 2.0.
 //  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -119,11 +120,10 @@ void RenderThread::renderFrame(gpu::FramePointer& frame) {
 #ifdef USE_GL
     _context.makeCurrent();
 #endif
-    if (_correction != glm::mat4()) {
+    {
         std::unique_lock lock(_frameLock);
         if (_correction != glm::mat4()) {
-            _backend->setCameraCorrection(_correction, _activeFrame->view, true);
-            //_prevRenderView = _correction * _activeFrame->view;
+            _backend->updatePresentFrame(_correction);
        }
    }
    _backend->recycle();
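
Note on the KtxTests changes above: testKtxNewSerializationSphericalHarmonics() is still an empty stub that only constructs a DataSerializer, and the legacy serializeSPH() helper is kept under #if 0 for reference. A minimal sketch of the round trip the stub could grow into is given below. It uses only the DataSerializer/DataDeserializer operators already exercised in SerializerTests.cpp; the nine-coefficient layout and the TEST_SH_VERSION tag are assumptions made for illustration and are not the actual gpu::SphericalHarmonics or IrradianceKTXPayload layout.

// Hypothetical round-trip sketch for the empty spherical-harmonics test.
// Assumes SerDes.h provides DataSerializer/DataDeserializer as used in
// SerializerTests.cpp, and models the irradiance payload as a version tag
// plus nine RGB SH coefficients (glm::vec3); the real payload may differ.
#include <array>
#include <glm/glm.hpp>
#include "SerDes.h"

void KtxTests::testKtxNewSerializationSphericalHarmonics() {
    const qint32 TEST_SH_VERSION = 1;  // hypothetical version tag
    std::array<glm::vec3, 9> shIn {{
        { 0.8f, 0.7f, 0.6f }, { 0.1f, 0.2f, 0.3f }, { 0.0f, 0.1f, 0.0f },
        { 0.2f, 0.2f, 0.2f }, { 0.0f, 0.0f, 0.1f }, { 0.1f, 0.0f, 0.0f },
        { 0.3f, 0.3f, 0.3f }, { 0.0f, 0.2f, 0.0f }, { 0.1f, 0.1f, 0.2f }
    }};

    // Serialize: version first, then the nine coefficients.
    DataSerializer ser;
    ser << TEST_SH_VERSION;
    for (const auto& coeff : shIn) {
        ser << coeff;
    }

    // Deserialize into fresh storage and compare component-wise.
    DataDeserializer des(ser);
    qint32 versionOut = 0;
    std::array<glm::vec3, 9> shOut {};
    des >> versionOut;
    for (auto& coeff : shOut) {
        des >> coeff;
    }

    QCOMPARE(versionOut, TEST_SH_VERSION);
    for (size_t i = 0; i < shIn.size(); i++) {
        QCOMPARE(shOut[i].x, shIn[i].x);
        QCOMPARE(shOut[i].y, shIn[i].y);
        QCOMPARE(shOut[i].z, shIn[i].z);
    }
}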
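
testWritePastEnd() and benchmarkEncodingStaticAlloc() together also suggest the intended zero-allocation usage pattern: back the serializer with a caller-owned buffer and check isOverflow() before trusting the contents. A hedged sketch of that pattern follows; the helper name, field layout, and error handling are invented for illustration and only reuse calls shown in the tests above.

// Sketch only: serializes a few fields into a caller-owned buffer with no heap
// allocation, then checks for overflow before reporting how many bytes were
// written. buildExamplePacket() and its fields are hypothetical; the
// DataSerializer calls match those exercised in SerializerTests.cpp.
#include <cstddef>
#include <QtGlobal>
#include <glm/glm.hpp>
#include "SerDes.h"

bool buildExamplePacket(char* out, size_t outSize, size_t& bytesWritten) {
    DataSerializer ser(out, outSize);      // fixed-size, caller-owned storage
    glm::vec3 payload{ 1.0f, 2.0f, 3.0f }; // example payload

    ser << (qint8)1;                       // hypothetical packet type
    ser << (quint16)42;                    // hypothetical sequence number
    ser << payload;

    if (ser.isOverflow()) {                // at least one field did not fit
        bytesWritten = 0;
        return false;
    }
    bytesWritten = ser.pos();              // bytes actually written into 'out'
    return true;
}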