Merge remote-tracking branch 'overte/master' into application

This commit is contained in:
HifiExperiments 2025-02-21 22:21:27 -08:00
commit d6d9cffc3b
201 changed files with 4941 additions and 2217 deletions

View file

@ -658,6 +658,36 @@ Flickable {
}
}
}
Item {
Layout.preferredWidth: parent.width
Layout.preferredHeight: 35
Layout.topMargin: 16
HifiStylesUit.RalewayRegular {
id: enableCameraClippingHeader
text: "3rd Person Camera Clipping"
anchors.left: parent.left
anchors.top: parent.top
width: 200
height: parent.height
size: 16
color: "#FFFFFF"
}
HifiControlsUit.CheckBox {
id: enableCameraClipping
checked: Render.cameraClippingEnabled
boxSize: 16
spacing: -1
colorScheme: hifi.colorSchemes.dark
anchors.left: enableCameraClippingHeader.right
anchors.leftMargin: 20
anchors.top: parent.top
onCheckedChanged: {
Render.cameraClippingEnabled = enableCameraClipping.checked;
}
}
}
}
ColumnLayout {
@ -683,7 +713,7 @@ Flickable {
ListModel {
id: antialiasingModel
// Maintain same order as "AntialiasingConfig::Mode".
// Maintain same order as "AntialiasingSetupConfig::Mode".
ListElement {
text: "None"
}

View file

@ -234,7 +234,8 @@ Application::Application(
_maxOctreePacketsPerSecond("maxOctreePPS", DEFAULT_MAX_OCTREE_PPS),
_maxOctreePPS(_maxOctreePacketsPerSecond.get()),
// Camera
_fieldOfView("fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES)
_fieldOfView("fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES),
_cameraClippingEnabled("cameraClippingEnabled", false)
{
setProperty(hifi::properties::CRASHED, _previousSessionCrashed);

View file

@ -300,6 +300,9 @@ public:
float getFieldOfView() { return _fieldOfView.get(); }
void setFieldOfView(float fov);
bool getCameraClippingEnabled() { return _cameraClippingEnabled.get(); }
void setCameraClippingEnabled(bool enabled);
void updateMyAvatarLookAtPosition(float deltaTime);
@ -880,6 +883,10 @@ private:
ConicalViewFrustums _lastQueriedViews; // last views used to query servers

Setting::Handle<float> _fieldOfView;
// Persisted 3rd-person camera clipping preference. Declared as bool to match
// its construction with `false`, the bool getter/setter pair, and the bool
// values written by setCameraClippingEnabled(). (Was Setting::Handle<float>,
// which silently stored 0.0f/1.0f for a boolean flag.)
Setting::Handle<bool> _cameraClippingEnabled;
bool _prevCameraClippingEnabled { false }; // last enabled state applied to the ray pick, used to detect toggles
unsigned int _cameraClippingRayPickID;     // ID of the ray pick that clips the 3rd-person camera boom

// Graphics

View file

@ -18,6 +18,8 @@
#include <glm/gtx/transform.hpp>
#include <controllers/UserInputMapper.h>
#include <PickManager.h>
#include <raypick/RayPick.h>
#include <SecondaryCamera.h>
#include "avatar/MyAvatar.h"
@ -40,9 +42,7 @@ void Application::updateCamera(RenderArgs& renderArgs, float deltaTime) {
PROFILE_RANGE(render, __FUNCTION__);
PerformanceTimer perfTimer("updateCamera");
glm::vec3 boomOffset;
auto myAvatar = getMyAvatar();
boomOffset = myAvatar->getModelScale() * myAvatar->getBoomLength() * -IDENTITY_FORWARD;
// The render mode is default or mirror if the camera is in mirror mode, assigned further below
renderArgs._renderMode = RenderArgs::DEFAULT_RENDER_MODE;
@ -81,6 +81,16 @@ void Application::updateCamera(RenderArgs& renderArgs, float deltaTime) {
_myCamera.setOrientation(glm::normalize(glmExtractRotation(worldCameraMat)));
_myCamera.setPosition(extractTranslation(worldCameraMat));
} else {
float boomLength = myAvatar->getBoomLength();
if (getCameraClippingEnabled()) {
auto result =
DependencyManager::get<PickManager>()->getPrevPickResultTyped<RayPickResult>(_cameraClippingRayPickID);
if (result && result->doesIntersect()) {
const float CAMERA_CLIPPING_EPSILON = 0.1f;
boomLength = std::min(boomLength, result->distance - CAMERA_CLIPPING_EPSILON);
}
}
glm::vec3 boomOffset = myAvatar->getModelScale() * boomLength * -IDENTITY_FORWARD;
_thirdPersonHMDCameraBoomValid = false;
if (mode == CAMERA_MODE_THIRD_PERSON) {
_myCamera.setOrientation(myAvatar->getHead()->getOrientation());
@ -158,7 +168,19 @@ void Application::updateCamera(RenderArgs& renderArgs, float deltaTime) {
_myCamera.update();
}
renderArgs._cameraMode = (int8_t)_myCamera.getMode();
renderArgs._cameraMode = (int8_t)mode;
const bool shouldEnableCameraClipping =
(mode == CAMERA_MODE_THIRD_PERSON || mode == CAMERA_MODE_LOOK_AT || mode == CAMERA_MODE_SELFIE) && !isHMDMode() &&
getCameraClippingEnabled();
if (_prevCameraClippingEnabled != shouldEnableCameraClipping) {
if (shouldEnableCameraClipping) {
DependencyManager::get<PickManager>()->enablePick(_cameraClippingRayPickID);
} else {
DependencyManager::get<PickManager>()->disablePick(_cameraClippingRayPickID);
}
_prevCameraClippingEnabled = shouldEnableCameraClipping;
}
}
void Application::updateSecondaryCameraViewFrustum() {
@ -277,6 +299,16 @@ void Application::setFieldOfView(float fov) {
}
}
void Application::setCameraClippingEnabled(bool enabled) {
_cameraClippingEnabled.set(enabled);
_prevCameraClippingEnabled = enabled;
if (enabled) {
DependencyManager::get<PickManager>()->enablePick(_cameraClippingRayPickID);
} else {
DependencyManager::get<PickManager>()->disablePick(_cameraClippingRayPickID);
}
}
// Called during Application::update immediately before AvatarManager::updateMyAvatar, updating my data that is then sent
// to everyone.
// The principal result is to call updateLookAtTargetAvatar() and then setLookAtPosition().

View file

@ -35,6 +35,7 @@
#include <AutoUpdater.h>
#include <avatar/AvatarManager.h>
#include <BuildInfo.h>
#include <CameraRootTransformNode.h>
#include <crash-handler/CrashHandler.h>
#include <DebugDraw.h>
#include <DeferredLightingEffect.h>
@ -1277,6 +1278,17 @@ void Application::initialize(const QCommandLineParser &parser) {
DependencyManager::get<EntityTreeRenderer>()->setMouseRayPickID(mouseRayPickID);
}
// Setup the camera clipping ray pick
{
_prevCameraClippingEnabled = _cameraClippingEnabled.get();
auto cameraRayPick = std::make_shared<RayPick>(Vectors::ZERO, -Vectors::UP,
PickFilter(PickScriptingInterface::getPickEntities() |
PickScriptingInterface::getPickLocalEntities()),
MyAvatar::ZOOM_MAX, 0.0f, _prevCameraClippingEnabled);
cameraRayPick->parentTransform = std::make_shared<CameraRootTransformNode>();
_cameraClippingRayPickID = DependencyManager::get<PickManager>()->addPick(PickQuery::Ray, cameraRayPick);
}
// Preload Tablet sounds
DependencyManager::get<EntityScriptingInterface>()->setEntityTree(qApp->getEntities()->getTree());
DependencyManager::get<TabletScriptingInterface>()->preloadSounds();
@ -1656,8 +1668,10 @@ void Application::setupSignalsAndOperators() {
return nullptr;
});
Procedural::opaqueStencil = [](gpu::StatePointer state) { PrepareStencil::testMaskDrawShape(*state); };
Procedural::transparentStencil = [](gpu::StatePointer state) { PrepareStencil::testMask(*state); };
Procedural::opaqueStencil = [](gpu::StatePointer state, bool useAA) {
useAA ? PrepareStencil::testMaskDrawShape(*state) : PrepareStencil::testMaskDrawShapeNoAA(*state);
};
Procedural::transparentStencil = [](gpu::StatePointer state) { PrepareStencil::testMaskResetNoAA(*state); };
EntityTree::setGetEntityObjectOperator([this](const QUuid& id) -> QObject* {
auto entities = getEntities();

View file

@ -0,0 +1,48 @@
//
// Created by HifiExperiments on 10/30/2024
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "CameraRootTransformNode.h"
#include "Application.h"
#include "DependencyManager.h"
#include "avatar/AvatarManager.h"
#include "avatar/MyAvatar.h"
// Builds the world-space transform of the "camera root": the point the camera
// pivots around for the current camera mode, with the avatar's child scale.
Transform CameraRootTransformNode::getTransform() {
    auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();

    glm::vec3 pos;
    glm::quat ori;

    CameraMode mode = qApp->getCamera().getMode();
    if (mode == CAMERA_MODE_FIRST_PERSON || mode == CAMERA_MODE_THIRD_PERSON) {
        // Head-driven modes: pivot at the default eye position, facing with the head.
        pos = myAvatar->getDefaultEyePosition();
        ori = myAvatar->getHeadOrientation();
    } else if (mode == CAMERA_MODE_FIRST_PERSON_LOOK_AT) {
        pos = myAvatar->getCameraEyesPosition(0.0f);
        ori = myAvatar->getLookAtRotation();
    } else {
        // Look-at / selfie modes: pivot at the look-at pivot point.
        ori = myAvatar->getLookAtRotation();
        pos = myAvatar->getLookAtPivotPoint();

        if (mode == CAMERA_MODE_SELFIE) {
            // Selfie faces back toward the avatar: flip 180 degrees around local up.
            ori = ori * glm::angleAxis(PI, ori * Vectors::UP);
        }
    }

    // Tilt -90 degrees about RIGHT so that a ray cast along -UP in this frame
    // points behind the avatar — presumably toward where the third-person boom
    // camera sits. NOTE(review): confirm against the camera clipping ray pick,
    // which is created with direction -Vectors::UP and parented to this node.
    ori = ori * glm::angleAxis(-PI / 2.0f, Vectors::RIGHT);

    glm::vec3 scale = glm::vec3(myAvatar->scaleForChildren());
    return Transform(ori, scale, pos);
}
// Describes this transform node for scripts: it is identified by the
// pseudo-joint name "CameraRoot" (the same name PickScriptingInterface
// accepts when assigning a pick's parent transform).
QVariantMap CameraRootTransformNode::toVariantMap() const {
    QVariantMap description;
    description.insert("joint", "CameraRoot");
    return description;
}

View file

@ -0,0 +1,20 @@
//
// Created by HifiExperiments on 10/30/2024
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_CameraRootTransformNode_h
#define hifi_CameraRootTransformNode_h
#include "TransformNode.h"
// A TransformNode that follows the pivot ("root") of the user's camera for the
// current camera mode. Used, e.g., as the parent transform of the 3rd-person
// camera clipping ray pick so the pick moves with the camera.
class CameraRootTransformNode : public TransformNode {
public:
    CameraRootTransformNode() {}

    // Returns the world-space transform of the camera root for this frame.
    Transform getTransform() override;
    // Script-facing description; reports the pseudo-joint name "CameraRoot".
    QVariantMap toVariantMap() const override;
};
#endif // hifi_CameraRootTransformNode_h

View file

@ -274,7 +274,7 @@ public:
void SecondaryCameraRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor) {
const auto cachedArg = task.addJob<SecondaryCameraJob>("SecondaryCamera");
task.addJob<RenderViewTask>("RenderSecondView", cullFunctor, render::ItemKey::TAG_BITS_1, render::ItemKey::TAG_BITS_1);
task.addJob<RenderViewTask>("RenderSecondView", cullFunctor, render::ItemKey::TAG_BITS_1, render::ItemKey::TAG_BITS_1, RenderViewTask::TransformOffset::SECONDARY_VIEW);
task.addJob<EndSecondaryCameraFrame>("EndSecondaryCamera", cachedArg);
}

View file

@ -262,14 +262,14 @@ void GraphicsEngine::render_performFrame() {
batch.enableStereo(isStereo);
batch.clearDepthStencilFramebuffer(1.0, 0);
batch.setViewportTransform({ 0, 0, finalFramebuffer->getSize() });
_splashScreen->render(batch, viewFrustum, renderArgs._renderMethod == RenderArgs::RenderMethod::FORWARD);
_splashScreen->render(batch, viewFrustum, renderArgs._renderMethod == RenderArgs::RenderMethod::FORWARD, render::RenderEngine::TS_BACKGROUND_VIEW);
});
} else {
{
PROFILE_RANGE(render, "/renderOverlay");
PerformanceTimer perfTimer("renderOverlay");
// NOTE: There is no batch associated with this renderArgs
// the ApplicationOverlay class assumes it's viewport is set up to be the device size
// the ApplicationOverlay class assumes its viewport is set up to be the device size
renderArgs._viewport = glm::ivec4(0, 0, qApp->getDeviceSize());
qApp->getApplicationOverlay().renderOverlay(&renderArgs);
}

View file

@ -446,7 +446,10 @@ void ParabolaPointer::RenderState::ParabolaRenderItem::render(RenderArgs* args)
Transform transform;
transform.setTranslation(_origin);
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == RenderArgs::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == RenderArgs::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
batch.setPipeline(getParabolaPipeline(args->_renderMethod == render::Args::RenderMethod::FORWARD));
@ -481,4 +484,4 @@ namespace render {
template <> const ShapeKey shapeGetShapeKey(const ParabolaPointer::RenderState::ParabolaRenderItem::Pointer& payload) {
return ShapeKey::Builder::ownPipeline();
}
}
}

View file

@ -1,6 +1,7 @@
//
// Created by Sam Gondelman 7/17/2018
// Copyright 2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -62,6 +63,7 @@ public:
render::ItemKey _key;
glm::vec3 _origin { 0.0f };
Transform _prevRenderTransform;
bool _isVisibleInSecondaryCamera { DEFAULT_PARABOLA_ISVISIBLEINSECONDARYCAMERA };
bool _drawInFront { DEFAULT_PARABOLA_DRAWINFRONT };
bool _visible { false };

View file

@ -21,6 +21,7 @@
#include "ParabolaPick.h"
#include "CollisionPick.h"
#include "CameraRootTransformNode.h"
#include "SpatialParentFinder.h"
#include "PickTransformNode.h"
#include "MouseTransformNode.h"
@ -537,6 +538,9 @@ void PickScriptingInterface::setParentTransform(std::shared_ptr<PickQuery> pick,
} else if (joint == "Avatar") {
pick->parentTransform = std::make_shared<MyAvatarHeadTransformNode>();
return;
} else if (joint == "CameraRoot") {
pick->parentTransform = std::make_shared<CameraRootTransformNode>();
return;
} else {
parentUuid = myAvatar->getSessionUUID();
parentJointIndex = myAvatar->getJointIndex(joint);

View file

@ -25,14 +25,14 @@ STATIC_SCRIPT_TYPES_INITIALIZER((+[](ScriptManager* manager){
auto scriptEngine = manager->engine().get();
scriptRegisterMetaType<RenderScriptingInterface::RenderMethod, scriptValueFromEnumClass<RenderScriptingInterface::RenderMethod>, scriptValueToEnumClass<RenderScriptingInterface::RenderMethod> >(scriptEngine, "RenderMethod");
scriptRegisterMetaType<AntialiasingConfig::Mode, scriptValueFromEnumClass<AntialiasingConfig::Mode>, scriptValueToEnumClass<AntialiasingConfig::Mode> >(scriptEngine, "Mode");
scriptRegisterMetaType<AntialiasingSetupConfig::Mode, scriptValueFromEnumClass<AntialiasingSetupConfig::Mode>, scriptValueToEnumClass<AntialiasingSetupConfig::Mode> >(scriptEngine, "Mode");
}));
STATIC_SCRIPT_INITIALIZER(+[](ScriptManager* manager){
auto scriptEngine = manager->engine().get();
scriptEngine->registerEnum("Render.RenderMethod",QMetaEnum::fromType<RenderScriptingInterface::RenderMethod>());
scriptEngine->registerEnum("AntialiasingMode",QMetaEnum::fromType<AntialiasingConfig::Mode>());
scriptEngine->registerEnum("AntialiasingMode",QMetaEnum::fromType<AntialiasingSetupConfig::Mode>());
});
RenderScriptingInterface* RenderScriptingInterface::getInstance() {
@ -56,7 +56,7 @@ void RenderScriptingInterface::loadSettings() {
_bloomEnabled = _bloomEnabledSetting.get();
_ambientOcclusionEnabled = _ambientOcclusionEnabledSetting.get();
_proceduralMaterialsEnabled = _proceduralMaterialsEnabledSetting.get();
_antialiasingMode = static_cast<AntialiasingConfig::Mode>(_antialiasingModeSetting.get());
_antialiasingMode = static_cast<AntialiasingSetupConfig::Mode>(_antialiasingModeSetting.get());
_viewportResolutionScale = _viewportResolutionScaleSetting.get();
_fullScreenScreen = _fullScreenScreenSetting.get();
});
@ -95,9 +95,10 @@ void recursivelyUpdateMirrorRenderMethods(const QString& parentTaskName, int ren
return;
}
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
for (size_t mirrorIndex = 0; mirrorIndex < RenderMirrorTask::MAX_MIRRORS_PER_LEVEL; mirrorIndex++) {
std::string mirrorTaskString = parentTaskName.toStdString() + ".RenderMirrorView" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth) + ".DeferredForwardSwitch";
auto mirrorConfig = dynamic_cast<render::SwitchConfig*>(qApp->getRenderEngine()->getConfiguration()->getConfig(QString::fromStdString(mirrorTaskString)));
auto mirrorConfig = dynamic_cast<render::SwitchConfig*>(renderConfig->getConfig(QString::fromStdString(mirrorTaskString)));
if (mirrorConfig) {
mirrorConfig->setBranch((int)renderMethod);
recursivelyUpdateMirrorRenderMethods(QString::fromStdString(mirrorTaskString) + (renderMethod == 1 ? ".RenderForwardTask" : ".RenderShadowsAndDeferredTask.RenderDeferredTask"),
@ -111,14 +112,20 @@ void RenderScriptingInterface::forceRenderMethod(RenderMethod renderMethod) {
_renderMethod = (int)renderMethod;
_renderMethodSetting.set((int)renderMethod);
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
QString configName = "RenderMainView.DeferredForwardSwitch";
auto config = dynamic_cast<render::SwitchConfig*>(qApp->getRenderEngine()->getConfiguration()->getConfig(configName));
auto config = dynamic_cast<render::SwitchConfig*>(renderConfig->getConfig(configName));
if (config) {
config->setBranch((int)renderMethod);
recursivelyUpdateMirrorRenderMethods(configName + (renderMethod == RenderMethod::FORWARD ? ".RenderForwardTask" : ".RenderShadowsAndDeferredTask.RenderDeferredTask"),
(int)renderMethod, 0);
}
auto secondaryConfig = dynamic_cast<render::SwitchConfig*>(renderConfig->getConfig("RenderSecondView.DeferredForwardSwitch"));
if (secondaryConfig) {
secondaryConfig->setBranch((int)renderMethod);
}
});
}
@ -128,13 +135,14 @@ QStringList RenderScriptingInterface::getRenderMethodNames() const {
}
void recursivelyUpdateLightingModel(const QString& parentTaskName, std::function<void(MakeLightingModelConfig *)> updateLambda, int depth = -1) {
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
if (depth == -1) {
auto secondaryLightingModelConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<MakeLightingModel>("RenderSecondView.LightingModel");
auto secondaryLightingModelConfig = renderConfig->getConfig<MakeLightingModel>("RenderSecondView.LightingModel");
if (secondaryLightingModelConfig) {
updateLambda(secondaryLightingModelConfig);
}
auto mainLightingModelConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<MakeLightingModel>("RenderMainView.LightingModel");
auto mainLightingModelConfig = renderConfig->getConfig<MakeLightingModel>("RenderMainView.LightingModel");
if (mainLightingModelConfig) {
updateLambda(mainLightingModelConfig);
}
@ -146,7 +154,7 @@ void recursivelyUpdateLightingModel(const QString& parentTaskName, std::function
for (size_t mirrorIndex = 0; mirrorIndex < RenderMirrorTask::MAX_MIRRORS_PER_LEVEL; mirrorIndex++) {
std::string mirrorTaskString = parentTaskName.toStdString() + ".RenderMirrorView" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth);
auto lightingModelConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<MakeLightingModel>(mirrorTaskString + ".LightingModel");
auto lightingModelConfig = renderConfig->getConfig<MakeLightingModel>(mirrorTaskString + ".LightingModel");
if (lightingModelConfig) {
updateLambda(lightingModelConfig);
recursivelyUpdateLightingModel(QString::fromStdString(mirrorTaskString), updateLambda, depth + 1);
@ -169,7 +177,6 @@ void RenderScriptingInterface::forceShadowsEnabled(bool enabled) {
_renderSettingLock.withWriteLock([&] {
_shadowsEnabled = (enabled);
_shadowsEnabledSetting.set(enabled);
Menu::getInstance()->setIsOptionChecked(MenuOption::Shadows, enabled);
recursivelyUpdateLightingModel("", [enabled] (MakeLightingModelConfig *config) { config->setShadow(enabled); });
@ -231,7 +238,6 @@ void RenderScriptingInterface::forceAmbientOcclusionEnabled(bool enabled) {
_renderSettingLock.withWriteLock([&] {
_ambientOcclusionEnabled = (enabled);
_ambientOcclusionEnabledSetting.set(enabled);
Menu::getInstance()->setIsOptionChecked(MenuOption::AmbientOcclusion, enabled);
recursivelyUpdateLightingModel("", [enabled] (MakeLightingModelConfig *config) { config->setAmbientOcclusion(enabled); });
@ -259,52 +265,53 @@ void RenderScriptingInterface::forceProceduralMaterialsEnabled(bool enabled) {
});
}
AntialiasingConfig::Mode RenderScriptingInterface::getAntialiasingMode() const {
AntialiasingSetupConfig::Mode RenderScriptingInterface::getAntialiasingMode() const {
return _antialiasingMode;
}
void RenderScriptingInterface::setAntialiasingMode(AntialiasingConfig::Mode mode) {
void RenderScriptingInterface::setAntialiasingMode(AntialiasingSetupConfig::Mode mode) {
if (_antialiasingMode != mode) {
forceAntialiasingMode(mode);
emit settingsChanged();
}
}
void setAntialiasingModeForView(AntialiasingConfig::Mode mode, JitterSampleConfig *jitterCamConfig, AntialiasingConfig *antialiasingConfig) {
void setAntialiasingModeForView(AntialiasingSetupConfig::Mode mode, AntialiasingSetupConfig *antialiasingSetupConfig, AntialiasingConfig *antialiasingConfig) {
switch (mode) {
case AntialiasingConfig::Mode::NONE:
jitterCamConfig->none();
case AntialiasingSetupConfig::Mode::NONE:
antialiasingSetupConfig->none();
antialiasingConfig->blend = 1;
antialiasingConfig->setDebugFXAA(false);
break;
case AntialiasingConfig::Mode::TAA:
jitterCamConfig->play();
case AntialiasingSetupConfig::Mode::TAA:
antialiasingSetupConfig->play();
antialiasingConfig->blend = 0.25;
antialiasingConfig->setDebugFXAA(false);
break;
case AntialiasingConfig::Mode::FXAA:
jitterCamConfig->none();
case AntialiasingSetupConfig::Mode::FXAA:
antialiasingSetupConfig->none();
antialiasingConfig->blend = 0.25;
antialiasingConfig->setDebugFXAA(true);
break;
default:
jitterCamConfig->none();
antialiasingSetupConfig->none();
antialiasingConfig->blend = 1;
antialiasingConfig->setDebugFXAA(false);
break;
}
}
void recursivelyUpdateAntialiasingMode(const QString& parentTaskName, AntialiasingConfig::Mode mode, int depth = -1) {
void recursivelyUpdateAntialiasingMode(const QString& parentTaskName, AntialiasingSetupConfig::Mode mode, int depth = -1) {
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
if (depth == -1) {
auto secondViewJitterCamConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<JitterSample>("RenderSecondView.JitterCam");
auto secondViewAntialiasingConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<Antialiasing>("RenderSecondView.Antialiasing");
auto secondViewJitterCamConfig = renderConfig->getConfig<AntialiasingSetup>("RenderSecondView.AntialiasingSetup");
auto secondViewAntialiasingConfig = renderConfig->getConfig<Antialiasing>("RenderSecondView.Antialiasing");
if (secondViewJitterCamConfig && secondViewAntialiasingConfig) {
setAntialiasingModeForView(mode, secondViewJitterCamConfig, secondViewAntialiasingConfig);
}
auto mainViewJitterCamConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<JitterSample>("RenderMainView.JitterCam");
auto mainViewAntialiasingConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<Antialiasing>("RenderMainView.Antialiasing");
auto mainViewJitterCamConfig = renderConfig->getConfig<AntialiasingSetup>("RenderMainView.AntialiasingSetup");
auto mainViewAntialiasingConfig = renderConfig->getConfig<Antialiasing>("RenderMainView.Antialiasing");
if (mainViewJitterCamConfig && mainViewAntialiasingConfig) {
setAntialiasingModeForView( mode, mainViewJitterCamConfig, mainViewAntialiasingConfig);
}
@ -316,8 +323,8 @@ void recursivelyUpdateAntialiasingMode(const QString& parentTaskName, Antialiasi
for (size_t mirrorIndex = 0; mirrorIndex < RenderMirrorTask::MAX_MIRRORS_PER_LEVEL; mirrorIndex++) {
std::string mirrorTaskString = parentTaskName.toStdString() + ".RenderMirrorView" + std::to_string(mirrorIndex) + "Depth" + std::to_string(depth);
auto jitterCamConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<JitterSample>(mirrorTaskString + ".JitterCam");
auto antialiasingConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<Antialiasing>(mirrorTaskString + ".Antialiasing");
auto jitterCamConfig = renderConfig->getConfig<AntialiasingSetup>(mirrorTaskString + ".AntialiasingSetup");
auto antialiasingConfig = renderConfig->getConfig<Antialiasing>(mirrorTaskString + ".Antialiasing");
if (jitterCamConfig && antialiasingConfig) {
setAntialiasingModeForView(mode, jitterCamConfig, antialiasingConfig);
recursivelyUpdateAntialiasingMode(QString::fromStdString(mirrorTaskString), mode, depth + 1);
@ -325,14 +332,14 @@ void recursivelyUpdateAntialiasingMode(const QString& parentTaskName, Antialiasi
}
}
void RenderScriptingInterface::forceAntialiasingMode(AntialiasingConfig::Mode mode) {
if ((int)mode < 0 || mode >= AntialiasingConfig::Mode::MODE_COUNT) {
mode = AntialiasingConfig::Mode::NONE;
void RenderScriptingInterface::forceAntialiasingMode(AntialiasingSetupConfig::Mode mode) {
if ((int)mode < 0 || mode >= AntialiasingSetupConfig::Mode::MODE_COUNT) {
mode = AntialiasingSetupConfig::Mode::NONE;
}
_renderSettingLock.withWriteLock([&] {
_antialiasingMode = mode;
_antialiasingModeSetting.set(_antialiasingMode);
_antialiasingModeSetting.set((int)_antialiasingMode);
recursivelyUpdateAntialiasingMode("", _antialiasingMode);
});
@ -345,6 +352,13 @@ void RenderScriptingInterface::setVerticalFieldOfView(float fieldOfView) {
}
}
// Script-facing setter for 3rd-person camera clipping. Forwards to the
// application and only emits settingsChanged() when the value actually changes.
void RenderScriptingInterface::setCameraClippingEnabled(bool enabled) {
    // Guard clause: no-op when the requested state is already active.
    if (qApp->getCameraClippingEnabled() == enabled) {
        return;
    }
    qApp->setCameraClippingEnabled(enabled);
    emit settingsChanged();
}
QStringList RenderScriptingInterface::getScreens() const {
QStringList screens;
@ -396,20 +410,27 @@ void RenderScriptingInterface::forceViewportResolutionScale(float scale) {
return;
}
_renderSettingLock.withWriteLock([&] {
_viewportResolutionScale = (scale);
_viewportResolutionScale = scale;
_viewportResolutionScaleSetting.set(scale);
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
assert(renderConfig);
auto deferredView = renderConfig->getConfig("RenderMainView.RenderDeferredTask");
// mainView can be null if we're rendering in forward mode
if (deferredView) {
deferredView->setProperty("resolutionScale", _viewportResolutionScale);
deferredView->setProperty("resolutionScale", scale);
}
auto forwardView = renderConfig->getConfig("RenderMainView.RenderForwardTask");
// mainView can be null if we're rendering in forward mode
if (forwardView) {
forwardView->setProperty("resolutionScale", _viewportResolutionScale);
forwardView->setProperty("resolutionScale", scale);
}
auto deferredSecondView = renderConfig->getConfig("RenderSecondView.RenderDeferredTask");
if (deferredSecondView) {
deferredSecondView->setProperty("resolutionScale", scale);
}
auto forwardSecondView = renderConfig->getConfig("RenderSecondView.RenderForwardTask");
if (forwardSecondView) {
forwardSecondView->setProperty("resolutionScale", scale);
}
});
}

View file

@ -37,6 +37,7 @@
* they're disabled.
* @property {integer} antialiasingMode - The active anti-aliasing mode.
* @property {number} viewportResolutionScale - The view port resolution scale, <code>&gt; 0.0</code>.
* @property {boolean} cameraClippingEnabled - <code>true</code> if third person camera clipping is enabled, <code>false</code> if it's disabled.
*/
class RenderScriptingInterface : public QObject {
Q_OBJECT
@ -45,10 +46,11 @@ class RenderScriptingInterface : public QObject {
Q_PROPERTY(bool hazeEnabled READ getHazeEnabled WRITE setHazeEnabled NOTIFY settingsChanged)
Q_PROPERTY(bool bloomEnabled READ getBloomEnabled WRITE setBloomEnabled NOTIFY settingsChanged)
Q_PROPERTY(bool ambientOcclusionEnabled READ getAmbientOcclusionEnabled WRITE setAmbientOcclusionEnabled NOTIFY settingsChanged)
Q_PROPERTY(AntialiasingSetupConfig::Mode antialiasingMode READ getAntialiasingMode WRITE setAntialiasingMode NOTIFY settingsChanged)
Q_PROPERTY(bool proceduralMaterialsEnabled READ getProceduralMaterialsEnabled WRITE setProceduralMaterialsEnabled NOTIFY settingsChanged)
Q_PROPERTY(AntialiasingConfig::Mode antialiasingMode READ getAntialiasingMode WRITE setAntialiasingMode NOTIFY settingsChanged)
Q_PROPERTY(float viewportResolutionScale READ getViewportResolutionScale WRITE setViewportResolutionScale NOTIFY settingsChanged)
Q_PROPERTY(float verticalFieldOfView READ getVerticalFieldOfView WRITE setVerticalFieldOfView NOTIFY settingsChanged)
Q_PROPERTY(bool cameraClippingEnabled READ getCameraClippingEnabled WRITE setCameraClippingEnabled NOTIFY settingsChanged)
public:
RenderScriptingInterface();
@ -202,14 +204,14 @@ public slots:
* @function Render.getAntialiasingMode
* @returns {AntialiasingMode} The active anti-aliasing mode.
*/
AntialiasingConfig::Mode getAntialiasingMode() const;
AntialiasingSetupConfig::Mode getAntialiasingMode() const;
/*@jsdoc
* Sets the active anti-aliasing mode.
* @function Render.setAntialiasingMode
* @param {AntialiasingMode} The active anti-aliasing mode.
*/
void setAntialiasingMode(AntialiasingConfig::Mode mode);
void setAntialiasingMode(AntialiasingSetupConfig::Mode mode);
/*@jsdoc
* Gets the view port resolution scale.
@ -261,7 +263,21 @@ public slots:
* @function Render.setVerticalFieldOfView
* @param {number} fieldOfView - The vertical field of view in degrees to set.
*/
void setVerticalFieldOfView( float fieldOfView );
void setVerticalFieldOfView(float fieldOfView);
/*@jsdoc
* Gets whether or not third person camera clipping is enabled.
* @function Render.getCameraClippingEnabled
* @returns {boolean} <code>true</code> if camera clipping is enabled, <code>false</code> if it's disabled.
*/
bool getCameraClippingEnabled() { return qApp->getCameraClippingEnabled(); }
/*@jsdoc
* Sets whether or not third person camera clipping is enabled.
* @function Render.setCameraClippingEnabled
* @param {boolean} enabled - <code>true</code> to enable third person camera clipping, <code>false</code> to disable.
*/
void setCameraClippingEnabled(bool enabled);
signals:
@ -288,7 +304,7 @@ private:
bool _bloomEnabled { true };
bool _ambientOcclusionEnabled { true };
bool _proceduralMaterialsEnabled { true };
AntialiasingConfig::Mode _antialiasingMode { AntialiasingConfig::Mode::NONE };
AntialiasingSetupConfig::Mode _antialiasingMode { AntialiasingSetupConfig::Mode::NONE };
float _viewportResolutionScale { 1.0f };
QString _fullScreenScreen;
@ -299,7 +315,7 @@ private:
Setting::Handle<bool> _bloomEnabledSetting { "bloomEnabled", true };
Setting::Handle<bool> _ambientOcclusionEnabledSetting { "ambientOcclusionEnabled", true };
Setting::Handle<bool> _proceduralMaterialsEnabledSetting { "proceduralMaterialsEnabled", true };
Setting::Handle<int> _antialiasingModeSetting { "antialiasingMode", AntialiasingConfig::Mode::NONE };
Setting::Handle<int> _antialiasingModeSetting { "antialiasingMode", (int)AntialiasingSetupConfig::Mode::NONE };
Setting::Handle<float> _viewportResolutionScaleSetting { "viewportResolutionScale", 1.0f };
Setting::Handle<QString> _fullScreenScreenSetting { "fullScreenScreen", "" };
@ -310,7 +326,7 @@ private:
void forceBloomEnabled(bool enabled);
void forceAmbientOcclusionEnabled(bool enabled);
void forceProceduralMaterialsEnabled(bool enabled);
void forceAntialiasingMode(AntialiasingConfig::Mode mode);
void forceAntialiasingMode(AntialiasingSetupConfig::Mode mode);
void forceViewportResolutionScale(float scale);
static std::once_flag registry_flag;

View file

@ -231,7 +231,7 @@ void setupPreferences() {
preferences->addPreference(new CheckPreference(UI_CATEGORY, "Show Graphics icon on tablet and toolbar", getter, setter));
}
static const QString VIEW_CATEGORY{ "View" };
static const QString VIEW_CATEGORY { "View" };
{
auto getter = [myAvatar]()->float { return myAvatar->getRealWorldFieldOfView(); };
auto setter = [myAvatar](float value) { myAvatar->setRealWorldFieldOfView(value); };
@ -249,6 +249,11 @@ void setupPreferences() {
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->bool { return qApp->getCameraClippingEnabled(); };
auto setter = [](bool value) { qApp->setCameraClippingEnabled(value); };
preferences->addPreference(new CheckPreference(VIEW_CATEGORY, "Enable 3rd Person Camera Clipping?", getter, setter));
}
// Snapshots
static const QString SNAPSHOTS { "Snapshots" };

View file

@ -360,7 +360,7 @@ void OpenGLDisplayPlugin::customizeContext() {
auto presentThread = DependencyManager::get<PresentThread>();
Q_ASSERT(thread() == presentThread->thread());
getGLBackend()->setCameraCorrection(mat4(), mat4(), true, true);
getGLBackend()->updatePresentFrame();
for (auto& cursorValue : _cursorsData) {
auto& cursorData = cursorValue.second;
@ -704,8 +704,7 @@ void OpenGLDisplayPlugin::present(const std::shared_ptr<RefreshRateController>&
if (_currentFrame) {
auto correction = getViewCorrection();
getGLBackend()->setCameraCorrection(correction, _prevRenderView, true);
_prevRenderView = correction * _currentFrame->view;
getGLBackend()->updatePresentFrame(correction);
{
withPresentThreadLock([&] {
_renderRate.increment();

View file

@ -154,7 +154,6 @@ protected:
gpu::FramePointer _currentFrame;
gpu::Frame* _lastFrame{ nullptr };
mat4 _prevRenderView;
gpu::FramebufferPointer _compositeFramebuffer;
gpu::PipelinePointer _hudPipeline;
gpu::PipelinePointer _mirrorHUDPipeline;

View file

@ -166,6 +166,7 @@ protected:
MirrorMode _mirrorMode { MirrorMode::NONE };
QUuid _portalExitID;
Transform _renderTransform;
Transform _prevRenderTransform; // each subclass is responsible for updating this after they render because they all handle transforms differently
MaterialMap _materials;
mutable std::mutex _materialsLock;

View file

@ -1,6 +1,7 @@
//
// Created by Sam Gondelman on 1/22/19
// Copyright 2019 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -266,7 +267,10 @@ void GizmoEntityRenderer::doRender(RenderArgs* args) {
bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0;
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition(), true));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
Pipeline pipelineType = getPipelineType(materials);
if (pipelineType == Pipeline::PROCEDURAL) {

View file

@ -1,6 +1,7 @@
//
// Created by Sam Gondelman on 11/29/18
// Copyright 2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -106,7 +107,10 @@ void GridEntityRenderer::doRender(RenderArgs* args) {
bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0;
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch->setModelTransform(transform);
batch->setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
auto minCorner = glm::vec2(-0.5f, -0.5f);
auto maxCorner = glm::vec2(0.5f, 0.5f);
@ -121,4 +125,4 @@ void GridEntityRenderer::doRender(RenderArgs* args) {
minorGridRowDivisions, minorGridColDivisions, MINOR_GRID_EDGE,
majorGridRowDivisions, majorGridColDivisions, MAJOR_GRID_EDGE,
color, forward, _geometryId);
}
}

View file

@ -1,6 +1,7 @@
//
// Created by Sam Gondelman on 11/29/18
// Copyright 2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -189,7 +190,10 @@ void ImageEntityRenderer::doRender(RenderArgs* args) {
}
transform.setScale(scale);
}
batch->setModelTransform(transform);
batch->setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
Pipeline pipelineType = getPipelineType(materials);
if (pipelineType == Pipeline::PROCEDURAL) {

View file

@ -4,6 +4,7 @@
//
// Created by Seth Alves on 5/11/15.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -44,13 +45,18 @@ void LineEntityRenderer::doRender(RenderArgs* args) {
PerformanceTimer perfTimer("RenderableLineEntityItem::render");
Q_ASSERT(args->_batch);
gpu::Batch& batch = *args->_batch;
const auto& modelTransform = getModelTransform();
Transform transform = Transform();
Transform transform;
transform.setTranslation(modelTransform.getTranslation());
bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0;
transform.setRotation(BillboardModeHelpers::getBillboardRotation(modelTransform.getTranslation(), modelTransform.getRotation(), _billboardMode,
usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
if (_linePoints.size() > 1) {
DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch, false, false, false, false, true,
_renderLayer != RenderLayer::WORLD || args->_renderMethod == Args::RenderMethod::FORWARD);

View file

@ -328,7 +328,10 @@ void MaterialEntityRenderer::doRender(RenderArgs* args) {
bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0;
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
if (!proceduralRender) {
drawMaterial->setTextureTransforms(textureTransform, MaterialMappingMode::UV, true);

View file

@ -4,6 +4,7 @@
//
// Created by Brad Hefta-Gaub on 8/6/14.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -1510,7 +1511,11 @@ void ModelEntityRenderer::doRender(RenderArgs* args) {
// If the model doesn't have visual geometry, render our bounding box as green wireframe
static glm::vec4 greenColor(0.0f, 1.0f, 0.0f, 1.0f);
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(getModelTransform()); // we want to include the scale as well
Transform transform = getModelTransform();
batch.setModelTransform(transform, _prevRenderTransform); // we want to include the scale as well
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
auto geometryCache = DependencyManager::get<GeometryCache>();
geometryCache->renderWireCubeInstance(args, batch, greenColor, geometryCache->getShapePipelinePointer(false, false, args->_renderMethod == Args::RenderMethod::FORWARD));

View file

@ -3,6 +3,7 @@
// interface/src
//
// Created by Jason Rickwald on 3/2/15.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -53,7 +54,7 @@ static ShapePipelinePointer shapePipelineFactory(const ShapePlumber& plumber, co
state->setDepthTest(true, !transparent, gpu::LESS_EQUAL);
state->setBlendFunction(transparent, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
transparent ? PrepareStencil::testMask(*state) : PrepareStencil::testMaskDrawShape(*state);
transparent ? PrepareStencil::testMaskResetNoAA(*state) : PrepareStencil::testMaskDrawShapeNoAA(*state);
auto program = gpu::Shader::createProgram(std::get<3>(key));
_pipelines[std::make_tuple(std::get<0>(key), transparent, std::get<2>(key), wireframe)] = gpu::Pipeline::create(program, state);
@ -620,4 +621,4 @@ void ParticleEffectEntityRenderer::computeTriangles(const hfm::Model& hfmModel)
glm::vec3 scale = bounds.getScale();
_triangleInfo.transform = glm::scale(1.0f / scale) * glm::translate(-bounds.calcCenter());
}
}

View file

@ -4,6 +4,7 @@
//
// Created by Eric Levin on 8/10/15
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -331,6 +332,10 @@ void PolyLineEntityRenderer::doRender(RenderArgs* args) {
batch.setModelTransform(transform);
batch.setPipeline(_pipelines[{args->_renderMethod, isTransparent()}]);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
batch.setResourceTexture(0, texture);
batch.draw(gpu::TRIANGLE_STRIP, (gpu::uint32)(2 * _numVertices), 0);
}

View file

@ -1848,7 +1848,10 @@ void PolyVoxEntityRenderer::doRender(RenderArgs* args) {
glm::mat4 rotation = glm::mat4_cast(BillboardModeHelpers::getBillboardRotation(_position, _orientation, _billboardMode,
usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
Transform transform(glm::translate(_position) * rotation * _lastVoxelToLocalMatrix);
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
batch.setInputFormat(_vertexFormat);
batch.setInputBuffer(gpu::Stream::POSITION, _mesh->getVertexBuffer()._buffer, 0, sizeof(PolyVox::PositionMaterialNormal));

View file

@ -46,7 +46,7 @@ private:
QString _particleUpdateData;
Procedural _updateProcedural;
QString _particleRenderData;
Procedural _renderProcedural;
Procedural _renderProcedural { false }; // No AA on Particles
size_t _numParticles { 0 };
size_t _particlePropTextureDim { 128 }; // 2^ceil(log2(sqrt(10,000)))

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2016/05/09
// Copyright 2013 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -128,7 +129,10 @@ void ShapeEntityRenderer::doRender(RenderArgs* args) {
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition(),
_shape < EntityShape::Cube || _shape > EntityShape::Icosahedron));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
Pipeline pipelineType = getPipelineType(materials);
if (pipelineType == Pipeline::PROCEDURAL) {

View file

@ -4,6 +4,7 @@
//
// Created by Brad Hefta-Gaub on 8/6/14.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -163,7 +164,10 @@ void TextEntityRenderer::doRender(RenderArgs* args) {
bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0;
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
Pipeline pipelineType = getPipelineType(materials);
if (pipelineType == Pipeline::PROCEDURAL) {
@ -374,7 +378,10 @@ void entities::TextPayload::render(RenderArgs* args) {
}
transform.postTranslate(glm::vec3(-0.5, 0.5, 1.0f + EPSILON / dimensions.z));
transform.setScale(scale);
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
glm::vec2 bounds = glm::vec2(dimensions.x - (textRenderable->_leftMargin + textRenderable->_rightMargin), dimensions.y - (textRenderable->_topMargin + textRenderable->_bottomMargin));
textRenderer->draw(batch, textRenderable->_font, { textRenderable->_text, textColor, effectColor, { textRenderable->_leftMargin / scale, -textRenderable->_topMargin / scale },

View file

@ -107,6 +107,7 @@ public:
protected:
QUuid _entityID;
std::weak_ptr<TextRenderer3D> _textRenderer;
Transform _prevRenderTransform;
int _geometryID { 0 };
};

View file

@ -2,6 +2,7 @@
// Created by Bradley Austin Davis on 2015/05/12
// Copyright 2013 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -324,13 +325,16 @@ void WebEntityRenderer::doRender(RenderArgs* args) {
bool usePrimaryFrustum = args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE || args->_mirrorDepth > 0;
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
usePrimaryFrustum ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
// Turn off jitter for these entities
batch.pushProjectionJitter();
batch.pushProjectionJitterEnabled(false);
DependencyManager::get<GeometryCache>()->bindWebBrowserProgram(batch, transparent, forward);
DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texMin, texMax, color, _geometryId);
batch.popProjectionJitter();
batch.popProjectionJitterEnabled();
batch.setResourceTexture(0, nullptr);
}

View file

@ -5,6 +5,7 @@
//
// Created by Eric Levin on 8/10/2015
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -23,12 +24,15 @@
LAYOUT(binding=0) uniform sampler2D _texture;
<@include render-utils/ShaderConstants.h@>
<@if not HIFI_USE_FORWARD@>
layout(location=0) in vec3 _normalWS;
layout(location=RENDER_UTILS_ATTR_NORMAL_WS) in vec3 _normalWS;
layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS;
<@endif@>
layout(location=1) in vec2 _texCoord;
layout(location=2) in vec4 _color;
layout(location=3) in float _distanceFromCenter;
layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec2 _texCoord;
layout(location=RENDER_UTILS_ATTR_COLOR) in vec4 _color;
layout(location=2) in float _distanceFromCenter;
void main(void) {
vec4 texel = texture(_texture, _texCoord);
@ -37,9 +41,9 @@ void main(void) {
<@if not HIFI_USE_FORWARD@>
<@if HIFI_USE_TRANSLUCENT@>
packDeferredFragmentTranslucent(evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb, DEFAULT_ROUGHNESS);
packDeferredFragmentTranslucentUnlit(_prevPositionCS, evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb);
<@else@>
packDeferredFragmentUnlit(evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb);
packDeferredFragmentUnlit(_prevPositionCS, evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb);
<@endif@>
<@else@>
_fragColor0 = texel;

View file

@ -5,6 +5,7 @@
//
// Created by Eric Levin on 7/20/15.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -12,18 +13,22 @@
<@include gpu/Inputs.slh@>
<@include gpu/Color.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
<@include paintStroke.slh@>
<$declarePolyLineBuffers()$>
<@include render-utils/ShaderConstants.h@>
<@if not HIFI_USE_FORWARD@>
layout(location=0) out vec3 _normalWS;
layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS;
layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS;
<@endif@>
layout(location=1) out vec2 _texCoord;
layout(location=2) out vec4 _color;
layout(location=3) out float _distanceFromCenter;
layout(location=RENDER_UTILS_ATTR_TEXCOORD01) out vec2 _texCoord;
layout(location=RENDER_UTILS_ATTR_COLOR) out vec4 _color;
layout(location=2) out float _distanceFromCenter;
void main(void) {
PolylineVertex vertex = getPolylineVertex(gl_VertexID / 2);
@ -54,14 +59,17 @@ void main(void) {
posEye.z += _distanceFromCenter * vertex.binormalAndHalfWidth.w * binormalEye.z;
<$transformEyeToClipPos(cam, posEye, gl_Position)$>
<@if not HIFI_USE_FORWARD@>
<$transformEyeToPrevClipPos(cam, posEye, _prevPositionCS)$>
<$transformEyeToWorldDir(cam, normalEye, _normalWS)$>
<@endif@>
} else {
vec3 normal = vertex.normal.xyz;
position.xyz += _distanceFromCenter * vertex.binormalAndHalfWidth.w * binormal;
<@if HIFI_USE_FORWARD@>
<$transformModelToClipPos(cam, obj, position, gl_Position)$>
<@if not HIFI_USE_FORWARD@>
<@else@>
<$transformModelToClipPosAndPrevClipPos(cam, obj, position, gl_Position, _prevPositionCS)$>
<$transformModelToWorldDir(cam, obj, normal, _normalWS)$>
<@endif@>
}
}
}

View file

@ -5,6 +5,7 @@
//
// Created by Seth Alves on 2015-8-3
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -36,6 +37,8 @@
<@if HIFI_USE_FORWARD@>
layout(location=RENDER_UTILS_ATTR_POSITION_ES) in vec4 _positionES;
<@else@>
layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS;
<@endif@>
layout(location=RENDER_UTILS_ATTR_POSITION_MS) in vec3 _positionMS;
layout(location=RENDER_UTILS_ATTR_NORMAL_WS) in vec3 _normalWS;
@ -88,6 +91,7 @@ void main(void) {
<@if not HIFI_USE_FORWARD@>
packDeferredFragment(
_prevPositionCS,
normalize(_normalWS),
1.0,
diffuse,

View file

@ -4,6 +4,7 @@
// Generated on <$_SCRIBE_DATE$>
//
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -23,6 +24,7 @@
layout(location=RENDER_UTILS_ATTR_POSITION_ES) out vec4 _positionES;
<@endif@>
layout(location=RENDER_UTILS_ATTR_POSITION_MS) out vec3 _positionMS;
layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS;
layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS;
<@endif@>
@ -34,7 +36,7 @@ void main(void) {
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
<@else@>
<@if not HIFI_USE_FORWARD@>
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
<$transformModelToClipPosAndPrevClipPos(cam, obj, inPosition, gl_Position, _prevPositionCS)$>
<@else@>
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _positionES, gl_Position)$>
<@endif@>

View file

@ -132,8 +132,11 @@ void main(void) {
#endif
<@if not HIFI_USE_TRANSLUCENT@>
// Particles have AA disabled so this doesn't matter
vec4 PREV_POSITION_CS = vec4(0.0, 0.0, 0.0, 1.0);
if (emissiveAmount > 0.0) {
packDeferredFragmentLightmap(
PREV_POSITION_CS,
normal,
1.0,
diffuse,
@ -142,6 +145,7 @@ void main(void) {
emissive);
} else {
packDeferredFragment(
PREV_POSITION_CS,
normal,
1.0,
diffuse,

View file

@ -5,6 +5,7 @@
// textured_particle.frag
//
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -38,11 +39,13 @@ void main(void) {
_fragColor0 = vec4(1.0);
<@endif@>
<@else@>
// Particles have AA disabled so this doesn't matter
vec4 PREV_POSITION_CS = vec4(0.0, 0.0, 0.0, 1.0);
vec3 NORMAL = vec3(1.0, 0.0, 0.0);
<@if not HIFI_USE_TRANSLUCENT@>
packDeferredFragmentUnlit(NORMAL, albedo.a, albedo.rgb);
packDeferredFragmentUnlit(PREV_POSITION_CS, NORMAL, albedo.a, albedo.rgb);
<@else@>
packDeferredFragmentTranslucent(NORMAL, albedo.a, albedo.rgb, DEFAULT_ROUGHNESS);
packDeferredFragmentTranslucent(PREV_POSITION_CS, NORMAL, albedo.a, albedo.rgb, DEFAULT_ROUGHNESS);
<@endif@>
<@endif@>
}

View file

@ -2,9 +2,9 @@
// EntityTypes.h
// libraries/entities/src
//
// Created by Brad Hefta-Gaub on 12/4/13.
// Created by Brad Hefta-Gaub on December 4th, 2013.
// Copyright 2013 High Fidelity, Inc.
// Copyright 2023 Overte e.V.
// Copyright 2023-2025 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -91,7 +91,7 @@ public:
* <tr><td><code>"Material"</code></td><td>Modifies the existing materials on entities and avatars.</td>
* <td>{@link Entities.EntityProperties-Material|EntityProperties-Material}</td></tr>
* <tr><td><code>"Sound"</code></td><td>Plays a sound.</td>
* <td>{@link Entities.EntityProperties-Material|EntityProperties-Sound}</td></tr>
* <td>{@link Entities.EntityProperties-Sound|EntityProperties-Sound}</td></tr>
* </tbody>
* </table>
* @typedef {string} Entities.EntityType

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -50,10 +51,16 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
(&::gpu::gl::GLBackend::do_setModelTransform),
(&::gpu::gl::GLBackend::do_setViewTransform),
(&::gpu::gl::GLBackend::do_setProjectionTransform),
(&::gpu::gl::GLBackend::do_setProjectionJitter),
(&::gpu::gl::GLBackend::do_setProjectionJitterEnabled),
(&::gpu::gl::GLBackend::do_setProjectionJitterSequence),
(&::gpu::gl::GLBackend::do_setProjectionJitterScale),
(&::gpu::gl::GLBackend::do_setViewportTransform),
(&::gpu::gl::GLBackend::do_setDepthRangeTransform),
(&::gpu::gl::GLBackend::do_saveViewProjectionTransform),
(&::gpu::gl::GLBackend::do_setSavedViewProjectionTransform),
(&::gpu::gl::GLBackend::do_copySavedViewProjectionTransformToBuffer),
(&::gpu::gl::GLBackend::do_setPipeline),
(&::gpu::gl::GLBackend::do_setStateBlendFactor),
(&::gpu::gl::GLBackend::do_setStateScissorRect),
@ -268,12 +275,10 @@ bool GLBackend::availableMemoryKnown() {
}
GLBackend::GLBackend(bool syncCache) {
_pipeline._cameraCorrectionBuffer._buffer->flush();
initShaderBinaryCache();
}
GLBackend::GLBackend() {
_pipeline._cameraCorrectionBuffer._buffer->flush();
initShaderBinaryCache();
}
@ -319,19 +324,8 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
case Batch::COMMAND_drawIndexedInstanced:
case Batch::COMMAND_multiDrawIndirect:
case Batch::COMMAND_multiDrawIndexedIndirect:
{
Vec2u outputSize{ 1,1 };
auto framebuffer = acquire(_output._framebuffer);
if (framebuffer) {
outputSize.x = framebuffer->getWidth();
outputSize.y = framebuffer->getHeight();
} else if (glm::dot(_transform._projectionJitter, _transform._projectionJitter)>0.0f) {
qCWarning(gpugllogging) << "Jittering needs to have a frame buffer to be set";
}
_transform.preUpdate(_commandIndex, _stereo, outputSize);
}
case Batch::COMMAND_copySavedViewProjectionTransformToBuffer: // We need to store this transform state in the transform buffer
preUpdateTransform();
break;
case Batch::COMMAND_disableContextStereo:
@ -346,7 +340,11 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
case Batch::COMMAND_setViewportTransform:
case Batch::COMMAND_setViewTransform:
case Batch::COMMAND_setProjectionTransform:
case Batch::COMMAND_setProjectionJitter:
case Batch::COMMAND_setProjectionJitterEnabled:
case Batch::COMMAND_setProjectionJitterSequence:
case Batch::COMMAND_setProjectionJitterScale:
case Batch::COMMAND_saveViewProjectionTransform:
case Batch::COMMAND_setSavedViewProjectionTransform:
case Batch::COMMAND_setContextMirrorViewCorrection:
{
CommandCall call = _commandCalls[(*command)];
@ -385,6 +383,9 @@ void GLBackend::renderPassDraw(const Batch& batch) {
case Batch::COMMAND_setModelTransform:
case Batch::COMMAND_setViewTransform:
case Batch::COMMAND_setProjectionTransform:
case Batch::COMMAND_saveViewProjectionTransform:
case Batch::COMMAND_setSavedViewProjectionTransform:
case Batch::COMMAND_setProjectionJitterSequence:
break;
case Batch::COMMAND_draw:
@ -410,7 +411,6 @@ void GLBackend::renderPassDraw(const Batch& batch) {
//case Batch::COMMAND_setModelTransform:
//case Batch::COMMAND_setViewTransform:
//case Batch::COMMAND_setProjectionTransform:
case Batch::COMMAND_setProjectionJitter:
case Batch::COMMAND_setViewportTransform:
case Batch::COMMAND_setDepthRangeTransform:
case Batch::COMMAND_setContextMirrorViewCorrection:
@ -555,7 +555,7 @@ void GLBackend::render(const Batch& batch) {
_stereo._enable = false;
}
// Reset jitter
_transform._projectionJitter = Vec2(0.0f, 0.0f);
_transform._projectionJitter._isEnabled = false;
{
GL_PROFILE_RANGE(render_gpu_gl_detail, "Transfer");
@ -579,6 +579,14 @@ void GLBackend::render(const Batch& batch) {
// Restore the saved stereo state for the next batch
_stereo._enable = savedStereo;
if (batch._mustUpdatePreviousModels) {
// Update object transform history for when the batch will be reexecuted
for (auto& objectTransform : batch._objects) {
objectTransform._previousModel = objectTransform._model;
}
batch._mustUpdatePreviousModels = false;
}
}
@ -621,11 +629,11 @@ void GLBackend::do_restoreContextViewCorrection(const Batch& batch, size_t param
}
void GLBackend::do_setContextMirrorViewCorrection(const Batch& batch, size_t paramOffset) {
bool prevMirrorViewCorrection = _transform._mirrorViewCorrection;
_transform._mirrorViewCorrection = batch._params[paramOffset]._uint != 0;
bool prevMirrorViewCorrection = _transform._presentFrame.mirrorViewCorrection;
_transform._presentFrame.mirrorViewCorrection = batch._params[paramOffset]._uint != 0;
if (_transform._correction.correction != glm::mat4()) {
setCameraCorrection(_transform._mirrorViewCorrection ? _transform._flippedCorrection : _transform._unflippedCorrection, _transform._correction.prevView, false);
if (_transform._presentFrame.correction != glm::mat4()) {
updatePresentFrame(_transform._presentFrame.mirrorViewCorrection ? _transform._presentFrame.flippedCorrection : _transform._presentFrame.unflippedCorrection, false);
_transform._invalidView = true;
}
}
@ -992,28 +1000,26 @@ void GLBackend::recycle() const {
_textureManagement._transferEngine->manageMemory();
}
void GLBackend::setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool primary, bool reset) {
auto invCorrection = glm::inverse(correction);
auto invPrevView = glm::inverse(prevRenderView);
_transform._correction.prevView = (reset ? Mat4() : prevRenderView);
_transform._correction.prevViewInverse = (reset ? Mat4() : invPrevView);
_transform._correction.correction = correction;
_transform._correction.correctionInverse = invCorrection;
void GLBackend::updatePresentFrame(const Mat4& correction, bool primary) {
_transform._presentFrame.correction = correction;
_transform._presentFrame.correctionInverse = glm::inverse(correction);
if (!_inRenderTransferPass) {
_pipeline._cameraCorrectionBuffer._buffer->setSubData(0, _transform._correction);
_pipeline._cameraCorrectionBuffer._buffer->flush();
// Update previous views of saved transforms
for (auto& viewProjState : _transform._savedTransforms) {
viewProjState._state._previousCorrectedView = viewProjState._state._correctedView;
viewProjState._state._previousProjection = viewProjState._state._projection;
}
if (primary) {
_transform._unflippedCorrection = _transform._correction.correction;
quat flippedRotation = glm::quat_cast(_transform._unflippedCorrection);
_transform._projectionJitter._currentSampleIndex++;
_transform._presentFrame.unflippedCorrection = _transform._presentFrame.correction;
quat flippedRotation = glm::quat_cast(_transform._presentFrame.unflippedCorrection);
flippedRotation.y *= -1.0f;
flippedRotation.z *= -1.0f;
vec3 flippedTranslation = _transform._unflippedCorrection[3];
vec3 flippedTranslation = _transform._presentFrame.unflippedCorrection[3];
flippedTranslation.x *= -1.0f;
_transform._flippedCorrection = glm::translate(glm::mat4_cast(flippedRotation), flippedTranslation);
_transform._mirrorViewCorrection = false;
_transform._presentFrame.flippedCorrection = glm::translate(glm::mat4_cast(flippedRotation), flippedTranslation);
_transform._presentFrame.mirrorViewCorrection = false;
}
}

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -26,7 +27,7 @@
#include <gl/GLShaders.h>
#include <gpu/Forward.h>
#include <gpu/Context.h>
#include <gpu/Backend.h>
#include "GLShared.h"
@ -121,7 +122,8 @@ public:
// Shutdown rendering and persist any required resources
void shutdown() override;
void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool primary, bool reset = false) override;
void updatePresentFrame(const Mat4& correction = Mat4(), bool primary = true) override;
void render(const Batch& batch) final override;
// This call synchronize the Full Backend cache with the current GLState
@ -177,10 +179,16 @@ public:
virtual void do_setModelTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setViewTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionJitter(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionJitterEnabled(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionJitterSequence(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionJitterScale(const Batch& batch, size_t paramOffset) final;
virtual void do_setViewportTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setDepthRangeTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_saveViewProjectionTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setSavedViewProjectionTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) = 0;
// Uniform Stage
virtual void do_setUniformBuffer(const Batch& batch, size_t paramOffset) final;
@ -299,8 +307,9 @@ protected:
virtual bool supportsBindless() const { return false; }
static const size_t INVALID_OFFSET = (size_t)-1;
bool _inRenderTransferPass{ false };
int _currentDraw{ -1 };
static const uint INVALID_SAVED_CAMERA_SLOT = (uint)-1;
bool _inRenderTransferPass { false };
int _currentDraw { -1 };
struct FrameTrash {
GLsync fence = nullptr;
@ -387,11 +396,13 @@ protected:
// between the time when a was recorded and the time(s) when it is
// executed
// Prev is the previous correction used at previous frame
struct CameraCorrection {
struct PresentFrame {
mat4 correction;
mat4 correctionInverse;
mat4 prevView;
mat4 prevViewInverse;
mat4 unflippedCorrection;
mat4 flippedCorrection;
bool mirrorViewCorrection { false };
};
struct TransformStageState {
@ -413,34 +424,60 @@ protected:
#endif
using TransformCameras = std::vector<CameraBufferElement>;
struct ViewProjectionState {
Transform _view;
Transform _correctedView;
Transform _previousCorrectedView;
Mat4 _projection;
Mat4 _previousProjection;
bool _viewIsCamera;
void copyExceptPrevious(const ViewProjectionState& other) {
_view = other._view;
_correctedView = other._correctedView;
_projection = other._projection;
_viewIsCamera = other._viewIsCamera;
}
};
struct SaveTransform {
ViewProjectionState _state;
size_t _cameraOffset { INVALID_OFFSET };
};
TransformCamera _camera;
TransformCameras _cameras;
std::array<SaveTransform, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT> _savedTransforms;
mutable std::map<std::string, GLvoid*> _drawCallInfoOffsets;
GLuint _objectBuffer{ 0 };
GLuint _cameraBuffer{ 0 };
GLuint _drawCallInfoBuffer{ 0 };
GLuint _objectBufferTexture{ 0 };
size_t _cameraUboSize{ 0 };
bool _viewIsCamera{ false };
bool _skybox{ false };
Transform _view;
CameraCorrection _correction;
bool _viewCorrectionEnabled{ true };
mat4 _unflippedCorrection;
mat4 _flippedCorrection;
bool _mirrorViewCorrection{ false };
GLuint _objectBuffer { 0 };
GLuint _cameraBuffer { 0 };
GLuint _drawCallInfoBuffer { 0 };
GLuint _objectBufferTexture { 0 };
size_t _cameraUboSize { 0 };
ViewProjectionState _viewProjectionState;
uint _currentSavedTransformSlot { INVALID_SAVED_CAMERA_SLOT };
bool _skybox { false };
PresentFrame _presentFrame;
bool _viewCorrectionEnabled { true };
Mat4 _projection;
Vec4i _viewport{ 0, 0, 1, 1 };
Vec2 _depthRange{ 0.0f, 1.0f };
Vec2 _projectionJitter{ 0.0f, 0.0f };
bool _invalidView{ false };
bool _invalidProj{ false };
bool _invalidViewport{ false };
struct Jitter {
std::vector<Vec2> _offsetSequence;
Vec2 _offset { 0.0f };
float _scale { 0.f };
unsigned int _currentSampleIndex { 0 };
bool _isEnabled { false };
};
bool _enabledDrawcallInfoBuffer{ false };
Jitter _projectionJitter;
Vec4i _viewport { 0, 0, 1, 1 };
Vec2 _depthRange { 0.0f, 1.0f };
bool _invalidView { false };
bool _invalidProj { false };
bool _invalidViewport { false };
bool _enabledDrawcallInfoBuffer { false };
using Pair = std::pair<size_t, size_t>;
using List = std::list<Pair>;
@ -448,11 +485,13 @@ protected:
mutable List::const_iterator _camerasItr;
mutable size_t _currentCameraOffset{ INVALID_OFFSET };
void preUpdate(size_t commandIndex, const StereoState& stereo, Vec2u framebufferSize);
void pushCameraBufferElement(const StereoState& stereo, const StereoState& prevStereo, TransformCameras& cameras) const;
void preUpdate(size_t commandIndex, const StereoState& stereo, const StereoState& prevStereo);
void update(size_t commandIndex, const StereoState& stereo) const;
void bindCurrentCamera(int stereoSide) const;
} _transform;
void preUpdateTransform();
virtual void transferTransformState(const Batch& batch) const = 0;
struct UniformStageState {
@ -522,25 +561,16 @@ protected:
PipelineReference _pipeline{};
GLuint _program{ 0 };
bool _cameraCorrection{ false };
GLShader* _programShader{ nullptr };
bool _invalidProgram{ false };
GLShader* _programShader { nullptr };
bool _invalidProgram { false };
BufferView _cameraCorrectionBuffer{ gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(CameraCorrection), nullptr)) };
BufferView _cameraCorrectionBufferIdentity{ gpu::BufferView(
std::make_shared<gpu::Buffer>(sizeof(CameraCorrection), nullptr)) };
State::Data _stateCache { State::DEFAULT };
State::Signature _stateSignatureCache { 0 };
State::Data _stateCache{ State::DEFAULT };
State::Signature _stateSignatureCache{ 0 };
GLState* _state { nullptr };
bool _invalidState { false };
GLState* _state{ nullptr };
bool _invalidState{ false };
PipelineStageState() {
_cameraCorrectionBuffer.edit<CameraCorrection>() = CameraCorrection();
_cameraCorrectionBufferIdentity.edit<CameraCorrection>() = CameraCorrection();
_cameraCorrectionBufferIdentity._buffer->flush();
}
PipelineStageState() {}
} _pipeline;
// Backend dependent compilation of the shader

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -37,7 +38,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) {
reset(_pipeline._pipeline);
_pipeline._program = 0;
_pipeline._cameraCorrection = false;
_pipeline._programShader = nullptr;
_pipeline._invalidProgram = true;
@ -63,7 +63,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) {
_pipeline._program = glprogram;
_pipeline._programShader = pipelineObject->_program;
_pipeline._invalidProgram = true;
_pipeline._cameraCorrection = pipelineObject->_cameraCorrection;
}
// Now for the state
@ -79,16 +78,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) {
// THis should be done on Pipeline::update...
if (_pipeline._invalidProgram) {
glUseProgram(_pipeline._program);
if (_pipeline._cameraCorrection) {
// Invalidate uniform buffer cache slot
_uniform._buffers[gpu::slot::buffer::CameraCorrection].reset();
auto& cameraCorrectionBuffer = _transform._viewCorrectionEnabled ?
_pipeline._cameraCorrectionBuffer._buffer :
_pipeline._cameraCorrectionBufferIdentity._buffer;
// Because we don't sync Buffers in the bindUniformBuffer, let s force this buffer synced
getBufferID(*cameraCorrectionBuffer);
bindUniformBuffer(gpu::slot::buffer::CameraCorrection, cameraCorrectionBuffer, 0, sizeof(CameraCorrection));
}
(void)CHECK_GL_ERROR();
_pipeline._invalidProgram = false;
}

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -18,20 +19,48 @@ void GLBackend::do_setModelTransform(const Batch& batch, size_t paramOffset) {
}
void GLBackend::do_setViewTransform(const Batch& batch, size_t paramOffset) {
_transform._view = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._viewIsCamera = batch._params[paramOffset + 1]._uint != 0;
_transform._viewProjectionState._view = batch._transforms.get(batch._params[paramOffset]._uint);
// View history is only supported with saved transforms and if setViewTransform is called (and not setSavedViewProjectionTransform)
// then, in consequence, the view will NOT be corrected in the present thread. In which case
// the previousCorrectedView should be the same as the view.
_transform._viewProjectionState._previousCorrectedView = _transform._viewProjectionState._view;
_transform._viewProjectionState._previousProjection = _transform._viewProjectionState._projection;
_transform._viewProjectionState._viewIsCamera = batch._params[paramOffset + 1]._uint != 0;
_transform._invalidView = true;
// The current view / proj doesn't correspond to a saved camera slot
_transform._currentSavedTransformSlot = INVALID_SAVED_CAMERA_SLOT;
}
void GLBackend::do_setProjectionTransform(const Batch& batch, size_t paramOffset) {
memcpy(glm::value_ptr(_transform._projection), batch.readData(batch._params[paramOffset]._uint), sizeof(Mat4));
memcpy(glm::value_ptr(_transform._viewProjectionState._projection), batch.readData(batch._params[paramOffset]._uint), sizeof(Mat4));
_transform._invalidProj = true;
// The current view / proj doesn't correspond to a saved camera slot
_transform._currentSavedTransformSlot = INVALID_SAVED_CAMERA_SLOT;
}
void GLBackend::do_setProjectionJitter(const Batch& batch, size_t paramOffset) {
_transform._projectionJitter.x = batch._params[paramOffset]._float;
_transform._projectionJitter.y = batch._params[paramOffset+1]._float;
void GLBackend::do_setProjectionJitterEnabled(const Batch& batch, size_t paramOffset) {
    // Only the lowest bit of the packed int parameter carries the flag.
    const bool enabled = (batch._params[paramOffset]._int & 1) == 1;
    _transform._projectionJitter._isEnabled = enabled;
    _transform._invalidProj = true;
    // Toggling jitter changes the effective projection, so whatever saved
    // camera slot was current no longer matches the live view/projection.
    _transform._currentSavedTransformSlot = INVALID_SAVED_CAMERA_SLOT;
}
void GLBackend::do_setProjectionJitterSequence(const Batch& batch, size_t paramOffset) {
    // Replace the whole jitter offset sequence with the one cached in the batch.
    const auto sampleCount = batch._params[paramOffset + 0]._uint;
    auto& jitter = _transform._projectionJitter;
    jitter._offsetSequence.resize(sampleCount);
    if (sampleCount == 0) {
        // Empty sequence: no jitter offset at all.
        jitter._offset = Vec2(0.0f);
    } else {
        memcpy(jitter._offsetSequence.data(), batch.readData(batch._params[paramOffset + 1]._uint),
               sampleCount * sizeof(Vec2));
        // Keep the current offset consistent with the (possibly wrapped) sample index.
        jitter._offset = jitter._offsetSequence[jitter._currentSampleIndex % sampleCount];
    }
}
void GLBackend::do_setProjectionJitterScale(const Batch& batch, size_t paramOffset) {
    // Clip space spans [-1, 1], so a full one-pixel jitter amplitude corresponds
    // to a factor of 2. Smaller scales reduce blur at the cost of more aliasing.
    const float amplitude = batch._params[paramOffset + 0]._float;
    _transform._projectionJitter._scale = amplitude * 2.0f;
}
void GLBackend::do_setViewportTransform(const Batch& batch, size_t paramOffset) {
@ -90,55 +119,80 @@ void GLBackend::syncTransformStateCache() {
Mat4 modelView;
auto modelViewInv = glm::inverse(modelView);
_transform._view.evalFromRawMatrix(modelViewInv);
_transform._viewProjectionState._view.evalFromRawMatrix(modelViewInv);
glDisableVertexAttribArray(gpu::Stream::DRAW_CALL_INFO);
_transform._enabledDrawcallInfoBuffer = false;
}
void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const StereoState& stereo, Vec2u framebufferSize) {
// Appends the camera data for the current view/projection state to the camera
// buffer: one packed stereo element (or two per-eye elements, depending on
// GPU_STEREO_CAMERA_BUFFER) in stereo mode, a single mono element otherwise.
// The previous frame's stereo state and corrected view are threaded through,
// presumably for temporal techniques that need last frame's matrices — confirm
// with the consumers of the camera buffer.
void GLBackend::TransformStageState::pushCameraBufferElement(const StereoState& stereo, const StereoState& prevStereo, TransformCameras& cameras) const {
    const float jitterAmplitude = _projectionJitter._scale;
    // Jitter is normalized by the viewport size and zeroed when disabled
    // (the bool-to-float multiply gates it without a branch).
    const Vec2 jitterScale = Vec2(jitterAmplitude * float(_projectionJitter._isEnabled & 1)) / Vec2(_viewport.z, _viewport.w);
    const Vec2 jitter = jitterScale * _projectionJitter._offset;

    if (stereo.isStereo()) {
#ifdef GPU_STEREO_CAMERA_BUFFER
        // Both eyes packed into one buffer element.
        cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, prevStereo, _viewProjectionState._correctedView,
                                                                   _viewProjectionState._previousCorrectedView, jitter),
                                              _camera.getEyeCamera(1, stereo, prevStereo, _viewProjectionState._correctedView,
                                                                   _viewProjectionState._previousCorrectedView, jitter)));
#else
        // One buffer element per eye.
        cameras.push_back((_camera.getEyeCamera(0, stereo, prevStereo, _viewProjectionState._correctedView,
                                                _viewProjectionState._previousCorrectedView, jitter)));
        cameras.push_back((_camera.getEyeCamera(1, stereo, prevStereo, _viewProjectionState._correctedView,
                                                _viewProjectionState._previousCorrectedView, jitter)));
#endif
    } else {
#ifdef GPU_STEREO_CAMERA_BUFFER
        cameras.push_back(CameraBufferElement(
            _camera.getMonoCamera(_skybox, _viewProjectionState._correctedView, _viewProjectionState._previousCorrectedView,
                                  _viewProjectionState._previousProjection, jitter)));
#else
        cameras.push_back((_camera.getMonoCamera(_skybox, _viewProjectionState._correctedView,
                                                 _viewProjectionState._previousCorrectedView, _viewProjectionState._previousProjection,
                                                 jitter)));
#endif
    }
}
// Forwards the backend's current command index and stereo states (current and
// previous frame) to the transform stage, which rebuilds the camera buffer
// entry if any view/projection/viewport flag is dirty.
void GLBackend::preUpdateTransform() {
    _transform.preUpdate(_commandIndex, _stereo, _prevStereo);
}
void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const StereoState& stereo, const StereoState& prevStereo) {
// Check all the dirty flags and update the state accordingly
if (_invalidViewport) {
_camera._viewport = glm::vec4(_viewport);
}
if (_invalidProj) {
_camera._projection = _projection;
_camera._projection = _viewProjectionState._projection;
}
if (_invalidView) {
// Apply the correction
if (_viewIsCamera && (_viewCorrectionEnabled && _correction.correction != glm::mat4())) {
// FIXME should I switch to using the camera correction buffer in Transform.slf and leave this out?
Transform result;
_view.mult(result, _view, _correction.correctionInverse);
if (_skybox) {
result.setTranslation(vec3());
}
_view = result;
if (_viewProjectionState._viewIsCamera && (_viewCorrectionEnabled && _presentFrame.correction != glm::mat4())) {
Transform::mult(_viewProjectionState._correctedView, _viewProjectionState._view, _presentFrame.correctionInverse);
} else {
_viewProjectionState._correctedView = _viewProjectionState._view;
}
if (_skybox) {
_viewProjectionState._correctedView.setTranslation(vec3());
}
// This is when the _view matrix gets assigned
_view.getInverseMatrix(_camera._view);
_viewProjectionState._correctedView.getInverseMatrix(_camera._view);
}
if (_invalidView || _invalidProj || _invalidViewport) {
size_t offset = _cameraUboSize * _cameras.size();
Vec2 finalJitter = _projectionJitter / Vec2(framebufferSize);
_cameraOffsets.push_back(TransformStageState::Pair(commandIndex, offset));
if (stereo.isStereo()) {
#ifdef GPU_STEREO_CAMERA_BUFFER
_cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, _view, finalJitter), _camera.getEyeCamera(1, stereo, _view, finalJitter)));
#else
_cameras.push_back((_camera.getEyeCamera(0, stereo, _view, finalJitter)));
_cameras.push_back((_camera.getEyeCamera(1, stereo, _view, finalJitter)));
#endif
} else {
#ifdef GPU_STEREO_CAMERA_BUFFER
_cameras.push_back(CameraBufferElement(_camera.getMonoCamera(_view, finalJitter)));
#else
_cameras.push_back((_camera.getMonoCamera(_view, finalJitter)));
#endif
pushCameraBufferElement(stereo, prevStereo, _cameras);
if (_currentSavedTransformSlot != INVALID_SAVED_CAMERA_SLOT) {
// Save the offset of the saved camera slot in the camera buffer. Can be used to copy
// that data, or (in the future) to reuse the offset.
_savedTransforms[_currentSavedTransformSlot]._cameraOffset = offset;
}
}
@ -177,3 +231,28 @@ void GLBackend::resetTransformStage() {
glDisableVertexAttribArray(gpu::Stream::DRAW_CALL_INFO);
_transform._enabledDrawcallInfoBuffer = false;
}
// Saves the current view/projection state into the requested save slot and
// records that slot as current, so its camera-buffer offset can be captured by
// the next preUpdate and later copied via do_copySavedViewProjectionTransformToBuffer.
void GLBackend::do_saveViewProjectionTransform(const Batch& batch, size_t paramOffset) {
    auto slotId = batch._params[paramOffset + 0]._uint;
    // Clamp to the last valid index: _savedTransforms holds exactly
    // MAX_TRANSFORM_SAVE_SLOT_COUNT entries, so clamping to the count itself
    // (as before) allowed a one-past-the-end, out-of-bounds access.
    slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT - 1);

    auto& savedTransform = _transform._savedTransforms[slotId];
    // The camera offset becomes valid only once preUpdate pushes a camera element.
    savedTransform._cameraOffset = INVALID_OFFSET;
    _transform._currentSavedTransformSlot = slotId;

    // If we are saving this transform to a save slot, then it means we are tracking the history of the view
    // so copy the previous corrected view to the transform state.
    _transform._viewProjectionState._previousCorrectedView = savedTransform._state._previousCorrectedView;
    _transform._viewProjectionState._previousProjection = savedTransform._state._previousProjection;
    preUpdateTransform();
    // Persist everything except the "previous" members, which keep their history.
    savedTransform._state.copyExceptPrevious(_transform._viewProjectionState);
}
// Restores a previously saved view/projection state and marks both view and
// projection dirty so the next preUpdate pushes a matching camera element.
void GLBackend::do_setSavedViewProjectionTransform(const Batch& batch, size_t paramOffset) {
    auto slotId = batch._params[paramOffset + 0]._uint;
    // Clamp to the last valid index: _savedTransforms holds exactly
    // MAX_TRANSFORM_SAVE_SLOT_COUNT entries, so clamping to the count itself
    // (as before) allowed a one-past-the-end, out-of-bounds read.
    slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT - 1);

    _transform._viewProjectionState = _transform._savedTransforms[slotId]._state;
    _transform._invalidView = true;
    _transform._invalidProj = true;
    _transform._currentSavedTransformSlot = slotId;
}

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2016/05/15
// Copyright 2013-2016 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -49,11 +50,6 @@ GLPipeline* GLPipeline::sync(GLBackend& backend, const Pipeline& pipeline) {
Backend::setGPUObject(pipeline, object);
}
// Special case for view correction matrices, any pipeline that declares the correction buffer
// uniform will automatically have it provided without any client code necessary.
// Required for stable lighting in the HMD.
auto reflection = shader->getReflection(backend.getShaderDialect(), backend.getShaderVariant());
object->_cameraCorrection = reflection.validUniformBuffer(gpu::slot::buffer::CameraCorrection);
object->_program = programObject;
object->_state = stateObject;

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2016/05/15
// Copyright 2013-2016 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -18,9 +19,6 @@ public:
GLShader* _program { nullptr };
GLState* _state { nullptr };
// Bit of a hack, any pipeline can need the camera correction buffer at execution time, so
// we store whether a given pipeline has declared the uniform buffer for it.
bool _cameraCorrection{ false };
};
} }

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -167,6 +168,8 @@ protected:
bool bindResourceBuffer(uint32_t slot, const BufferPointer& buffer) override;
void releaseResourceBuffer(uint32_t slot) override;
void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) override;
// Output stage
void do_blit(const Batch& batch, size_t paramOffset) override;

View file

@ -4,12 +4,15 @@
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GL41Backend.h"
#include "gpu/gl/GLBuffer.h"
using namespace gpu;
using namespace gpu::gl41;
@ -97,4 +100,34 @@ void GL41Backend::updateTransform(const Batch& batch) {
}
(void)CHECK_GL_ERROR();
}
}
// Copies the TransformCamera data recorded for a saved transform slot out of
// the internal camera UBO into a client buffer (GPU-to-GPU, no CPU round trip).
void GL41Backend::do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) {
    auto slotId = batch._params[paramOffset + 0]._uint;
    BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
    auto dstOffset = batch._params[paramOffset + 2]._uint;
    size_t size = _transform._cameraUboSize;

    // Clamp to the last valid index: _savedTransforms holds exactly
    // MAX_TRANSFORM_SAVE_SLOT_COUNT entries, so clamping to the count itself
    // (as before) read one element past the end of the array.
    slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT - 1);
    const auto& savedTransform = _transform._savedTransforms[slotId];

    if (savedTransform._cameraOffset == INVALID_OFFSET) {
        qCWarning(gpugllogging) << "Saved TransformCamera data has an invalid transform offset. Copy aborted.";
        return;
    }

    // Clamp the copy size so we never write past the end of the destination buffer.
    if ((dstOffset + size) > buffer->getBufferCPUMemSize()) {
        qCWarning(gpugllogging) << "Copying saved TransformCamera data out of bounds of uniform buffer";
        size = (size_t)std::max<ptrdiff_t>((ptrdiff_t)buffer->getBufferCPUMemSize() - (ptrdiff_t)dstOffset, 0);
    }

    // Sync the destination BufferObject, then copy from the camera UBO at the
    // slot's recorded offset using the generic copy-buffer binding points.
    auto* object = syncGPUObject(*buffer);
    if (object) {
        glBindBuffer(GL_COPY_READ_BUFFER, _transform._cameraBuffer);
        glBindBuffer(GL_COPY_WRITE_BUFFER, object->_buffer);
        glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, savedTransform._cameraOffset, dstOffset, size);
        glBindBuffer(GL_COPY_READ_BUFFER, 0);
        glBindBuffer(GL_COPY_WRITE_BUFFER, 0);
        (void)CHECK_GL_ERROR();
    }
}

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -269,6 +270,8 @@ protected:
bool bindResourceBuffer(uint32_t slot, const BufferPointer& buffer) override;
void releaseResourceBuffer(uint32_t slot) override;
void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) override;
// Output stage
void do_blit(const Batch& batch, size_t paramOffset) override;

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 1/19/2015.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -167,7 +168,7 @@ public:
glSamplerParameteri(result, GL_TEXTURE_WRAP_T, GLTexture::WRAP_MODES[sampler.getWrapModeV()]);
glSamplerParameteri(result, GL_TEXTURE_WRAP_R, GLTexture::WRAP_MODES[sampler.getWrapModeW()]);
glSamplerParameterf(result, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
glSamplerParameterf(result, GL_TEXTURE_MAX_ANISOTROPY, sampler.getMaxAnisotropy());
glSamplerParameterfv(result, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
glSamplerParameterf(result, GL_TEXTURE_MIN_LOD, sampler.getMinMip());
@ -314,7 +315,7 @@ void GL45Texture::syncSampler() const {
glTextureParameteri(_id, GL_TEXTURE_WRAP_T, WRAP_MODES[sampler.getWrapModeV()]);
glTextureParameteri(_id, GL_TEXTURE_WRAP_R, WRAP_MODES[sampler.getWrapModeW()]);
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY, sampler.getMaxAnisotropy());
glTextureParameterfv(_id, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, sampler.getMinMip());

View file

@ -4,12 +4,15 @@
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GL45Backend.h"
#include "gpu/gl/GLBuffer.h"
using namespace gpu;
using namespace gpu::gl45;
@ -101,4 +104,30 @@ void GL45Backend::updateTransform(const Batch& batch) {
}
(void)CHECK_GL_ERROR();
}
}
// Copies the TransformCamera data recorded for a saved transform slot out of
// the internal camera UBO into a client buffer, using the GL 4.5 direct-state-access
// copy entry point (no bind points needed).
void GL45Backend::do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) {
    auto slotId = batch._params[paramOffset + 0]._uint;
    BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
    auto dstOffset = batch._params[paramOffset + 2]._uint;
    size_t size = _transform._cameraUboSize;

    // Clamp to the last valid index: _savedTransforms holds exactly
    // MAX_TRANSFORM_SAVE_SLOT_COUNT entries, so clamping to the count itself
    // (as before) read one element past the end of the array.
    slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT - 1);
    const auto& savedTransform = _transform._savedTransforms[slotId];

    if (savedTransform._cameraOffset == INVALID_OFFSET) {
        qCWarning(gpugllogging) << "Saved TransformCamera data has an invalid transform offset. Copy aborted.";
        return;
    }

    // Clamp the copy size so we never write past the end of the destination buffer.
    if ((dstOffset + size) > buffer->getBufferCPUMemSize()) {
        qCWarning(gpugllogging) << "Copying saved TransformCamera data out of bounds of uniform buffer";
        size = (size_t)std::max<ptrdiff_t>((ptrdiff_t)buffer->getBufferCPUMemSize() - (ptrdiff_t)dstOffset, 0);
    }

    // Sync the destination BufferObject, then copy GPU-to-GPU via DSA.
    auto* object = syncGPUObject(*buffer);
    if (object) {
        glCopyNamedBufferSubData(_transform._cameraBuffer, object->_buffer, savedTransform._cameraOffset, dstOffset, size);
        (void)CHECK_GL_ERROR();
    }
}

View file

@ -164,6 +164,8 @@ protected:
bool bindResourceBuffer(uint32_t slot, const BufferPointer& buffer) override;
void releaseResourceBuffer(uint32_t slot) override;
void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) override;
// Output stage
void do_blit(const Batch& batch, size_t paramOffset) override;

View file

@ -10,6 +10,8 @@
//
#include "GLESBackend.h"
#include "gpu/gl/GLBuffer.h"
using namespace gpu;
using namespace gpu::gles;
@ -99,4 +101,34 @@ void GLESBackend::updateTransform(const Batch& batch) {
}
(void)CHECK_GL_ERROR();
}
}
// Copies the TransformCamera data recorded for a saved transform slot out of
// the internal camera UBO into a client buffer (GPU-to-GPU, no CPU round trip).
void GLESBackend::do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) {
    auto slotId = batch._params[paramOffset + 0]._uint;
    BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
    auto dstOffset = batch._params[paramOffset + 2]._uint;
    size_t size = _transform._cameraUboSize;

    // Clamp to the last valid index: _savedTransforms holds exactly
    // MAX_TRANSFORM_SAVE_SLOT_COUNT entries, so clamping to the count itself
    // (as before) read one element past the end of the array.
    slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT - 1);
    const auto& savedTransform = _transform._savedTransforms[slotId];

    if (savedTransform._cameraOffset == INVALID_OFFSET) {
        qCWarning(gpugllogging) << "Saved TransformCamera data has an invalid transform offset. Copy aborted.";
        return;
    }

    // Clamp the copy size so we never write past the end of the destination buffer.
    if ((dstOffset + size) > buffer->getBufferCPUMemSize()) {
        qCWarning(gpugllogging) << "Copying saved TransformCamera data out of bounds of uniform buffer";
        size = (size_t)std::max<ptrdiff_t>((ptrdiff_t)buffer->getBufferCPUMemSize() - (ptrdiff_t)dstOffset, 0);
    }

    // Sync the destination BufferObject, then copy from the camera UBO at the
    // slot's recorded offset using the generic copy-buffer binding points.
    auto* object = syncGPUObject(*buffer);
    if (object) {
        glBindBuffer(GL_COPY_READ_BUFFER, _transform._cameraBuffer);
        glBindBuffer(GL_COPY_WRITE_BUFFER, object->_buffer);
        glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, savedTransform._cameraOffset, dstOffset, size);
        glBindBuffer(GL_COPY_READ_BUFFER, 0);
        glBindBuffer(GL_COPY_WRITE_BUFFER, 0);
        (void)CHECK_GL_ERROR();
    }
}

View file

@ -0,0 +1,126 @@
//
// Backend.cpp
// interface/src/gpu
//
// Created by Olivier Prat on 05/25/2018.
// Copyright 2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Backend.h"
using namespace gpu;
// Counters for Buffer and Texture usage in GPU/Context
ContextMetricSize Backend::freeGPUMemSize;
ContextMetricCount Backend::bufferCount;
ContextMetricSize Backend::bufferGPUMemSize;
ContextMetricCount Backend::textureResidentCount;
ContextMetricCount Backend::textureFramebufferCount;
ContextMetricCount Backend::textureResourceCount;
ContextMetricCount Backend::textureExternalCount;
ContextMetricSize Backend::textureResidentGPUMemSize;
ContextMetricSize Backend::textureFramebufferGPUMemSize;
ContextMetricSize Backend::textureResourceGPUMemSize;
ContextMetricSize Backend::textureExternalGPUMemSize;
ContextMetricCount Backend::texturePendingGPUTransferCount;
ContextMetricSize Backend::texturePendingGPUTransferMemSize;
ContextMetricSize Backend::textureResourcePopulatedGPUMemSize;
ContextMetricSize Backend::textureResourceIdealGPUMemSize;
// Records the stereo state for the upcoming frame while keeping the previous
// frame's state in _prevStereo — presumably for temporal techniques that need
// last frame's eye views/projections (see getEyeCamera); confirm with callers.
void Backend::setStereoState(const StereoState& stereo) {
    _prevStereo = _stereo;
    _stereo = stereo;
}
// Derives a per-eye TransformCamera from this mono camera: offsets the (current
// and previous) views by the eye translation, substitutes the per-eye
// projections, applies the normalized jitter to both projections, and
// recomputes all derived matrices. `normalizedJitter` is expected to already be
// divided by the framebuffer size (see the declaration in Backend.h).
Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye,
                                                                const StereoState& stereo,
                                                                const StereoState& prevStereo,
                                                                const Transform& view,
                                                                const Transform& previousView,
                                                                Vec2 normalizedJitter) const {
    TransformCamera result = *this;
    Transform eyeView = view;
    Transform eyePreviousView = previousView;
    if (!stereo._skybox) {
        // Shift each view by its frame's eye offset (4th column of the eye view matrix).
        eyeView.postTranslate(-Vec3(stereo._eyeViews[eye][3]));
        eyePreviousView.postTranslate(-Vec3(prevStereo._eyeViews[eye][3]));
    } else {
        // FIXME: If "skybox" the ipd is set to 0 for now, let s try to propose a better solution for this in the future
        eyePreviousView.setTranslation(vec3());
    }
    result._projection = stereo._eyeProjections[eye];
    Mat4 previousProjection = prevStereo._eyeProjections[eye];

    // Apply jitter to projections
    // We divided by the framebuffer size, which was double-sized, to normalize the jitter, but we want a normal amount of jitter
    // for each eye, so we multiply by 2 to get back to normal
    //normalizedJitter.x *= 2.0f;
    result._projection[2][0] += normalizedJitter.x;
    result._projection[2][1] += normalizedJitter.y;
    previousProjection[2][0] += normalizedJitter.x;
    previousProjection[2][1] += normalizedJitter.y;

    result.recomputeDerived(eyeView, eyePreviousView, previousProjection);

    // _stereoInfo: x = stereo flag, y = eye index, zw = reciprocal viewport size.
    result._stereoInfo = Vec4(1.0f, (float)eye, 1.0f / result._viewport.z, 1.0f / result._viewport.w);

    return result;
}
// Derives a mono TransformCamera from this one: applies the normalized jitter
// to the current and previous projections and recomputes derived matrices.
// `previousView`/`previousProjection` are taken by value because they may be
// modified locally. `normalizedJitter` is expected to already be divided by the
// framebuffer size (see the declaration in Backend.h).
Backend::TransformCamera Backend::TransformCamera::getMonoCamera(bool isSkybox,
                                                                 const Transform& view,
                                                                 Transform previousView,
                                                                 Mat4 previousProjection,
                                                                 Vec2 normalizedJitter) const {
    TransformCamera result = *this;

    if (isSkybox) {
        // Skybox rendering ignores camera translation; the current view's
        // translation is presumably zeroed upstream — confirm with preUpdate.
        previousView.setTranslation(vec3());
    }
    // Jitter offsets the projection's third column (a clip-space sub-pixel shift).
    result._projection[2][0] += normalizedJitter.x;
    result._projection[2][1] += normalizedJitter.y;
    previousProjection[2][0] += normalizedJitter.x;
    previousProjection[2][1] += normalizedJitter.y;
    result.recomputeDerived(view, previousView, previousProjection);

    // _stereoInfo: x = stereo flag (0 for mono), zw = reciprocal viewport size.
    result._stereoInfo = Vec4(0.0f, 0.0f, 1.0f / result._viewport.z, 1.0f / result._viewport.w);
    return result;
}
// Recomputes every derived matrix of this camera (all mutable members) from the
// given current/previous views and the previous projection: inverses, the
// untranslated projection*view products, and resets _stereoInfo. Returns *this
// for chaining. Declared const; only mutable members are written.
const Backend::TransformCamera& Backend::TransformCamera::recomputeDerived(const Transform& view,
                                                                           const Transform& previousView,
                                                                           const Mat4& previousProjection) const {
    _projectionInverse = glm::inverse(_projection);

    // The Transform passed to the gpu::Batch is the eye-to-world transform,
    // i.e. the "_viewInverse" fed to the shader; generate "_view" (world-to-eye)
    // by inverting it. Same for the previous frame's pair.
    view.getMatrix(_viewInverse);
    _view = glm::inverse(_viewInverse);
    previousView.getMatrix(_previousViewInverse);
    _previousView = glm::inverse(_previousViewInverse);

    // Projection * view with the translation stripped (rotation-only view),
    // for both the current and previous frame.
    Mat4 viewUntranslated = _view;
    viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f);
    _projectionViewUntranslated = _projection * viewUntranslated;

    viewUntranslated = _previousView;
    viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f);
    _previousProjectionViewUntranslated = previousProjection * viewUntranslated;

    // Cleared here; getMonoCamera/getEyeCamera overwrite it afterwards.
    _stereoInfo = Vec4(0.0f);

    return *this;
}

View file

@ -0,0 +1,141 @@
//
// Backend.h
// interface/src/gpu
//
// Created by Olivier Prat on 05/18/2018.
// Copyright 2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_Backend_h
#define hifi_gpu_Backend_h
#include <GLMHelpers.h>
#include "Forward.h"
#include "Batch.h"
#include "Buffer.h"
#include "Framebuffer.h"
class QImage;
namespace gpu {
class Context;
// Per-frame GPU context statistics, grouped by pipeline stage prefix:
// IS = input stage, RS = resource stage, DS = draw stage, PS = pipeline stage.
struct ContextStats {
public:
    int _ISNumFormatChanges = 0;        // input-format switches
    int _ISNumInputBufferChanges = 0;   // vertex/input buffer rebinds
    int _ISNumIndexBufferChanges = 0;   // index buffer rebinds

    int _RSNumResourceBufferBounded = 0;  // resource buffers bound
    int _RSNumTextureBounded = 0;         // textures bound
    int _RSAmountTextureMemoryBounded = 0;  // texture memory bound (units per backend — confirm)

    int _DSNumAPIDrawcalls = 0;  // draw calls issued to the graphics API
    int _DSNumDrawcalls = 0;     // logical draw calls
    int _DSNumTriangles = 0;     // triangles submitted

    int _PSNumSetPipelines = 0;  // pipeline changes

    ContextStats() {}
    ContextStats(const ContextStats& stats) = default;

    // Computes this = end - begin for each counter (delta over an interval).
    void evalDelta(const ContextStats& begin, const ContextStats& end);
};
// Abstract GPU backend interface: executes recorded gpu::Batch command streams
// against a concrete graphics API (see the GL backends). Also owns the
// per-frame stereo state and the TransformCamera math shared with the shaders.
class Backend {
public:
    virtual ~Backend() {}

    virtual void shutdown() {}
    virtual const std::string& getVersion() const = 0;

    // Records the new stereo state, preserving the previous frame's in _prevStereo.
    void setStereoState(const StereoState& stereo);

    virtual void render(const Batch& batch) = 0;
    virtual void syncCache() = 0;
    virtual void syncProgram(const gpu::ShaderPointer& program) = 0;
    virtual void recycle() const = 0;
    virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) = 0;
    // Called once per presented frame with the view correction to apply
    // (identity by default); semantics of `primary` depend on the implementation.
    virtual void updatePresentFrame(const Mat4& correction = Mat4(), bool primary = true) = 0;

    virtual bool supportedTextureFormat(const gpu::Element& format) = 0;

    // Shared header between C++ and GLSL
#include "TransformCamera_shared.slh"

    // C++-side wrapper over the shader-shared _TransformCamera layout, adding
    // the math to derive per-eye / mono cameras and their derived matrices.
    class TransformCamera : public _TransformCamera {
    public:
        const Backend::TransformCamera& recomputeDerived(const Transform& view, const Transform& previousView, const Mat4& previousProjection) const;
        // Jitter should be divided by framebuffer size
        TransformCamera getMonoCamera(bool isSkybox, const Transform& view, Transform previousView, Mat4 previousProjection, Vec2 normalizedJitter) const;
        // Jitter should be divided by framebuffer size
        TransformCamera getEyeCamera(int eye, const StereoState& stereo, const StereoState& prevStereo, const Transform& view, const Transform& previousView,
            Vec2 normalizedJitter) const;
    };

    // Attach/retrieve the backend-specific GPU object cached on a frontend object.
    template <typename T, typename U>
    static void setGPUObject(const U& object, T* gpuObject) {
        object.gpuObject.setGPUObject(gpuObject);
    }
    template <typename T, typename U>
    static T* getGPUObject(const U& object) {
        return reinterpret_cast<T*>(object.gpuObject.getGPUObject());
    }

    void resetStats() const { _stats = ContextStats(); }
    void getStats(ContextStats& stats) const { stats = _stats; }

    virtual bool isTextureManagementSparseEnabled() const = 0;

    // These should only be accessed by Backend implementation to report the buffer and texture allocations,
    // they are NOT public objects
    static ContextMetricSize freeGPUMemSize;

    static ContextMetricCount bufferCount;
    static ContextMetricSize bufferGPUMemSize;

    static ContextMetricCount textureResidentCount;
    static ContextMetricCount textureFramebufferCount;
    static ContextMetricCount textureResourceCount;
    static ContextMetricCount textureExternalCount;

    static ContextMetricSize textureResidentGPUMemSize;
    static ContextMetricSize textureFramebufferGPUMemSize;
    static ContextMetricSize textureResourceGPUMemSize;
    static ContextMetricSize textureExternalGPUMemSize;

    static ContextMetricCount texturePendingGPUTransferCount;
    static ContextMetricSize texturePendingGPUTransferMemSize;

    static ContextMetricSize textureResourcePopulatedGPUMemSize;
    static ContextMetricSize textureResourceIdealGPUMemSize;

protected:
    virtual bool isStereo() const {
        return _stereo.isStereo();
    }

    void getStereoProjections(mat4* eyeProjections) const {
        for (int i = 0; i < 2; ++i) {
            eyeProjections[i] = _stereo._eyeProjections[i];
        }
    }

    void getStereoViews(mat4* eyeViews) const {
        for (int i = 0; i < 2; ++i) {
            eyeViews[i] = _stereo._eyeViews[i];
        }
    }

    friend class Context;
    mutable ContextStats _stats;
    StereoState _stereo;
    // Previous frame's stereo state (set by setStereoState).
    StereoState _prevStereo;
};
}
#endif

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/14/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -53,6 +54,7 @@ Batch::Batch(const std::string& name) {
_data.reserve(_dataMax);
_objects.reserve(_objectsMax);
_drawCallInfos.reserve(_drawCallInfosMax);
_mustUpdatePreviousModels = true;
}
Batch::~Batch() {
@ -101,17 +103,18 @@ void Batch::clear() {
_currentModel = Transform();
_drawcallUniform = 0;
_drawcallUniformReset = 0;
_projectionJitter = glm::vec2(0.0f);
_enableStereo = true;
_enableSkybox = false;
_mustUpdatePreviousModels = true;
}
size_t Batch::cacheData(size_t size, const void* data) {
size_t offset = _data.size();
size_t numBytes = size;
_data.resize(offset + numBytes);
memcpy(_data.data() + offset, data, size);
if (data) {
memcpy(_data.data() + offset, data, size);
}
return offset;
}
@ -236,6 +239,15 @@ void Batch::setModelTransform(const Transform& model) {
ADD_COMMAND(setModelTransform);
_currentModel = model;
_previousModel = model;
_invalidModel = true;
}
void Batch::setModelTransform(const Transform& model, const Transform& previousModel) {
ADD_COMMAND(setModelTransform);
_currentModel = model;
_previousModel = previousModel;
_invalidModel = true;
}
@ -252,20 +264,29 @@ void Batch::setProjectionTransform(const Mat4& proj) {
_params.emplace_back(cacheData(sizeof(Mat4), &proj));
}
void Batch::setProjectionJitter(float jx, float jy) {
_projectionJitter.x = jx;
_projectionJitter.y = jy;
pushProjectionJitter(jx, jy);
void Batch::setProjectionJitterEnabled(bool isProjectionEnabled) {
_isJitterOnProjectionEnabled = isProjectionEnabled;
pushProjectionJitterEnabled(_isJitterOnProjectionEnabled);
}
void Batch::pushProjectionJitter(float jx, float jy) {
ADD_COMMAND(setProjectionJitter);
_params.emplace_back(jx);
_params.emplace_back(jy);
void Batch::pushProjectionJitterEnabled(bool isProjectionEnabled) {
ADD_COMMAND(setProjectionJitterEnabled);
_params.emplace_back(isProjectionEnabled & 1);
}
void Batch::popProjectionJitter() {
pushProjectionJitter(_projectionJitter.x, _projectionJitter.y);
void Batch::popProjectionJitterEnabled() {
pushProjectionJitterEnabled(_isJitterOnProjectionEnabled);
}
void Batch::setProjectionJitterSequence(const Vec2* sequence, size_t count) {
ADD_COMMAND(setProjectionJitterSequence);
_params.emplace_back((uint)count);
_params.emplace_back(cacheData(sizeof(Vec2) * count, sequence));
}
void Batch::setProjectionJitterScale(float scale) {
ADD_COMMAND(setProjectionJitterScale);
_params.emplace_back(scale);
}
void Batch::setViewportTransform(const Vec4i& viewport) {
@ -281,6 +302,34 @@ void Batch::setDepthRangeTransform(float nearDepth, float farDepth) {
_params.emplace_back(nearDepth);
}
void Batch::saveViewProjectionTransform(uint saveSlot) {
ADD_COMMAND(saveViewProjectionTransform);
if (saveSlot >= MAX_TRANSFORM_SAVE_SLOT_COUNT) {
qCWarning(gpulogging) << "Transform save slot" << saveSlot << "exceeds max save slot count of" << MAX_TRANSFORM_SAVE_SLOT_COUNT;
}
_params.emplace_back(saveSlot);
}
void Batch::setSavedViewProjectionTransform(uint saveSlot) {
ADD_COMMAND(setSavedViewProjectionTransform);
if (saveSlot >= MAX_TRANSFORM_SAVE_SLOT_COUNT) {
qCWarning(gpulogging) << "Transform save slot" << saveSlot << "exceeds max save slot count of"
<< MAX_TRANSFORM_SAVE_SLOT_COUNT;
}
_params.emplace_back(saveSlot);
}
void Batch::copySavedViewProjectionTransformToBuffer(uint saveSlot, const BufferPointer& buffer, Offset offset) {
ADD_COMMAND(copySavedViewProjectionTransformToBuffer);
if (saveSlot >= MAX_TRANSFORM_SAVE_SLOT_COUNT) {
qCWarning(gpulogging) << "Transform save slot" << saveSlot << "exceeds max save slot count of"
<< MAX_TRANSFORM_SAVE_SLOT_COUNT;
}
_params.emplace_back(saveSlot);
_params.emplace_back(_buffers.cache(buffer));
_params.emplace_back(offset);
}
void Batch::setPipeline(const PipelinePointer& pipeline) {
ADD_COMMAND(setPipeline);
@ -554,12 +603,15 @@ void Batch::captureDrawCallInfoImpl() {
if (_invalidModel) {
TransformObject object;
_currentModel.getMatrix(object._model);
_previousModel.getMatrix(object._previousModel);
// FIXME - we don't want to be using glm::inverse() here but it fixes the flickering issue we are
// seeing with planky blocks in toybox. Our implementation of getInverseMatrix() is buggy in cases
// of non-uniform scale. We need to fix that. In the mean time, glm::inverse() works.
//_model.getInverseMatrix(_object._modelInverse);
//_previousModel.getInverseMatrix(_object._previousModelInverse);
object._modelInverse = glm::inverse(object._model);
object._previousModelInverse = glm::inverse(object._previousModel);
_objects.emplace_back(object);
@ -757,4 +809,4 @@ void Batch::flush() {
}
buffer->flush();
}
}
}

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/14/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -42,6 +43,13 @@ class Batch {
public:
typedef Stream::Slot Slot;
enum {
// This is tied to RenderMirrorTask::MAX_MIRROR_DEPTH and RenderMirrorTask::MAX_MIRRORS_PER_LEVEL
// We have 1 view at mirror depth 0, 3 more at mirror depth 1, 9 more at mirror depth 2, and 27 more at mirror depth 3
// For each view, we have one slot for the background and one for the primary view, and that's all repeated for the secondary camera
// So this is 2 slots/view/camera * 2 cameras * (1 + 3 + 9 + 27) views
MAX_TRANSFORM_SAVE_SLOT_COUNT = 160
};
class DrawCallInfo {
public:
@ -151,20 +159,20 @@ public:
// multi command desctription for multiDrawIndexedIndirect
class DrawIndirectCommand {
public:
uint _count{ 0 };
uint _instanceCount{ 0 };
uint _firstIndex{ 0 };
uint _baseInstance{ 0 };
uint _count { 0 };
uint _instanceCount { 0 };
uint _firstIndex { 0 };
uint _baseInstance { 0 };
};
// multi command desctription for multiDrawIndexedIndirect
class DrawIndexedIndirectCommand {
public:
uint _count{ 0 };
uint _instanceCount{ 0 };
uint _firstIndex{ 0 };
uint _baseVertex{ 0 };
uint _baseInstance{ 0 };
uint _count { 0 };
uint _instanceCount { 0 };
uint _firstIndex { 0 };
uint _baseVertex { 0 };
uint _baseInstance { 0 };
};
// Transform Stage
@ -174,17 +182,24 @@ public:
// WARNING: ViewTransform transform from eye space to world space, its inverse is composed
// with the ModelTransform to create the equivalent of the gl ModelViewMatrix
void setModelTransform(const Transform& model);
void setModelTransform(const Transform& model, const Transform& previousModel);
void resetViewTransform() { setViewTransform(Transform(), false); }
void setViewTransform(const Transform& view, bool camera = true);
void setProjectionTransform(const Mat4& proj);
void setProjectionJitter(float jx = 0.0f, float jy = 0.0f);
void setProjectionJitterEnabled(bool isProjectionEnabled);
void setProjectionJitterSequence(const Vec2* sequence, size_t count);
void setProjectionJitterScale(float scale);
// Very simple 1 level stack management of jitter.
void pushProjectionJitter(float jx = 0.0f, float jy = 0.0f);
void popProjectionJitter();
void pushProjectionJitterEnabled(bool isProjectionEnabled);
void popProjectionJitterEnabled();
// Viewport is xy = low left corner in framebuffer, zw = width height of the viewport, expressed in pixels
void setViewportTransform(const Vec4i& viewport);
void setDepthRangeTransform(float nearDepth, float farDepth);
void saveViewProjectionTransform(uint saveSlot);
void setSavedViewProjectionTransform(uint saveSlot);
void copySavedViewProjectionTransformToBuffer(uint saveSlot, const BufferPointer& buffer, Offset offset);
// Pipeline Stage
void setPipeline(const PipelinePointer& pipeline);
@ -202,7 +217,7 @@ public:
void setResourceTexture(uint32 slot, const TexturePointer& texture);
void setResourceTexture(uint32 slot, const TextureView& view); // not a command, just a shortcut from a TextureView
void setResourceTextureTable(const TextureTablePointer& table, uint32 slot = 0);
void setResourceFramebufferSwapChainTexture(uint32 slot, const FramebufferSwapChainPointer& framebuffer, unsigned int swpaChainIndex, unsigned int renderBufferSlot = 0U); // not a command, just a shortcut from a TextureView
void setResourceFramebufferSwapChainTexture(uint32 slot, const FramebufferSwapChainPointer& framebuffer, unsigned int swapChainIndex, unsigned int renderBufferSlot = 0U); // not a command, just a shortcut from a TextureView
// Ouput Stage
void setFramebuffer(const FramebufferPointer& framebuffer);
@ -312,10 +327,16 @@ public:
COMMAND_setModelTransform,
COMMAND_setViewTransform,
COMMAND_setProjectionTransform,
COMMAND_setProjectionJitter,
COMMAND_setProjectionJitterEnabled,
COMMAND_setProjectionJitterSequence,
COMMAND_setProjectionJitterScale,
COMMAND_setViewportTransform,
COMMAND_setDepthRangeTransform,
COMMAND_saveViewProjectionTransform,
COMMAND_setSavedViewProjectionTransform,
COMMAND_copySavedViewProjectionTransformToBuffer,
COMMAND_setPipeline,
COMMAND_setStateBlendFactor,
COMMAND_setStateScissorRect,
@ -497,17 +518,14 @@ public:
Bytes _data;
static size_t _dataMax;
// SSBO class... layout MUST match the layout in Transform.slh
class TransformObject {
public:
Mat4 _model;
Mat4 _modelInverse;
};
#include "TransformObject_shared.slh"
using TransformObjects = std::vector<TransformObject>;
bool _invalidModel { true };
Transform _currentModel;
TransformObjects _objects;
Transform _previousModel;
mutable bool _mustUpdatePreviousModels;
mutable TransformObjects _objects;
static size_t _objectsMax;
BufferCaches _buffers;
@ -525,11 +543,12 @@ public:
NamedBatchDataMap _namedData;
uint16_t _drawcallUniform{ 0 };
uint16_t _drawcallUniformReset{ 0 };
bool _isJitterOnProjectionEnabled { false };
glm::vec2 _projectionJitter{ 0.0f, 0.0f };
bool _enableStereo{ true };
uint16_t _drawcallUniform { 0 };
uint16_t _drawcallUniformReset { 0 };
bool _enableStereo { true };
bool _enableSkybox { false };
protected:
@ -558,7 +577,7 @@ protected:
template <typename T>
size_t Batch::Cache<T>::_max = BATCH_PREALLOCATE_MIN;
}
} // namespace gpu
#if defined(NSIGHT_FOUND)

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -217,74 +218,6 @@ double Context::getFrameTimerBatchAverage() const {
return 0.0;
}
const Backend::TransformCamera& Backend::TransformCamera::recomputeDerived(const Transform& xformView) const {
_projectionInverse = glm::inverse(_projection);
// Get the viewEyeToWorld matrix from the transformView as passed to the gpu::Batch
// this is the "_viewInverse" fed to the shader
// Genetrate the "_view" matrix as well from the xform
xformView.getMatrix(_viewInverse);
_view = glm::inverse(_viewInverse);
Mat4 viewUntranslated = _view;
viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f);
_projectionViewUntranslated = _projection * viewUntranslated;
_stereoInfo = Vec4(0.0f);
return *this;
}
Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const StereoState& _stereo, const Transform& xformView, Vec2 normalizedJitter) const {
TransformCamera result = *this;
Transform offsetTransform = xformView;
if (!_stereo._skybox) {
offsetTransform.postTranslate(-Vec3(_stereo._eyeViews[eye][3]));
} else {
// FIXME: If "skybox" the ipd is set to 0 for now, let s try to propose a better solution for this in the future
}
result._projection = _stereo._eyeProjections[eye];
normalizedJitter.x *= 2.0f;
result._projection[2][0] += normalizedJitter.x;
result._projection[2][1] += normalizedJitter.y;
result.recomputeDerived(offsetTransform);
result._stereoInfo = Vec4(1.0f, (float)eye, 0.0f, 0.0f);
return result;
}
Backend::TransformCamera Backend::TransformCamera::getMonoCamera(const Transform& xformView, Vec2 normalizedJitter) const {
TransformCamera result = *this;
result._projection[2][0] += normalizedJitter.x;
result._projection[2][1] += normalizedJitter.y;
result.recomputeDerived(xformView);
return result;
}
// Counters for Buffer and Texture usage in GPU/Context
ContextMetricSize Backend::freeGPUMemSize;
ContextMetricCount Backend::bufferCount;
ContextMetricSize Backend::bufferGPUMemSize;
ContextMetricCount Backend::textureResidentCount;
ContextMetricCount Backend::textureFramebufferCount;
ContextMetricCount Backend::textureResourceCount;
ContextMetricCount Backend::textureExternalCount;
ContextMetricSize Backend::textureResidentGPUMemSize;
ContextMetricSize Backend::textureFramebufferGPUMemSize;
ContextMetricSize Backend::textureResourceGPUMemSize;
ContextMetricSize Backend::textureExternalGPUMemSize;
ContextMetricCount Backend::texturePendingGPUTransferCount;
ContextMetricSize Backend::texturePendingGPUTransferMemSize;
ContextMetricSize Backend::textureResourcePopulatedGPUMemSize;
ContextMetricSize Backend::textureResourceIdealGPUMemSize;
Size Context::getFreeGPUMemSize() {
return Backend::freeGPUMemSize.getValue();
}

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -15,131 +16,14 @@
#include <mutex>
#include <queue>
#include <GLMHelpers.h>
#include "Forward.h"
#include "Batch.h"
#include "Buffer.h"
#include "Texture.h"
#include "Pipeline.h"
#include "Framebuffer.h"
#include "Frame.h"
#include "PointerStorage.h"
class QImage;
#include "Backend.h"
namespace gpu {
struct ContextStats {
public:
uint32_t _ISNumFormatChanges { 0 };
uint32_t _ISNumInputBufferChanges { 0 };
uint32_t _ISNumIndexBufferChanges { 0 };
uint32_t _RSNumResourceBufferBounded { 0 };
uint32_t _RSNumTextureBounded { 0 };
uint64_t _RSAmountTextureMemoryBounded { 0 };
uint32_t _DSNumAPIDrawcalls { 0 };
uint32_t _DSNumDrawcalls { 0 };
uint32_t _DSNumTriangles { 0 };
uint32_t _PSNumSetPipelines { 0 };
ContextStats() {}
ContextStats(const ContextStats& stats) = default;
void evalDelta(const ContextStats& begin, const ContextStats& end);
};
class Backend {
public:
virtual ~Backend(){};
virtual void shutdown() {}
virtual const std::string& getVersion() const = 0;
void setStereoState(const StereoState& stereo) { _stereo = stereo; }
virtual void render(const Batch& batch) = 0;
virtual void syncCache() = 0;
virtual void syncProgram(const gpu::ShaderPointer& program) = 0;
virtual void recycle() const = 0;
virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) = 0;
virtual void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool primary, bool reset = false) {}
virtual bool supportedTextureFormat(const gpu::Element& format) = 0;
// Shared header between C++ and GLSL
#include "TransformCamera_shared.slh"
class TransformCamera : public _TransformCamera {
public:
const Backend::TransformCamera& recomputeDerived(const Transform& xformView) const;
// Jitter should be divided by framebuffer size
TransformCamera getMonoCamera(const Transform& xformView, Vec2 normalizedJitter) const;
// Jitter should be divided by framebuffer size
TransformCamera getEyeCamera(int eye, const StereoState& stereo, const Transform& xformView, Vec2 normalizedJitter) const;
};
template <typename T, typename U>
static void setGPUObject(const U& object, T* gpuObject) {
object.gpuObject.setGPUObject(gpuObject);
}
template <typename T, typename U>
static T* getGPUObject(const U& object) {
return reinterpret_cast<T*>(object.gpuObject.getGPUObject());
}
void resetStats() const { _stats = ContextStats(); }
void getStats(ContextStats& stats) const { stats = _stats; }
virtual bool isTextureManagementSparseEnabled() const = 0;
// These should only be accessed by Backend implementation to report the buffer and texture allocations,
// they are NOT public objects
static ContextMetricSize freeGPUMemSize;
static ContextMetricCount bufferCount;
static ContextMetricSize bufferGPUMemSize;
static ContextMetricCount textureResidentCount;
static ContextMetricCount textureFramebufferCount;
static ContextMetricCount textureResourceCount;
static ContextMetricCount textureExternalCount;
static ContextMetricSize textureResidentGPUMemSize;
static ContextMetricSize textureFramebufferGPUMemSize;
static ContextMetricSize textureResourceGPUMemSize;
static ContextMetricSize textureExternalGPUMemSize;
static ContextMetricCount texturePendingGPUTransferCount;
static ContextMetricSize texturePendingGPUTransferMemSize;
static ContextMetricSize textureResourcePopulatedGPUMemSize;
static ContextMetricSize textureResourceIdealGPUMemSize;
virtual bool isStereo() const {
return _stereo.isStereo();
}
void getStereoProjections(mat4* eyeProjections) const {
for (int i = 0; i < 2; ++i) {
eyeProjections[i] = _stereo._eyeProjections[i];
}
}
protected:
void getStereoViews(mat4* eyeViews) const {
for (int i = 0; i < 2; ++i) {
eyeViews[i] = _stereo._eyeViews[i];
}
}
friend class Context;
mutable ContextStats _stats;
StereoState _stereo;
};
class Context {
public:
using Size = Resource::Size;

View file

@ -0,0 +1,28 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// <$_SCRIBE_FILENAME$>
// Generated on <$_SCRIBE_DATE$>
// Draw the unit quad [-1,-1 -> 1,1].
// No transform used.
// Simply draw a Triangle_strip of 2 triangles, no input buffers or index buffer needed
//
// Created by Olivier Prat on 10/22/2018
// Copyright 2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
void main(void) {
const float depth = 1.0;
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, depth, 1.0),
vec4(1.0, -1.0, depth, 1.0),
vec4(-1.0, 1.0, depth, 1.0),
vec4(1.0, 1.0, depth, 1.0)
);
vec4 pos = UNIT_QUAD[gl_VertexID];
gl_Position = pos;
}

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -80,7 +81,7 @@ constexpr const char* pose = "pose";
constexpr const char* profileRanges = "profileRanges";
constexpr const char* program = "program";
constexpr const char* programs = "programs";
constexpr const char* projectionJitter = "projectionJitter";
constexpr const char* isJitterOnProjectionEnabled = "isJitterOnProjectionEnabled";
constexpr const char* queries = "queries";
constexpr const char* sampleCount = "sampleCount";
constexpr const char* sampleMask = "sampleMask";
@ -150,10 +151,16 @@ constexpr const char* COMMAND_NAMES[] = {
"setModelTransform",
"setViewTransform",
"setProjectionTransform",
"setProjectionJitter",
"setProjectionJitterEnabled",
"setProjectionJitterSequence",
"setProjectionJitterScale",
"setViewportTransform",
"setDepthRangeTransform",
"saveViewProjectionTransform",
"setSavedViewProjectionTransform",
"copySavedViewProjectionTransformToBuffer",
"setPipeline",
"setStateBlendFactor",
"setStateScissorRect",

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -740,7 +741,7 @@ BatchPointer Deserializer::readBatch(const json& node) {
auto& batch = *result;
readOptional(batch._enableStereo, node, keys::stereo);
readOptional(batch._enableSkybox, node, keys::skybox);
readOptionalTransformed<glm::vec2>(batch._projectionJitter, node, keys::projectionJitter, &readVec2);
readOptional(batch._isJitterOnProjectionEnabled, node, keys::isJitterOnProjectionEnabled);
readOptional(batch._drawcallUniform, node, keys::drawcallUniform);
readOptional(batch._drawcallUniformReset, node, keys::drawcallUniformReset);
readPointerCache(batch._textures, node, keys::textures, textures);

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -220,8 +221,8 @@ json Serializer::writeBatch(const Batch& batch) {
if (batch._enableStereo != DEFAULT_BATCH._enableStereo) {
batchNode[keys::stereo] = batch._enableStereo;
}
if (batch._projectionJitter != DEFAULT_BATCH._projectionJitter) {
batchNode[keys::projectionJitter] = writeVec2(batch._projectionJitter);
if (batch._isJitterOnProjectionEnabled != DEFAULT_BATCH._isJitterOnProjectionEnabled) {
batchNode[keys::isJitterOnProjectionEnabled] = batch._isJitterOnProjectionEnabled;
}
if (batch._drawcallUniform != DEFAULT_BATCH._drawcallUniform) {
batchNode[keys::drawcallUniform] = batch._drawcallUniform;

View file

@ -4,6 +4,7 @@
//
// Created by Niraj Venkat on 7/7/2015.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -66,6 +67,8 @@ namespace gpu {
double getGPUAverage() const;
double getBatchAverage() const;
const std::string& name() const { return _name; }
protected:
static const int QUERY_QUEUE_SIZE { 4 };

View file

@ -24,6 +24,7 @@
#include "Forward.h"
#include "Resource.h"
#include "Metric.h"
#include "SerDes.h"
const int ABSOLUTE_MAX_TEXTURE_NUM_PIXELS = 8192 * 8192;
@ -91,6 +92,37 @@ public:
};
typedef std::shared_ptr< SphericalHarmonics > SHPointer;
inline DataSerializer &operator<<(DataSerializer &ser, const SphericalHarmonics &h) {
DataSerializer::SizeTracker tracker(ser);
ser << h.L00 << h.spare0;
ser << h.L1m1 << h.spare1;
ser << h.L10 << h.spare2;
ser << h.L11 << h.spare3;
ser << h.L2m2 << h.spare4;
ser << h.L2m1 << h.spare5;
ser << h.L20 << h.spare6;
ser << h.L21 << h.spare7;
ser << h.L22 << h.spare8;
return ser;
}
inline DataDeserializer &operator>>(DataDeserializer &des, SphericalHarmonics &h) {
DataDeserializer::SizeTracker tracker(des);
des >> h.L00 >> h.spare0;
des >> h.L1m1 >> h.spare1;
des >> h.L10 >> h.spare2;
des >> h.L11 >> h.spare3;
des >> h.L2m2 >> h.spare4;
des >> h.L2m1 >> h.spare5;
des >> h.L20 >> h.spare6;
des >> h.L21 >> h.spare7;
des >> h.L22 >> h.spare8;
return des;
}
class Sampler {
public:
@ -136,7 +168,7 @@ public:
uint8 _wrapModeU = WRAP_REPEAT;
uint8 _wrapModeV = WRAP_REPEAT;
uint8 _wrapModeW = WRAP_REPEAT;
uint8 _mipOffset = 0;
uint8 _minMip = 0;
uint8 _maxMip = MAX_MIP_LEVEL;
@ -193,6 +225,35 @@ protected:
friend class Deserializer;
};
inline DataSerializer &operator<<(DataSerializer &ser, const Sampler::Desc &d) {
DataSerializer::SizeTracker tracker(ser);
ser << d._borderColor;
ser << d._maxAnisotropy;
ser << d._filter;
ser << d._comparisonFunc;
ser << d._wrapModeU;
ser << d._wrapModeV;
ser << d._wrapModeW;
ser << d._mipOffset;
ser << d._minMip;
ser << d._maxMip;
return ser;
}
inline DataDeserializer &operator>>(DataDeserializer &dsr, Sampler::Desc &d) {
DataDeserializer::SizeTracker tracker(dsr);
dsr >> d._borderColor;
dsr >> d._maxAnisotropy;
dsr >> d._filter;
dsr >> d._comparisonFunc;
dsr >> d._wrapModeU;
dsr >> d._wrapModeV;
dsr >> d._wrapModeW;
dsr >> d._mipOffset;
dsr >> d._minMip;
dsr >> d._maxMip;
return dsr;
}
enum class TextureUsageType : uint8 {
RENDERBUFFER, // Used as attachments to a framebuffer
RESOURCE, // Resource textures, like materials... subject to memory manipulation
@ -230,7 +291,7 @@ public:
NORMAL, // Texture is a normal map
ALPHA, // Texture has an alpha channel
ALPHA_MASK, // Texture alpha channel is a Mask 0/1
NUM_FLAGS,
NUM_FLAGS,
};
typedef std::bitset<NUM_FLAGS> Flags;
@ -478,7 +539,7 @@ public:
uint16 evalMipDepth(uint16 level) const { return std::max(_depth >> level, 1); }
// The true size of an image line or surface depends on the format, tiling and padding rules
//
//
// Here are the static function to compute the different sizes from parametered dimensions and format
// Tile size must be a power of 2
static uint16 evalTiledPadding(uint16 length, int tile) { int tileMinusOne = (tile - 1); return (tileMinusOne - (length + tileMinusOne) % tile); }
@ -507,7 +568,7 @@ public:
uint32 evalMipFaceNumTexels(uint16 level) const { return evalMipWidth(level) * evalMipHeight(level) * evalMipDepth(level); }
uint32 evalMipNumTexels(uint16 level) const { return evalMipFaceNumTexels(level) * getNumFaces(); }
// For convenience assign a source name
// For convenience assign a source name
const std::string& source() const { return _source; }
void setSource(const std::string& source) { _source = source; }
const std::string& sourceHash() const { return _sourceHash; }
@ -633,7 +694,7 @@ protected:
uint16 _maxMipLevel { 0 };
uint16 _minMip { 0 };
Type _type { TEX_1D };
Usage _usage;
@ -643,7 +704,7 @@ protected:
bool _isIrradianceValid = false;
bool _defined = false;
bool _important = false;
static TexturePointer create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips, const Sampler& sampler);
Size resize(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips);

View file

@ -18,6 +18,7 @@
#include <ktx/KTX.h>
#include "GPULogging.h"
#include "SerDes.h"
using namespace gpu;
@ -27,71 +28,94 @@ using KtxStorage = Texture::KtxStorage;
std::vector<std::pair<std::shared_ptr<storage::FileStorage>, std::shared_ptr<std::mutex>>> KtxStorage::_cachedKtxFiles;
std::mutex KtxStorage::_cachedKtxFilesMutex;
/**
* @brief Payload for a KTX (texture)
*
* This contains a ready to use texture. This is both used for the local cache, and for baked textures.
*
* @note The usage for textures means breaking compatibility is a bad idea, and that the implementation
* should just keep on adding extra data at the bottom of the structure, and remain able to read old
* formats. In fact, version 1 KTX can be found in older baked assets.
*/
struct GPUKTXPayload {
using Version = uint8;
static const std::string KEY;
static const Version CURRENT_VERSION { 2 };
static const size_t PADDING { 2 };
static const size_t SIZE { sizeof(Version) + sizeof(Sampler::Desc) + sizeof(uint32) + sizeof(TextureUsageType) + sizeof(glm::ivec2) + PADDING };
static const size_t SIZE { sizeof(Version) + sizeof(Sampler::Desc) + sizeof(uint32_t) + sizeof(TextureUsageType) + sizeof(glm::ivec2) + PADDING };
static_assert(GPUKTXPayload::SIZE == 44, "Packing size may differ between platforms");
static_assert(GPUKTXPayload::SIZE % 4 == 0, "GPUKTXPayload is not 4 bytes aligned");
Sampler::Desc _samplerDesc;
Texture::Usage _usage;
TextureUsageType _usageType;
glm::ivec2 _originalSize { 0, 0 };
Byte* serialize(Byte* data) const {
*(Version*)data = CURRENT_VERSION;
data += sizeof(Version);
/**
* @brief Serialize the KTX payload
*
* @warning Be careful modifying this code, as it influences baked assets.
* Backwards compatibility must be maintained.
*
* @param ser Destination serializer
*/
void serialize(DataSerializer &ser) {
memcpy(data, &_samplerDesc, sizeof(Sampler::Desc));
data += sizeof(Sampler::Desc);
ser << CURRENT_VERSION;
// We can't copy the bitset in Texture::Usage in a crossplateform manner
// So serialize it manually
uint32 usageData = _usage._flags.to_ulong();
memcpy(data, &usageData, sizeof(uint32));
data += sizeof(uint32);
ser << _samplerDesc;
memcpy(data, &_usageType, sizeof(TextureUsageType));
data += sizeof(TextureUsageType);
uint32_t usageData = (uint32_t)_usage._flags.to_ulong();
ser << usageData;
ser << ((uint8_t)_usageType);
ser << _originalSize;
memcpy(data, glm::value_ptr(_originalSize), sizeof(glm::ivec2));
data += sizeof(glm::ivec2);
ser.addPadding(PADDING);
return data + PADDING;
assert(ser.length() == GPUKTXPayload::SIZE);
}
bool unserialize(const Byte* data, size_t size) {
Version version = *(const Version*)data;
data += sizeof(Version);
/**
* @brief Deserialize the KTX payload
*
* @warning Be careful modifying this code, as it influences baked assets.
* Backwards compatibility must be maintained.
*
* @param dsr Deserializer object
* @return true Successful
* @return false Version check failed
*/
bool unserialize(DataDeserializer &dsr) {
Version version = 0;
uint32_t usageData = 0;
uint8_t usagetype = 0;
dsr >> version;
if (version > CURRENT_VERSION) {
// If we try to load a version that we don't know how to parse,
// it will render incorrectly
qCWarning(gpulogging) << "KTX version" << version << "is newer than our own," << CURRENT_VERSION;
qCWarning(gpulogging) << dsr;
return false;
}
memcpy(&_samplerDesc, data, sizeof(Sampler::Desc));
data += sizeof(Sampler::Desc);
dsr >> _samplerDesc;
// We can't copy the bitset in Texture::Usage in a crossplateform manner
// So unserialize it manually
uint32 usageData;
memcpy(&usageData, data, sizeof(uint32));
_usage = Texture::Usage(usageData);
data += sizeof(uint32);
dsr >> usageData;
_usage = gpu::Texture::Usage(usageData);
memcpy(&_usageType, data, sizeof(TextureUsageType));
data += sizeof(TextureUsageType);
dsr >> usagetype;
_usageType = (TextureUsageType)usagetype;
if (version >= 2) {
memcpy(&_originalSize, data, sizeof(glm::ivec2));
data += sizeof(glm::ivec2);
dsr >> _originalSize;
}
dsr.skipPadding(PADDING);
return true;
}
@ -103,7 +127,8 @@ struct GPUKTXPayload {
auto found = std::find_if(keyValues.begin(), keyValues.end(), isGPUKTX);
if (found != keyValues.end()) {
auto value = found->_value;
return payload.unserialize(value.data(), value.size());
DataDeserializer dsr(value.data(), value.size());
return payload.unserialize(dsr);
}
return false;
}
@ -123,29 +148,24 @@ struct IrradianceKTXPayload {
SphericalHarmonics _irradianceSH;
Byte* serialize(Byte* data) const {
*(Version*)data = CURRENT_VERSION;
data += sizeof(Version);
memcpy(data, &_irradianceSH, sizeof(SphericalHarmonics));
data += sizeof(SphericalHarmonics);
return data + PADDING;
void serialize(DataSerializer &ser) const {
ser << CURRENT_VERSION;
ser << _irradianceSH;
ser.addPadding(PADDING);
}
bool unserialize(const Byte* data, size_t size) {
if (size != SIZE) {
bool unserialize(DataDeserializer &des) {
Version version;
if (des.length() != SIZE) {
return false;
}
Version version = *(const Version*)data;
des >> version;
if (version != CURRENT_VERSION) {
return false;
}
data += sizeof(Version);
memcpy(&_irradianceSH, data, sizeof(SphericalHarmonics));
des >> _irradianceSH;
return true;
}
@ -157,7 +177,8 @@ struct IrradianceKTXPayload {
auto found = std::find_if(keyValues.begin(), keyValues.end(), isIrradianceKTX);
if (found != keyValues.end()) {
auto value = found->_value;
return payload.unserialize(value.data(), value.size());
DataDeserializer des(value.data(), value.size());
return payload.unserialize(des);
}
return false;
}
@ -467,7 +488,9 @@ ktx::KTXUniquePointer Texture::serialize(const Texture& texture, const glm::ivec
gpuKeyval._originalSize = originalSize;
Byte keyvalPayload[GPUKTXPayload::SIZE];
gpuKeyval.serialize(keyvalPayload);
DataSerializer ser(keyvalPayload, sizeof(keyvalPayload));
gpuKeyval.serialize(ser);
ktx::KeyValues keyValues;
keyValues.emplace_back(GPUKTXPayload::KEY, (uint32)GPUKTXPayload::SIZE, (ktx::Byte*) &keyvalPayload);
@ -477,7 +500,8 @@ ktx::KTXUniquePointer Texture::serialize(const Texture& texture, const glm::ivec
irradianceKeyval._irradianceSH = *texture.getIrradiance();
Byte irradianceKeyvalPayload[IrradianceKTXPayload::SIZE];
irradianceKeyval.serialize(irradianceKeyvalPayload);
DataSerializer ser(irradianceKeyvalPayload, sizeof(irradianceKeyvalPayload));
irradianceKeyval.serialize(ser);
keyValues.emplace_back(IrradianceKTXPayload::KEY, (uint32)IrradianceKTXPayload::SIZE, (ktx::Byte*) &irradianceKeyvalPayload);
}

View file

@ -3,6 +3,7 @@
//
// Created by Sam Gateau on 2/10/15.
// Copyright 2013 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -10,8 +11,12 @@
<@if not GPU_TRANSFORM_STATE_SLH@>
<@def GPU_TRANSFORM_STATE_SLH@>
<@include gpu/ShaderConstants.h@>
<@func declareStandardCameraTransform()@>
#ifndef STANDARD_TRANSFORM_CAMERA
#define STANDARD_TRANSFORM_CAMERA
<@include gpu/ShaderConstants.h@>
<@include gpu/TransformCamera_shared.slh@>
#define TransformCamera _TransformCamera
@ -90,32 +95,23 @@ vec3 getEyeWorldPos() {
}
bool cam_isStereo() {
#ifdef GPU_TRANSFORM_IS_STEREO
return getTransformCamera()._stereoInfo.x > 0.0;
#else
return _cameraBlock._camera._stereoInfo.x > 0.0;
#endif
}
float cam_getStereoSide() {
#ifdef GPU_TRANSFORM_IS_STEREO
#ifdef GPU_TRANSFORM_STEREO_CAMERA
return getTransformCamera()._stereoInfo.y;
#else
return _cameraBlock._camera._stereoInfo.y;
#endif
#else
return _cameraBlock._camera._stereoInfo.y;
#endif
}
vec2 cam_getInvWidthHeight() {
return getTransformCamera()._stereoInfo.zw;
}
#endif // STANDARD_TRANSFORM_CAMERA
<@endfunc@>
<@func declareStandardObjectTransform()@>
struct TransformObject {
mat4 _model;
mat4 _modelInverse;
};
<@include gpu/TransformObject_shared.slh@>
layout(location=GPU_ATTR_DRAW_CALL_INFO) in ivec2 _drawCallInfo;
@ -155,11 +151,7 @@ TransformObject getTransformObject() {
<$declareStandardObjectTransform()$>
<@endfunc@>
<@func transformCameraViewport(cameraTransform, viewport)@>
<$viewport$> = <$cameraTransform$>._viewport;
<@endfunc@>
<@func transformStereoClipsSpace(cameraTransform, clipPos)@>
<@func transformStereoClipSpace(clipPos)@>
{
#ifdef GPU_TRANSFORM_IS_STEREO
@ -190,6 +182,18 @@ TransformObject getTransformObject() {
}
<@endfunc@>
<@func transformModelToEyeAndPrevEyeWorldAlignedPos(cameraTransform, objectTransform, modelPos, eyeWAPos, prevEyeWAPos)@>
<!// Bring the model pos in the world aligned space centered on the eye axis !>
{ // transformModelToEyeAndPrevEyeWorldAlignedPos
highp mat4 _mv = <$objectTransform$>._model;
highp mat4 _pmv = <$objectTransform$>._previousModel;
_mv[3].xyz -= <$cameraTransform$>._viewInverse[3].xyz;
_pmv[3].xyz -= <$cameraTransform$>._previousViewInverse[3].xyz;
<$eyeWAPos$> = (_mv * <$modelPos$>);
<$prevEyeWAPos$> = (_pmv * <$modelPos$>);
}
<@endfunc@>
<@func transformModelToMonoClipPos(cameraTransform, objectTransform, modelPos, clipPos)@>
{ // transformModelToMonoClipPos
vec4 eyeWAPos;
@ -201,7 +205,7 @@ TransformObject getTransformObject() {
<@func transformModelToClipPos(cameraTransform, objectTransform, modelPos, clipPos)@>
{ // transformModelToClipPos
<$transformModelToMonoClipPos($cameraTransform$, $objectTransform$, $modelPos$, $clipPos$)$>
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
@ -212,19 +216,59 @@ TransformObject getTransformObject() {
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
<@func transformModelToWorldAndEyeAndClipPos(cameraTransform, objectTransform, modelPos, worldPos, eyePos, clipPos)@>
{ // transformModelToEyeAndClipPos
<@func transformModelToEyeClipPosAndPrevClipPos(cameraTransform, objectTransform, modelPos, eyePos, clipPos, prevClipPos)@>
{ // transformModelToEyeClipPosAndPrevClipPos
vec4 eyeWAPos;
vec4 prevEyeWAPos;
<$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$>
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$transformStereoClipSpace($clipPos$)$>
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * prevEyeWAPos;
// Prev clip pos is in mono clip space
}
<@endfunc@>
<@func transformModelToClipPosAndPrevClipPos(cameraTransform, objectTransform, modelPos, clipPos, prevClipPos)@>
{ // transformModelToClipPosAndPrevClipPos
vec4 eyeWAPos;
vec4 prevEyeWAPos;
<$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$>
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$transformStereoClipSpace($clipPos$)$>
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * prevEyeWAPos;
// Prev clip pos is in mono clip space
}
<@endfunc@>
<@func transformModelToWorldEyeAndClipPos(cameraTransform, objectTransform, modelPos, worldPos, eyePos, clipPos)@>
{ // transformModelToWorldEyeAndClipPos
vec4 eyeWAPos;
<$transformModelToEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos)$>
<$worldPos$> = vec4(eyeWAPos.xyz + <$cameraTransform$>._viewInverse[3].xyz, 1.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
<@func transformModelToWorldEyeClipPosAndPrevClipPos(cameraTransform, objectTransform, modelPos, worldPos, eyePos, clipPos, prevClipPos)@>
{ // transformModelToWorldEyeClipPosAndPrevClipPos
vec4 eyeWAPos;
vec4 prevEyeWAPos;
<$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$>
<$worldPos$> = vec4(eyeWAPos.xyz + <$cameraTransform$>._viewInverse[3].xyz, 1.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * prevEyeWAPos;
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
@ -236,13 +280,22 @@ TransformObject getTransformObject() {
}
<@endfunc@>
<@func transformModelToEyePosAndPrevEyePos(cameraTransform, objectTransform, modelPos, eyePos, prevEyePos)@>
{ // transformModelToEyePosAndPrevEyePos
vec4 eyeWAPos;
vec4 prevEyeWAPos;
<$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$>
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$prevEyePos$> = vec4((<$cameraTransform$>._previousView * vec4(prevEyeWAPos.xyz, 0.0)).xyz, 1.0);
}
<@endfunc@>
<@func transformWorldToClipPos(cameraTransform, worldPos, clipPos)@>
{ // transformWorldToClipPos
vec4 eyeWAPos = <$worldPos$> - vec4(<$cameraTransform$>._viewInverse[3].xyz, 0.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
@ -285,6 +338,20 @@ TransformObject getTransformObject() {
}
<@endfunc@>
<@func transformModelToPrevEyeDir(cameraTransform, objectTransform, modelDir, prevEyeDir)@>
{ // transformModelToPrevEyeDir
vec3 mr0 = vec3(<$objectTransform$>._previousModelInverse[0].x, <$objectTransform$>._previousModelInverse[1].x, <$objectTransform$>._previousModelInverse[2].x);
vec3 mr1 = vec3(<$objectTransform$>._previousModelInverse[0].y, <$objectTransform$>._previousModelInverse[1].y, <$objectTransform$>._previousModelInverse[2].y);
vec3 mr2 = vec3(<$objectTransform$>._previousModelInverse[0].z, <$objectTransform$>._previousModelInverse[1].z, <$objectTransform$>._previousModelInverse[2].z);
vec3 mvc0 = vec3(dot(<$cameraTransform$>._previousViewInverse[0].xyz, mr0), dot(<$cameraTransform$>._previousViewInverse[0].xyz, mr1), dot(<$cameraTransform$>._previousViewInverse[0].xyz, mr2));
vec3 mvc1 = vec3(dot(<$cameraTransform$>._previousViewInverse[1].xyz, mr0), dot(<$cameraTransform$>._previousViewInverse[1].xyz, mr1), dot(<$cameraTransform$>._previousViewInverse[1].xyz, mr2));
vec3 mvc2 = vec3(dot(<$cameraTransform$>._previousViewInverse[2].xyz, mr0), dot(<$cameraTransform$>._previousViewInverse[2].xyz, mr1), dot(<$cameraTransform$>._previousViewInverse[2].xyz, mr2));
<$prevEyeDir$> = vec3(dot(mvc0, <$modelDir$>), dot(mvc1, <$modelDir$>), dot(mvc2, <$modelDir$>));
}
<@endfunc@>
<@func transformEyeToWorldDir(cameraTransform, eyeDir, worldDir)@>
{ // transformEyeToWorldDir
<$worldDir$> = vec3(<$cameraTransform$>._viewInverse * vec4(<$eyeDir$>.xyz, 0.0));
@ -301,7 +368,34 @@ TransformObject getTransformObject() {
{ // transformEyeToClipPos
<$clipPos$> = <$cameraTransform$>._projection * vec4(<$eyePos$>.xyz, 1.0);
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
<@func transformEyeToPrevClipPos(cameraTransform, eyePos, prevClipPos)@>
{ // transformEyeToPrevClipPos
vec4 worldPos = <$cameraTransform$>._viewInverse * vec4(<$eyePos$>.xyz, 1.0);
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * worldPos;
// Prev clip pos is in mono clip space
}
<@endfunc@>
<@func transformEyeToClipPosAndPrevClipPos(cameraTransform, eyePos, clipPos, prevClipPos)@>
{ // transformEyeToClipPosAndPrevClipPos
<$clipPos$> = <$cameraTransform$>._projection * vec4(<$eyePos$>.xyz, 1.0);
<$transformStereoClipSpace($clipPos$)$>
vec4 worldPos = <$cameraTransform$>._viewInverse * vec4(<$eyePos$>.xyz, 1.0);
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * worldPos;
// Prev clip pos is in mono clip space
}
<@endfunc@>
<@func transformPrevEyeToPrevClipPos(cameraTransform, prevEyePos, prevClipPos)@>
{ // transformPrevEyeToPrevClipPos
<$prevClipPos$> = <$cameraTransform$>._previousViewInverse * vec4(<$prevEyePos$>.xyz, 1.0) - vec4(<$cameraTransform$>._previousViewInverse[3].xyz, 0.0);
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * <$prevClipPos$>;
}
<@endfunc@>

View file

@ -1,22 +1,26 @@
// glsl / C++ compatible source as interface for FadeEffect
// glsl / C++ compatible source as interface for TransformCamera
#ifdef __cplusplus
# define _MAT4 Mat4
# define _VEC4 Vec4
# define _MUTABLE mutable
# define TC_MAT4 gpu::Mat4
# define TC_VEC4 gpu::Vec4
# define TC_MUTABLE mutable
#else
# define _MAT4 mat4
# define _VEC4 vec4
# define _MUTABLE
# define TC_MAT4 mat4
# define TC_VEC4 vec4
# define TC_MUTABLE
#endif
struct _TransformCamera {
_MUTABLE _MAT4 _view;
_MUTABLE _MAT4 _viewInverse;
_MUTABLE _MAT4 _projectionViewUntranslated;
_MAT4 _projection;
_MUTABLE _MAT4 _projectionInverse;
_VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.
_MUTABLE _VEC4 _stereoInfo;
TC_MUTABLE TC_MAT4 _view;
TC_MUTABLE TC_MAT4 _viewInverse;
TC_MUTABLE TC_MAT4 _previousView;
TC_MUTABLE TC_MAT4 _previousViewInverse;
TC_MAT4 _projection;
TC_MUTABLE TC_MAT4 _projectionInverse;
TC_MUTABLE TC_MAT4 _projectionViewUntranslated;
// Previous projection view untranslated AND jittered with current jitter
TC_MUTABLE TC_MAT4 _previousProjectionViewUntranslated;
TC_VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.
TC_MUTABLE TC_VEC4 _stereoInfo;
};
// <@if 1@>

View file

@ -0,0 +1,19 @@
// glsl / C++ compatible source as interface for TransformCamera
#ifdef __cplusplus
# define TO_MAT4 Mat4
#else
# define TO_MAT4 mat4
#endif
struct TransformObject {
TO_MAT4 _model;
TO_MAT4 _modelInverse;
TO_MAT4 _previousModel;
TO_MAT4 _previousModelInverse;
};
// <@if 1@>
// Trigger Scribe include
// <@endif@> <!def that !>
//

View file

@ -1,3 +1,2 @@
VERTEX DrawTransformVertexPosition
VERTEX DrawUnitQuad
FRAGMENT DrawColor
r

View file

@ -0,0 +1,2 @@
VERTEX DrawUnitQuad
FRAGMENT DrawWhite

View file

@ -3,6 +3,7 @@
//
// Created by Nissim Hadar on 9/13/2017
// Copyright 2013 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -181,10 +182,9 @@ vec4 computeHazeColor(vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePosition
// Mix with background at far range
const float BLEND_DISTANCE = 27000.0f;
vec4 outFragColor = potentialFragColor;
outFragColor.a *= mix(1.0, hazeParams.backgroundBlend, float(distance > BLEND_DISTANCE));
potentialFragColor.a *= mix(1.0, hazeParams.backgroundBlend, float(distance > BLEND_DISTANCE));
return outFragColor;
return potentialFragColor;
}
<@endif@>

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 5/4/2015.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -73,14 +74,14 @@ void Skybox::prepare(gpu::Batch& batch) const {
}
}
void Skybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const {
void Skybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const {
updateSchemaBuffer();
Skybox::render(batch, frustum, (*this), forward);
Skybox::render(batch, frustum, (*this), forward, transformSlot);
}
static std::map<bool, gpu::PipelinePointer> _pipelines;
void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Skybox& skybox, bool forward) {
void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Skybox& skybox, bool forward, uint transformSlot) {
if (_pipelines.empty()) {
static const std::vector<std::tuple<bool, uint32_t>> keys = {
std::make_tuple(false, shader::graphics::program::skybox),
@ -109,6 +110,8 @@ void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Sky
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewTransform);
// This is needed if we want to have motion vectors on the sky
batch.saveViewProjectionTransform(transformSlot);
batch.setModelTransform(Transform()); // only for Mac
batch.setPipeline(_pipelines[forward]);

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 5/4/2015.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -44,9 +45,9 @@ public:
virtual void clear();
void prepare(gpu::Batch& batch) const;
virtual void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const;
virtual void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const;
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const Skybox& skybox, bool forward);
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const Skybox& skybox, bool forward, uint transformSlot);
const UniformBufferView& getSchemaBuffer() const { return _schemaBuffer; }

View file

@ -10,7 +10,7 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include graphics/ShaderConstants.h@>
<@include skybox.slh@>
<@include graphics/Light.slh@>
<@if HIFI_USE_FORWARD@>
@ -20,28 +20,25 @@
<$declareLightBuffer()$>
<@include graphics/Haze.slh@>
layout(location=0) out vec4 _fragColor;
<@else@>
<$declarePackDeferredFragmentSky()$>
<@endif@>
LAYOUT(binding=GRAPHICS_TEXTURE_SKYBOX) uniform samplerCube cubeMap;
struct Skybox {
vec4 color;
};
LAYOUT(binding=GRAPHICS_BUFFER_SKYBOX_PARAMS) uniform skyboxBuffer {
Skybox skybox;
};
layout(location=0) in vec3 _normal;
layout(location=0) out vec4 _fragColor;
layout(location=0) in vec3 _normal;
<@if not HIFI_USE_FORWARD@>
layout(location=1) in vec4 _prevPositionCS;
<@endif@>
void main(void) {
vec3 normal = normalize(_normal);
vec3 skyboxTexel = texture(cubeMap, normal).rgb;
vec3 skyboxColor = skybox.color.rgb;
_fragColor = vec4(applySkyboxColorMix(skyboxTexel, skyboxColor, skybox.color.a), 1.0);
vec3 color = applySkyboxColorMix(skyboxTexel, skyboxColor, skybox.color.a);
<@if HIFI_USE_FORWARD@>
_fragColor = vec4(color, 1.0);
// FIXME: either move this elsewhere or give it access to isHazeEnabled() (which is in render-utils/LightingModel.slh)
if (/*(isHazeEnabled() > 0.0) && */(hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {
TransformCamera cam = getTransformCamera();
@ -57,6 +54,8 @@ void main(void) {
vec4 hazeColor = computeHazeColor(fragPositionES.xyz, fragPositionWS.xyz, eyePositionWS.xyz, lightDirectionWS);
_fragColor.rgb = mix(_fragColor.rgb, hazeColor.rgb, hazeColor.a);
}
<@else@>
packDeferredFragmentSky(_prevPositionCS, color);
<@endif@>
}

View file

@ -0,0 +1,60 @@
<!
// skybox.slh
// libraries/graphics/src
//
// Created by HifiExperiments on 8/5/2020.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
!>
<@if not SKYBOX_SLH@>
<@def SKYBOX_SLH@>
<@include graphics/ShaderConstants.h@>
<@include gpu/Transform.slh@>
<$declareStandardCameraTransform()$>
<@include gpu/PackedNormal.slh@>
LAYOUT(binding=GRAPHICS_TEXTURE_SKYBOX) uniform samplerCube cubeMap;
struct Skybox {
vec4 color;
};
LAYOUT(binding=GRAPHICS_BUFFER_SKYBOX_PARAMS) uniform skyboxBuffer {
Skybox skybox;
};
<@func declarePackDeferredFragmentSky()@>
// This code belongs in render-utils/VelocityWrite.slh but because graphics can't include render-utils, we have to have it here
vec2 getEyeTexcoordPos() {
// No need to add 0.5 as, by default, frag coords are pixel centered at (0.5, 0.5)
vec2 texCoordPos = gl_FragCoord.xy;
texCoordPos *= cam_getInvWidthHeight();
texCoordPos.x -= cam_getStereoSide();
return texCoordPos;
}
vec2 packVelocity(vec4 prevPositionCS) {
vec2 uv = getEyeTexcoordPos();
vec2 prevUV = (prevPositionCS.xy / prevPositionCS.w) * 0.5 + 0.5;
vec2 deltaUV = uv - prevUV;
// Velocity should be computed without any jitter inside.
return deltaUV;
}
layout(location = 0) out vec4 _lighting; // calculated lighting
layout(location = 1) out vec4 _velocity; // velocity
void packDeferredFragmentSky(vec4 prevPositionCS, vec3 color) {
_lighting = vec4(color, 1.0f);
_velocity = vec4(packVelocity(prevPositionCS), 0.0f, 0.0f);
}
<@endfunc@>
<@endif@>

View file

@ -5,6 +5,7 @@
//
// Created by Sam Gateau on 5/5/2015.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -14,6 +15,9 @@
<$declareStandardTransform()$>
layout(location=0) out vec3 _normal;
<@if not HIFI_USE_FORWARD@>
layout(location=1) out vec4 _prevPositionCS;
<@endif@>
void main(void) {
const float depth = 0.0;
@ -23,17 +27,21 @@ void main(void) {
vec4(-1.0, 1.0, depth, 1.0),
vec4(1.0, 1.0, depth, 1.0)
);
vec4 inPosition = UNIT_QUAD[gl_VertexID];
// standard transform
TransformCamera cam = getTransformCamera();
vec3 clipDir = vec3(inPosition.xy, 0.0);
vec3 clipDir = UNIT_QUAD[gl_VertexID].xyz;
vec3 eyeDir;
<$transformClipToEyeDir(cam, clipDir, eyeDir)$>
<$transformEyeToWorldDir(cam, eyeDir, _normal)$>
// Position is supposed to come in clip space
gl_Position = vec4(inPosition.xy, 0.0, 1.0);
<$transformStereoClipsSpace(cam, gl_Position)$>
<@if not HIFI_USE_FORWARD@>
// FIXME: this is probably wrong
_prevPositionCS = cam._previousProjectionViewUntranslated * (cam._viewInverse * (cam._projectionInverse * vec4(clipDir, 1.0)));
<@endif@>
// Position is supposed to come in clip space
gl_Position = vec4(clipDir, 1.0);
<$transformStereoClipSpace(gl_Position)$>
}

View file

@ -17,6 +17,7 @@
#include "OctreeLogging.h"
#include "NumericalConstants.h"
#include <glm/gtc/type_ptr.hpp>
#include "SerDes.h"
bool OctreePacketData::_debug = false;
AtomicUIntStat OctreePacketData::_totalBytesOfOctalCodes { 0 };
@ -847,10 +848,10 @@ int OctreePacketData::unpackDataFromBytes(const unsigned char* dataBytes, QByteA
}
int OctreePacketData::unpackDataFromBytes(const unsigned char* dataBytes, AACube& result) {
aaCubeData cube;
memcpy(&cube, dataBytes, sizeof(aaCubeData));
result = AACube(cube.corner, cube.scale);
return sizeof(aaCubeData);
DataDeserializer des(dataBytes, sizeof(aaCubeData));
des >> result;
return des.length();
}
int OctreePacketData::unpackDataFromBytes(const unsigned char* dataBytes, QRect& result) {

View file

@ -115,16 +115,16 @@ void ProceduralData::parse(const QJsonObject& proceduralData) {
channels = proceduralData[CHANNELS_KEY].toArray();
}
std::function<void(gpu::StatePointer)> Procedural::opaqueStencil = [](gpu::StatePointer state) {};
std::function<void(gpu::StatePointer, bool)> Procedural::opaqueStencil = [](gpu::StatePointer state, bool useAA) {};
std::function<void(gpu::StatePointer)> Procedural::transparentStencil = [](gpu::StatePointer state) {};
Procedural::Procedural() {
Procedural::Procedural(bool useAA) {
_opaqueState->setCullMode(gpu::State::CULL_NONE);
_opaqueState->setDepthTest(true, true, gpu::LESS_EQUAL);
_opaqueState->setBlendFunction(false,
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
opaqueStencil(_opaqueState);
opaqueStencil(_opaqueState, useAA);
_transparentState->setCullMode(gpu::State::CULL_NONE);
_transparentState->setDepthTest(true, false, gpu::LESS_EQUAL);

View file

@ -99,7 +99,7 @@ inline bool operator!=(const ProceduralProgramKey& a, const ProceduralProgramKey
// FIXME better mechanism for extending to things rendered using shaders other than simple.slv
struct Procedural {
public:
Procedural();
Procedural(bool useAA = true);
void setProceduralData(const ProceduralData& proceduralData);
bool isReady() const;
@ -132,7 +132,7 @@ public:
gpu::StatePointer _opaqueState { std::make_shared<gpu::State>() };
gpu::StatePointer _transparentState { std::make_shared<gpu::State>() };
static std::function<void(gpu::StatePointer)> opaqueStencil;
static std::function<void(gpu::StatePointer, bool)> opaqueStencil;
static std::function<void(gpu::StatePointer)> transparentStencil;
static bool enableProceduralShaders;

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 9/21/2015.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -45,15 +46,15 @@ void ProceduralSkybox::clear() {
Skybox::clear();
}
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const {
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const {
if (_procedural.isReady()) {
ProceduralSkybox::render(batch, frustum, (*this), forward);
ProceduralSkybox::render(batch, frustum, (*this), forward, transformSlot);
} else {
Skybox::render(batch, frustum, forward);
Skybox::render(batch, frustum, forward, transformSlot);
}
}
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const ProceduralSkybox& skybox, bool forward) {
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const ProceduralSkybox& skybox, bool forward, uint transformSlot) {
glm::mat4 projMat;
viewFrustum.evalProjectionMatrix(projMat);
@ -61,6 +62,8 @@ void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum,
viewFrustum.evalViewTransform(viewTransform);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewTransform);
// This is needed if we want to have motion vectors on the sky
batch.saveViewProjectionTransform(transformSlot);
batch.setModelTransform(Transform()); // only for Mac
auto& procedural = skybox._procedural;

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 9/21/15.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -26,8 +27,8 @@ public:
bool empty() override;
void clear() override;
void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const override;
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const ProceduralSkybox& skybox, bool forward);
void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const override;
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const ProceduralSkybox& skybox, bool forward, uint transformSlot);
uint64_t getCreated() const { return _created; }

View file

@ -6,27 +6,19 @@
//
// Created by Sam Gateau on 5/5/2015.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include graphics/ShaderConstants.h@>
LAYOUT(binding=GRAPHICS_TEXTURE_SKYBOX) uniform samplerCube cubeMap;
struct Skybox {
vec4 color;
};
LAYOUT(binding=GRAPHICS_BUFFER_SKYBOX_PARAMS) uniform skyboxBuffer {
Skybox skybox;
};
layout(location=0) in vec3 _normal;
layout(location=0) out vec4 _fragColor;
<@include graphics/skybox.slh@>
<$declarePackDeferredFragmentSky()$>
<@include procedural/ProceduralCommon.slh@>
layout(location=0) in vec3 _normal;
layout(location=1) in vec4 _prevPositionCS;
#line 1001
//PROCEDURAL_BLOCK_BEGIN
vec3 getSkyboxColor() {
@ -42,5 +34,6 @@ void main(void) {
color = max(color, vec3(0));
// Procedural Shaders are expected to be Gamma corrected so let's bring back the RGB in linear space for the rest of the pipeline
color = pow(color, vec3(2.2));
_fragColor = vec4(color, 1.0);
packDeferredFragmentSky(_prevPositionCS, color);
}

View file

@ -4,6 +4,7 @@
//
// Created by Raffi Bedikian on 8/30/15
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -13,7 +14,6 @@
#include <glm/gtc/random.hpp>
#include <PathUtils.h>
#include <SharedUtil.h>
#include <gpu/Context.h>
#include <shaders/Shaders.h>
@ -21,11 +21,6 @@
#include "render-utils/ShaderConstants.h"
#include "StencilMaskPass.h"
#include "TextureCache.h"
#include "DependencyManager.h"
#include "ViewFrustum.h"
#include "GeometryCache.h"
#include "FramebufferCache.h"
#include "RandomAndNoise.h"
namespace ru {
@ -38,136 +33,129 @@ namespace gr {
using graphics::slot::buffer::Buffer;
}
#if !ANTIALIASING_USE_TAA
gpu::PipelinePointer Antialiasing::_antialiasingPipeline;
gpu::PipelinePointer Antialiasing::_intensityPipeline;
gpu::PipelinePointer Antialiasing::_blendPipeline;
gpu::PipelinePointer Antialiasing::_debugBlendPipeline;
Antialiasing::Antialiasing() {
_geometryId = DependencyManager::get<GeometryCache>()->allocateID();
}
#define TAA_JITTER_SEQUENCE_LENGTH 16
Antialiasing::~Antialiasing() {
auto geometryCache = DependencyManager::get<GeometryCache>();
if (geometryCache) {
geometryCache->releaseID(_geometryId);
}
}
const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
if (!_antialiasingPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::fxaa);
gpu::StatePointer state = std::make_shared<gpu::State>();
state->setDepthTest(false, false, gpu::LESS_EQUAL);
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_antialiasingPipeline = gpu::Pipeline::create(program, state);
}
return _antialiasingPipeline;
}
const gpu::PipelinePointer& Antialiasing::getBlendPipeline() {
if (!_blendPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::fxaa_blend);
gpu::StatePointer state = std::make_shared<gpu::State>();
state->setDepthTest(false, false, gpu::LESS_EQUAL);
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_blendPipeline = gpu::Pipeline::create(program, state);
}
return _blendPipeline;
}
void Antialiasing::run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
RenderArgs* args = renderContext->args;
gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setViewportTransform(args->_viewport);
if (!_paramsBuffer) {
_paramsBuffer = std::make_shared<gpu::Buffer>(sizeof(glm::vec4), nullptr);
}
{
int width = args->_viewport.z;
int height = args->_viewport.w;
if (_antialiasingBuffer && _antialiasingBuffer->getSize() != uvec2(width, height)) {
_antialiasingBuffer.reset();
}
if (!_antialiasingBuffer) {
// Link the antialiasing FBO to texture
_antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("antialiasing"));
auto format = gpu::Element::COLOR_SRGBA_32;
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
_antialiasingTexture = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_antialiasingBuffer->setRenderBuffer(0, _antialiasingTexture);
glm::vec2 fbExtent { args->_viewport.z, args->_viewport.w };
glm::vec2 inverseFbExtent = 1.0f / fbExtent;
_paramsBuffer->setSubData(0, glm::vec4(inverseFbExtent, 0.0, 0.0));
}
}
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat, true);
batch.setModelTransform(Transform());
// FXAA step
auto pipeline = getAntialiasingPipeline();
batch.setResourceTexture(0, sourceBuffer->getRenderBuffer(0));
batch.setFramebuffer(_antialiasingBuffer);
batch.setPipeline(pipeline);
batch.setUniformBuffer(0, _paramsBuffer);
batch.draw(gpu::TRIANGLE_STRIP, 4);
// Blend step
batch.setResourceTexture(0, _antialiasingTexture);
batch.setFramebuffer(sourceBuffer);
batch.setPipeline(getBlendPipeline());
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
}
#else
void AntialiasingConfig::setAAMode(int mode) {
_mode = std::min((int)AntialiasingConfig::MODE_COUNT, std::max(0, mode)); // Just use unsigned?
void AntialiasingSetupConfig::setIndex(int current) {
_index = (current + TAA_JITTER_SEQUENCE_LENGTH) % TAA_JITTER_SEQUENCE_LENGTH;
emit dirty();
}
gpu::PipelinePointer Antialiasing::_antialiasingPipeline;
gpu::PipelinePointer Antialiasing::_blendPipeline;
gpu::PipelinePointer Antialiasing::_debugBlendPipeline;
void AntialiasingSetupConfig::setState(State state) {
_state = (State)((int)state % (int)State::STATE_COUNT);
switch (_state) {
case State::NONE: {
none();
break;
}
case State::PAUSE: {
pause();
break;
}
case State::PLAY:
default: {
play();
break;
}
}
emit dirty();
}
int AntialiasingSetupConfig::prev() {
setIndex(_index - 1);
return _index;
}
int AntialiasingSetupConfig::next() {
setIndex(_index + 1);
return _index;
}
AntialiasingSetupConfig::State AntialiasingSetupConfig::none() {
_state = State::NONE;
stop = true;
freeze = false;
setIndex(-1);
return _state;
}
AntialiasingSetupConfig::State AntialiasingSetupConfig::pause() {
_state = State::PAUSE;
stop = false;
freeze = true;
setIndex(0);
return _state;
}
AntialiasingSetupConfig::State AntialiasingSetupConfig::play() {
_state = State::PLAY;
stop = false;
freeze = false;
setIndex(0);
return _state;
}
void AntialiasingSetupConfig::setAAMode(Mode mode) {
this->mode = (Mode)glm::clamp((int)mode, 0, (int)AntialiasingSetupConfig::Mode::MODE_COUNT);
emit dirty();
}
AntialiasingSetup::AntialiasingSetup() {
_sampleSequence.reserve(TAA_JITTER_SEQUENCE_LENGTH + 1);
// Fill in with jitter samples
for (int i = 0; i < TAA_JITTER_SEQUENCE_LENGTH; i++) {
_sampleSequence.emplace_back(glm::vec2(halton::evaluate<2>(i), halton::evaluate<3>(i)) - vec2(0.5f));
}
}
void AntialiasingSetup::configure(const Config& config) {
_isStopped = config.stop;
_isFrozen = config.freeze;
if (config.freeze) {
_freezedSampleIndex = config.getIndex();
}
_scale = config.scale;
_mode = config.mode;
}
void AntialiasingSetup::run(const render::RenderContextPointer& renderContext, Output& output) {
assert(renderContext->args);
if (!_isStopped && _mode == AntialiasingSetupConfig::Mode::TAA) {
RenderArgs* args = renderContext->args;
gpu::doInBatch("AntialiasingSetup::run", args->_context, [&](gpu::Batch& batch) {
auto offset = 0;
auto count = _sampleSequence.size();
if (_isFrozen) {
count = 1;
offset = _freezedSampleIndex;
}
batch.setProjectionJitterSequence(_sampleSequence.data() + offset, count);
batch.setProjectionJitterScale(_scale);
});
}
output = _mode;
}
Antialiasing::Antialiasing(bool isSharpenEnabled) :
_isSharpenEnabled{ isSharpenEnabled } {
}
Antialiasing::~Antialiasing() {
_antialiasingBuffers.reset();
_antialiasingTextures[0].reset();
_antialiasingTextures[1].reset();
_antialiasingBuffers.clear();
}
gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
if (!_antialiasingPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::taa);
gpu::StatePointer state = std::make_shared<gpu::State>();
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_antialiasingPipeline = gpu::Pipeline::create(program, state);
@ -176,24 +164,36 @@ gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
return _antialiasingPipeline;
}
gpu::PipelinePointer& Antialiasing::getBlendPipeline() {
if (!_blendPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::fxaa_blend);
const gpu::PipelinePointer& Antialiasing::getIntensityPipeline() {
if (!_intensityPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::drawWhite);
gpu::StatePointer state = std::make_shared<gpu::State>();
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_intensityPipeline = gpu::Pipeline::create(program, state);
}
return _intensityPipeline;
}
const gpu::PipelinePointer& Antialiasing::getBlendPipeline() {
if (!_blendPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::aa_blend);
gpu::StatePointer state = std::make_shared<gpu::State>();
// Good to go add the brand new pipeline
_blendPipeline = gpu::Pipeline::create(program, state);
}
return _blendPipeline;
}
gpu::PipelinePointer& Antialiasing::getDebugBlendPipeline() {
const gpu::PipelinePointer& Antialiasing::getDebugBlendPipeline() {
if (!_debugBlendPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::taa_blend);
gpu::StatePointer state = std::make_shared<gpu::State>();
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_debugBlendPipeline = gpu::Pipeline::create(program, state);
}
@ -201,12 +201,11 @@ gpu::PipelinePointer& Antialiasing::getDebugBlendPipeline() {
}
void Antialiasing::configure(const Config& config) {
_mode = (AntialiasingConfig::Mode) config.getAAMode();
_sharpen = config.sharpen * 0.25f;
if (!_isSharpenEnabled) {
_sharpen = 0.0f;
}
_params.edit().setSharpenedOutput(_sharpen > 0.0f);
_params.edit().blend = config.blend * config.blend;
_params.edit().covarianceGamma = config.covarianceGamma;
@ -216,7 +215,9 @@ void Antialiasing::configure(const Config& config) {
_params.edit().debugShowVelocityThreshold = config.debugShowVelocityThreshold;
_params.edit().regionInfo.x = config.debugX;
_params.edit().regionInfo.z = config.debugFXAAX;
_debugFXAAX = config.debugFXAAX;
_params.edit().setBicubicHistoryFetch(config.bicubicHistoryFetch);
_params.edit().setDebug(config.debug);
_params.edit().setShowDebugCursor(config.showCursorPixel);
@ -227,56 +228,82 @@ void Antialiasing::configure(const Config& config) {
}
void Antialiasing::run(const render::RenderContextPointer& renderContext, const Inputs& inputs) {
void Antialiasing::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& output) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
RenderArgs* args = renderContext->args;
auto& deferredFrameTransform = inputs.get0();
auto& sourceBuffer = inputs.get1();
auto& linearDepthBuffer = inputs.get2();
auto& velocityBuffer = inputs.get3();
const auto& deferredFrameBuffer = inputs.get1();
const auto& sourceBuffer = deferredFrameBuffer->getLightingFramebuffer();
const auto& linearDepthBuffer = inputs.get2();
const auto& velocityTexture = deferredFrameBuffer->getDeferredVelocityTexture();
const auto& mode = inputs.get3();
_params.edit().regionInfo.z = mode == AntialiasingSetupConfig::Mode::TAA ? _debugFXAAX : 0.0f;
int width = sourceBuffer->getWidth();
int height = sourceBuffer->getHeight();
if (_antialiasingBuffers && _antialiasingBuffers->get(0) && _antialiasingBuffers->get(0)->getSize() != uvec2(width, height)) {
_antialiasingBuffers.reset();
_antialiasingTextures[0].reset();
_antialiasingTextures[1].reset();
if (_antialiasingBuffers._swapChain && _antialiasingBuffers._swapChain->get(0) && _antialiasingBuffers._swapChain->get(0)->getSize() != uvec2(width, height)) {
_antialiasingBuffers.clear();
}
if (!_antialiasingBuffers) {
if (!_antialiasingBuffers._swapChain || !_intensityFramebuffer) {
std::vector<gpu::FramebufferPointer> antiAliasingBuffers;
// Link the antialiasing FBO to texture
auto format = sourceBuffer->getRenderBuffer(0)->getTexelFormat();
auto format = gpu::Element(gpu::VEC4, gpu::HALF, gpu::RGBA);
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP);
for (int i = 0; i < 2; i++) {
antiAliasingBuffers.emplace_back(gpu::Framebuffer::create("antialiasing"));
const auto& antiAliasingBuffer = antiAliasingBuffers.back();
_antialiasingTextures[i] = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
antiAliasingBuffer->setRenderBuffer(0, _antialiasingTextures[i]);
_antialiasingBuffers._textures[i] = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
antiAliasingBuffer->setRenderBuffer(0, _antialiasingBuffers._textures[i]);
}
_antialiasingBuffers = std::make_shared<gpu::FramebufferSwapChain>(antiAliasingBuffers);
_antialiasingBuffers._swapChain = std::make_shared<gpu::FramebufferSwapChain>(antiAliasingBuffers);
_intensityTexture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_R_8, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_intensityFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("taaIntensity"));
_intensityFramebuffer->setRenderBuffer(0, _intensityTexture);
_intensityFramebuffer->setStencilBuffer(deferredFrameBuffer->getDeferredFramebuffer()->getDepthStencilBuffer(), deferredFrameBuffer->getDeferredFramebuffer()->getDepthStencilBufferFormat());
}
output = _intensityTexture;
gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) {
PROFILE_RANGE_BATCH(batch, "TAA");
batch.enableStereo(false);
batch.setViewportTransform(args->_viewport);
// Set the intensity buffer to 1 except when the stencil is masked as NoAA, where it should be 0
// This is a bit of a hack as it is not possible and not portable to use the stencil value directly
// as a texture
batch.setFramebuffer(_intensityFramebuffer);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, gpu::Vec4(0.0f));
batch.setResourceTexture(0, nullptr);
batch.setPipeline(getIntensityPipeline());
batch.draw(gpu::TRIANGLE_STRIP, 4);
// TAA step
batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaHistory, _antialiasingBuffers, 0);
if (!_params->isFXAAEnabled()) {
batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaHistory, _antialiasingBuffers._swapChain, 0);
batch.setResourceTexture(ru::Texture::TaaVelocity, velocityTexture);
} else {
batch.setResourceTexture(ru::Texture::TaaHistory, nullptr);
batch.setResourceTexture(ru::Texture::TaaVelocity, nullptr);
}
batch.setResourceTexture(ru::Texture::TaaSource, sourceBuffer->getRenderBuffer(0));
batch.setResourceTexture(ru::Texture::TaaVelocity, velocityBuffer->getVelocityTexture());
// This is only used during debug
batch.setResourceTexture(ru::Texture::TaaIntensity, _intensityTexture);
// This is only used during debug
batch.setResourceTexture(ru::Texture::TaaDepth, linearDepthBuffer->getLinearDepthTexture());
batch.setUniformBuffer(ru::Buffer::TaaParams, _params);
batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, deferredFrameTransform->getFrameTransformBuffer());
batch.setFramebufferSwapChain(_antialiasingBuffers, 1);
batch.setFramebufferSwapChain(_antialiasingBuffers._swapChain, 1);
batch.setPipeline(getAntialiasingPipeline());
batch.draw(gpu::TRIANGLE_STRIP, 4);
@ -286,11 +313,11 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const
batch.setFramebuffer(sourceBuffer);
if (_params->isDebug()) {
batch.setPipeline(getDebugBlendPipeline());
batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaNext, _antialiasingBuffers, 1);
} else {
batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaNext, _antialiasingBuffers._swapChain, 1);
} else {
batch.setPipeline(getBlendPipeline());
// Must match the bindg point in the fxaa_blend.slf shader
batch.setResourceFramebufferSwapChainTexture(0, _antialiasingBuffers, 1);
// Must match the binding point in the aa_blend.slf shader
batch.setResourceFramebufferSwapChainTexture(0, _antialiasingBuffers._swapChain, 1);
// Disable sharpen if FXAA
if (!_blendParamsBuffer) {
_blendParamsBuffer = std::make_shared<gpu::Buffer>(sizeof(glm::vec4), nullptr);
@ -299,8 +326,8 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const
batch.setUniformBuffer(0, _blendParamsBuffer);
}
batch.draw(gpu::TRIANGLE_STRIP, 4);
batch.advance(_antialiasingBuffers);
batch.advance(_antialiasingBuffers._swapChain);
batch.setUniformBuffer(ru::Buffer::TaaParams, nullptr);
batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, nullptr);
@ -308,114 +335,8 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const
batch.setResourceTexture(ru::Texture::TaaHistory, nullptr);
batch.setResourceTexture(ru::Texture::TaaVelocity, nullptr);
batch.setResourceTexture(ru::Texture::TaaNext, nullptr);
// Reset jitter sequence
batch.setProjectionJitterSequence(nullptr, 0);
});
}
void JitterSampleConfig::setIndex(int current) {
_index = (current) % JitterSample::SEQUENCE_LENGTH;
emit dirty();
}
void JitterSampleConfig::setState(int state) {
_state = (state) % 3;
switch (_state) {
case 0: {
none();
break;
}
case 1: {
pause();
break;
}
case 2:
default: {
play();
break;
}
}
emit dirty();
}
int JitterSampleConfig::cycleStopPauseRun() {
setState((_state + 1) % 3);
return _state;
}
int JitterSampleConfig::prev() {
setIndex(_index - 1);
return _index;
}
int JitterSampleConfig::next() {
setIndex(_index + 1);
return _index;
}
int JitterSampleConfig::none() {
_state = 0;
stop = true;
freeze = false;
setIndex(-1);
return _state;
}
int JitterSampleConfig::pause() {
_state = 1;
stop = false;
freeze = true;
setIndex(0);
return _state;
}
int JitterSampleConfig::play() {
_state = 2;
stop = false;
freeze = false;
setIndex(0);
return _state;
}
JitterSample::SampleSequence::SampleSequence(){
// Halton sequence (2,3)
for (int i = 0; i < SEQUENCE_LENGTH; i++) {
offsets[i] = glm::vec2(halton::evaluate<2>(i), halton::evaluate<3>(i));
offsets[i] -= vec2(0.5f);
}
offsets[SEQUENCE_LENGTH] = glm::vec2(0.0f);
}
void JitterSample::configure(const Config& config) {
_freeze = config.stop || config.freeze;
if (config.freeze) {
auto pausedIndex = config.getIndex();
if (_sampleSequence.currentIndex != pausedIndex) {
_sampleSequence.currentIndex = pausedIndex;
}
} else if (config.stop) {
_sampleSequence.currentIndex = -1;
} else {
_sampleSequence.currentIndex = config.getIndex();
}
_scale = config.scale;
}
void JitterSample::run(const render::RenderContextPointer& renderContext, Output& jitter) {
auto& current = _sampleSequence.currentIndex;
if (!_freeze) {
if (current >= 0) {
current = (current + 1) % SEQUENCE_LENGTH;
} else {
current = -1;
}
}
if (current >= 0) {
jitter = _sampleSequence.offsets[current];
} else {
jitter = glm::vec2(0.0f);
}
}
#endif

View file

@ -18,85 +18,128 @@
#include "render/DrawTask.h"
#include "DeferredFrameTransform.h"
#include "VelocityBufferPass.h"
#include "DeferredFramebuffer.h"
#include "SurfaceGeometryPass.h"
class JitterSampleConfig : public render::Job::Config {
class AntialiasingSetupConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(float scale MEMBER scale NOTIFY dirty)
Q_PROPERTY(bool freeze MEMBER freeze NOTIFY dirty)
Q_PROPERTY(bool stop MEMBER stop NOTIFY dirty)
Q_PROPERTY(int index READ getIndex NOTIFY dirty)
Q_PROPERTY(int state READ getState WRITE setState NOTIFY dirty)
Q_PROPERTY(float scale MEMBER scale NOTIFY dirty)
Q_PROPERTY(bool freeze MEMBER freeze NOTIFY dirty)
Q_PROPERTY(bool stop MEMBER stop NOTIFY dirty)
Q_PROPERTY(int index READ getIndex NOTIFY dirty)
Q_PROPERTY(State state READ getState WRITE setState NOTIFY dirty)
Q_PROPERTY(Mode mode READ getAAMode WRITE setAAMode NOTIFY dirty)
public:
JitterSampleConfig() : render::Job::Config(true) {}
AntialiasingSetupConfig() : render::Job::Config(true) {}
float scale{ 0.5f };
bool stop{ false };
bool freeze{ false };
/*@jsdoc
*Antialiasing modes. <table>
* <thead>
* <tr><th>Value</th><th>Name</th><th>Description</th>
* </thead>
* <tbody>
* <tr><td><code>0</code></td><td>NONE</td><td>Antialiasing is disabled.</td></tr>
* <tr><td><code>1</code></td><td>TAA</td><td>Temporal Antialiasing.</td></tr>
* <tr><td><code>2</code></td><td>FXAA</td><td>FXAA.</td></tr>
* <tr><td><code>3</code></td><td>MODE_COUNT</td><td>Indicates number of antialiasing modes</td></tr>
* </tbody>
* </table>
* @typedef {number} AntialiasingMode
*/
enum class Mode {
NONE = 0,
TAA,
FXAA,
MODE_COUNT
};
Q_ENUM(Mode) // Stored as signed int.
void setIndex(int current);
void setState(int state);
/*@jsdoc
*TAA Antialiasing state. <table>
* <thead>
* <tr><th>Value</th><th>Name</th><th>Description</th>
* </thead>
* <tbody>
* <tr><td><code>0</code></td><td>NONE</td><td>TAA is disabled.</td></tr>
* <tr><td><code>1</code></td><td>PAUSE</td><td>TAA jitter is paused.</td></tr>
* <tr><td><code>2</code></td><td>PLAY</td><td>TAA jitter is playing.</td></tr>
* <tr><td><code>3</code></td><td>STATE_COUNT</td><td>Indicates number of antialiasing states</td></tr>
* </tbody>
* </table>
* @typedef {number} AntialiasingState
*/
enum class State
{
NONE = 0,
PAUSE,
PLAY,
STATE_COUNT
};
Q_ENUM(State)
float scale { 0.75f };
bool stop { false };
bool freeze { false };
Mode mode { Mode::TAA };
public slots:
int cycleStopPauseRun();
int prev();
int next();
int none();
int pause();
int play();
State none();
State pause();
State play();
int getIndex() const { return _index; }
int getState() const { return _state; }
void setIndex(int current);
State getState() const { return _state; }
void setState(State state);
Mode getAAMode() const { return mode; }
void setAAMode(Mode mode);
signals:
void dirty();
private:
int _state{ 0 };
int _index{ 0 };
State _state { State::PLAY };
int _index { 0 };
};
class JitterSample {
class AntialiasingSetup {
public:
enum {
SEQUENCE_LENGTH = 64
};
using Config = AntialiasingSetupConfig;
using Output = AntialiasingSetupConfig::Mode;
using JobModel = render::Job::ModelO<AntialiasingSetup, Output, Config>;
using Config = JitterSampleConfig;
using Output = glm::vec2;
using JobModel = render::Job::ModelO<JitterSample, Output, Config>;
AntialiasingSetup();
void configure(const Config& config);
void run(const render::RenderContextPointer& renderContext, Output& jitter);
void run(const render::RenderContextPointer& renderContext, Output& output);
private:
struct SampleSequence {
SampleSequence();
glm::vec2 offsets[SEQUENCE_LENGTH + 1];
int sequenceLength{ SEQUENCE_LENGTH };
int currentIndex{ 0 };
};
SampleSequence _sampleSequence;
float _scale{ 1.0 };
bool _freeze{ false };
std::vector<glm::vec2> _sampleSequence;
float _scale { 1.0f };
int _freezedSampleIndex { 0 };
bool _isStopped { false };
bool _isFrozen { false };
AntialiasingSetupConfig::Mode _mode{ AntialiasingSetupConfig::Mode::TAA };
};
class AntialiasingConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(int mode READ getAAMode WRITE setAAMode NOTIFY dirty)
Q_PROPERTY(float blend MEMBER blend NOTIFY dirty)
Q_PROPERTY(float sharpen MEMBER sharpen NOTIFY dirty)
Q_PROPERTY(float covarianceGamma MEMBER covarianceGamma NOTIFY dirty)
Q_PROPERTY(bool constrainColor MEMBER constrainColor NOTIFY dirty)
Q_PROPERTY(bool feedbackColor MEMBER feedbackColor NOTIFY dirty)
Q_PROPERTY(bool bicubicHistoryFetch MEMBER bicubicHistoryFetch NOTIFY dirty)
Q_PROPERTY(bool debug MEMBER debug NOTIFY dirty)
Q_PROPERTY(float debugX MEMBER debugX NOTIFY dirty)
@ -111,52 +154,26 @@ class AntialiasingConfig : public render::Job::Config {
public:
AntialiasingConfig() : render::Job::Config(true) {}
/*@jsdoc
*Antialiasing modes. <table>
* <thead>
* <tr><th>Value</th><th>Name</th><th>Description</th>
* </thead>
* <tbody>
* <tr><td><code>0</code></td><td>NONE</td><td>Antialiasing is disabled.</td></tr>
* <tr><td><code>1</code></td><td>TAA</td><td>Temporal Antialiasing.</td></tr>
* <tr><td><code>2</code></td><td>FXAA</td><td>FXAA.</td></tr>
* <tr><td><code>3</code></td><td>MODE_COUNT</td><td>Inducates number of antialiasing modes</td></tr>
* </tbody>
* </table>
* @typedef {number} AntialiasingMode
*/
enum Mode {
NONE = 0,
TAA,
FXAA,
MODE_COUNT
};
Q_ENUM(Mode) // Stored as signed int.
void setAAMode(int mode);
int getAAMode() const { return _mode; }
void setDebugFXAA(bool debug) { debugFXAAX = (debug ? 0.0f : 1.0f); emit dirty();}
bool debugFXAA() const { return (debugFXAAX == 0.0f ? true : false); }
int _mode{ TAA }; // '_' prefix but not private?
float blend { 0.2f };
float sharpen { 0.05f };
float blend{ 0.25f };
float sharpen{ 0.05f };
bool constrainColor { true };
float covarianceGamma { 1.15f };
bool feedbackColor { false };
bool bicubicHistoryFetch { true };
bool constrainColor{ true };
float covarianceGamma{ 0.65f };
bool feedbackColor{ false };
float debugX{ 0.0f };
float debugFXAAX{ 1.0f };
float debugShowVelocityThreshold{ 1.0f };
glm::vec2 debugCursorTexcoord{ 0.5f, 0.5f };
float debugOrbZoom{ 2.0f };
float debugX { 0.0f };
float debugFXAAX { 1.0f };
float debugShowVelocityThreshold { 1.0f };
glm::vec2 debugCursorTexcoord { 0.5f, 0.5f };
float debugOrbZoom { 2.0f };
bool debug { false };
bool showCursorPixel { false };
bool showClosestFragment{ false };
bool showClosestFragment { false };
signals:
void dirty();
@ -165,19 +182,15 @@ signals:
#define SET_BIT(bitfield, bitIndex, value) bitfield = ((bitfield) & ~(1 << (bitIndex))) | ((value) << (bitIndex))
#define GET_BIT(bitfield, bitIndex) ((bitfield) & (1 << (bitIndex)))
#define ANTIALIASING_USE_TAA 1
#if ANTIALIASING_USE_TAA
struct TAAParams {
float nope{ 0.0f };
float blend{ 0.15f };
float covarianceGamma{ 1.0f };
float debugShowVelocityThreshold{ 1.0f };
float nope { 0.0f };
float blend { 0.15f };
float covarianceGamma { 0.9f };
float debugShowVelocityThreshold { 1.0f };
glm::ivec4 flags{ 0 };
glm::vec4 pixelInfo{ 0.5f, 0.5f, 2.0f, 0.0f };
glm::vec4 regionInfo{ 0.0f, 0.0f, 1.0f, 0.0f };
glm::ivec4 flags { 0 };
glm::vec4 pixelInfo { 0.5f, 0.5f, 2.0f, 0.0f };
glm::vec4 regionInfo { 0.0f, 0.0f, 1.0f, 0.0f };
void setConstrainColor(bool enabled) { SET_BIT(flags.y, 1, enabled); }
bool isConstrainColor() const { return (bool)GET_BIT(flags.y, 1); }
@ -185,6 +198,12 @@ struct TAAParams {
void setFeedbackColor(bool enabled) { SET_BIT(flags.y, 4, enabled); }
bool isFeedbackColor() const { return (bool)GET_BIT(flags.y, 4); }
void setBicubicHistoryFetch(bool enabled) { SET_BIT(flags.y, 0, enabled); }
bool isBicubicHistoryFetch() const { return (bool)GET_BIT(flags.y, 0); }
void setSharpenedOutput(bool enabled) { SET_BIT(flags.y, 2, enabled); }
bool isSharpenedOutput() const { return (bool)GET_BIT(flags.y, 2); }
void setDebug(bool enabled) { SET_BIT(flags.x, 0, enabled); }
bool isDebug() const { return (bool) GET_BIT(flags.x, 0); }
@ -199,71 +218,52 @@ struct TAAParams {
void setShowClosestFragment(bool enabled) { SET_BIT(flags.x, 3, enabled); }
bool isFXAAEnabled() const { return regionInfo.z == 0.0f; }
};
using TAAParamsBuffer = gpu::StructBuffer<TAAParams>;
class Antialiasing {
public:
using Inputs = render::VaryingSet4 < DeferredFrameTransformPointer, gpu::FramebufferPointer, LinearDepthFramebufferPointer, VelocityFramebufferPointer > ;
using Inputs = render::VaryingSet4<DeferredFrameTransformPointer, DeferredFramebufferPointer, LinearDepthFramebufferPointer, AntialiasingSetupConfig::Mode>;
using Outputs = gpu::TexturePointer;
using Config = AntialiasingConfig;
using JobModel = render::Job::ModelI<Antialiasing, Inputs, Config>;
using JobModel = render::Job::ModelIO<Antialiasing, Inputs, Outputs, Config>;
Antialiasing(bool isSharpenEnabled = true);
~Antialiasing();
void configure(const Config& config);
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs);
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs);
static gpu::PipelinePointer& getAntialiasingPipeline();
static gpu::PipelinePointer& getBlendPipeline();
static gpu::PipelinePointer& getDebugBlendPipeline();
static const gpu::PipelinePointer& getAntialiasingPipeline();
static const gpu::PipelinePointer& getIntensityPipeline();
static const gpu::PipelinePointer& getBlendPipeline();
static const gpu::PipelinePointer& getDebugBlendPipeline();
private:
struct AntialiasingBuffer {
gpu::FramebufferSwapChainPointer _swapChain;
gpu::TexturePointer _textures[2];
gpu::FramebufferSwapChainPointer _antialiasingBuffers;
gpu::TexturePointer _antialiasingTextures[2];
void clear() {
_swapChain.reset();
_textures[0].reset();
_textures[1].reset();
}
};
AntialiasingBuffer _antialiasingBuffers;
gpu::FramebufferPointer _intensityFramebuffer;
gpu::TexturePointer _intensityTexture;
gpu::BufferPointer _blendParamsBuffer;
static gpu::PipelinePointer _antialiasingPipeline;
static gpu::PipelinePointer _intensityPipeline;
static gpu::PipelinePointer _blendPipeline;
static gpu::PipelinePointer _debugBlendPipeline;
TAAParamsBuffer _params;
AntialiasingConfig::Mode _mode{ AntialiasingConfig::TAA };
float _sharpen{ 0.15f };
bool _isSharpenEnabled{ true };
float _sharpen { 0.15f };
bool _isSharpenEnabled { true };
float _debugFXAAX { 0.0f };
};
#else // User setting for antialias mode will probably be broken.
class AntiAliasingConfig : public render::Job::Config { // Not to be confused with AntialiasingConfig...
Q_OBJECT
Q_PROPERTY(bool enabled MEMBER enabled)
public:
AntiAliasingConfig() : render::Job::Config(true) {}
};
class Antialiasing {
public:
using Config = AntiAliasingConfig;
using JobModel = render::Job::ModelI<Antialiasing, gpu::FramebufferPointer, Config>;
Antialiasing();
~Antialiasing();
void configure(const Config& config) {}
void run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer);
static gpu::PipelinePointer& getAntialiasingPipeline();
static gpu::PipelinePointer& getBlendPipeline();
private:
gpu::FramebufferPointer _antialiasingBuffer;
gpu::TexturePointer _antialiasingTexture;
gpu::BufferPointer _paramsBuffer;
static gpu::PipelinePointer _antialiasingPipeline;
static gpu::PipelinePointer _blendPipeline;
int _geometryId { 0 };
};
#endif
#endif // hifi_AntialiasingEffect_h

View file

@ -42,6 +42,7 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext,
auto args = renderContext->args;
gpu::doInBatch("DrawBackgroundStage::run", args->_context, [&](gpu::Batch& batch) {
PROFILE_RANGE_BATCH(batch, "Background");
args->_batch = &batch;
batch.enableSkybox(true);
@ -49,16 +50,11 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext,
batch.setViewportTransform(args->_viewport);
batch.setStateScissorRect(args->_viewport);
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);
bool forward = args->_renderMethod == render::Args::RenderMethod::FORWARD;
batch.setProjectionJitterEnabled(!forward);
// If we're using forward rendering, we need to calculate haze
if (args->_renderMethod == render::Args::RenderMethod::FORWARD) {
if (forward) {
const auto& hazeStage = args->_scene->getStage<HazeStage>();
if (hazeStage && hazeFrame->_elements.size() > 0) {
const auto& hazePointer = hazeStage->getElement(hazeFrame->_elements.front());
@ -68,7 +64,7 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext,
}
}
skybox->render(batch, args->getViewFrustum(), args->_renderMethod == render::Args::RenderMethod::FORWARD);
skybox->render(batch, args->getViewFrustum(), forward, _transformSlot);
});
args->_batch = nullptr;
}

View file

@ -33,9 +33,12 @@ public:
using Inputs = render::VaryingSet3<LightingModelPointer, BackgroundStage::FramePointer, HazeStage::FramePointer>;
using JobModel = render::Job::ModelI<DrawBackgroundStage, Inputs>;
DrawBackgroundStage() {}
DrawBackgroundStage(uint transformSlot) : _transformSlot(transformSlot) {}
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs);
private:
uint _transformSlot;
};
#endif

View file

@ -17,6 +17,7 @@
#include <render/BlurTask.h>
#include "render-utils/ShaderConstants.h"
#include "StencilMaskPass.h"
#define BLOOM_BLUR_LEVEL_COUNT 3
@ -27,7 +28,9 @@ gpu::PipelinePointer DebugBloom::_pipeline;
BloomThreshold::BloomThreshold(unsigned int downsamplingFactor) {
assert(downsamplingFactor > 0);
_parameters.edit()._sampleCount = downsamplingFactor;
auto& params = _parameters.edit();
params._sampleCount = downsamplingFactor;
params._offset = (1.0f - downsamplingFactor) * 0.5f;
}
void BloomThreshold::configure(const Config& config) {}
@ -56,11 +59,6 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons
auto inputBuffer = inputFrameBuffer->getRenderBuffer(0);
auto bufferSize = gpu::Vec2u(inputBuffer->getDimensions());
const auto downSamplingFactor = _parameters.get()._sampleCount;
// Downsample resolution
bufferSize.x /= downSamplingFactor;
bufferSize.y /= downSamplingFactor;
if (!_outputBuffer || _outputBuffer->getSize() != bufferSize) {
auto colorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(inputBuffer->getTexelFormat(), bufferSize.x, bufferSize.y,
@ -68,6 +66,7 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons
_outputBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("BloomThreshold"));
_outputBuffer->setRenderBuffer(0, colorTexture);
_outputBuffer->setStencilBuffer(inputFrameBuffer->getDepthStencilBuffer(), inputFrameBuffer->getDepthStencilBufferFormat());
_parameters.edit()._deltaUV = { 1.0f / bufferSize.x, 1.0f / bufferSize.y };
}

View file

@ -8,8 +8,10 @@
struct Parameters
{
BT_VEC2 _deltaUV;
float _offset;
float _threshold;
int _sampleCount;
float _padding[3];
};
// <@if 1@>

View file

@ -5,6 +5,7 @@
//
// Created by Olivier Prat on 09/26/2017
// Copyright 2017 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -17,11 +18,10 @@ LAYOUT_STD140(binding=RENDER_UTILS_BUFFER_BLOOM_PARAMS) uniform parametersBuffer
Parameters parameters;
};
layout(location=0) in vec2 varTexCoord0;
layout(location=0) out vec4 outFragColor;
void main(void) {
vec2 startUv = varTexCoord0;
vec2 startUv = (vec2(gl_FragCoord.xy) + vec2(parameters._offset)) * parameters._deltaUV;
vec4 maskedColor = vec4(0,0,0,0);
for (int y=0 ; y<parameters._sampleCount ; y++) {

View file

@ -4,6 +4,7 @@
//
// Created by Andrew Meadows 2017.01.17
// Copyright 2017 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -78,7 +79,10 @@ void CauterizedMeshPartPayload::bindTransform(gpu::Batch& batch, const Transform
if (_cauterizedClusterBuffer) {
batch.setUniformBuffer(graphics::slot::buffer::Skinning, _cauterizedClusterBuffer);
}
batch.setModelTransform(_cauterizedTransform);
batch.setModelTransform(_cauterizedTransform, _previousRenderTransform);
if (renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_previousRenderTransform = _cauterizedTransform;
}
} else {
ModelMeshPartPayload::bindTransform(batch, transform, renderMode, mirrorDepth);
}

View file

@ -4,6 +4,7 @@
//
// Created by Clement on 12/3/15.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -131,11 +132,10 @@ static const std::string DEFAULT_SHADOW_DEPTH_SHADER{
static const std::string DEFAULT_SHADOW_CASCADE_SHADER{
"vec3 cascadeColors[4] = vec3[4]( vec3(0,1,0), vec3(0,0,1), vec3(1,0,0), vec3(1) );"
"vec4 getFragmentColor() {"
" DeferredFrameTransform deferredTransform = getDeferredFrameTransform();"
" DeferredFragment frag = unpackDeferredFragment(deferredTransform, uv);"
" DeferredFragment frag = unpackDeferredFragment(uv);"
" vec4 viewPosition = vec4(frag.position.xyz, 1.0);"
" float viewDepth = -viewPosition.z;"
" vec4 worldPosition = getViewInverse() * viewPosition;"
" vec4 worldPosition = getViewInverse(frag.side) * viewPosition;"
" vec4 cascadeShadowCoords[2];"
" ivec2 cascadeIndices;"
" float cascadeMix = determineShadowCascadesOnPixel(worldPosition, viewDepth, cascadeShadowCoords, cascadeIndices);"
@ -231,7 +231,15 @@ static const std::string DEFAULT_AMBIENT_OCCLUSION_BLURRED_SHADER{
static const std::string DEFAULT_VELOCITY_SHADER{
"vec4 getFragmentColor() {"
" return vec4(vec2(texture(debugTexture0, uv).xy), 0.0, 1.0);"
" vec2 velocity = texture(debugTexture0, uv).xy * getWidthHeight(0);"
" vec4 velColor = vec4(0.1f * velocity + 0.5f, 0.0f, 1.0f);"
" return dot(velocity, velocity) > 1e-4 ? velColor : vec4(0.0f, 0.0f, 1.0f, 0.0f);"
"}"
};
static const std::string DEFAULT_ANTIALIASING_INTENSITY_SHADER{
"vec4 getFragmentColor() {"
" return vec4(texture(debugTexture0, uv).rrr, 1.0);"
" }"
};
@ -254,7 +262,7 @@ DebugDeferredBuffer::StandardPipelines DebugDeferredBuffer::_pipelines;
DebugDeferredBuffer::CustomPipelines DebugDeferredBuffer::_customPipelines;
#include <QStandardPaths> // TODO REMOVE: Temporary until UI
DebugDeferredBuffer::DebugDeferredBuffer() {
DebugDeferredBuffer::DebugDeferredBuffer(uint transformSlot) : _transformSlot(transformSlot) {
// TODO REMOVE: Temporary until UI
static const auto DESKTOP_PATH = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation);
static const auto CUSTOM_FILE = DESKTOP_PATH.toStdString() + "/custom.slh";
@ -328,6 +336,8 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Mode mode, const std::strin
return DEFAULT_HALF_NORMAL_SHADER;
case VelocityMode:
return DEFAULT_VELOCITY_SHADER;
case AntialiasingIntensityMode:
return DEFAULT_ANTIALIASING_INTENSITY_SHADER;
case CustomMode:
return getFileContent(customFile, DEFAULT_CUSTOM_SHADER);
default:
@ -404,9 +414,9 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
auto& linearDepthTarget = inputs.get1();
auto& surfaceGeometryFramebuffer = inputs.get2();
auto& ambientOcclusionFramebuffer = inputs.get3();
auto& velocityFramebuffer = inputs.get4();
auto& frameTransform = inputs.get5();
auto& shadowFrame = inputs.get6();
auto& frameTransform = inputs.get4();
auto& shadowFrame = inputs.get5();
const auto& antialiasingIntensityTexture = inputs.get6();
gpu::doInBatch("DebugDeferredBuffer::run", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
@ -415,12 +425,7 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
const auto geometryBuffer = DependencyManager::get<GeometryCache>();
const auto textureCache = DependencyManager::get<TextureCache>();
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat, true);
batch.setSavedViewProjectionTransform(_transformSlot);
batch.setModelTransform(Transform());
using Textures = render_utils::slot::texture::Texture;
@ -438,8 +443,8 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
batch.setResourceTexture(Textures::DeferredDepth, deferredFramebuffer->getPrimaryDepthTexture());
batch.setResourceTexture(Textures::DeferredLighting, deferredFramebuffer->getLightingTexture());
}
if (velocityFramebuffer && _mode == VelocityMode) {
batch.setResourceTexture(Textures::DebugTexture0, velocityFramebuffer->getVelocityTexture());
if (_mode == VelocityMode) {
batch.setResourceTexture(Textures::DebugTexture0, deferredFramebuffer->getDeferredVelocityTexture());
}
if (!shadowFrame->_objects.empty()) {
@ -475,6 +480,10 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
batch.setResourceTexture(Textures::DebugTexture0, ambientOcclusionFramebuffer->getNormalTexture());
}
}
if (antialiasingIntensityTexture && _mode == AntialiasingIntensityMode) {
batch.setResourceTexture(Textures::DebugTexture0, antialiasingIntensityTexture);
}
const glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
const glm::vec2 bottomLeft(_size.x, _size.y);
const glm::vec2 topRight(_size.z, _size.w);

View file

@ -4,6 +4,7 @@
//
// Created by Clement on 12/3/15.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -19,7 +20,6 @@
#include "DeferredFramebuffer.h"
#include "SurfaceGeometryPass.h"
#include "AmbientOcclusionEffect.h"
#include "VelocityBufferPass.h"
#include "LightStage.h"
@ -44,13 +44,13 @@ public:
LinearDepthFramebufferPointer,
SurfaceGeometryFramebufferPointer,
AmbientOcclusionFramebufferPointer,
VelocityFramebufferPointer,
DeferredFrameTransformPointer,
LightStage::ShadowFramePointer>;
LightStage::ShadowFramePointer,
gpu::TexturePointer>;
using Config = DebugDeferredBufferConfig;
using JobModel = render::Job::ModelI<DebugDeferredBuffer, Inputs, Config>;
DebugDeferredBuffer();
DebugDeferredBuffer(uint transformSlot);
~DebugDeferredBuffer();
void configure(const Config& config);
@ -92,6 +92,7 @@ protected:
AmbientOcclusionBlurredMode,
AmbientOcclusionNormalMode,
VelocityMode,
AntialiasingIntensityMode,
CustomMode, // Needs to stay last
NumModes,
@ -100,6 +101,7 @@ protected:
private:
Mode _mode{ Off };
glm::vec4 _size;
uint _transformSlot;
#include "debug_deferred_buffer_shared.slh"

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 5/4/16.
// Copyright 2013 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -45,6 +46,7 @@ struct DeferredFragment {
vec3 fresnel;
float roughness;
int mode;
int side;
float scattering;
float depthVal;
};
@ -58,6 +60,9 @@ vec3 getFresnelF0(float metallic, vec3 metalF0) {
}
<@endif@>
<@include DeferredTransform.slh@>
<$declareDeferredFrameTransform()$>
DeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {
vec4 normalVal;
vec4 diffuseVal;
@ -82,6 +87,8 @@ DeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {
frag.scattering = float(frag.mode == FRAG_MODE_SCATTERING) * specularVal.x;
frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);
frag.side = getStereoSideFromUV(texcoord.x);
return frag;
}
@ -109,18 +116,14 @@ DeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {
frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);
frag.side = getStereoSideFromUV(texcoord.x);
return frag;
}
<@include DeferredTransform.slh@>
<$declareDeferredFrameTransform()$>
vec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {
vec4 unpackDeferredPosition(int side, float depthValue, vec2 texcoord) {
float check = float(isStereo());
float check2 = check * float(texcoord.x > 0.5);
texcoord.x -= check2 * 0.5;
int side = int(check2);
texcoord.x -= check * 0.5 * float(side);
texcoord.x *= 1.0 + check;
return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);
@ -129,7 +132,7 @@ vec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {
// This method to unpack position is fastesst
vec4 unpackDeferredPositionFromZdb(vec2 texcoord) {
float Zdb = texture(depthMap, texcoord).x;
return unpackDeferredPosition(Zdb, texcoord);
return unpackDeferredPosition(getStereoSideFromUV(texcoord.x), Zdb, texcoord);
}
vec4 unpackDeferredPositionFromZeye(vec2 texcoord) {
@ -144,13 +147,13 @@ vec4 unpackDeferredPositionFromZeye(vec2 texcoord) {
return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);
}
DeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {
DeferredFragment unpackDeferredFragment(vec2 texcoord) {
float depthValue = texture(depthMap, texcoord).r;
DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);
frag.depthVal = depthValue;
frag.position = unpackDeferredPosition(frag.depthVal, texcoord);
frag.position = unpackDeferredPosition(frag.side, frag.depthVal, texcoord);
return frag;
}

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 1/12/15.
// Copyright 2013 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -12,12 +13,13 @@
<@def DEFERRED_BUFFER_WRITE_SLH@>
<@include DeferredBuffer.slh@>
<@include DeferredBufferWrite_shared.slh@>
layout(location=0) out vec4 _fragColor0; // albedo / metallic
layout(location=1) out vec4 _fragColor1; // Normal
layout(location=2) out vec4 _fragColor2; // scattering / emissive / occlusion
layout(location=3) out vec4 _fragColor3; // emissive
layout(location = DEFERRED_COLOR_SLOT) out vec4 _albedoMetallic; // albedo / metallic
layout(location = DEFERRED_NORMAL_SLOT) out vec4 _normalRoughness; // normal / roughness
layout(location = DEFERRED_SPECULAR_SLOT) out vec4 _scatteringEmissiveOcclusion; // scattering / emissive / occlusion
layout(location = DEFERRED_VELOCITY_SLOT) out vec4 _velocity; // velocity
layout(location = DEFERRED_LIGHTING_SLOT) out vec4 _lighting; // emissive
// the alpha threshold
const float alphaThreshold = 0.5;
@ -25,51 +27,67 @@ float evalOpaqueFinalAlpha(float alpha, float mapAlpha) {
return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));
}
<@include VelocityWrite.slh@>
<@include DefaultMaterials.slh@>
<@include LightingModel.slh@>
void packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {
void packDeferredFragment(vec4 prevPositionCS, vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {
if (alpha < 1.0) {
discard;
}
float check = float(scattering > 0.0);
_fragColor0 = vec4(albedo, mix(packShadedMetallic(metallic), packScatteringMetallic(metallic), check));
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor2 = vec4(mix(emissive, vec3(scattering), check), occlusion);
_fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);
_albedoMetallic = vec4(albedo, mix(packShadedMetallic(metallic), packScatteringMetallic(metallic), check));
_normalRoughness = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_scatteringEmissiveOcclusion = vec4(mix(emissive, vec3(scattering), check), occlusion);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(isEmissiveEnabled() * emissive, 1.0);
}
void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 lightmap) {
void packDeferredFragmentLightmap(vec4 prevPositionCS, vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 lightmap) {
if (alpha < 1.0) {
discard;
}
_fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);
_fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);
_albedoMetallic = vec4(albedo, packLightmappedMetallic(metallic));
_normalRoughness = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_scatteringEmissiveOcclusion = vec4(isLightmapEnabled() * lightmap, 1.0);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);
}
void packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {
void packDeferredFragmentUnlit(vec4 prevPositionCS, vec3 normal, float alpha, vec3 color) {
// to reduce texel flickering for floating point error we discard when alpha is "almost one"
if (alpha < 0.999999) {
discard;
}
_fragColor0 = vec4(color, packUnlit());
_fragColor1 = vec4(packNormal(normal), 1.0);
_fragColor2 = vec4(vec3(0.0), 1.0);
_fragColor3 = vec4(color, 1.0);
_albedoMetallic = vec4(color, packUnlit());
_normalRoughness = vec4(packNormal(normal), 1.0);
_scatteringEmissiveOcclusion = vec4(vec3(0.0), 1.0);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(color, 1.0);
}
void packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, float roughness) {
void packDeferredFragmentTranslucent(vec4 prevPositionCS, vec3 normal, float alpha, vec3 albedo, float roughness) {
if (alpha <= 0.0) {
discard;
}
_fragColor0 = vec4(albedo.rgb, alpha);
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor2 = vec4(vec3(0.0), 1.0);
_fragColor3 = vec4(0.0);
_albedoMetallic = vec4(albedo.rgb, alpha);
_normalRoughness = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_scatteringEmissiveOcclusion = vec4(vec3(0.0), 1.0);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(0.0);
}
void packDeferredFragmentTranslucentUnlit(vec4 prevPositionCS, vec3 normal, float alpha, vec3 color) {
if (alpha <= 0.0) {
discard;
}
_albedoMetallic = vec4(color, alpha);
_normalRoughness = vec4(packNormal(normal), 1.0);
_scatteringEmissiveOcclusion = vec4(vec3(0.0), 1.0);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(color, 1.0);
}
<@endif@>

View file

@ -0,0 +1,12 @@
// glsl / C++ compatible source as interface for DeferredBuffer layout
#define DEFERRED_COLOR_SLOT 0
#define DEFERRED_NORMAL_SLOT 1
#define DEFERRED_SPECULAR_SLOT 2
#define DEFERRED_VELOCITY_SLOT 3
#define DEFERRED_LIGHTING_SLOT 4
// <@if 1@>
// Trigger Scribe include
// <@endif@> <!def that !>
//

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau 6/3/2016.
// Copyright 2016 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -18,73 +19,51 @@ DeferredFrameTransform::DeferredFrameTransform() {
_frameTransformBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(FrameTransform), (const gpu::Byte*) &frameTransform));
}
void DeferredFrameTransform::update(RenderArgs* args, glm::vec2 jitter) {
void DeferredFrameTransform::update(RenderArgs* args) {
// Update the depth info with near and far (same for stereo)
auto nearZ = args->getViewFrustum().getNearClip();
auto farZ = args->getViewFrustum().getFarClip();
auto& frameTransformBuffer = _frameTransformBuffer.edit<FrameTransform>();
frameTransformBuffer.depthInfo = glm::vec4(nearZ*farZ, farZ - nearZ, -farZ, 0.0f);
frameTransformBuffer.infos.depthInfo = glm::vec4(nearZ * farZ, farZ - nearZ, -farZ, 0.0f);
frameTransformBuffer.infos.pixelInfo = args->_viewport;
frameTransformBuffer.pixelInfo = args->_viewport;
//_parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;
Transform cameraTransform;
args->getViewFrustum().evalViewTransform(cameraTransform);
cameraTransform.getMatrix(frameTransformBuffer.invView);
cameraTransform.getInverseMatrix(frameTransformBuffer.view);
args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.projectionMono);
// There may be some sort of mismatch here if the viewport size isn't the same as the frame buffer size as
// jitter is normalized by frame buffer size in TransformCamera. But we should be safe.
jitter.x /= args->_viewport.z;
jitter.y /= args->_viewport.w;
args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.infos.projectionMono);
// Running in stereo ?
bool isStereo = args->isStereo();
if (!isStereo) {
frameTransformBuffer.projectionUnjittered[0] = frameTransformBuffer.projectionMono;
frameTransformBuffer.invProjectionUnjittered[0] = glm::inverse(frameTransformBuffer.projectionUnjittered[0]);
frameTransformBuffer.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
frameTransformBuffer.projection[0] = frameTransformBuffer.projectionUnjittered[0];
frameTransformBuffer.projection[0][2][0] += jitter.x;
frameTransformBuffer.projection[0][2][1] += jitter.y;
frameTransformBuffer.invProjection[0] = glm::inverse(frameTransformBuffer.projection[0]);
frameTransformBuffer.infos.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
frameTransformBuffer.infos.invPixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
} else {
mat4 projMats[2];
mat4 eyeViews[2];
args->_context->getStereoProjections(projMats);
args->_context->getStereoViews(eyeViews);
jitter.x *= 2.0f;
for (int i = 0; i < 2; i++) {
// Compose the mono Eye space to Stereo clip space Projection Matrix
auto sideViewMat = projMats[i] * eyeViews[i];
frameTransformBuffer.projectionUnjittered[i] = sideViewMat;
frameTransformBuffer.invProjectionUnjittered[i] = glm::inverse(sideViewMat);
frameTransformBuffer.projection[i] = frameTransformBuffer.projectionUnjittered[i];
frameTransformBuffer.projection[i][2][0] += jitter.x;
frameTransformBuffer.projection[i][2][1] += jitter.y;
frameTransformBuffer.invProjection[i] = glm::inverse(frameTransformBuffer.projection[i]);
}
frameTransformBuffer.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / (float)(args->_viewport.z >> 1), 1.0f / args->_viewport.w, 0.0f, 0.0f);
frameTransformBuffer.infos.pixelInfo.z *= 0.5f;
frameTransformBuffer.infos.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
frameTransformBuffer.infos.invPixelInfo = glm::vec4(2.0f / (float)(args->_viewport.z), 1.0f / args->_viewport.w, 0.0f, 0.0f);
}
}
void GenerateDeferredFrameTransform::run(const render::RenderContextPointer& renderContext, const Input& jitter, Output& frameTransform) {
void GenerateDeferredFrameTransform::run(const render::RenderContextPointer& renderContext, Output& frameTransform) {
if (!frameTransform) {
frameTransform = std::make_shared<DeferredFrameTransform>();
}
frameTransform->update(renderContext->args, jitter);
RenderArgs* args = renderContext->args;
frameTransform->update(args);
gpu::doInBatch("GenerateDeferredFrameTransform::run", args->_context, [&](gpu::Batch& batch) {
args->_batch = &batch;
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);
// This is the main view / projection transform that will be reused later on
batch.saveViewProjectionTransform(_transformSlot);
// Copy it to the deferred transform for the lighting pass
batch.copySavedViewProjectionTransformToBuffer(_transformSlot, frameTransform->getFrameTransformBuffer()._buffer,
sizeof(DeferredFrameTransform::DeferredFrameInfo));
});
}

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau 6/3/2016.
// Copyright 2016 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -20,68 +21,44 @@
// DeferredFrameTransform is a helper class gathering in one place the needed camera transform
// and frame resolution needed for all the deferred rendering passes taking advantage of the Deferred buffers
class DeferredFrameTransform {
friend class GenerateDeferredFrameTransform;
public:
using UniformBufferView = gpu::BufferView;
DeferredFrameTransform();
void update(RenderArgs* args, glm::vec2 jitter);
void update(RenderArgs* args);
UniformBufferView getFrameTransformBuffer() const { return _frameTransformBuffer; }
protected:
// Class describing the uniform buffer with the transform info common to the AO shaders
// It s changing every frame
class FrameTransform {
#include "DeferredTransform_shared.slh"
class FrameTransform : public _DeferredFrameTransform {
public:
// Pixel info is { viewport width height}
glm::vec4 pixelInfo;
glm::vec4 invpixelInfo;
// Depth info is { n.f, f - n, -f}
glm::vec4 depthInfo;
// Stereo info is { isStereoFrame, halfWidth }
glm::vec4 stereoInfo{ 0.0 };
// Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
glm::mat4 projection[2];
// Inverse proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
glm::mat4 invProjection[2];
// THe mono projection for sure
glm::mat4 projectionMono;
// Inv View matrix from eye space (mono) to world space
glm::mat4 invView;
// View matrix from world space to eye space (mono)
glm::mat4 view;
// Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space without jittering
glm::mat4 projectionUnjittered[2];
// Inverse proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space without jittering
glm::mat4 invProjectionUnjittered[2];
FrameTransform() {}
FrameTransform() { infos.stereoInfo = glm::vec4(0.0f); }
};
UniformBufferView _frameTransformBuffer;
UniformBufferView _frameTransformBuffer;
};
using DeferredFrameTransformPointer = std::shared_ptr<DeferredFrameTransform>;
class GenerateDeferredFrameTransform {
public:
using Input = glm::vec2;
using Output = DeferredFrameTransformPointer;
using JobModel = render::Job::ModelIO<GenerateDeferredFrameTransform, Input, Output>;
using JobModel = render::Job::ModelO<GenerateDeferredFrameTransform, Output>;
GenerateDeferredFrameTransform() {}
GenerateDeferredFrameTransform(uint transformSlot) : _transformSlot(transformSlot) {}
void run(const render::RenderContextPointer& renderContext, const Input& jitter, Output& frameTransform);
void run(const render::RenderContextPointer& renderContext, Output& frameTransform);
private:
uint _transformSlot;
};
#endif // hifi_DeferredFrameTransform_h

View file

@ -4,12 +4,18 @@
//
// Created by Sam Gateau 7/11/2016.
// Copyright 2016 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "DeferredFramebuffer.h"
#include "DeferredBufferWrite_shared.slh"
#include "gpu/Batch.h"
#include "gpu/Context.h"
DeferredFramebuffer::DeferredFramebuffer() {
}
@ -36,8 +42,10 @@ void DeferredFramebuffer::updatePrimaryDepth(const gpu::TexturePointer& depthBuf
_deferredColorTexture.reset();
_deferredNormalTexture.reset();
_deferredSpecularTexture.reset();
_deferredVelocityTexture.reset();
_lightingTexture.reset();
_lightingFramebuffer.reset();
_lightingWithVelocityFramebuffer.reset();
}
}
@ -46,8 +54,9 @@ void DeferredFramebuffer::allocate() {
_deferredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("deferred"));
_deferredFramebufferDepthColor = gpu::FramebufferPointer(gpu::Framebuffer::create("deferredDepthColor"));
auto colorFormat = gpu::Element::COLOR_SRGBA_32;
auto linearFormat = gpu::Element::COLOR_RGBA_32;
const auto colorFormat = gpu::Element::COLOR_SRGBA_32;
const auto linearFormat = gpu::Element::COLOR_RGBA_32;
const auto halfFormat = gpu::Element(gpu::VEC2, gpu::HALF, gpu::XY);
auto width = _frameSize.x;
auto height = _frameSize.y;
@ -56,10 +65,12 @@ void DeferredFramebuffer::allocate() {
_deferredColorTexture = gpu::Texture::createRenderBuffer(colorFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_deferredNormalTexture = gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_deferredSpecularTexture = gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_deferredVelocityTexture = gpu::Texture::createRenderBuffer(halfFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture);
_deferredFramebuffer->setRenderBuffer(1, _deferredNormalTexture);
_deferredFramebuffer->setRenderBuffer(2, _deferredSpecularTexture);
_deferredFramebuffer->setRenderBuffer(DEFERRED_COLOR_SLOT, _deferredColorTexture);
_deferredFramebuffer->setRenderBuffer(DEFERRED_NORMAL_SLOT, _deferredNormalTexture);
_deferredFramebuffer->setRenderBuffer(DEFERRED_SPECULAR_SLOT, _deferredSpecularTexture);
_deferredFramebuffer->setRenderBuffer(DEFERRED_VELOCITY_SLOT, _deferredVelocityTexture);
_deferredFramebufferDepthColor->setRenderBuffer(0, _deferredColorTexture);
@ -80,8 +91,12 @@ void DeferredFramebuffer::allocate() {
_lightingFramebuffer->setRenderBuffer(0, _lightingTexture);
_lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
_deferredFramebuffer->setRenderBuffer(3, _lightingTexture);
_lightingWithVelocityFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("lighting_velocity"));
_lightingWithVelocityFramebuffer->setRenderBuffer(0, _lightingTexture);
_lightingWithVelocityFramebuffer->setRenderBuffer(1, _deferredVelocityTexture);
_lightingWithVelocityFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
_deferredFramebuffer->setRenderBuffer(DEFERRED_LIGHTING_SLOT, _lightingTexture);
}
@ -127,6 +142,13 @@ gpu::TexturePointer DeferredFramebuffer::getDeferredSpecularTexture() {
return _deferredSpecularTexture;
}
gpu::TexturePointer DeferredFramebuffer::getDeferredVelocityTexture() {
if (!_deferredVelocityTexture) {
allocate();
}
return _deferredVelocityTexture;
}
gpu::FramebufferPointer DeferredFramebuffer::getLightingFramebuffer() {
if (!_lightingFramebuffer) {
allocate();
@ -134,6 +156,13 @@ gpu::FramebufferPointer DeferredFramebuffer::getLightingFramebuffer() {
return _lightingFramebuffer;
}
gpu::FramebufferPointer DeferredFramebuffer::getLightingWithVelocityFramebuffer() {
if (!_lightingWithVelocityFramebuffer) {
allocate();
}
return _lightingWithVelocityFramebuffer;
}
gpu::TexturePointer DeferredFramebuffer::getLightingTexture() {
if (!_lightingTexture) {
allocate();

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau 7/11/2016.
// Copyright 2016 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -15,10 +16,10 @@
#include "gpu/Resource.h"
#include "gpu/Framebuffer.h"
// DeferredFramebuffer is a helper class gathering in one place the GBuffer (Framebuffer) and lighting framebuffer
class DeferredFramebuffer {
public:
DeferredFramebuffer();
gpu::FramebufferPointer getDeferredFramebuffer();
@ -27,8 +28,10 @@ public:
gpu::TexturePointer getDeferredColorTexture();
gpu::TexturePointer getDeferredNormalTexture();
gpu::TexturePointer getDeferredSpecularTexture();
gpu::TexturePointer getDeferredVelocityTexture();
gpu::FramebufferPointer getLightingFramebuffer();
gpu::FramebufferPointer getLightingWithVelocityFramebuffer();
gpu::TexturePointer getLightingTexture();
// Update the depth buffer which will drive the allocation of all the other resources according to its size.
@ -47,13 +50,15 @@ protected:
gpu::TexturePointer _deferredColorTexture;
gpu::TexturePointer _deferredNormalTexture;
gpu::TexturePointer _deferredSpecularTexture;
gpu::TexturePointer _deferredVelocityTexture;
gpu::TexturePointer _lightingTexture;
gpu::FramebufferPointer _lightingFramebuffer;
gpu::FramebufferPointer _lightingWithVelocityFramebuffer;
glm::ivec2 _frameSize;
};
using DeferredFramebufferPointer = std::shared_ptr<DeferredFramebuffer>;
#endif // hifi_DeferredFramebuffer_h
#endif // hifi_DeferredFramebuffer_h

View file

@ -300,8 +300,9 @@ void PrepareDeferred::run(const RenderContextPointer& renderContext, const Input
// Clear Color, Depth and Stencil for deferred buffer
batch.clearFramebuffer(
gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_COLOR1 | gpu::Framebuffer::BUFFER_COLOR2 | gpu::Framebuffer::BUFFER_COLOR3 |
gpu::Framebuffer::BUFFER_DEPTH |
gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_COLOR1 |
gpu::Framebuffer::BUFFER_COLOR2 | gpu::Framebuffer::BUFFER_COLOR3 |
gpu::Framebuffer::BUFFER_COLOR4 | gpu::Framebuffer::BUFFER_DEPTH |
gpu::Framebuffer::BUFFER_STENCIL,
vec4(vec3(0), 0), 1.0, 0, true);
@ -506,7 +507,7 @@ void RenderDeferredLocals::run(const render::RenderContextPointer& renderContext
}
}
void RenderDeferredCleanup::run(const render::RenderContextPointer& renderContext) {
void RenderDeferredCleanup::run(const render::RenderContextPointer& renderContext, const DeferredFramebufferPointer& deferredFramebuffer) {
auto args = renderContext->args;
auto& batch = (*args->_batch);
{
@ -531,6 +532,8 @@ void RenderDeferredCleanup::run(const render::RenderContextPointer& renderContex
batch.setUniformBuffer(ru::Buffer::LightClusterGrid, nullptr);
batch.setUniformBuffer(ru::Buffer::LightClusterContent, nullptr);
// Restore the lighting with velocity framebuffer so that following stages, like drawing the background, can get motion vectors.
batch.setFramebuffer(deferredFramebuffer->getLightingWithVelocityFramebuffer());
}
}
@ -571,7 +574,7 @@ void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs
lightsJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, surfaceGeometryFramebuffer, lightClusters);
cleanupJob.run(renderContext);
cleanupJob.run(renderContext, deferredFramebuffer);
_gpuTimer->end(batch);
});

Some files were not shown because too many files have changed in this diff Show more