merge old work

HifiExperiments 2024-02-24 23:26:34 -08:00
parent 1fe2fc5fba
commit 56f697e5f0
165 changed files with 2673 additions and 2031 deletions

View file

@@ -4,6 +4,7 @@
//
// Created by Zach Fox on 2019-07-10
// Copyright 2019 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -501,7 +502,7 @@ Flickable {
ListModel {
id: antialiasingModel
// Maintain same order as "AntialiasingConfig::Mode".
// Maintain same order as "AntialiasingSetupConfig::Mode".
ListElement {
text: "None"
}

View file

@@ -275,7 +275,7 @@ public:
void SecondaryCameraRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor) {
const auto cachedArg = task.addJob<SecondaryCameraJob>("SecondaryCamera");
task.addJob<RenderViewTask>("RenderSecondView", cullFunctor, render::ItemKey::TAG_BITS_1, render::ItemKey::TAG_BITS_1);
task.addJob<RenderViewTask>("RenderSecondView", cullFunctor, render::ItemKey::TAG_BITS_1, render::ItemKey::TAG_BITS_1, RenderViewTask::TransformOffset::SECONDARY_VIEW);
task.addJob<EndSecondaryCameraFrame>("EndSecondaryCamera", cachedArg);
}

View file

@@ -262,14 +262,14 @@ void GraphicsEngine::render_performFrame() {
batch.enableStereo(isStereo);
batch.clearDepthStencilFramebuffer(1.0, 0);
batch.setViewportTransform({ 0, 0, finalFramebuffer->getSize() });
_splashScreen->render(batch, viewFrustum, renderArgs._renderMethod == RenderArgs::RenderMethod::FORWARD);
_splashScreen->render(batch, viewFrustum, renderArgs._renderMethod == RenderArgs::RenderMethod::FORWARD, render::RenderEngine::TS_BACKGROUND_VIEW);
});
} else {
{
PROFILE_RANGE(render, "/renderOverlay");
PerformanceTimer perfTimer("renderOverlay");
// NOTE: There is no batch associated with this renderArgs
// the ApplicationOverlay class assumes it's viewport is set up to be the device size
// the ApplicationOverlay class assumes its viewport is set up to be the device size
renderArgs._viewport = glm::ivec4(0, 0, qApp->getDeviceSize());
qApp->getApplicationOverlay().renderOverlay(&renderArgs);
}

View file

@@ -444,7 +444,10 @@ void ParabolaPointer::RenderState::ParabolaRenderItem::render(RenderArgs* args)
Transform transform;
transform.setTranslation(_origin);
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == RenderArgs::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == RenderArgs::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
batch.setPipeline(getParabolaPipeline(args->_renderMethod == render::Args::RenderMethod::FORWARD));
@@ -479,4 +482,4 @@ namespace render {
template <> const ShapeKey shapeGetShapeKey(const ParabolaPointer::RenderState::ParabolaRenderItem::Pointer& payload) {
return ShapeKey::Builder::ownPipeline();
}
}
}

View file

@@ -1,6 +1,7 @@
//
// Created by Sam Gondelman 7/17/2018
// Copyright 2018 High Fidelity, Inc.
// Copyright 2024 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -62,6 +63,7 @@ public:
render::ItemKey _key;
glm::vec3 _origin { 0.0f };
Transform _prevRenderTransform;
bool _isVisibleInSecondaryCamera { DEFAULT_PARABOLA_ISVISIBLEINSECONDARYCAMERA };
bool _drawInFront { DEFAULT_PARABOLA_DRAWINFRONT };
bool _visible { false };

View file

@@ -19,14 +19,14 @@ STATIC_SCRIPT_TYPES_INITIALIZER((+[](ScriptManager* manager){
auto scriptEngine = manager->engine().get();
scriptRegisterMetaType<RenderScriptingInterface::RenderMethod, scriptValueFromEnumClass<RenderScriptingInterface::RenderMethod>, scriptValueToEnumClass<RenderScriptingInterface::RenderMethod> >(scriptEngine, "RenderMethod");
scriptRegisterMetaType<AntialiasingConfig::Mode, scriptValueFromEnumClass<AntialiasingConfig::Mode>, scriptValueToEnumClass<AntialiasingConfig::Mode> >(scriptEngine, "Mode");
scriptRegisterMetaType<AntialiasingSetupConfig::Mode, scriptValueFromEnumClass<AntialiasingSetupConfig::Mode>, scriptValueToEnumClass<AntialiasingSetupConfig::Mode> >(scriptEngine, "Mode");
}));
STATIC_SCRIPT_INITIALIZER(+[](ScriptManager* manager){
auto scriptEngine = manager->engine().get();
scriptEngine->registerEnum("Render.RenderMethod",QMetaEnum::fromType<RenderScriptingInterface::RenderMethod>());
scriptEngine->registerEnum("AntialiasingMode",QMetaEnum::fromType<AntialiasingConfig::Mode>());
scriptEngine->registerEnum("AntialiasingMode",QMetaEnum::fromType<AntialiasingSetupConfig::Mode>());
});
RenderScriptingInterface* RenderScriptingInterface::getInstance() {
@@ -49,7 +49,7 @@ void RenderScriptingInterface::loadSettings() {
_shadowsEnabled = (_shadowsEnabledSetting.get());
_ambientOcclusionEnabled = (_ambientOcclusionEnabledSetting.get());
//_antialiasingMode = (_antialiasingModeSetting.get());
_antialiasingMode = static_cast<AntialiasingConfig::Mode>(_antialiasingModeSetting.get());
_antialiasingMode = static_cast<AntialiasingSetupConfig::Mode>(_antialiasingModeSetting.get());
_viewportResolutionScale = (_viewportResolutionScaleSetting.get());
_fullScreenScreen = (_fullScreenScreenSetting.get());
});
@@ -84,10 +84,16 @@ void RenderScriptingInterface::forceRenderMethod(RenderMethod renderMethod) {
_renderMethod = (int)renderMethod;
_renderMethodSetting.set((int)renderMethod);
auto config = dynamic_cast<render::SwitchConfig*>(qApp->getRenderEngine()->getConfiguration()->getConfig("RenderMainView.DeferredForwardSwitch"));
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
auto config = dynamic_cast<render::SwitchConfig*>(renderConfig->getConfig("RenderMainView.DeferredForwardSwitch"));
if (config) {
config->setBranch((int)renderMethod);
}
auto secondaryConfig = dynamic_cast<render::SwitchConfig*>(renderConfig->getConfig("RenderSecondView.DeferredForwardSwitch"));
if (secondaryConfig) {
secondaryConfig->setBranch((int)renderMethod);
}
});
}
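
The same look-up-and-set sequence now runs for both the main and second views; a small helper, sketched here using only calls that already appear in this function, would keep the duplication in one place:

    // Hypothetical helper: apply one branch index to a named DeferredForwardSwitch job.
    static void setDeferredForwardBranch(const QString& switchPath, int branch) {
        auto config = dynamic_cast<render::SwitchConfig*>(
            qApp->getRenderEngine()->getConfiguration()->getConfig(switchPath));
        if (config) {
            config->setBranch(branch);
        }
    }
    // Usage:
    //   setDeferredForwardBranch("RenderMainView.DeferredForwardSwitch", (int)renderMethod);
    //   setDeferredForwardBranch("RenderSecondView.DeferredForwardSwitch", (int)renderMethod);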
@@ -111,17 +117,16 @@ void RenderScriptingInterface::forceShadowsEnabled(bool enabled) {
_renderSettingLock.withWriteLock([&] {
_shadowsEnabled = (enabled);
_shadowsEnabledSetting.set(enabled);
Menu::getInstance()->setIsOptionChecked(MenuOption::Shadows, enabled);
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
assert(renderConfig);
auto lightingModelConfig = renderConfig->getConfig<MakeLightingModel>("RenderMainView.LightingModel");
if (lightingModelConfig) {
Menu::getInstance()->setIsOptionChecked(MenuOption::Shadows, enabled);
lightingModelConfig->setShadow(enabled);
}
auto secondaryLightingModelConfig = renderConfig->getConfig<MakeLightingModel>("RenderSecondView.LightingModel");
if (secondaryLightingModelConfig) {
Menu::getInstance()->setIsOptionChecked(MenuOption::Shadows, enabled);
secondaryLightingModelConfig->setShadow(enabled);
}
});
@@ -142,63 +147,70 @@ void RenderScriptingInterface::forceAmbientOcclusionEnabled(bool enabled) {
_renderSettingLock.withWriteLock([&] {
_ambientOcclusionEnabled = (enabled);
_ambientOcclusionEnabledSetting.set(enabled);
Menu::getInstance()->setIsOptionChecked(MenuOption::AmbientOcclusion, enabled);
auto lightingModelConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<MakeLightingModel>("RenderMainView.LightingModel");
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
auto lightingModelConfig = renderConfig->getConfig<MakeLightingModel>("RenderMainView.LightingModel");
if (lightingModelConfig) {
Menu::getInstance()->setIsOptionChecked(MenuOption::AmbientOcclusion, enabled);
lightingModelConfig->setAmbientOcclusion(enabled);
}
auto secondaryLightingModelConfig = renderConfig->getConfig<MakeLightingModel>("RenderSecondView.LightingModel");
if (secondaryLightingModelConfig) {
secondaryLightingModelConfig->setAmbientOcclusion(enabled);
}
});
}
AntialiasingConfig::Mode RenderScriptingInterface::getAntialiasingMode() const {
AntialiasingSetupConfig::Mode RenderScriptingInterface::getAntialiasingMode() const {
return _antialiasingMode;
}
void RenderScriptingInterface::setAntialiasingMode(AntialiasingConfig::Mode mode) {
void RenderScriptingInterface::setAntialiasingMode(AntialiasingSetupConfig::Mode mode) {
if (_antialiasingMode != mode) {
forceAntialiasingMode(mode);
emit settingsChanged();
}
}
void setAntialiasingModeForView(AntialiasingConfig::Mode mode, JitterSampleConfig *jitterCamConfig, AntialiasingConfig *antialiasingConfig) {
void setAntialiasingModeForView(AntialiasingSetupConfig::Mode mode, AntialiasingSetupConfig *antialiasingSetupConfig, AntialiasingConfig *antialiasingConfig) {
switch (mode) {
case AntialiasingConfig::Mode::NONE:
jitterCamConfig->none();
case AntialiasingSetupConfig::Mode::NONE:
antialiasingSetupConfig->none();
antialiasingConfig->blend = 1;
antialiasingConfig->setDebugFXAA(false);
break;
case AntialiasingConfig::Mode::TAA:
jitterCamConfig->play();
case AntialiasingSetupConfig::Mode::TAA:
antialiasingSetupConfig->play();
antialiasingConfig->blend = 0.25;
antialiasingConfig->setDebugFXAA(false);
break;
case AntialiasingConfig::Mode::FXAA:
jitterCamConfig->none();
case AntialiasingSetupConfig::Mode::FXAA:
antialiasingSetupConfig->none();
antialiasingConfig->blend = 0.25;
antialiasingConfig->setDebugFXAA(true);
break;
default:
jitterCamConfig->none();
antialiasingSetupConfig->none();
antialiasingConfig->blend = 1;
antialiasingConfig->setDebugFXAA(false);
break;
}
}
void RenderScriptingInterface::forceAntialiasingMode(AntialiasingConfig::Mode mode) {
void RenderScriptingInterface::forceAntialiasingMode(AntialiasingSetupConfig::Mode mode) {
_renderSettingLock.withWriteLock([&] {
_antialiasingMode = mode;
auto mainViewJitterCamConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<JitterSample>("RenderMainView.JitterCam");
auto mainViewAntialiasingConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<Antialiasing>("RenderMainView.Antialiasing");
auto secondViewJitterCamConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<JitterSample>("RenderSecondView.JitterCam");
auto secondViewAntialiasingConfig = qApp->getRenderEngine()->getConfiguration()->getConfig<Antialiasing>("RenderSecondView.Antialiasing");
if (mode != AntialiasingConfig::Mode::NONE
&& mode != AntialiasingConfig::Mode::TAA
&& mode != AntialiasingConfig::Mode::FXAA) {
_antialiasingMode = AntialiasingConfig::Mode::NONE;
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
auto mainViewJitterCamConfig = renderConfig->getConfig<AntialiasingSetup>("RenderMainView.JitterCam");
auto mainViewAntialiasingConfig = renderConfig->getConfig<Antialiasing>("RenderMainView.Antialiasing");
auto secondViewJitterCamConfig = renderConfig->getConfig<AntialiasingSetup>("RenderSecondView.JitterCam");
auto secondViewAntialiasingConfig = renderConfig->getConfig<Antialiasing>("RenderSecondView.Antialiasing");
if (mode != AntialiasingSetupConfig::Mode::NONE
&& mode != AntialiasingSetupConfig::Mode::TAA
&& mode != AntialiasingSetupConfig::Mode::FXAA) {
_antialiasingMode = AntialiasingSetupConfig::Mode::NONE;
}
if (mainViewJitterCamConfig && mainViewAntialiasingConfig) {
setAntialiasingModeForView( mode, mainViewJitterCamConfig, mainViewAntialiasingConfig);
@@ -271,7 +283,7 @@ void RenderScriptingInterface::forceViewportResolutionScale(float scale) {
return;
}
_renderSettingLock.withWriteLock([&] {
_viewportResolutionScale = (scale);
_viewportResolutionScale = scale;
_viewportResolutionScaleSetting.set(scale);
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
@@ -279,12 +291,23 @@ void RenderScriptingInterface::forceViewportResolutionScale(float scale) {
auto deferredView = renderConfig->getConfig("RenderMainView.RenderDeferredTask");
// mainView can be null if we're rendering in forward mode
if (deferredView) {
deferredView->setProperty("resolutionScale", _viewportResolutionScale);
deferredView->setProperty("resolutionScale", scale);
}
auto forwardView = renderConfig->getConfig("RenderMainView.RenderForwardTask");
// forwardView can be null if we're rendering in deferred mode
if (forwardView) {
forwardView->setProperty("resolutionScale", _viewportResolutionScale);
forwardView->setProperty("resolutionScale", scale);
}
auto deferredSecondView = renderConfig->getConfig("RenderSecondView.RenderDeferredTask");
// deferredSecondView can be null if we're rendering in forward mode
if (deferredSecondView) {
deferredSecondView->setProperty("resolutionScale", scale);
}
auto forwardSecondView = renderConfig->getConfig("RenderSecondView.RenderForwardTask");
// forwardSecondView can be null if we're rendering in deferred mode
if (forwardSecondView) {
forwardSecondView->setProperty("resolutionScale", scale);
}
});
}

View file

@@ -39,7 +39,7 @@ class RenderScriptingInterface : public QObject {
Q_PROPERTY(RenderMethod renderMethod READ getRenderMethod WRITE setRenderMethod NOTIFY settingsChanged)
Q_PROPERTY(bool shadowsEnabled READ getShadowsEnabled WRITE setShadowsEnabled NOTIFY settingsChanged)
Q_PROPERTY(bool ambientOcclusionEnabled READ getAmbientOcclusionEnabled WRITE setAmbientOcclusionEnabled NOTIFY settingsChanged)
Q_PROPERTY(AntialiasingConfig::Mode antialiasingMode READ getAntialiasingMode WRITE setAntialiasingMode NOTIFY settingsChanged)
Q_PROPERTY(AntialiasingSetupConfig::Mode antialiasingMode READ getAntialiasingMode WRITE setAntialiasingMode NOTIFY settingsChanged)
Q_PROPERTY(float viewportResolutionScale READ getViewportResolutionScale WRITE setViewportResolutionScale NOTIFY settingsChanged)
Q_PROPERTY(float verticalFieldOfView READ getVerticalFieldOfView WRITE setVerticalFieldOfView NOTIFY settingsChanged)
@@ -153,14 +153,14 @@ public slots:
* @function Render.getAntialiasingMode
* @returns {AntialiasingMode} The active anti-aliasing mode.
*/
AntialiasingConfig::Mode getAntialiasingMode() const;
AntialiasingSetupConfig::Mode getAntialiasingMode() const;
/*@jsdoc
* Sets the active anti-aliasing mode.
* @function Render.setAntialiasingMode
* @param {AntialiasingMode} The active anti-aliasing mode.
*/
void setAntialiasingMode(AntialiasingConfig::Mode mode);
void setAntialiasingMode(AntialiasingSetupConfig::Mode mode);
/*@jsdoc
* Gets the view port resolution scale.
@@ -236,7 +236,7 @@ private:
int _renderMethod{ RENDER_FORWARD ? render::Args::RenderMethod::FORWARD : render::Args::RenderMethod::DEFERRED };
bool _shadowsEnabled{ true };
bool _ambientOcclusionEnabled{ false };
AntialiasingConfig::Mode _antialiasingMode{ AntialiasingConfig::Mode::NONE };
AntialiasingSetupConfig::Mode _antialiasingMode{ AntialiasingSetupConfig::Mode::NONE };
float _viewportResolutionScale{ 1.0f };
QString _fullScreenScreen;
@@ -246,7 +246,7 @@ private:
Setting::Handle<bool> _shadowsEnabledSetting { "shadowsEnabled", true };
Setting::Handle<bool> _ambientOcclusionEnabledSetting { "ambientOcclusionEnabled", false };
//Setting::Handle<AntialiasingConfig::Mode> _antialiasingModeSetting { "antialiasingMode", AntialiasingConfig::Mode::TAA };
Setting::Handle<int> _antialiasingModeSetting { "antialiasingMode", AntialiasingConfig::Mode::NONE };
Setting::Handle<int> _antialiasingModeSetting { "antialiasingMode", AntialiasingSetupConfig::Mode::NONE };
Setting::Handle<float> _viewportResolutionScaleSetting { "viewportResolutionScale", 1.0f };
Setting::Handle<QString> _fullScreenScreenSetting { "fullScreenScreen", "" };
@@ -254,7 +254,7 @@ private:
void forceRenderMethod(RenderMethod renderMethod);
void forceShadowsEnabled(bool enabled);
void forceAmbientOcclusionEnabled(bool enabled);
void forceAntialiasingMode(AntialiasingConfig::Mode mode);
void forceAntialiasingMode(AntialiasingSetupConfig::Mode mode);
void forceViewportResolutionScale(float scale);
static std::once_flag registry_flag;

View file

@@ -357,7 +357,7 @@ void OpenGLDisplayPlugin::customizeContext() {
auto presentThread = DependencyManager::get<PresentThread>();
Q_ASSERT(thread() == presentThread->thread());
getGLBackend()->setCameraCorrection(mat4(), mat4(), true);
getGLBackend()->updatePresentFrame(mat4());
for (auto& cursorValue : _cursorsData) {
auto& cursorData = cursorValue.second;
@@ -701,8 +701,7 @@ void OpenGLDisplayPlugin::present(const std::shared_ptr<RefreshRateController>&
if (_currentFrame) {
auto correction = getViewCorrection();
getGLBackend()->setCameraCorrection(correction, _prevRenderView);
_prevRenderView = correction * _currentFrame->view;
getGLBackend()->updatePresentFrame(correction);
{
withPresentThreadLock([&] {
_renderRate.increment();

View file

@@ -150,7 +150,6 @@ protected:
gpu::FramePointer _currentFrame;
gpu::Frame* _lastFrame{ nullptr };
mat4 _prevRenderView;
gpu::FramebufferPointer _compositeFramebuffer;
gpu::PipelinePointer _hudPipeline;
gpu::PipelinePointer _mirrorHUDPipeline;

View file

@@ -157,6 +157,7 @@ protected:
bool _cauterized { false };
bool _moving { false };
Transform _renderTransform;
Transform _prevRenderTransform; // each subclass is responsible for updating this after it renders, since they all handle transforms differently
MaterialMap _materials;
mutable std::mutex _materialsLock;
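
Every renderer touched by this commit follows the same motion-vector pattern around this member; a minimal sketch (the subclass name and the draw call are placeholders, and renderers that guard _renderTransform with a lock, like the Shape and Text renderers below, capture the previous transform inside withReadLock instead):

    void MyEntityRenderer::doRender(RenderArgs* args) {  // hypothetical subclass
        gpu::Batch& batch = *args->_batch;
        Transform transform = getModelTransform();
        // Hand the backend both this frame's and last frame's transform so it
        // can compute per-object motion vectors for TAA.
        batch.setModelTransform(transform, _prevRenderTransform);
        // Only the main and mirror passes advance the history; shadow and other
        // passes read it without writing, so they cannot corrupt the velocity data.
        if (args->_renderMode == RenderArgs::RenderMode::DEFAULT_RENDER_MODE ||
            args->_renderMode == RenderArgs::RenderMode::MIRROR_RENDER_MODE) {
            _prevRenderTransform = transform;
        }
        // ... bind pipeline and issue draw calls ...
    }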

View file

@@ -1,6 +1,7 @@
//
// Created by Sam Gondelman on 1/22/19
// Copyright 2019 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -265,7 +266,10 @@ void GizmoEntityRenderer::doRender(RenderArgs* args) {
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition(), true));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
Pipeline pipelineType = getPipelineType(materials);
if (pipelineType == Pipeline::PROCEDURAL) {

View file

@@ -105,7 +105,10 @@ void GridEntityRenderer::doRender(RenderArgs* args) {
}
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch->setModelTransform(transform);
batch->setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
auto minCorner = glm::vec2(-0.5f, -0.5f);
auto maxCorner = glm::vec2(0.5f, 0.5f);
@@ -120,4 +123,4 @@ void GridEntityRenderer::doRender(RenderArgs* args) {
minorGridRowDivisions, minorGridColDivisions, MINOR_GRID_EDGE,
majorGridRowDivisions, majorGridColDivisions, MAJOR_GRID_EDGE,
color, forward, _geometryId);
}
}

View file

@@ -188,7 +188,10 @@ void ImageEntityRenderer::doRender(RenderArgs* args) {
}
transform.setScale(scale);
}
batch->setModelTransform(transform);
batch->setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
Pipeline pipelineType = getPipelineType(materials);
if (pipelineType == Pipeline::PROCEDURAL) {

View file

@@ -4,6 +4,7 @@
//
// Created by Seth Alves on 5/11/15.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -44,12 +45,17 @@ void LineEntityRenderer::doRender(RenderArgs* args) {
PerformanceTimer perfTimer("RenderableLineEntityItem::render");
Q_ASSERT(args->_batch);
gpu::Batch& batch = *args->_batch;
const auto& modelTransform = getModelTransform();
Transform transform = Transform();
Transform transform;
transform.setTranslation(modelTransform.getTranslation());
transform.setRotation(BillboardModeHelpers::getBillboardRotation(modelTransform.getTranslation(), modelTransform.getRotation(), _billboardMode,
args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
if (_linePoints.size() > 1) {
DependencyManager::get<GeometryCache>()->bindSimpleProgram(batch, false, false, false, false, true,
_renderLayer != RenderLayer::WORLD || args->_renderMethod == Args::RenderMethod::FORWARD);

View file

@@ -327,7 +327,10 @@ void MaterialEntityRenderer::doRender(RenderArgs* args) {
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
if (!proceduralRender) {
drawMaterial->setTextureTransforms(textureTransform, MaterialMappingMode::UV, true);

View file

@@ -4,6 +4,7 @@
//
// Created by Brad Hefta-Gaub on 8/6/14.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -1474,7 +1475,11 @@ void ModelEntityRenderer::doRender(RenderArgs* args) {
// If the model doesn't have visual geometry, render our bounding box as green wireframe
static glm::vec4 greenColor(0.0f, 1.0f, 0.0f, 1.0f);
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(getModelTransform()); // we want to include the scale as well
Transform transform = getModelTransform();
batch.setModelTransform(transform, _prevRenderTransform); // we want to include the scale as well
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
auto geometryCache = DependencyManager::get<GeometryCache>();
geometryCache->renderWireCubeInstance(args, batch, greenColor, geometryCache->getShapePipelinePointer(false, false, args->_renderMethod == Args::RenderMethod::FORWARD));

View file

@@ -3,6 +3,7 @@
// interface/src
//
// Created by Jason Rickwald on 3/2/15.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -31,7 +32,7 @@ static ShapePipelinePointer shapePipelineFactory(const ShapePlumber& plumber, co
state->setDepthTest(true, false, gpu::LESS_EQUAL);
state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
PrepareStencil::testMask(*state);
PrepareStencil::testMaskResetNoAA(*state);
auto program = gpu::Shader::createProgram(shader::entities_renderer::program::textured_particle);
_texturedPipeline = texturedPipeline = gpu::Pipeline::create(program, state);
@@ -455,7 +456,7 @@ void ParticleEffectEntityRenderer::doRender(RenderArgs* args) {
color.finish = EntityRenderer::calculatePulseColor(_particleProperties.getColorFinish(), _pulseProperties, _created);
color.spread = EntityRenderer::calculatePulseColor(_particleProperties.getColorSpread(), _pulseProperties, _created);
batch.setModelTransform(transform);
batch.setModelTransform(transform); // particles are currently always transparent so we don't worry about TAA right now
batch.setUniformBuffer(0, _uniformBuffer);
batch.setInputFormat(_vertexFormat);
@@ -589,4 +590,4 @@ void ParticleEffectEntityRenderer::computeTriangles(const hfm::Model& hfmModel)
glm::vec3 scale = bounds.getScale();
_triangleInfo.transform = glm::scale(1.0f / scale) * glm::translate(-bounds.calcCenter());
}
}

View file

@@ -4,6 +4,7 @@
//
// Created by Eric Levin on 8/10/15
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -330,6 +331,10 @@ void PolyLineEntityRenderer::doRender(RenderArgs* args) {
batch.setModelTransform(transform);
batch.setPipeline(_pipelines[{args->_renderMethod, isTransparent()}]);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
batch.setResourceTexture(0, texture);
batch.draw(gpu::TRIANGLE_STRIP, (gpu::uint32)(2 * _numVertices), 0);
}

View file

@@ -4,6 +4,7 @@
//
// Created by Seth Alves on 5/19/15.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
// Copyright 2022-2023 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
@@ -1863,7 +1864,10 @@ void PolyVoxEntityRenderer::doRender(RenderArgs* args) {
glm::mat4 rotation = glm::mat4_cast(BillboardModeHelpers::getBillboardRotation(_position, _orientation, _billboardMode,
args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
Transform transform(glm::translate(_position) * rotation * _lastVoxelToLocalMatrix);
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
batch.setInputFormat(_vertexFormat);
batch.setInputBuffer(gpu::Stream::POSITION, _mesh->getVertexBuffer()._buffer, 0,

View file

@@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2016/05/09
// Copyright 2013 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -111,8 +112,13 @@ void ShapeEntityRenderer::doRender(RenderArgs* args) {
auto geometryCache = DependencyManager::get<GeometryCache>();
GeometryCache::Shape geometryShape = geometryCache->getShapeForEntityShape(_shape);
Transform transform;
Transform prevTransform;
withReadLock([&] {
transform = _renderTransform;
prevTransform = _prevRenderTransform;
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = _renderTransform;
}
});
bool wireframe = render::ShapeKey(args->_globalShapeKey).isWireframe() || _primitiveMode == PrimitiveMode::LINES;
@@ -120,7 +126,7 @@
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition(),
_shape < entity::Shape::Cube || _shape > entity::Shape::Icosahedron));
batch.setModelTransform(transform);
batch.setModelTransform(transform, prevTransform);
Pipeline pipelineType = getPipelineType(materials);
if (pipelineType == Pipeline::PROCEDURAL) {

View file

@@ -4,6 +4,8 @@
//
// Created by Brad Hefta-Gaub on 8/6/14.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
// Copyright 2023 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -158,14 +160,19 @@ void TextEntityRenderer::doRender(RenderArgs* args) {
bool transparent;
Transform transform;
Transform prevTransform;
withReadLock([&] {
transparent = isTransparent();
transform = _renderTransform;
prevTransform = _prevRenderTransform;
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
});
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch.setModelTransform(transform);
batch.setModelTransform(transform, prevTransform);
Pipeline pipelineType = getPipelineType(materials);
if (pipelineType == Pipeline::PROCEDURAL) {
@@ -357,7 +364,10 @@ void entities::TextPayload::render(RenderArgs* args) {
float scale = textRenderable->_lineHeight / textRenderer->getFontSize();
transform.postTranslate(glm::vec3(-0.5, 0.5, 1.0f + EPSILON / dimensions.z));
transform.setScale(scale);
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
glm::vec2 bounds = glm::vec2(dimensions.x - (textRenderable->_leftMargin + textRenderable->_rightMargin), dimensions.y - (textRenderable->_topMargin + textRenderable->_bottomMargin));
textRenderer->draw(batch, textRenderable->_leftMargin / scale, -textRenderable->_topMargin / scale, bounds / scale, scale,

View file

@@ -4,6 +4,7 @@
//
// Created by Brad Hefta-Gaub on 8/6/14.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
// Copyright 2023 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
@@ -105,6 +106,7 @@ public:
protected:
QUuid _entityID;
std::weak_ptr<TextRenderer3D> _textRenderer;
Transform _prevRenderTransform;
int _geometryID { 0 };
};

View file

@@ -322,13 +322,16 @@ void WebEntityRenderer::doRender(RenderArgs* args) {
transform.setRotation(BillboardModeHelpers::getBillboardRotation(transform.getTranslation(), transform.getRotation(), _billboardMode,
args->_renderMode == RenderArgs::RenderMode::SHADOW_RENDER_MODE ? BillboardModeHelpers::getPrimaryViewFrustumPosition() : args->getViewFrustum().getPosition()));
batch.setModelTransform(transform);
batch.setModelTransform(transform, _prevRenderTransform);
if (args->_renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || args->_renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_prevRenderTransform = transform;
}
// Turn off jitter for these entities
batch.pushProjectionJitter();
batch.pushProjectionJitterEnabled(false);
DependencyManager::get<GeometryCache>()->bindWebBrowserProgram(batch, transparent, forward);
DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texMin, texMax, color, _geometryId);
batch.popProjectionJitter();
batch.popProjectionJitterEnabled();
batch.setResourceTexture(0, nullptr);
}
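
The renamed push/pop pair brackets draws that must stay pixel-stable under TAA; the general pattern, using just the two Batch calls introduced above, is:

    // Temporarily disable projection jitter for screen-locked content
    // (web surfaces, HUD quads), then restore the surrounding state.
    batch.pushProjectionJitterEnabled(false);
    // ... draws that must not shimmer ...
    batch.popProjectionJitterEnabled();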

View file

@@ -5,6 +5,7 @@
//
// Created by Eric Levin on 8/10/2015
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -23,12 +24,15 @@
LAYOUT(binding=0) uniform sampler2D _texture;
<@include render-utils/ShaderConstants.h@>
<@if not HIFI_USE_FORWARD@>
layout(location=0) in vec3 _normalWS;
layout(location=RENDER_UTILS_ATTR_NORMAL_WS) in vec3 _normalWS;
layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS;
<@endif@>
layout(location=1) in vec2 _texCoord;
layout(location=2) in vec4 _color;
layout(location=3) in float _distanceFromCenter;
layout(location=RENDER_UTILS_ATTR_TEXCOORD01) in vec2 _texCoord;
layout(location=RENDER_UTILS_ATTR_COLOR) in vec4 _color;
layout(location=2) in float _distanceFromCenter;
void main(void) {
vec4 texel = texture(_texture, _texCoord);
@@ -37,9 +41,9 @@ void main(void) {
<@if not HIFI_USE_FORWARD@>
<@if HIFI_USE_TRANSLUCENT@>
packDeferredFragmentTranslucent(evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb, DEFAULT_ROUGHNESS);
packDeferredFragmentTranslucentUnlit(_prevPositionCS, evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb);
<@else@>
packDeferredFragmentUnlit(evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb);
packDeferredFragmentUnlit(_prevPositionCS, evalFrontOrBackFaceNormal(_normalWS), texel.a, texel.rgb);
<@endif@>
<@else@>
_fragColor0 = texel;

View file

@@ -5,6 +5,7 @@
//
// Created by Eric Levin on 7/20/15.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -12,18 +13,22 @@
<@include gpu/Inputs.slh@>
<@include gpu/Color.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
<@include paintStroke.slh@>
<$declarePolyLineBuffers()$>
<@include render-utils/ShaderConstants.h@>
<@if not HIFI_USE_FORWARD@>
layout(location=0) out vec3 _normalWS;
layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS;
layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS;
<@endif@>
layout(location=1) out vec2 _texCoord;
layout(location=2) out vec4 _color;
layout(location=3) out float _distanceFromCenter;
layout(location=RENDER_UTILS_ATTR_TEXCOORD01) out vec2 _texCoord;
layout(location=RENDER_UTILS_ATTR_COLOR) out vec4 _color;
layout(location=2) out float _distanceFromCenter;
void main(void) {
PolylineVertex vertex = getPolylineVertex(gl_VertexID / 2);
@@ -54,14 +59,17 @@ void main(void) {
posEye.z += _distanceFromCenter * vertex.binormalAndHalfWidth.w * binormalEye.z;
<$transformEyeToClipPos(cam, posEye, gl_Position)$>
<@if not HIFI_USE_FORWARD@>
<$transformEyeToPrevClipPos(cam, posEye, _prevPositionCS)$>
<$transformEyeToWorldDir(cam, normalEye, _normalWS)$>
<@endif@>
} else {
vec3 normal = vertex.normal.xyz;
position.xyz += _distanceFromCenter * vertex.binormalAndHalfWidth.w * binormal;
<@if HIFI_USE_FORWARD@>
<$transformModelToClipPos(cam, obj, position, gl_Position)$>
<@if not HIFI_USE_FORWARD@>
<@else@>
<$transformModelToClipPosAndPrevClipPos(cam, obj, position, gl_Position, _prevPositionCS)$>
<$transformModelToWorldDir(cam, obj, normal, _normalWS)$>
<@endif@>
}
}
}

View file

@@ -36,6 +36,8 @@
<@if HIFI_USE_FORWARD@>
layout(location=RENDER_UTILS_ATTR_POSITION_ES) in vec4 _positionES;
<@else@>
layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) in vec4 _prevPositionCS;
<@endif@>
layout(location=RENDER_UTILS_ATTR_POSITION_MS) in vec3 _positionMS;
layout(location=RENDER_UTILS_ATTR_NORMAL_WS) in vec3 _normalWS;
@@ -88,6 +90,7 @@ void main(void) {
<@if not HIFI_USE_FORWARD@>
packDeferredFragment(
_prevPositionCS,
normalize(_normalWS),
1.0,
diffuse,

View file

@@ -23,6 +23,7 @@
layout(location=RENDER_UTILS_ATTR_POSITION_ES) out vec4 _positionES;
<@endif@>
layout(location=RENDER_UTILS_ATTR_POSITION_MS) out vec3 _positionMS;
layout(location=RENDER_UTILS_ATTR_PREV_POSITION_CS) out vec4 _prevPositionCS;
layout(location=RENDER_UTILS_ATTR_NORMAL_WS) out vec3 _normalWS;
<@endif@>
@@ -34,7 +35,7 @@ void main(void) {
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
<@else@>
<@if not HIFI_USE_FORWARD@>
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
<$transformModelToClipPosAndPrevClipPos(cam, obj, inPosition, gl_Position, _prevPositionCS)$>
<@else@>
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _positionES, gl_Position)$>
<@endif@>

View file

@@ -5,6 +5,7 @@
// texture_particle.vert
//
// Copyright 2015 High Fidelity, Inc.
// Copyright 2023 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html

View file

@@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -50,10 +51,16 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
(&::gpu::gl::GLBackend::do_setModelTransform),
(&::gpu::gl::GLBackend::do_setViewTransform),
(&::gpu::gl::GLBackend::do_setProjectionTransform),
(&::gpu::gl::GLBackend::do_setProjectionJitter),
(&::gpu::gl::GLBackend::do_setProjectionJitterEnabled),
(&::gpu::gl::GLBackend::do_setProjectionJitterSequence),
(&::gpu::gl::GLBackend::do_setProjectionJitterScale),
(&::gpu::gl::GLBackend::do_setViewportTransform),
(&::gpu::gl::GLBackend::do_setDepthRangeTransform),
(&::gpu::gl::GLBackend::do_saveViewProjectionTransform),
(&::gpu::gl::GLBackend::do_setSavedViewProjectionTransform),
(&::gpu::gl::GLBackend::do_copySavedViewProjectionTransformToBuffer),
(&::gpu::gl::GLBackend::do_setPipeline),
(&::gpu::gl::GLBackend::do_setStateBlendFactor),
(&::gpu::gl::GLBackend::do_setStateScissorRect),
@@ -269,12 +276,10 @@ bool GLBackend::availableMemoryKnown() {
}
GLBackend::GLBackend(bool syncCache) {
_pipeline._cameraCorrectionBuffer._buffer->flush();
initShaderBinaryCache();
}
GLBackend::GLBackend() {
_pipeline._cameraCorrectionBuffer._buffer->flush();
initShaderBinaryCache();
}
@@ -320,19 +325,7 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
case Batch::COMMAND_drawIndexedInstanced:
case Batch::COMMAND_multiDrawIndirect:
case Batch::COMMAND_multiDrawIndexedIndirect:
{
Vec2u outputSize{ 1,1 };
auto framebuffer = acquire(_output._framebuffer);
if (framebuffer) {
outputSize.x = framebuffer->getWidth();
outputSize.y = framebuffer->getHeight();
} else if (glm::dot(_transform._projectionJitter, _transform._projectionJitter)>0.0f) {
qCWarning(gpugllogging) << "Jittering needs to have a frame buffer to be set";
}
_transform.preUpdate(_commandIndex, _stereo, outputSize);
}
preUpdateTransform();
break;
case Batch::COMMAND_disableContextStereo:
@@ -343,11 +336,20 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
_stereo._contextDisable = false;
break;
case Batch::COMMAND_copySavedViewProjectionTransformToBuffer:
// We need to store this transform state in the transform buffer
preUpdateTransform();
break;
case Batch::COMMAND_setFramebuffer:
case Batch::COMMAND_setViewportTransform:
case Batch::COMMAND_setViewTransform:
case Batch::COMMAND_setProjectionTransform:
case Batch::COMMAND_setProjectionJitter:
case Batch::COMMAND_setProjectionJitterEnabled:
case Batch::COMMAND_setProjectionJitterSequence:
case Batch::COMMAND_setProjectionJitterScale:
case Batch::COMMAND_saveViewProjectionTransform:
case Batch::COMMAND_setSavedViewProjectionTransform:
{
CommandCall call = _commandCalls[(*command)];
(this->*(call))(batch, *offset);
@@ -385,6 +387,9 @@ void GLBackend::renderPassDraw(const Batch& batch) {
case Batch::COMMAND_setModelTransform:
case Batch::COMMAND_setViewTransform:
case Batch::COMMAND_setProjectionTransform:
case Batch::COMMAND_saveViewProjectionTransform:
case Batch::COMMAND_setSavedViewProjectionTransform:
case Batch::COMMAND_setProjectionJitterSequence:
break;
case Batch::COMMAND_draw:
@@ -410,7 +415,6 @@ void GLBackend::renderPassDraw(const Batch& batch) {
//case Batch::COMMAND_setModelTransform:
//case Batch::COMMAND_setViewTransform:
//case Batch::COMMAND_setProjectionTransform:
case Batch::COMMAND_setProjectionJitter:
case Batch::COMMAND_setViewportTransform:
case Batch::COMMAND_setDepthRangeTransform:
{
@@ -554,7 +558,7 @@ void GLBackend::render(const Batch& batch) {
_stereo._enable = false;
}
// Reset jitter
_transform._projectionJitter = Vec2(0.0f, 0.0f);
_transform._projectionJitter._isEnabled = false;
{
GL_PROFILE_RANGE(render_gpu_gl_detail, "Transfer");
@@ -578,6 +582,14 @@ void GLBackend::render(const Batch& batch) {
// Restore the saved stereo state for the next batch
_stereo._enable = savedStereo;
if (batch._mustUpdatePreviousModels) {
// Update object transform history for when the batch will be reexecuted
for (auto& objectTransform : batch._objects) {
objectTransform._previousModel = objectTransform._model;
}
batch._mustUpdatePreviousModels = false;
}
}
@@ -997,15 +1009,17 @@ void GLBackend::recycle() const {
_textureManagement._transferEngine->manageMemory();
}
void GLBackend::setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset) {
auto invCorrection = glm::inverse(correction);
auto invPrevView = glm::inverse(prevRenderView);
_transform._correction.prevView = (reset ? Mat4() : prevRenderView);
_transform._correction.prevViewInverse = (reset ? Mat4() : invPrevView);
_transform._correction.correction = correction;
_transform._correction.correctionInverse = invCorrection;
_pipeline._cameraCorrectionBuffer._buffer->setSubData(0, _transform._correction);
_pipeline._cameraCorrectionBuffer._buffer->flush();
void GLBackend::updatePresentFrame(const Mat4& correction) {
_transform._presentFrame.correction = correction;
_transform._presentFrame.correctionInverse = glm::inverse(correction);
_transform._projectionJitter._currentSampleIndex++;
// Update previous views of saved transforms
for (auto& viewProjState : _transform._savedTransforms) {
viewProjState._state._previousCorrectedView = viewProjState._state._correctedView;
viewProjState._state._previousProjection = viewProjState._state._projection;
}
}
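
Seen from the present thread (the OpenGLDisplayPlugin::present hunk earlier in this commit), the per-frame sequence reduces to two lines:

    // Once per presented frame: apply the latest view correction, which also
    // advances the jitter sample index and rolls the saved-transform history.
    auto correction = getViewCorrection();
    getGLBackend()->updatePresentFrame(correction);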
void GLBackend::syncProgram(const gpu::ShaderPointer& program) {

View file

@@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -26,7 +27,7 @@
#include <gl/GLShaders.h>
#include <gpu/Forward.h>
#include <gpu/Context.h>
#include <gpu/Backend.h>
#include "GLShared.h"
@@ -121,7 +122,8 @@ public:
// Shutdown rendering and persist any required resources
void shutdown() override;
void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset = false) override;
void updatePresentFrame(const Mat4& correction = Mat4()) override;
void render(const Batch& batch) final override;
// This call synchronize the Full Backend cache with the current GLState
@@ -177,10 +179,16 @@ public:
virtual void do_setModelTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setViewTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionJitter(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionJitterEnabled(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionJitterSequence(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionJitterScale(const Batch& batch, size_t paramOffset) final;
virtual void do_setViewportTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setDepthRangeTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_saveViewProjectionTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setSavedViewProjectionTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) = 0;
// Uniform Stage
virtual void do_setUniformBuffer(const Batch& batch, size_t paramOffset) final;
@@ -300,8 +308,9 @@ protected:
virtual bool supportsBindless() const { return false; }
static const size_t INVALID_OFFSET = (size_t)-1;
bool _inRenderTransferPass{ false };
int _currentDraw{ -1 };
static const uint INVALID_SAVED_CAMERA_SLOT = (uint)-1;
bool _inRenderTransferPass { false };
int _currentDraw { -1 };
struct FrameTrash {
GLsync fence = nullptr;
@@ -392,11 +401,9 @@ protected:
// between the time when a batch was recorded and the time(s) when it is
// executed
// Prev is the previous correction used at previous frame
struct CameraCorrection {
struct PresentFrame {
mat4 correction;
mat4 correctionInverse;
mat4 prevView;
mat4 prevViewInverse;
};
struct TransformStageState {
@@ -417,32 +424,61 @@ protected:
using CameraBufferElement = TransformCamera;
#endif
using TransformCameras = std::vector<CameraBufferElement>;
struct ViewProjectionState {
Transform _view;
Transform _correctedView;
Transform _previousCorrectedView;
Mat4 _projection;
Mat4 _previousProjection;
bool _viewIsCamera;
void copyExceptPrevious(const ViewProjectionState& other) {
_view = other._view;
_correctedView = other._correctedView;
_projection = other._projection;
_viewIsCamera = other._viewIsCamera;
}
};
struct SaveTransform {
ViewProjectionState _state;
size_t _cameraOffset { INVALID_OFFSET };
};
TransformCamera _camera;
TransformCameras _cameras;
std::array<SaveTransform, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT> _savedTransforms;
mutable std::map<std::string, GLvoid*> _drawCallInfoOffsets;
GLuint _objectBuffer{ 0 };
GLuint _cameraBuffer{ 0 };
GLuint _drawCallInfoBuffer{ 0 };
GLuint _objectBufferTexture{ 0 };
size_t _cameraUboSize{ 0 };
bool _viewIsCamera{ false };
bool _skybox{ false };
Transform _view;
CameraCorrection _correction;
bool _viewCorrectionEnabled{ true };
GLuint _objectBuffer { 0 };
GLuint _cameraBuffer { 0 };
GLuint _drawCallInfoBuffer { 0 };
GLuint _objectBufferTexture { 0 };
size_t _cameraUboSize { 0 };
ViewProjectionState _viewProjectionState;
uint _currentSavedTransformSlot { INVALID_SAVED_CAMERA_SLOT };
bool _skybox { false };
PresentFrame _presentFrame;
bool _viewCorrectionEnabled { true };
Mat4 _projection;
Vec4i _viewport{ 0, 0, 1, 1 };
Vec2 _depthRange{ 0.0f, 1.0f };
Vec2 _projectionJitter{ 0.0f, 0.0f };
bool _invalidView{ false };
bool _invalidProj{ false };
bool _invalidViewport{ false };
struct Jitter {
std::vector<Vec2> _offsetSequence;
Vec2 _offset { 0.0f };
float _scale { 0.f };
unsigned int _currentSampleIndex { 0 };
bool _isEnabled { false };
};
bool _enabledDrawcallInfoBuffer{ false };
Jitter _projectionJitter;
Vec4i _viewport { 0, 0, 1, 1 };
Vec2 _depthRange { 0.0f, 1.0f };
bool _invalidView { false };
bool _invalidProj { false };
bool _invalidViewport { false };
bool _enabledDrawcallInfoBuffer { false };
using Pair = std::pair<size_t, size_t>;
using List = std::list<Pair>;
@@ -450,11 +486,13 @@ protected:
mutable List::const_iterator _camerasItr;
mutable size_t _currentCameraOffset{ INVALID_OFFSET };
void preUpdate(size_t commandIndex, const StereoState& stereo, Vec2u framebufferSize);
void pushCameraBufferElement(const StereoState& stereo, const StereoState& prevStereo, TransformCameras& cameras) const;
void preUpdate(size_t commandIndex, const StereoState& stereo, const StereoState& prevStereo);
void update(size_t commandIndex, const StereoState& stereo) const;
void bindCurrentCamera(int stereoSide) const;
} _transform;
void preUpdateTransform();
virtual void transferTransformState(const Batch& batch) const = 0;
struct UniformStageState {
@@ -524,25 +562,16 @@ protected:
PipelineReference _pipeline{};
GLuint _program{ 0 };
bool _cameraCorrection{ false };
GLShader* _programShader{ nullptr };
bool _invalidProgram{ false };
GLShader* _programShader { nullptr };
bool _invalidProgram { false };
BufferView _cameraCorrectionBuffer{ gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(CameraCorrection), nullptr)) };
BufferView _cameraCorrectionBufferIdentity{ gpu::BufferView(
std::make_shared<gpu::Buffer>(sizeof(CameraCorrection), nullptr)) };
State::Data _stateCache { State::DEFAULT };
State::Signature _stateSignatureCache { 0 };
State::Data _stateCache{ State::DEFAULT };
State::Signature _stateSignatureCache{ 0 };
GLState* _state { nullptr };
bool _invalidState { false };
GLState* _state{ nullptr };
bool _invalidState{ false };
PipelineStageState() {
_cameraCorrectionBuffer.edit<CameraCorrection>() = CameraCorrection();
_cameraCorrectionBufferIdentity.edit<CameraCorrection>() = CameraCorrection();
_cameraCorrectionBufferIdentity._buffer->flush();
}
PipelineStageState() {}
} _pipeline;
// Backend dependent compilation of the shader

View file

@@ -37,7 +37,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) {
reset(_pipeline._pipeline);
_pipeline._program = 0;
_pipeline._cameraCorrection = false;
_pipeline._programShader = nullptr;
_pipeline._invalidProgram = true;
@@ -63,7 +62,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) {
_pipeline._program = glprogram;
_pipeline._programShader = pipelineObject->_program;
_pipeline._invalidProgram = true;
_pipeline._cameraCorrection = pipelineObject->_cameraCorrection;
}
// Now for the state
@@ -79,16 +77,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) {
// This should be done on Pipeline::update...
if (_pipeline._invalidProgram) {
glUseProgram(_pipeline._program);
if (_pipeline._cameraCorrection) {
// Invalidate uniform buffer cache slot
_uniform._buffers[gpu::slot::buffer::CameraCorrection].reset();
auto& cameraCorrectionBuffer = _transform._viewCorrectionEnabled ?
_pipeline._cameraCorrectionBuffer._buffer :
_pipeline._cameraCorrectionBufferIdentity._buffer;
// Because we don't sync Buffers in the bindUniformBuffer, let s force this buffer synced
getBufferID(*cameraCorrectionBuffer);
bindUniformBuffer(gpu::slot::buffer::CameraCorrection, cameraCorrectionBuffer, 0, sizeof(CameraCorrection));
}
(void)CHECK_GL_ERROR();
_pipeline._invalidProgram = false;
}

View file

@@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
// Copyright 2023 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -18,20 +19,48 @@ void GLBackend::do_setModelTransform(const Batch& batch, size_t paramOffset) {
}
void GLBackend::do_setViewTransform(const Batch& batch, size_t paramOffset) {
_transform._view = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._viewIsCamera = batch._params[paramOffset + 1]._uint != 0;
_transform._viewProjectionState._view = batch._transforms.get(batch._params[paramOffset]._uint);
// View history is only supported with saved transforms. If setViewTransform is called (and not
// setSavedViewProjectionTransform) then the view will NOT be corrected in the present thread,
// in which case the previousCorrectedView should be the same as the view.
_transform._viewProjectionState._previousCorrectedView = _transform._viewProjectionState._view;
_transform._viewProjectionState._previousProjection = _transform._viewProjectionState._projection;
_transform._viewProjectionState._viewIsCamera = batch._params[paramOffset + 1]._uint != 0;
_transform._invalidView = true;
// The current view / proj doesn't correspond to a saved camera slot
_transform._currentSavedTransformSlot = INVALID_SAVED_CAMERA_SLOT;
}
void GLBackend::do_setProjectionTransform(const Batch& batch, size_t paramOffset) {
memcpy(glm::value_ptr(_transform._projection), batch.readData(batch._params[paramOffset]._uint), sizeof(Mat4));
memcpy(glm::value_ptr(_transform._viewProjectionState._projection), batch.readData(batch._params[paramOffset]._uint), sizeof(Mat4));
_transform._invalidProj = true;
// The current view / proj doesn't correspond to a saved camera slot
_transform._currentSavedTransformSlot = INVALID_SAVED_CAMERA_SLOT;
}
void GLBackend::do_setProjectionJitter(const Batch& batch, size_t paramOffset) {
_transform._projectionJitter.x = batch._params[paramOffset]._float;
_transform._projectionJitter.y = batch._params[paramOffset+1]._float;
void GLBackend::do_setProjectionJitterEnabled(const Batch& batch, size_t paramOffset) {
_transform._projectionJitter._isEnabled = (batch._params[paramOffset]._int & 1) != 0;
_transform._invalidProj = true;
// The current view / proj doesn't correspond to a saved camera slot
_transform._currentSavedTransformSlot = INVALID_SAVED_CAMERA_SLOT;
}
void GLBackend::do_setProjectionJitterSequence(const Batch& batch, size_t paramOffset) {
auto count = batch._params[paramOffset + 0]._uint;
auto& projectionJitter = _transform._projectionJitter;
projectionJitter._offsetSequence.resize(count);
if (count) {
memcpy(projectionJitter._offsetSequence.data(), batch.readData(batch._params[paramOffset + 1]._uint), sizeof(Vec2) * count);
projectionJitter._offset = projectionJitter._offsetSequence[projectionJitter._currentSampleIndex % count];
} else {
projectionJitter._offset = Vec2(0.0f);
}
}
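
do_setProjectionJitterSequence stores whatever offsets the client uploads and cycles through them via _currentSampleIndex, which updatePresentFrame() increments once per frame. A typical TAA client would upload a low-discrepancy pattern; a sketch (the Halton generator here is an assumption — the actual sample pattern lives in the AntialiasingSetup job, which is not part of this excerpt):

    #include <vector>
    #include <glm/glm.hpp>

    // Radical inverse of 'index' in the given base: the classic Halton construction.
    static float haltonSample(int index, int base) {
        float f = 1.0f, result = 0.0f;
        while (index > 0) {
            f /= base;
            result += f * (index % base);
            index /= base;
        }
        return result;
    }

    // Sub-pixel offsets in [-0.5, 0.5)^2, later scaled by the viewport and the
    // factor set through do_setProjectionJitterScale.
    std::vector<glm::vec2> makeJitterSequence(int count) {
        std::vector<glm::vec2> sequence;
        sequence.reserve(count);
        for (int i = 1; i <= count; i++) {
            sequence.emplace_back(haltonSample(i, 2) - 0.5f, haltonSample(i, 3) - 0.5f);
        }
        return sequence;
    }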
void GLBackend::do_setProjectionJitterScale(const Batch& batch, size_t paramOffset) {
// Should be 2 for one pixel amplitude as clip space is between -1 and 1, but lower values give less blur
// but more aliasing...
_transform._projectionJitter._scale = 2.0f * batch._params[paramOffset + 0]._float;
}
void GLBackend::do_setViewportTransform(const Batch& batch, size_t paramOffset) {
@@ -90,55 +119,80 @@ void GLBackend::syncTransformStateCache() {
Mat4 modelView;
auto modelViewInv = glm::inverse(modelView);
_transform._view.evalFromRawMatrix(modelViewInv);
_transform._viewProjectionState._view.evalFromRawMatrix(modelViewInv);
glDisableVertexAttribArray(gpu::Stream::DRAW_CALL_INFO);
_transform._enabledDrawcallInfoBuffer = false;
}
void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const StereoState& stereo, Vec2u framebufferSize) {
void GLBackend::TransformStageState::pushCameraBufferElement(const StereoState& stereo, const StereoState& prevStereo, TransformCameras& cameras) const {
const float jitterAmplitude = _projectionJitter._scale;
const Vec2 jitterScale = Vec2(jitterAmplitude * float(_projectionJitter._isEnabled & 1)) / Vec2(_viewport.z, _viewport.w);
const Vec2 jitter = jitterScale * _projectionJitter._offset;
if (stereo.isStereo()) {
#ifdef GPU_STEREO_CAMERA_BUFFER
cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, prevStereo, _viewProjectionState._correctedView,
_viewProjectionState._previousCorrectedView, jitter),
_camera.getEyeCamera(1, stereo, prevStereo, _viewProjectionState._correctedView,
_viewProjectionState._previousCorrectedView, jitter)));
#else
cameras.push_back((_camera.getEyeCamera(0, stereo, prevStereo, _viewProjectionState._correctedView,
_viewProjectionState._previousCorrectedView, jitter)));
cameras.push_back((_camera.getEyeCamera(1, stereo, prevStereo, _viewProjectionState._correctedView,
_viewProjectionState._previousCorrectedView, jitter)));
#endif
} else {
#ifdef GPU_STEREO_CAMERA_BUFFER
cameras.push_back(CameraBufferElement(
_camera.getMonoCamera(_skybox, _viewProjectionState._correctedView, _viewProjectionState._previousCorrectedView,
_viewProjectionState._previousProjection, jitter)));
#else
cameras.push_back((_camera.getMonoCamera(_skybox, _viewProjectionState._correctedView,
_viewProjectionState._previousCorrectedView, _viewProjectionState._previousProjection,
jitter)));
#endif
}
}
void GLBackend::preUpdateTransform() {
_transform.preUpdate(_commandIndex, _stereo, _prevStereo);
}
void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const StereoState& stereo, const StereoState& prevStereo) {
// Check all the dirty flags and update the state accordingly
if (_invalidViewport) {
_camera._viewport = glm::vec4(_viewport);
}
if (_invalidProj) {
_camera._projection = _projection;
_camera._projection = _viewProjectionState._projection;
}
if (_invalidView) {
// Apply the correction
if (_viewIsCamera && (_viewCorrectionEnabled && _correction.correction != glm::mat4())) {
// FIXME should I switch to using the camera correction buffer in Transform.slf and leave this out?
Transform result;
_view.mult(result, _view, _correction.correctionInverse);
if (_skybox) {
result.setTranslation(vec3());
}
_view = result;
if (_viewProjectionState._viewIsCamera && (_viewCorrectionEnabled && _presentFrame.correction != glm::mat4())) {
Transform::mult(_viewProjectionState._correctedView, _viewProjectionState._view, _presentFrame.correctionInverse);
} else {
_viewProjectionState._correctedView = _viewProjectionState._view;
}
if (_skybox) {
_viewProjectionState._correctedView.setTranslation(vec3());
}
// This is when the _view matrix gets assigned
_view.getInverseMatrix(_camera._view);
_viewProjectionState._correctedView.getInverseMatrix(_camera._view);
}
if (_invalidView || _invalidProj || _invalidViewport) {
size_t offset = _cameraUboSize * _cameras.size();
Vec2 finalJitter = _projectionJitter / Vec2(framebufferSize);
_cameraOffsets.push_back(TransformStageState::Pair(commandIndex, offset));
if (stereo.isStereo()) {
#ifdef GPU_STEREO_CAMERA_BUFFER
_cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, _view, finalJitter), _camera.getEyeCamera(1, stereo, _view, finalJitter)));
#else
_cameras.push_back((_camera.getEyeCamera(0, stereo, _view, finalJitter)));
_cameras.push_back((_camera.getEyeCamera(1, stereo, _view, finalJitter)));
#endif
} else {
#ifdef GPU_STEREO_CAMERA_BUFFER
_cameras.push_back(CameraBufferElement(_camera.getMonoCamera(_view, finalJitter)));
#else
_cameras.push_back((_camera.getMonoCamera(_view, finalJitter)));
#endif
pushCameraBufferElement(stereo, prevStereo, _cameras);
if (_currentSavedTransformSlot != INVALID_SAVED_CAMERA_SLOT) {
// Save the offset of the saved camera slot in the camera buffer. Can be used to copy
// that data, or (in the future) to reuse the offset.
_savedTransforms[_currentSavedTransformSlot]._cameraOffset = offset;
}
}
@ -177,3 +231,28 @@ void GLBackend::resetTransformStage() {
glDisableVertexAttribArray(gpu::Stream::DRAW_CALL_INFO);
_transform._enabledDrawcallInfoBuffer = false;
}
void GLBackend::do_saveViewProjectionTransform(const Batch& batch, size_t paramOffset) {
auto slotId = batch._params[paramOffset + 0]._uint;
slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT);
auto& savedTransform = _transform._savedTransforms[slotId];
savedTransform._cameraOffset = INVALID_OFFSET;
_transform._currentSavedTransformSlot = slotId;
// If we are saving this transform to a save slot, it means we are tracking the view's history,
// so copy the previous corrected view to the transform state.
_transform._viewProjectionState._previousCorrectedView = savedTransform._state._previousCorrectedView;
_transform._viewProjectionState._previousProjection = savedTransform._state._previousProjection;
preUpdateTransform();
savedTransform._state.copyExceptPrevious(_transform._viewProjectionState);
}
void GLBackend::do_setSavedViewProjectionTransform(const Batch& batch, size_t paramOffset) {
auto slotId = batch._params[paramOffset + 0]._uint;
slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT);
_transform._viewProjectionState = _transform._savedTransforms[slotId]._state;
_transform._invalidView = true;
_transform._invalidProj = true;
_transform._currentSavedTransformSlot = slotId;
}
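A sketch of the frame-to-frame handshake these two commands implement, assuming a render task that re-saves the same slot once per frame (the function and slot below are illustrative):

```cpp
void renderView(gpu::Batch& batch, const Transform& view, const glm::mat4& proj) {
    batch.setViewTransform(view);
    batch.setProjectionTransform(proj);
    // Pulls last frame's corrected view/projection out of slot 0 (so shaders can
    // build motion vectors), recomputes the current one, then stores it back so
    // it becomes the "previous" state on the next frame.
    batch.saveViewProjectionTransform(0);
    // ... draw calls ...
}
```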

View file

@ -49,11 +49,6 @@ GLPipeline* GLPipeline::sync(GLBackend& backend, const Pipeline& pipeline) {
Backend::setGPUObject(pipeline, object);
}
// Special case for view correction matrices, any pipeline that declares the correction buffer
// uniform will automatically have it provided without any client code necessary.
// Required for stable lighting in the HMD.
auto reflection = shader->getReflection(backend.getShaderDialect(), backend.getShaderVariant());
object->_cameraCorrection = reflection.validUniformBuffer(gpu::slot::buffer::CameraCorrection);
object->_program = programObject;
object->_state = stateObject;

View file

@ -18,9 +18,6 @@ public:
GLShader* _program { nullptr };
GLState* _state { nullptr };
// Bit of a hack, any pipeline can need the camera correction buffer at execution time, so
// we store whether a given pipeline has declared the uniform buffer for it.
bool _cameraCorrection{ false };
};
} }

View file

@ -167,6 +167,8 @@ protected:
bool bindResourceBuffer(uint32_t slot, const BufferPointer& buffer) override;
void releaseResourceBuffer(uint32_t slot) override;
void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) override;
// Output stage
void do_blit(const Batch& batch, size_t paramOffset) override;

View file

@ -10,6 +10,8 @@
//
#include "GL41Backend.h"
#include "gpu/gl/GLBuffer.h"
using namespace gpu;
using namespace gpu::gl41;
@ -97,4 +99,34 @@ void GL41Backend::updateTransform(const Batch& batch) {
}
(void)CHECK_GL_ERROR();
}
}
void GL41Backend::do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) {
auto slotId = batch._params[paramOffset + 0]._uint;
BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
auto dstOffset = batch._params[paramOffset + 2]._uint;
size_t size = _transform._cameraUboSize;
slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT);
const auto& savedTransform = _transform._savedTransforms[slotId];
if ((dstOffset + size) > buffer->getBufferCPUMemSize()) {
qCWarning(gpugllogging) << "Copying saved TransformCamera data out of bounds of uniform buffer";
size = (size_t)std::max<ptrdiff_t>((ptrdiff_t)buffer->getBufferCPUMemSize() - (ptrdiff_t)dstOffset, 0);
}
if (savedTransform._cameraOffset == INVALID_OFFSET) {
qCWarning(gpugllogging) << "Saved TransformCamera data has an invalid transform offset. Copy aborted.";
return;
}
// Sync BufferObject
auto* object = syncGPUObject(*buffer);
if (object) {
glBindBuffer(GL_COPY_READ_BUFFER, _transform._cameraBuffer);
glBindBuffer(GL_COPY_WRITE_BUFFER, object->_buffer);
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, savedTransform._cameraOffset, dstOffset, size);
glBindBuffer(GL_COPY_READ_BUFFER, 0);
glBindBuffer(GL_COPY_WRITE_BUFFER, 0);
(void)CHECK_GL_ERROR();
}
}

View file

@ -269,6 +269,8 @@ protected:
bool bindResourceBuffer(uint32_t slot, const BufferPointer& buffer) override;
void releaseResourceBuffer(uint32_t slot) override;
void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) override;
// Output stage
void do_blit(const Batch& batch, size_t paramOffset) override;

View file

@ -167,7 +167,7 @@ public:
glSamplerParameteri(result, GL_TEXTURE_WRAP_T, GLTexture::WRAP_MODES[sampler.getWrapModeV()]);
glSamplerParameteri(result, GL_TEXTURE_WRAP_R, GLTexture::WRAP_MODES[sampler.getWrapModeW()]);
glSamplerParameterf(result, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
glSamplerParameterf(result, GL_TEXTURE_MAX_ANISOTROPY, sampler.getMaxAnisotropy());
glSamplerParameterfv(result, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
glSamplerParameterf(result, GL_TEXTURE_MIN_LOD, sampler.getMinMip());
@ -314,7 +314,7 @@ void GL45Texture::syncSampler() const {
glTextureParameteri(_id, GL_TEXTURE_WRAP_T, WRAP_MODES[sampler.getWrapModeV()]);
glTextureParameteri(_id, GL_TEXTURE_WRAP_R, WRAP_MODES[sampler.getWrapModeW()]);
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY, sampler.getMaxAnisotropy());
glTextureParameterfv(_id, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, sampler.getMinMip());

View file

@ -10,6 +10,8 @@
//
#include "GL45Backend.h"
#include "gpu/gl/GLBuffer.h"
using namespace gpu;
using namespace gpu::gl45;
@ -101,4 +103,30 @@ void GL45Backend::updateTransform(const Batch& batch) {
}
(void)CHECK_GL_ERROR();
}
}
void GL45Backend::do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) {
auto slotId = batch._params[paramOffset + 0]._uint;
BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
auto dstOffset = batch._params[paramOffset + 2]._uint;
size_t size = _transform._cameraUboSize;
slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT);
const auto& savedTransform = _transform._savedTransforms[slotId];
if ((dstOffset + size) > buffer->getBufferCPUMemSize()) {
qCWarning(gpugllogging) << "Copying saved TransformCamera data out of bounds of uniform buffer";
size = (size_t)std::max<ptrdiff_t>((ptrdiff_t)buffer->getBufferCPUMemSize() - (ptrdiff_t)dstOffset, 0);
}
if (savedTransform._cameraOffset == INVALID_OFFSET) {
qCWarning(gpugllogging) << "Saved TransformCamera data has an invalid transform offset. Copy aborted.";
return;
}
// Sync BufferObject
auto* object = syncGPUObject(*buffer);
if (object) {
glCopyNamedBufferSubData(_transform._cameraBuffer, object->_buffer, savedTransform._cameraOffset, dstOffset, size);
(void)CHECK_GL_ERROR();
}
}

View file

@ -164,6 +164,8 @@ protected:
bool bindResourceBuffer(uint32_t slot, const BufferPointer& buffer) override;
void releaseResourceBuffer(uint32_t slot) override;
void do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) override;
// Output stage
void do_blit(const Batch& batch, size_t paramOffset) override;

View file

@ -10,6 +10,8 @@
//
#include "GLESBackend.h"
#include "gpu/gl/GLBuffer.h"
using namespace gpu;
using namespace gpu::gles;
@ -99,4 +101,34 @@ void GLESBackend::updateTransform(const Batch& batch) {
}
(void)CHECK_GL_ERROR();
}
}
void GLESBackend::do_copySavedViewProjectionTransformToBuffer(const Batch& batch, size_t paramOffset) {
auto slotId = batch._params[paramOffset + 0]._uint;
BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
auto dstOffset = batch._params[paramOffset + 2]._uint;
size_t size = _transform._cameraUboSize;
slotId = std::min<gpu::uint32>(slotId, gpu::Batch::MAX_TRANSFORM_SAVE_SLOT_COUNT);
const auto& savedTransform = _transform._savedTransforms[slotId];
if ((dstOffset + size) > buffer->getBufferCPUMemSize()) {
qCWarning(gpugllogging) << "Copying saved TransformCamera data out of bounds of uniform buffer";
size = (size_t)std::max<ptrdiff_t>((ptrdiff_t)buffer->getBufferCPUMemSize() - (ptrdiff_t)dstOffset, 0);
}
if (savedTransform._cameraOffset == INVALID_OFFSET) {
qCWarning(gpugllogging) << "Saved TransformCamera data has an invalid transform offset. Copy aborted.";
return;
}
// Sync BufferObject
auto* object = syncGPUObject(*buffer);
if (object) {
glBindBuffer(GL_COPY_READ_BUFFER, _transform._cameraBuffer);
glBindBuffer(GL_COPY_WRITE_BUFFER, object->_buffer);
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, savedTransform._cameraOffset, dstOffset, size);
glBindBuffer(GL_COPY_READ_BUFFER, 0);
glBindBuffer(GL_COPY_WRITE_BUFFER, 0);
(void)CHECK_GL_ERROR();
}
}

View file

@ -0,0 +1,126 @@
//
// Backend.cpp
// interface/src/gpu
//
// Created by Olivier Prat on 05/25/2018.
// Copyright 2018 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Backend.h"
using namespace gpu;
// Counters for Buffer and Texture usage in GPU/Context
ContextMetricSize Backend::freeGPUMemSize;
ContextMetricCount Backend::bufferCount;
ContextMetricSize Backend::bufferGPUMemSize;
ContextMetricCount Backend::textureResidentCount;
ContextMetricCount Backend::textureFramebufferCount;
ContextMetricCount Backend::textureResourceCount;
ContextMetricCount Backend::textureExternalCount;
ContextMetricSize Backend::textureResidentGPUMemSize;
ContextMetricSize Backend::textureFramebufferGPUMemSize;
ContextMetricSize Backend::textureResourceGPUMemSize;
ContextMetricSize Backend::textureExternalGPUMemSize;
ContextMetricCount Backend::texturePendingGPUTransferCount;
ContextMetricSize Backend::texturePendingGPUTransferMemSize;
ContextMetricSize Backend::textureResourcePopulatedGPUMemSize;
ContextMetricSize Backend::textureResourceIdealGPUMemSize;
void Backend::setStereoState(const StereoState& stereo) {
_prevStereo = _stereo;
_stereo = stereo;
}
Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye,
const StereoState& stereo,
const StereoState& prevStereo,
const Transform& view,
const Transform& previousView,
Vec2 normalizedJitter) const {
TransformCamera result = *this;
Transform eyeView = view;
Transform eyePreviousView = previousView;
if (!stereo._skybox) {
eyeView.postTranslate(-Vec3(stereo._eyeViews[eye][3]));
eyePreviousView.postTranslate(-Vec3(prevStereo._eyeViews[eye][3]));
} else {
// FIXME: If "skybox" the ipd is set to 0 for now, let s try to propose a better solution for this in the future
eyePreviousView.setTranslation(vec3());
}
result._projection = stereo._eyeProjections[eye];
Mat4 previousProjection = prevStereo._eyeProjections[eye];
// Apply jitter to projections
// We divided by the framebuffer size, which is double-width in stereo, to normalize the jitter, but we want the full
// amount of jitter for each eye, so we multiply by 2 to get back to per-eye scale
normalizedJitter.x *= 2.0f;
result._projection[2][0] += normalizedJitter.x;
result._projection[2][1] += normalizedJitter.y;
previousProjection[2][0] += normalizedJitter.x;
previousProjection[2][1] += normalizedJitter.y;
result.recomputeDerived(eyeView, eyePreviousView, previousProjection);
result._stereoInfo = Vec4(1.0f, (float)eye, 1.0f / result._viewport.z, 1.0f / result._viewport.w);
return result;
}
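A worked example of the doubling above, with illustrative numbers for a side-by-side stereo target:

```cpp
#include <glm/vec2.hpp>

// The shared framebuffer is double-width (two 1280x800 eye views), so a jitter
// normalized by the full size is half as large as intended within one eye view.
glm::vec2 jitter(1.0f / 2560.0f, 1.0f / 800.0f);  // one pixel, normalized by 2560x800
jitter.x *= 2.0f;                                 // 1.0f / 1280.0f: one pixel per eye view
```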
Backend::TransformCamera Backend::TransformCamera::getMonoCamera(bool isSkybox,
const Transform& view,
Transform previousView,
Mat4 previousProjection,
Vec2 normalizedJitter) const {
TransformCamera result = *this;
if (isSkybox) {
previousView.setTranslation(vec3());
}
result._projection[2][0] += normalizedJitter.x;
result._projection[2][1] += normalizedJitter.y;
previousProjection[2][0] += normalizedJitter.x;
previousProjection[2][1] += normalizedJitter.y;
result.recomputeDerived(view, previousView, previousProjection);
result._stereoInfo = Vec4(0.0f, 0.0f, 1.0f / result._viewport.z, 1.0f / result._viewport.w);
return result;
}
const Backend::TransformCamera& Backend::TransformCamera::recomputeDerived(const Transform& view,
const Transform& previousView,
const Mat4& previousProjection) const {
_projectionInverse = glm::inverse(_projection);
// Get the viewEyeToWorld matrix from the transformView as passed to the gpu::Batch;
// this is the "_viewInverse" fed to the shader.
// Generate the "_view" matrix as well from the xform.
view.getMatrix(_viewInverse);
_view = glm::inverse(_viewInverse);
previousView.getMatrix(_previousViewInverse);
_previousView = glm::inverse(_previousViewInverse);
Mat4 viewUntranslated = _view;
viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f);
_projectionViewUntranslated = _projection * viewUntranslated;
viewUntranslated = _previousView;
viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f);
_previousProjectionViewUntranslated = previousProjection * viewUntranslated;
_stereoInfo = Vec4(0.0f);
return *this;
}
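In math form, with $e$ the eye position (the translation column of `_viewInverse`) and $V_0$ the view matrix with its translation zeroed, the derived matrices implement:

$$\text{clip} = \underbrace{P\,V_0}_{\texttt{\_projectionViewUntranslated}} \,(p_{\text{world}} - e), \qquad V_0[3] = (0, 0, 0, 1)$$

The previous-frame matrices follow the same construction with the previous view and projection; working in eye-centered, world-aligned coordinates preserves float precision far from the world origin.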

View file

@ -0,0 +1,141 @@
//
// Backend.h
// interface/src/gpu
//
// Created by Olivier Prat on 05/18/2018.
// Copyright 2018 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_Backend_h
#define hifi_gpu_Backend_h
#include <GLMHelpers.h>
#include "Forward.h"
#include "Batch.h"
#include "Buffer.h"
#include "Framebuffer.h"
class QImage;
namespace gpu {
class Context;
struct ContextStats {
public:
int _ISNumFormatChanges = 0;
int _ISNumInputBufferChanges = 0;
int _ISNumIndexBufferChanges = 0;
int _RSNumResourceBufferBounded = 0;
int _RSNumTextureBounded = 0;
int _RSAmountTextureMemoryBounded = 0;
int _DSNumAPIDrawcalls = 0;
int _DSNumDrawcalls = 0;
int _DSNumTriangles = 0;
int _PSNumSetPipelines = 0;
ContextStats() {}
ContextStats(const ContextStats& stats) = default;
void evalDelta(const ContextStats& begin, const ContextStats& end);
};
class Backend {
public:
virtual ~Backend() {}
virtual void shutdown() {}
virtual const std::string& getVersion() const = 0;
void setStereoState(const StereoState& stereo);
virtual void render(const Batch& batch) = 0;
virtual void syncCache() = 0;
virtual void syncProgram(const gpu::ShaderPointer& program) = 0;
virtual void recycle() const = 0;
virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) = 0;
virtual void updatePresentFrame(const Mat4& correction = Mat4()) = 0;
virtual bool supportedTextureFormat(const gpu::Element& format) = 0;
// Shared header between C++ and GLSL
#include "TransformCamera_shared.slh"
class TransformCamera : public _TransformCamera {
public:
const Backend::TransformCamera& recomputeDerived(const Transform& view, const Transform& previousView, const Mat4& previousProjection) const;
// Jitter should be divided by framebuffer size
TransformCamera getMonoCamera(bool isSkybox, const Transform& view, Transform previousView, Mat4 previousProjection, Vec2 normalizedJitter) const;
// Jitter should be divided by framebuffer size
TransformCamera getEyeCamera(int eye, const StereoState& stereo, const StereoState& prevStereo, const Transform& view, const Transform& previousView,
Vec2 normalizedJitter) const;
};
template <typename T, typename U>
static void setGPUObject(const U& object, T* gpuObject) {
object.gpuObject.setGPUObject(gpuObject);
}
template <typename T, typename U>
static T* getGPUObject(const U& object) {
return reinterpret_cast<T*>(object.gpuObject.getGPUObject());
}
void resetStats() const { _stats = ContextStats(); }
void getStats(ContextStats& stats) const { stats = _stats; }
virtual bool isTextureManagementSparseEnabled() const = 0;
// These should only be accessed by Backend implementation to report the buffer and texture allocations,
// they are NOT public objects
static ContextMetricSize freeGPUMemSize;
static ContextMetricCount bufferCount;
static ContextMetricSize bufferGPUMemSize;
static ContextMetricCount textureResidentCount;
static ContextMetricCount textureFramebufferCount;
static ContextMetricCount textureResourceCount;
static ContextMetricCount textureExternalCount;
static ContextMetricSize textureResidentGPUMemSize;
static ContextMetricSize textureFramebufferGPUMemSize;
static ContextMetricSize textureResourceGPUMemSize;
static ContextMetricSize textureExternalGPUMemSize;
static ContextMetricCount texturePendingGPUTransferCount;
static ContextMetricSize texturePendingGPUTransferMemSize;
static ContextMetricSize textureResourcePopulatedGPUMemSize;
static ContextMetricSize textureResourceIdealGPUMemSize;
protected:
virtual bool isStereo() const {
return _stereo.isStereo();
}
void getStereoProjections(mat4* eyeProjections) const {
for (int i = 0; i < 2; ++i) {
eyeProjections[i] = _stereo._eyeProjections[i];
}
}
void getStereoViews(mat4* eyeViews) const {
for (int i = 0; i < 2; ++i) {
eyeViews[i] = _stereo._eyeViews[i];
}
}
friend class Context;
mutable ContextStats _stats;
StereoState _stereo;
StereoState _prevStereo;
};
}
#endif

View file

@ -53,6 +53,7 @@ Batch::Batch(const std::string& name) {
_data.reserve(_dataMax);
_objects.reserve(_objectsMax);
_drawCallInfos.reserve(_drawCallInfosMax);
_mustUpdatePreviousModels = true;
}
Batch::~Batch() {
@ -101,17 +102,18 @@ void Batch::clear() {
_currentModel = Transform();
_drawcallUniform = 0;
_drawcallUniformReset = 0;
_projectionJitter = glm::vec2(0.0f);
_enableStereo = true;
_enableSkybox = false;
_mustUpdatePreviousModels = true;
}
size_t Batch::cacheData(size_t size, const void* data) {
size_t offset = _data.size();
size_t numBytes = size;
_data.resize(offset + numBytes);
memcpy(_data.data() + offset, data, size);
if (data) {
memcpy(_data.data() + offset, data, size);
}
return offset;
}
@ -236,6 +238,15 @@ void Batch::setModelTransform(const Transform& model) {
ADD_COMMAND(setModelTransform);
_currentModel = model;
_previousModel = model;
_invalidModel = true;
}
void Batch::setModelTransform(const Transform& model, const Transform& previousModel) {
ADD_COMMAND(setModelTransform);
_currentModel = model;
_previousModel = previousModel;
_invalidModel = true;
}
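A minimal sketch of the calling pattern the new two-argument overload enables (the payload class and its cached member are hypothetical):

```cpp
class MovingItem {  // hypothetical render payload
    Transform _previousTransform;
public:
    void render(gpu::Batch& batch, const Transform& current) {
        // Supply last frame's transform alongside this frame's so the backend
        // can derive per-object motion vectors.
        batch.setModelTransform(current, _previousTransform);
        _previousTransform = current;  // becomes "previous" on the next frame
    }
};
```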
@ -252,20 +263,29 @@ void Batch::setProjectionTransform(const Mat4& proj) {
_params.emplace_back(cacheData(sizeof(Mat4), &proj));
}
void Batch::setProjectionJitter(float jx, float jy) {
_projectionJitter.x = jx;
_projectionJitter.y = jy;
pushProjectionJitter(jx, jy);
void Batch::setProjectionJitterEnabled(bool isProjectionEnabled) {
_isJitterOnProjectionEnabled = isProjectionEnabled;
pushProjectionJitterEnabled(_isJitterOnProjectionEnabled);
}
void Batch::pushProjectionJitter(float jx, float jy) {
ADD_COMMAND(setProjectionJitter);
_params.emplace_back(jx);
_params.emplace_back(jy);
void Batch::pushProjectionJitterEnabled(bool isProjectionEnabled) {
ADD_COMMAND(setProjectionJitterEnabled);
_params.emplace_back(isProjectionEnabled & 1);
}
void Batch::popProjectionJitter() {
pushProjectionJitter(_projectionJitter.x, _projectionJitter.y);
void Batch::popProjectionJitterEnabled() {
pushProjectionJitterEnabled(_isJitterOnProjectionEnabled);
}
void Batch::setProjectionJitterSequence(const Vec2* sequence, size_t count) {
ADD_COMMAND(setProjectionJitterSequence);
_params.emplace_back((uint)count);
_params.emplace_back(cacheData(sizeof(Vec2) * count, sequence));
}
void Batch::setProjectionJitterScale(float scale) {
ADD_COMMAND(setProjectionJitterScale);
_params.emplace_back(scale);
}
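A sketch of feeding a TAA-style sub-pixel sequence through the new commands; the Halton helper is illustrative and not part of the gpu API:

```cpp
#include <vector>

static float halton(int index, int base) {
    float f = 1.0f, r = 0.0f;
    for (; index > 0; index /= base) {
        f /= base;
        r += f * float(index % base);
    }
    return r;
}

void setupJitter(gpu::Batch& batch) {
    std::vector<gpu::Vec2> sequence;
    for (int i = 1; i <= 8; i++) {  // 8-sample Halton(2,3), centered on zero
        sequence.emplace_back(halton(i, 2) - 0.5f, halton(i, 3) - 0.5f);
    }
    batch.setProjectionJitterSequence(sequence.data(), sequence.size());
    batch.setProjectionJitterScale(2.0f);  // one-pixel amplitude in clip space
    batch.setProjectionJitterEnabled(true);
}
```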
void Batch::setViewportTransform(const Vec4i& viewport) {
@ -281,6 +301,34 @@ void Batch::setDepthRangeTransform(float nearDepth, float farDepth) {
_params.emplace_back(nearDepth);
}
void Batch::saveViewProjectionTransform(uint32 saveSlot) {
ADD_COMMAND(saveViewProjectionTransform);
if (saveSlot >= MAX_TRANSFORM_SAVE_SLOT_COUNT) {
qCWarning(gpulogging) << "Transform save slot" << saveSlot << "exceeds max save slot count of" << MAX_TRANSFORM_SAVE_SLOT_COUNT;
}
_params.emplace_back(saveSlot);
}
void Batch::setSavedViewProjectionTransform(uint32 saveSlot) {
ADD_COMMAND(setSavedViewProjectionTransform);
if (saveSlot >= MAX_TRANSFORM_SAVE_SLOT_COUNT) {
qCWarning(gpulogging) << "Transform save slot" << saveSlot << "exceeds max save slot count of"
<< MAX_TRANSFORM_SAVE_SLOT_COUNT;
}
_params.emplace_back(saveSlot);
}
void Batch::copySavedViewProjectionTransformToBuffer(uint32 saveSlot, const BufferPointer& buffer, Offset offset) {
ADD_COMMAND(copySavedViewProjectionTransformToBuffer);
if (saveSlot >= MAX_TRANSFORM_SAVE_SLOT_COUNT) {
qCWarning(gpulogging) << "Transform save slot" << saveSlot << "exceeds max save slot count of"
<< MAX_TRANSFORM_SAVE_SLOT_COUNT;
}
_params.emplace_back(saveSlot);
_params.emplace_back(_buffers.cache(buffer));
_params.emplace_back(offset);
}
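Taken together, a sketch of the intended client-side flow for the three commands (the slot constant and `cameraBuffer` are illustrative):

```cpp
const gpu::uint32 MAIN_VIEW_SLOT = 0;  // must be < MAX_TRANSFORM_SAVE_SLOT_COUNT

batch.setProjectionTransform(projMat);
batch.setViewTransform(viewTransform);
batch.saveViewProjectionTransform(MAIN_VIEW_SLOT);      // record it (with history)

// ... other passes may overwrite the view/projection ...

batch.setSavedViewProjectionTransform(MAIN_VIEW_SLOT);  // restore it later in the frame

// Expose the saved TransformCamera to a custom shader via a uniform buffer:
batch.copySavedViewProjectionTransformToBuffer(MAIN_VIEW_SLOT, cameraBuffer, 0);
```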
void Batch::setPipeline(const PipelinePointer& pipeline) {
ADD_COMMAND(setPipeline);
@ -548,12 +596,15 @@ void Batch::captureDrawCallInfoImpl() {
if (_invalidModel) {
TransformObject object;
_currentModel.getMatrix(object._model);
_previousModel.getMatrix(object._previousModel);
// FIXME - we don't want to be using glm::inverse() here but it fixes the flickering issue we are
// seeing with planky blocks in toybox. Our implementation of getInverseMatrix() is buggy in cases
// of non-uniform scale. We need to fix that. In the meantime, glm::inverse() works.
//_model.getInverseMatrix(_object._modelInverse);
//_previousModel.getInverseMatrix(_object._previousModelInverse);
object._modelInverse = glm::inverse(object._model);
object._previousModelInverse = glm::inverse(object._previousModel);
_objects.emplace_back(object);
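For the record, why a hand-rolled inverse breaks under non-uniform scale: for $M = T\,R\,S$,

$$M^{-1} = S^{-1} R^{-1} T^{-1} = S^{-1} R^{\top} T^{-1}$$

and a fast path that assumes uniform scale $s$ collapses $S^{-1} R^{\top}$ to $R^{\top}/s$, which is wrong as soon as the three scale factors differ. `glm::inverse()` makes no such assumption, at the cost of a full 4x4 inverse.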
@ -760,4 +811,4 @@ void Batch::flush() {
}
buffer->flush();
}
}
}

View file

@ -42,6 +42,9 @@ class Batch {
public:
typedef Stream::Slot Slot;
enum {
MAX_TRANSFORM_SAVE_SLOT_COUNT = 6
};
class DrawCallInfo {
public:
@ -151,20 +154,20 @@ public:
// Multi-command description for multiDrawIndirect
class DrawIndirectCommand {
public:
uint _count{ 0 };
uint _instanceCount{ 0 };
uint _firstIndex{ 0 };
uint _baseInstance{ 0 };
uint _count { 0 };
uint _instanceCount { 0 };
uint _firstIndex { 0 };
uint _baseInstance { 0 };
};
// Multi-command description for multiDrawIndexedIndirect
class DrawIndexedIndirectCommand {
public:
uint _count{ 0 };
uint _instanceCount{ 0 };
uint _firstIndex{ 0 };
uint _baseVertex{ 0 };
uint _baseInstance{ 0 };
uint _count { 0 };
uint _instanceCount { 0 };
uint _firstIndex { 0 };
uint _baseVertex { 0 };
uint _baseInstance { 0 };
};
// Transform Stage
@ -174,17 +177,24 @@ public:
// WARNING: ViewTransform transform from eye space to world space, its inverse is composed
// with the ModelTransform to create the equivalent of the gl ModelViewMatrix
void setModelTransform(const Transform& model);
void setModelTransform(const Transform& model, const Transform& previousModel);
void resetViewTransform() { setViewTransform(Transform(), false); }
void setViewTransform(const Transform& view, bool camera = true);
void setProjectionTransform(const Mat4& proj);
void setProjectionJitter(float jx = 0.0f, float jy = 0.0f);
void setProjectionJitterEnabled(bool isProjectionEnabled);
void setProjectionJitterSequence(const Vec2* sequence, size_t count);
void setProjectionJitterScale(float scale);
// Very simple one-level stack management of jitter.
void pushProjectionJitter(float jx = 0.0f, float jy = 0.0f);
void popProjectionJitter();
void pushProjectionJitterEnabled(bool isProjectionEnabled);
void popProjectionJitterEnabled();
// Viewport is xy = low left corner in framebuffer, zw = width height of the viewport, expressed in pixels
void setViewportTransform(const Vec4i& viewport);
void setDepthRangeTransform(float nearDepth, float farDepth);
void saveViewProjectionTransform(uint32 saveSlot);
void setSavedViewProjectionTransform(uint32 saveSlot);
void copySavedViewProjectionTransformToBuffer(uint32 saveSlot, const BufferPointer& buffer, Offset offset);
// Pipeline Stage
void setPipeline(const PipelinePointer& pipeline);
@ -202,7 +212,7 @@ public:
void setResourceTexture(uint32 slot, const TexturePointer& texture);
void setResourceTexture(uint32 slot, const TextureView& view); // not a command, just a shortcut from a TextureView
void setResourceTextureTable(const TextureTablePointer& table, uint32 slot = 0);
void setResourceFramebufferSwapChainTexture(uint32 slot, const FramebufferSwapChainPointer& framebuffer, unsigned int swpaChainIndex, unsigned int renderBufferSlot = 0U); // not a command, just a shortcut from a TextureView
void setResourceFramebufferSwapChainTexture(uint32 slot, const FramebufferSwapChainPointer& framebuffer, unsigned int swapChainIndex, unsigned int renderBufferSlot = 0U); // not a command, just a shortcut from a TextureView
// Output Stage
void setFramebuffer(const FramebufferPointer& framebuffer);
@ -309,10 +319,16 @@ public:
COMMAND_setModelTransform,
COMMAND_setViewTransform,
COMMAND_setProjectionTransform,
COMMAND_setProjectionJitter,
COMMAND_setProjectionJitterEnabled,
COMMAND_setProjectionJitterSequence,
COMMAND_setProjectionJitterScale,
COMMAND_setViewportTransform,
COMMAND_setDepthRangeTransform,
COMMAND_saveViewProjectionTransform,
COMMAND_setSavedViewProjectionTransform,
COMMAND_copySavedViewProjectionTransformToBuffer,
COMMAND_setPipeline,
COMMAND_setStateBlendFactor,
COMMAND_setStateScissorRect,
@ -495,17 +511,14 @@ public:
Bytes _data;
static size_t _dataMax;
// SSBO class... layout MUST match the layout in Transform.slh
class TransformObject {
public:
Mat4 _model;
Mat4 _modelInverse;
};
#include "TransformObject_shared.slh"
using TransformObjects = std::vector<TransformObject>;
bool _invalidModel { true };
Transform _currentModel;
TransformObjects _objects;
Transform _previousModel;
mutable bool _mustUpdatePreviousModels;
mutable TransformObjects _objects;
static size_t _objectsMax;
BufferCaches _buffers;
@ -523,11 +536,12 @@ public:
NamedBatchDataMap _namedData;
uint16_t _drawcallUniform{ 0 };
uint16_t _drawcallUniformReset{ 0 };
bool _isJitterOnProjectionEnabled { false };
glm::vec2 _projectionJitter{ 0.0f, 0.0f };
bool _enableStereo{ true };
uint16_t _drawcallUniform { 0 };
uint16_t _drawcallUniformReset { 0 };
bool _enableStereo { true };
bool _enableSkybox { false };
protected:
@ -556,7 +570,7 @@ protected:
template <typename T>
size_t Batch::Cache<T>::_max = BATCH_PREALLOCATE_MIN;
}
} // namespace gpu
#if defined(NSIGHT_FOUND)

View file

@ -217,74 +217,6 @@ double Context::getFrameTimerBatchAverage() const {
return 0.0;
}
const Backend::TransformCamera& Backend::TransformCamera::recomputeDerived(const Transform& xformView) const {
_projectionInverse = glm::inverse(_projection);
// Get the viewEyeToWorld matrix from the transformView as passed to the gpu::Batch
// this is the "_viewInverse" fed to the shader
// Genetrate the "_view" matrix as well from the xform
xformView.getMatrix(_viewInverse);
_view = glm::inverse(_viewInverse);
Mat4 viewUntranslated = _view;
viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f);
_projectionViewUntranslated = _projection * viewUntranslated;
_stereoInfo = Vec4(0.0f);
return *this;
}
Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const StereoState& _stereo, const Transform& xformView, Vec2 normalizedJitter) const {
TransformCamera result = *this;
Transform offsetTransform = xformView;
if (!_stereo._skybox) {
offsetTransform.postTranslate(-Vec3(_stereo._eyeViews[eye][3]));
} else {
// FIXME: If "skybox" the ipd is set to 0 for now, let s try to propose a better solution for this in the future
}
result._projection = _stereo._eyeProjections[eye];
normalizedJitter.x *= 2.0f;
result._projection[2][0] += normalizedJitter.x;
result._projection[2][1] += normalizedJitter.y;
result.recomputeDerived(offsetTransform);
result._stereoInfo = Vec4(1.0f, (float)eye, 0.0f, 0.0f);
return result;
}
Backend::TransformCamera Backend::TransformCamera::getMonoCamera(const Transform& xformView, Vec2 normalizedJitter) const {
TransformCamera result = *this;
result._projection[2][0] += normalizedJitter.x;
result._projection[2][1] += normalizedJitter.y;
result.recomputeDerived(xformView);
return result;
}
// Counters for Buffer and Texture usage in GPU/Context
ContextMetricSize Backend::freeGPUMemSize;
ContextMetricCount Backend::bufferCount;
ContextMetricSize Backend::bufferGPUMemSize;
ContextMetricCount Backend::textureResidentCount;
ContextMetricCount Backend::textureFramebufferCount;
ContextMetricCount Backend::textureResourceCount;
ContextMetricCount Backend::textureExternalCount;
ContextMetricSize Backend::textureResidentGPUMemSize;
ContextMetricSize Backend::textureFramebufferGPUMemSize;
ContextMetricSize Backend::textureResourceGPUMemSize;
ContextMetricSize Backend::textureExternalGPUMemSize;
ContextMetricCount Backend::texturePendingGPUTransferCount;
ContextMetricSize Backend::texturePendingGPUTransferMemSize;
ContextMetricSize Backend::textureResourcePopulatedGPUMemSize;
ContextMetricSize Backend::textureResourceIdealGPUMemSize;
Size Context::getFreeGPUMemSize() {
return Backend::freeGPUMemSize.getValue();
}

View file

@ -15,131 +15,14 @@
#include <mutex>
#include <queue>
#include <GLMHelpers.h>
#include "Forward.h"
#include "Batch.h"
#include "Buffer.h"
#include "Texture.h"
#include "Pipeline.h"
#include "Framebuffer.h"
#include "Frame.h"
#include "PointerStorage.h"
class QImage;
#include "Backend.h"
namespace gpu {
struct ContextStats {
public:
uint32_t _ISNumFormatChanges { 0 };
uint32_t _ISNumInputBufferChanges { 0 };
uint32_t _ISNumIndexBufferChanges { 0 };
uint32_t _RSNumResourceBufferBounded { 0 };
uint32_t _RSNumTextureBounded { 0 };
uint64_t _RSAmountTextureMemoryBounded { 0 };
uint32_t _DSNumAPIDrawcalls { 0 };
uint32_t _DSNumDrawcalls { 0 };
uint32_t _DSNumTriangles { 0 };
uint32_t _PSNumSetPipelines { 0 };
ContextStats() {}
ContextStats(const ContextStats& stats) = default;
void evalDelta(const ContextStats& begin, const ContextStats& end);
};
class Backend {
public:
virtual ~Backend(){};
virtual void shutdown() {}
virtual const std::string& getVersion() const = 0;
void setStereoState(const StereoState& stereo) { _stereo = stereo; }
virtual void render(const Batch& batch) = 0;
virtual void syncCache() = 0;
virtual void syncProgram(const gpu::ShaderPointer& program) = 0;
virtual void recycle() const = 0;
virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) = 0;
virtual void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset = false) {}
virtual bool supportedTextureFormat(const gpu::Element& format) = 0;
// Shared header between C++ and GLSL
#include "TransformCamera_shared.slh"
class TransformCamera : public _TransformCamera {
public:
const Backend::TransformCamera& recomputeDerived(const Transform& xformView) const;
// Jitter should be divided by framebuffer size
TransformCamera getMonoCamera(const Transform& xformView, Vec2 normalizedJitter) const;
// Jitter should be divided by framebuffer size
TransformCamera getEyeCamera(int eye, const StereoState& stereo, const Transform& xformView, Vec2 normalizedJitter) const;
};
template <typename T, typename U>
static void setGPUObject(const U& object, T* gpuObject) {
object.gpuObject.setGPUObject(gpuObject);
}
template <typename T, typename U>
static T* getGPUObject(const U& object) {
return reinterpret_cast<T*>(object.gpuObject.getGPUObject());
}
void resetStats() const { _stats = ContextStats(); }
void getStats(ContextStats& stats) const { stats = _stats; }
virtual bool isTextureManagementSparseEnabled() const = 0;
// These should only be accessed by Backend implementation to report the buffer and texture allocations,
// they are NOT public objects
static ContextMetricSize freeGPUMemSize;
static ContextMetricCount bufferCount;
static ContextMetricSize bufferGPUMemSize;
static ContextMetricCount textureResidentCount;
static ContextMetricCount textureFramebufferCount;
static ContextMetricCount textureResourceCount;
static ContextMetricCount textureExternalCount;
static ContextMetricSize textureResidentGPUMemSize;
static ContextMetricSize textureFramebufferGPUMemSize;
static ContextMetricSize textureResourceGPUMemSize;
static ContextMetricSize textureExternalGPUMemSize;
static ContextMetricCount texturePendingGPUTransferCount;
static ContextMetricSize texturePendingGPUTransferMemSize;
static ContextMetricSize textureResourcePopulatedGPUMemSize;
static ContextMetricSize textureResourceIdealGPUMemSize;
virtual bool isStereo() const {
return _stereo.isStereo();
}
void getStereoProjections(mat4* eyeProjections) const {
for (int i = 0; i < 2; ++i) {
eyeProjections[i] = _stereo._eyeProjections[i];
}
}
protected:
void getStereoViews(mat4* eyeViews) const {
for (int i = 0; i < 2; ++i) {
eyeViews[i] = _stereo._eyeViews[i];
}
}
friend class Context;
mutable ContextStats _stats;
StereoState _stereo;
};
class Context {
public:
using Size = Resource::Size;

View file

@ -0,0 +1,27 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// <$_SCRIBE_FILENAME$>
// Generated on <$_SCRIBE_DATE$>
// Draw the unit quad [-1,-1 -> 1,1].
// No transform used.
// Simply draws a triangle strip of 2 triangles; no input buffers or index buffer needed
//
// Created by Olivier Prat on 10/22/2018
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
void main(void) {
const float depth = 1.0;
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, depth, 1.0),
vec4(1.0, -1.0, depth, 1.0),
vec4(-1.0, 1.0, depth, 1.0),
vec4(1.0, 1.0, depth, 1.0)
);
vec4 pos = UNIT_QUAD[gl_VertexID];
gl_Position = pos;
}
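A sketch of how such a buffer-less quad is drawn from the C++ side (the pipeline variable is illustrative):

```cpp
// No vertex or index buffers are bound: the four corners come from
// gl_VertexID inside the DrawUnitQuad vertex shader above.
batch.setPipeline(drawUnitQuadPipeline);  // hypothetical pipeline built on DrawUnitQuad
batch.draw(gpu::TRIANGLE_STRIP, 4);
```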

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -80,7 +81,7 @@ constexpr const char* pose = "pose";
constexpr const char* profileRanges = "profileRanges";
constexpr const char* program = "program";
constexpr const char* programs = "programs";
constexpr const char* projectionJitter = "projectionJitter";
constexpr const char* isJitterOnProjectionEnabled = "isJitterOnProjectionEnabled";
constexpr const char* queries = "queries";
constexpr const char* sampleCount = "sampleCount";
constexpr const char* sampleMask = "sampleMask";
@ -150,10 +151,16 @@ constexpr const char* COMMAND_NAMES[] = {
"setModelTransform",
"setViewTransform",
"setProjectionTransform",
"setProjectionJitter",
"setProjectionJitterEnabled",
"setProjectionJitterSequence",
"setProjectionJitterScale",
"setViewportTransform",
"setDepthRangeTransform",
"saveViewProjectionTransform",
"setSavedViewProjectionTransform",
"copySavedViewProjectionTransformToBuffer",
"setPipeline",
"setStateBlendFactor",
"setStateScissorRect",

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -740,7 +741,7 @@ BatchPointer Deserializer::readBatch(const json& node) {
auto& batch = *result;
readOptional(batch._enableStereo, node, keys::stereo);
readOptional(batch._enableSkybox, node, keys::skybox);
readOptionalTransformed<glm::vec2>(batch._projectionJitter, node, keys::projectionJitter, &readVec2);
readOptional(batch._isJitterOnProjectionEnabled, node, keys::isJitterOnProjectionEnabled);
readOptional(batch._drawcallUniform, node, keys::drawcallUniform);
readOptional(batch._drawcallUniformReset, node, keys::drawcallUniformReset);
readPointerCache(batch._textures, node, keys::textures, textures);

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2018/10/14
// Copyright 2013-2018 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -220,8 +221,8 @@ json Serializer::writeBatch(const Batch& batch) {
if (batch._enableStereo != DEFAULT_BATCH._enableStereo) {
batchNode[keys::stereo] = batch._enableStereo;
}
if (batch._projectionJitter != DEFAULT_BATCH._projectionJitter) {
batchNode[keys::projectionJitter] = writeVec2(batch._projectionJitter);
if (batch._isJitterOnProjectionEnabled != DEFAULT_BATCH._isJitterOnProjectionEnabled) {
batchNode[keys::isJitterOnProjectionEnabled] = batch._isJitterOnProjectionEnabled;
}
if (batch._drawcallUniform != DEFAULT_BATCH._drawcallUniform) {
batchNode[keys::drawcallUniform] = batch._drawcallUniform;

View file

@ -66,6 +66,8 @@ namespace gpu {
double getGPUAverage() const;
double getBatchAverage() const;
const std::string& name() const { return _name; }
protected:
static const int QUERY_QUEUE_SIZE { 4 };

View file

@ -3,6 +3,7 @@
//
// Created by Sam Gateau on 2/10/15.
// Copyright 2013 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -10,8 +11,12 @@
<@if not GPU_TRANSFORM_STATE_SLH@>
<@def GPU_TRANSFORM_STATE_SLH@>
<@include gpu/ShaderConstants.h@>
<@func declareStandardCameraTransform()@>
#ifndef STANDARD_TRANSFORM_CAMERA
#define STANDARD_TRANSFORM_CAMERA
<@include gpu/ShaderConstants.h@>
<@include gpu/TransformCamera_shared.slh@>
#define TransformCamera _TransformCamera
@ -90,32 +95,23 @@ vec3 getEyeWorldPos() {
}
bool cam_isStereo() {
#ifdef GPU_TRANSFORM_IS_STEREO
return getTransformCamera()._stereoInfo.x > 0.0;
#else
return _cameraBlock._camera._stereoInfo.x > 0.0;
#endif
}
float cam_getStereoSide() {
#ifdef GPU_TRANSFORM_IS_STEREO
#ifdef GPU_TRANSFORM_STEREO_CAMERA
return getTransformCamera()._stereoInfo.y;
#else
return _cameraBlock._camera._stereoInfo.y;
#endif
#else
return _cameraBlock._camera._stereoInfo.y;
#endif
}
vec2 cam_getInvWidthHeight() {
return getTransformCamera()._stereoInfo.zw;
}
#endif // STANDARD_TRANSFORM_CAMERA
<@endfunc@>
<@func declareStandardObjectTransform()@>
struct TransformObject {
mat4 _model;
mat4 _modelInverse;
};
<@include gpu/TransformObject_shared.slh@>
layout(location=GPU_ATTR_DRAW_CALL_INFO) in ivec2 _drawCallInfo;
@ -155,11 +151,7 @@ TransformObject getTransformObject() {
<$declareStandardObjectTransform()$>
<@endfunc@>
<@func transformCameraViewport(cameraTransform, viewport)@>
<$viewport$> = <$cameraTransform$>._viewport;
<@endfunc@>
<@func transformStereoClipsSpace(cameraTransform, clipPos)@>
<@func transformStereoClipSpace(clipPos)@>
{
#ifdef GPU_TRANSFORM_IS_STEREO
@ -190,6 +182,18 @@ TransformObject getTransformObject() {
}
<@endfunc@>
<@func transformModelToEyeAndPrevEyeWorldAlignedPos(cameraTransform, objectTransform, modelPos, eyeWAPos, prevEyeWAPos)@>
<!// Bring the model position into the world-aligned space centered on the eye axis !>
{ // transformModelToEyeAndPrevEyeWorldAlignedPos
highp mat4 _mv = <$objectTransform$>._model;
highp mat4 _pmv = <$objectTransform$>._previousModel;
_mv[3].xyz -= <$cameraTransform$>._viewInverse[3].xyz;
_pmv[3].xyz -= <$cameraTransform$>._previousViewInverse[3].xyz;
<$eyeWAPos$> = (_mv * <$modelPos$>);
<$prevEyeWAPos$> = (_pmv * <$modelPos$>);
}
<@endfunc@>
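The translation subtraction above implements, for model matrix $M$ and eye position $e$:

$$p_{\text{eyeWA}} = M\,p_{\text{model}} - e, \qquad p_{\text{prevEyeWA}} = M_{\text{prev}}\,p_{\text{model}} - e_{\text{prev}}$$

that is, a world-oriented space re-centered on the eye, which avoids precision loss far from the origin before the untranslated projection-view is applied.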
<@func transformModelToMonoClipPos(cameraTransform, objectTransform, modelPos, clipPos)@>
{ // transformModelToMonoClipPos
vec4 eyeWAPos;
@ -201,7 +205,7 @@ TransformObject getTransformObject() {
<@func transformModelToClipPos(cameraTransform, objectTransform, modelPos, clipPos)@>
{ // transformModelToClipPos
<$transformModelToMonoClipPos($cameraTransform$, $objectTransform$, $modelPos$, $clipPos$)$>
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
@ -212,19 +216,59 @@ TransformObject getTransformObject() {
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
<@func transformModelToWorldAndEyeAndClipPos(cameraTransform, objectTransform, modelPos, worldPos, eyePos, clipPos)@>
{ // transformModelToEyeAndClipPos
<@func transformModelToEyeClipPosAndPrevClipPos(cameraTransform, objectTransform, modelPos, eyePos, clipPos, prevClipPos)@>
{ // transformModelToEyeClipPosAndPrevClipPos
vec4 eyeWAPos;
vec4 prevEyeWAPos;
<$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$>
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$transformStereoClipSpace($clipPos$)$>
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * prevEyeWAPos;
// Prev clip pos is in mono clip space
}
<@endfunc@>
<@func transformModelToClipPosAndPrevClipPos(cameraTransform, objectTransform, modelPos, clipPos, prevClipPos)@>
{ // transformModelToClipPosAndPrevClipPos
vec4 eyeWAPos;
vec4 prevEyeWAPos;
<$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$>
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$transformStereoClipSpace($clipPos$)$>
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * prevEyeWAPos;
// Prev clip pos is in mono clip space
}
<@endfunc@>
<@func transformModelToWorldEyeAndClipPos(cameraTransform, objectTransform, modelPos, worldPos, eyePos, clipPos)@>
{ // transformModelToWorldEyeAndClipPos
vec4 eyeWAPos;
<$transformModelToEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos)$>
<$worldPos$> = vec4(eyeWAPos.xyz + <$cameraTransform$>._viewInverse[3].xyz, 1.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
<@func transformModelToWorldEyeClipPosAndPrevClipPos(cameraTransform, objectTransform, modelPos, worldPos, eyePos, clipPos, prevClipPos)@>
{ // transformModelToWorldEyeClipPosAndPrevClipPos
vec4 eyeWAPos;
vec4 prevEyeWAPos;
<$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$>
<$worldPos$> = vec4(eyeWAPos.xyz + <$cameraTransform$>._viewInverse[3].xyz, 1.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * prevEyeWAPos;
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
@ -236,13 +280,22 @@ TransformObject getTransformObject() {
}
<@endfunc@>
<@func transformModelToEyePosAndPrevEyePos(cameraTransform, objectTransform, modelPos, eyePos, prevEyePos)@>
{ // transformModelToEyePosAndPrevEyePos
vec4 eyeWAPos;
vec4 prevEyeWAPos;
<$transformModelToEyeAndPrevEyeWorldAlignedPos($cameraTransform$, $objectTransform$, $modelPos$, eyeWAPos, prevEyeWAPos)$>
<$eyePos$> = vec4((<$cameraTransform$>._view * vec4(eyeWAPos.xyz, 0.0)).xyz, 1.0);
<$prevEyePos$> = vec4((<$cameraTransform$>._previousView * vec4(prevEyeWAPos.xyz, 0.0)).xyz, 1.0);
}
<@endfunc@>
<@func transformWorldToClipPos(cameraTransform, worldPos, clipPos)@>
{ // transformWorldToClipPos
vec4 eyeWAPos = <$worldPos$> - vec4(<$cameraTransform$>._viewInverse[3].xyz, 0.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * eyeWAPos;
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
@ -275,6 +328,20 @@ TransformObject getTransformObject() {
}
<@endfunc@>
<@func transformModelToPrevEyeDir(cameraTransform, objectTransform, modelDir, prevEyeDir)@>
{ // transformModelToPrevEyeDir
vec3 mr0 = vec3(<$objectTransform$>._previousModelInverse[0].x, <$objectTransform$>._previousModelInverse[1].x, <$objectTransform$>._previousModelInverse[2].x);
vec3 mr1 = vec3(<$objectTransform$>._previousModelInverse[0].y, <$objectTransform$>._previousModelInverse[1].y, <$objectTransform$>._previousModelInverse[2].y);
vec3 mr2 = vec3(<$objectTransform$>._previousModelInverse[0].z, <$objectTransform$>._previousModelInverse[1].z, <$objectTransform$>._previousModelInverse[2].z);
vec3 mvc0 = vec3(dot(<$cameraTransform$>._previousViewInverse[0].xyz, mr0), dot(<$cameraTransform$>._previousViewInverse[0].xyz, mr1), dot(<$cameraTransform$>._previousViewInverse[0].xyz, mr2));
vec3 mvc1 = vec3(dot(<$cameraTransform$>._previousViewInverse[1].xyz, mr0), dot(<$cameraTransform$>._previousViewInverse[1].xyz, mr1), dot(<$cameraTransform$>._previousViewInverse[1].xyz, mr2));
vec3 mvc2 = vec3(dot(<$cameraTransform$>._previousViewInverse[2].xyz, mr0), dot(<$cameraTransform$>._previousViewInverse[2].xyz, mr1), dot(<$cameraTransform$>._previousViewInverse[2].xyz, mr2));
<$prevEyeDir$> = vec3(dot(mvc0, <$modelDir$>), dot(mvc1, <$modelDir$>), dot(mvc2, <$modelDir$>));
}
<@endfunc@>
<@func transformEyeToWorldDir(cameraTransform, eyeDir, worldDir)@>
{ // transformEyeToWorldDir
<$worldDir$> = vec3(<$cameraTransform$>._viewInverse * vec4(<$eyeDir$>.xyz, 0.0));
@ -291,7 +358,34 @@ TransformObject getTransformObject() {
{ // transformEyeToClipPos
<$clipPos$> = <$cameraTransform$>._projection * vec4(<$eyePos$>.xyz, 1.0);
<$transformStereoClipsSpace($cameraTransform$, $clipPos$)$>
<$transformStereoClipSpace($clipPos$)$>
}
<@endfunc@>
<@func transformEyeToPrevClipPos(cameraTransform, eyePos, prevClipPos)@>
{ // transformEyeToPrevClipPos
vec4 worldPos = <$cameraTransform$>._viewInverse * vec4(<$eyePos$>.xyz, 1.0);
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * worldPos;
// Prev clip pos is in mono clip space
}
<@endfunc@>
<@func transformEyeToClipPosAndPrevClipPos(cameraTransform, eyePos, clipPos, prevClipPos)@>
{ // transformEyeToClipPosAndPrevClipPos
<$clipPos$> = <$cameraTransform$>._projection * vec4(<$eyePos$>.xyz, 1.0);
<$transformStereoClipSpace($clipPos$)$>
vec4 worldPos = <$cameraTransform$>._viewInverse * vec4(<$eyePos$>.xyz, 1.0);
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * worldPos;
// Prev clip pos is in mono clip space
}
<@endfunc@>
<@func transformPrevEyeToPrevClipPos(cameraTransform, prevEyePos, prevClipPos)@>
{ // transformPrevEyeToPrevClipPos
<$prevClipPos$> = <$cameraTransform$>._previousViewInverse * vec4(<$prevEyePos$>.xyz, 1.0) - vec4(<$cameraTransform$>._previousViewInverse[3].xyz, 0.0);
<$prevClipPos$> = <$cameraTransform$>._previousProjectionViewUntranslated * <$prevClipPos$>;
}
<@endfunc@>

View file

@ -1,22 +1,26 @@
// glsl / C++ compatible source as interface for FadeEffect
// glsl / C++ compatible source as interface for TransformCamera
#ifdef __cplusplus
# define _MAT4 Mat4
# define _VEC4 Vec4
# define _MUTABLE mutable
# define TC_MAT4 gpu::Mat4
# define TC_VEC4 gpu::Vec4
# define TC_MUTABLE mutable
#else
# define _MAT4 mat4
# define _VEC4 vec4
# define _MUTABLE
# define TC_MAT4 mat4
# define TC_VEC4 vec4
# define TC_MUTABLE
#endif
struct _TransformCamera {
_MUTABLE _MAT4 _view;
_MUTABLE _MAT4 _viewInverse;
_MUTABLE _MAT4 _projectionViewUntranslated;
_MAT4 _projection;
_MUTABLE _MAT4 _projectionInverse;
_VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.
_MUTABLE _VEC4 _stereoInfo;
TC_MUTABLE TC_MAT4 _view;
TC_MUTABLE TC_MAT4 _viewInverse;
TC_MUTABLE TC_MAT4 _previousView;
TC_MUTABLE TC_MAT4 _previousViewInverse;
TC_MAT4 _projection;
TC_MUTABLE TC_MAT4 _projectionInverse;
TC_MUTABLE TC_MAT4 _projectionViewUntranslated;
// Previous projection view untranslated AND jittered with current jitter
TC_MUTABLE TC_MAT4 _previousProjectionViewUntranslated;
TC_VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.
TC_MUTABLE TC_VEC4 _stereoInfo;
};
// <@if 1@>

View file

@ -0,0 +1,19 @@
// glsl / C++ compatible source as interface for TransformObject
#ifdef __cplusplus
# define TO_MAT4 Mat4
#else
# define TO_MAT4 mat4
#endif
struct TransformObject {
TO_MAT4 _model;
TO_MAT4 _modelInverse;
TO_MAT4 _previousModel;
TO_MAT4 _previousModelInverse;
};
// <@if 1@>
// Trigger Scribe include
// <@endif@> <!def that !>
//

View file

@ -1,3 +1,2 @@
VERTEX DrawTransformVertexPosition
VERTEX DrawUnitQuad
FRAGMENT DrawColor

View file

@ -0,0 +1,2 @@
VERTEX DrawUnitQuad
FRAGMENT DrawWhite

View file

@ -181,10 +181,9 @@ vec4 computeHazeColor(vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePosition
// Mix with background at far range
const float BLEND_DISTANCE = 27000.0f;
vec4 outFragColor = potentialFragColor;
outFragColor.a *= mix(1.0, hazeParams.backgroundBlend, float(distance > BLEND_DISTANCE));
potentialFragColor.a *= mix(1.0, hazeParams.backgroundBlend, float(distance > BLEND_DISTANCE));
return outFragColor;
return potentialFragColor;
}
<@endif@>

View file

@ -73,14 +73,14 @@ void Skybox::prepare(gpu::Batch& batch) const {
}
}
void Skybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const {
void Skybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const {
updateSchemaBuffer();
Skybox::render(batch, frustum, (*this), forward);
Skybox::render(batch, frustum, (*this), forward, transformSlot);
}
static std::map<bool, gpu::PipelinePointer> _pipelines;
void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Skybox& skybox, bool forward) {
void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Skybox& skybox, bool forward, uint transformSlot) {
if (_pipelines.empty()) {
static const std::vector<std::tuple<bool, uint32_t>> keys = {
std::make_tuple(false, shader::graphics::program::skybox),
@ -109,6 +109,8 @@ void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Sky
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewTransform);
// This is needed if we want to have motion vectors on the sky
batch.saveViewProjectionTransform(transformSlot);
batch.setModelTransform(Transform()); // only for Mac
batch.setPipeline(_pipelines[forward]);

View file

@ -44,9 +44,9 @@ public:
virtual void clear();
void prepare(gpu::Batch& batch) const;
virtual void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const;
virtual void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const;
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const Skybox& skybox, bool forward);
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const Skybox& skybox, bool forward, uint transformSlot);
const UniformBufferView& getSchemaBuffer() const { return _schemaBuffer; }

View file

@ -5,12 +5,15 @@
//
// Created by Sam Gateau on 5/5/2015.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include graphics/ShaderConstants.h@>
<@include skybox.slh@>
<@if HIFI_USE_FORWARD@>
<@include gpu/Transform.slh@>
<$declareStandardCameraTransform()$>
@ -19,20 +22,16 @@
<$declareLightBuffer()$>
<@include graphics/Haze.slh@>
layout(location=0) out vec4 _fragColor;
<@else@>
<$declarePackDeferredFragmentSky()$>
<@endif@>
LAYOUT(binding=GRAPHICS_TEXTURE_SKYBOX) uniform samplerCube cubeMap;
struct Skybox {
vec4 color;
};
LAYOUT(binding=GRAPHICS_BUFFER_SKYBOX_PARAMS) uniform skyboxBuffer {
Skybox skybox;
};
layout(location=0) in vec3 _normal;
layout(location=0) out vec4 _fragColor;
layout(location=0) in vec3 _normal;
<@if not HIFI_USE_FORWARD@>
layout(location=1) in vec4 _prevPositionCS;
<@endif@>
void main(void) {
// FIXME: For legacy reasons, when skybox.color.a is 0.5, this is equivalent to:
@ -44,10 +43,10 @@ void main(void) {
vec3 normal = normalize(_normal);
vec3 skyboxTexel = texture(cubeMap, normal).rgb;
vec3 skyboxColor = skybox.color.rgb;
_fragColor = vec4(mix(vec3(1.0), skyboxTexel, float(skybox.color.a > 0.0)) *
mix(vec3(1.0), skyboxColor, float(skybox.color.a < 1.0)), 1.0);
vec3 color = mix(vec3(1.0), skyboxTexel, float(skybox.color.a > 0.0)) * mix(vec3(1.0), skyboxColor, float(skybox.color.a < 1.0));
<@if HIFI_USE_FORWARD@>
_fragColor = vec4(color, 1.0);
// FIXME: either move this elsewhere or give it access to isHazeEnabled() (which is in render-utils/LightingModel.slh)
if (/*(isHazeEnabled() > 0.0) && */(hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {
TransformCamera cam = getTransformCamera();
@ -63,6 +62,8 @@ void main(void) {
vec4 hazeColor = computeHazeColor(fragPositionES.xyz, fragPositionWS.xyz, eyePositionWS.xyz, lightDirectionWS);
_fragColor.rgb = mix(_fragColor.rgb, hazeColor.rgb, hazeColor.a);
}
<@else@>
packDeferredFragmentSky(_prevPositionCS, color, normal);
<@endif@>
}

View file

@ -0,0 +1,65 @@
<!
// skybox.slh
// libraries/graphics/src
//
// Created by HifiExperiments on 8/5/2020.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
!>
<@if not SKYBOX_SLH@>
<@def SKYBOX_SLH@>
<@include gpu/Transform.slh@>
<$declareStandardCameraTransform()$>
<@include gpu/PackedNormal.slh@>
LAYOUT(binding=GRAPHICS_TEXTURE_SKYBOX) uniform samplerCube cubeMap;
struct Skybox {
vec4 color;
};
LAYOUT(binding=GRAPHICS_BUFFER_SKYBOX_PARAMS) uniform skyboxBuffer {
Skybox skybox;
};
<@func declarePackDeferredFragmentSky()@>
// This code belongs in render-utils/VelocityWrite.slh, but because graphics can't include render-utils, it has to live here
vec2 getEyeTexcoordPos() {
// No need to add 0.5 as, by default, frag coords are pixel centered at (0.5, 0.5)
vec2 texCoordPos = gl_FragCoord.xy;
texCoordPos *= cam_getInvWidthHeight();
texCoordPos.x -= cam_getStereoSide();
return texCoordPos;
}
vec2 packVelocity(vec4 prevPositionCS) {
vec2 uv = getEyeTexcoordPos();
vec2 prevUV = (prevPositionCS.xy / prevPositionCS.w) * 0.5 + 0.5;
vec2 deltaUV = uv - prevUV;
// The velocity should be computed without any jitter baked in.
return deltaUV;
}
// Must match layout in DeferredBufferWrite.slh, but only velocity and lighting are used
layout(location = 0) out vec4 _albedoMetallic; // albedo / metallic
layout(location = 1) out vec4 _normalRoughness; // normal / roughness
layout(location = 2) out vec4 _scatteringEmissiveOcclusion; // scattering / emissive / occlusion
layout(location = 3) out vec4 _velocity; // velocity
layout(location = 4) out vec4 _lighting; // emissive
void packDeferredFragmentSky(vec4 prevPositionCS, vec3 color, vec3 normal) {
_albedoMetallic = vec4(color, 0.6f);
_normalRoughness = vec4(packNormal(normal), 1.0f);
_scatteringEmissiveOcclusion = vec4(0.0f);
_velocity = vec4(packVelocity(prevPositionCS), 0.0f, 0.0f);
_lighting = vec4(color, 1.0f);
}
<@endfunc@>
<@endif@>
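The packed velocity is a screen-space UV delta; with $c$ the previous-frame clip position produced by the vertex shader:

$$v = uv_{\text{frag}} - \left(\frac{c_{xy}}{c_w} \cdot 0.5 + 0.5\right)$$

Since the same current-frame jitter is added to both the current and previous projections (see `getEyeCamera`/`getMonoCamera` above), it cancels in this difference, which is what the "without any jitter" comment refers to.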

View file

@ -5,6 +5,7 @@
//
// Created by Sam Gateau on 5/5/2015.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -14,6 +15,9 @@
<$declareStandardTransform()$>
layout(location=0) out vec3 _normal;
<@if not HIFI_USE_FORWARD@>
layout(location=1) out vec4 _prevPositionCS;
<@endif@>
void main(void) {
const float depth = 0.0;
@ -23,17 +27,20 @@ void main(void) {
vec4(-1.0, 1.0, depth, 1.0),
vec4(1.0, 1.0, depth, 1.0)
);
vec4 inPosition = UNIT_QUAD[gl_VertexID];
// standard transform
TransformCamera cam = getTransformCamera();
vec3 clipDir = vec3(inPosition.xy, 0.0);
vec3 clipDir = UNIT_QUAD[gl_VertexID].xyz;
vec3 eyeDir;
<$transformClipToEyeDir(cam, clipDir, eyeDir)$>
<$transformEyeToWorldDir(cam, eyeDir, _normal)$>
<@if not HIFI_USE_FORWARD@>
_prevPositionCS = cam._previousProjectionViewUntranslated * (cam._viewInverse * (cam._projectionInverse * vec4(clipDir, 1.0)));
<@endif@>
// Position is supposed to come in clip space
gl_Position = vec4(inPosition.xy, 0.0, 1.0);
gl_Position = vec4(clipDir, 1.0);
<$transformStereoClipsSpace(cam, gl_Position)$>
<$transformStereoClipSpace(gl_Position)$>
}
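The unprojection that transformClipToEyeDir / transformEyeToWorldDir perform on each quad corner amounts to roughly the following (hedged C++/glm sketch; the actual scribe functions also handle the stereo path):

    #include <glm/glm.hpp>

    glm::vec3 clipToWorldDir(const glm::mat4& projectionInverse,
                             const glm::mat4& viewInverse,
                             const glm::vec3& clipDir) {
        glm::vec4 eye = projectionInverse * glm::vec4(clipDir, 1.0f); // clip -> eye space
        glm::vec3 eyeDir = glm::vec3(eye);                            // treat as a direction
        return glm::mat3(viewInverse) * eyeDir;                       // rotate into world space
    }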

View file

@ -43,15 +43,15 @@ void ProceduralSkybox::clear() {
Skybox::clear();
}
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const {
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const {
if (_procedural.isReady()) {
ProceduralSkybox::render(batch, frustum, (*this), forward);
ProceduralSkybox::render(batch, frustum, (*this), forward, transformSlot);
} else {
Skybox::render(batch, frustum, forward);
Skybox::render(batch, frustum, forward, transformSlot);
}
}
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const ProceduralSkybox& skybox, bool forward) {
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const ProceduralSkybox& skybox, bool forward, uint transformSlot) {
glm::mat4 projMat;
viewFrustum.evalProjectionMatrix(projMat);
@ -59,6 +59,8 @@ void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum,
viewFrustum.evalViewTransform(viewTransform);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewTransform);
// This is needed if we want to have motion vectors on the sky
batch.saveViewProjectionTransform(transformSlot);
batch.setModelTransform(Transform()); // only for Mac
auto& procedural = skybox._procedural;
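The saveViewProjectionTransform() call above is the producer half of the transform-slot pattern this commit introduces; a hedged sketch of both halves (function names, parameters, and slot value are illustrative only):

    void producerPass(gpu::Batch& batch, const glm::mat4& projMat, const Transform& viewMat, uint slot) {
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);
        batch.saveViewProjectionTransform(slot);       // record the camera for reuse
    }

    void consumerPass(gpu::Batch& batch, uint slot) {
        batch.setSavedViewProjectionTransform(slot);   // rebind it without re-evaluating the frustum
        batch.setModelTransform(Transform());
    }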

View file

@ -26,8 +26,8 @@ public:
bool empty() override;
void clear() override;
void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward) const override;
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const ProceduralSkybox& skybox, bool forward);
void render(gpu::Batch& batch, const ViewFrustum& frustum, bool forward, uint transformSlot) const override;
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const ProceduralSkybox& skybox, bool forward, uint transformSlot);
uint64_t getCreated() const { return _created; }

View file

@ -6,27 +6,21 @@
//
// Created by Sam Gateau on 5/5/2015.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include graphics/ShaderConstants.h@>
LAYOUT(binding=GRAPHICS_TEXTURE_SKYBOX) uniform samplerCube cubeMap;
struct Skybox {
vec4 color;
};
LAYOUT(binding=GRAPHICS_BUFFER_SKYBOX_PARAMS) uniform skyboxBuffer {
Skybox skybox;
};
layout(location=0) in vec3 _normal;
layout(location=0) out vec4 _fragColor;
<@include graphics/skybox.slh@>
<$declarePackDeferredFragmentSky()$>
<@include procedural/ProceduralCommon.slh@>
layout(location=0) in vec3 _normal;
layout(location=1) in vec4 _prevPositionCS;
#line 1001
//PROCEDURAL_BLOCK_BEGIN
vec3 getSkyboxColor() {
@ -42,5 +36,6 @@ void main(void) {
color = max(color, vec3(0));
// Procedural Shaders are expected to be Gamma corrected so let's bring back the RGB in linear space for the rest of the pipeline
color = pow(color, vec3(2.2));
_fragColor = vec4(color, 1.0);
packDeferredFragmentSky(_prevPositionCS, color, _normal);
}

View file

@ -4,6 +4,7 @@
//
// Created by Raffi Bedikian on 8/30/15
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -13,7 +14,6 @@
#include <glm/gtc/random.hpp>
#include <PathUtils.h>
#include <SharedUtil.h>
#include <gpu/Context.h>
#include <shaders/Shaders.h>
@ -21,11 +21,6 @@
#include "render-utils/ShaderConstants.h"
#include "StencilMaskPass.h"
#include "TextureCache.h"
#include "DependencyManager.h"
#include "ViewFrustum.h"
#include "GeometryCache.h"
#include "FramebufferCache.h"
#include "RandomAndNoise.h"
namespace ru {
@ -38,129 +33,149 @@ namespace gr {
using graphics::slot::buffer::Buffer;
}
#if !ANTIALIASING_USE_TAA
gpu::PipelinePointer Antialiasing::_antialiasingPipeline;
gpu::PipelinePointer Antialiasing::_intensityPipeline;
gpu::PipelinePointer Antialiasing::_blendPipeline;
gpu::PipelinePointer Antialiasing::_debugBlendPipeline;
Antialiasing::Antialiasing() {
_geometryId = DependencyManager::get<GeometryCache>()->allocateID();
}
#define TAA_JITTER_SEQUENCE_LENGTH 16
Antialiasing::~Antialiasing() {
auto geometryCache = DependencyManager::get<GeometryCache>();
if (geometryCache) {
geometryCache->releaseID(_geometryId);
}
}
const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
if (!_antialiasingPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::fxaa);
gpu::StatePointer state = std::make_shared<gpu::State>();
state->setDepthTest(false, false, gpu::LESS_EQUAL);
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_antialiasingPipeline = gpu::Pipeline::create(program, state);
}
return _antialiasingPipeline;
}
const gpu::PipelinePointer& Antialiasing::getBlendPipeline() {
if (!_blendPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::fxaa_blend);
gpu::StatePointer state = std::make_shared<gpu::State>();
state->setDepthTest(false, false, gpu::LESS_EQUAL);
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_blendPipeline = gpu::Pipeline::create(program, state);
}
return _blendPipeline;
}
void Antialiasing::run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
RenderArgs* args = renderContext->args;
gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setViewportTransform(args->_viewport);
if (!_paramsBuffer) {
_paramsBuffer = std::make_shared<gpu::Buffer>(sizeof(glm::vec4), nullptr);
}
{
int width = args->_viewport.z;
int height = args->_viewport.w;
if (_antialiasingBuffer && _antialiasingBuffer->getSize() != uvec2(width, height)) {
_antialiasingBuffer.reset();
}
if (!_antialiasingBuffer) {
// Link the antialiasing FBO to texture
_antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("antialiasing"));
auto format = gpu::Element::COLOR_SRGBA_32;
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
_antialiasingTexture = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_antialiasingBuffer->setRenderBuffer(0, _antialiasingTexture);
glm::vec2 fbExtent { args->_viewport.z, args->_viewport.w };
glm::vec2 inverseFbExtent = 1.0f / fbExtent;
_paramsBuffer->setSubData(0, glm::vec4(inverseFbExtent, 0.0, 0.0));
}
}
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat, true);
batch.setModelTransform(Transform());
// FXAA step
auto pipeline = getAntialiasingPipeline();
batch.setResourceTexture(0, sourceBuffer->getRenderBuffer(0));
batch.setFramebuffer(_antialiasingBuffer);
batch.setPipeline(pipeline);
batch.setUniformBuffer(0, _paramsBuffer);
batch.draw(gpu::TRIANGLE_STRIP, 4);
// Blend step
batch.setResourceTexture(0, _antialiasingTexture);
batch.setFramebuffer(sourceBuffer);
batch.setPipeline(getBlendPipeline());
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
}
#else
void AntialiasingConfig::setAAMode(int mode) {
_mode = std::min((int)AntialiasingConfig::MODE_COUNT, std::max(0, mode)); // Just use unsigned?
void AntialiasingSetupConfig::setIndex(int current) {
_index = (current + TAA_JITTER_SEQUENCE_LENGTH) % TAA_JITTER_SEQUENCE_LENGTH;
emit dirty();
}
void AntialiasingSetupConfig::setState(int state) {
_state = (state) % 3;
switch (_state) {
case 0: {
none();
break;
}
case 1: {
pause();
break;
}
case 2:
default: {
play();
break;
}
}
emit dirty();
}
int AntialiasingSetupConfig::cycleStopPauseRun() {
_state = (_state + 1) % 3;
switch (_state) {
case 0: {
return none();
break;
}
case 1: {
return pause();
break;
}
case 2:
default: {
return play();
break;
}
}
return _state;
}
int AntialiasingSetupConfig::prev() {
setIndex(_index - 1);
return _index;
}
int AntialiasingSetupConfig::next() {
setIndex(_index + 1);
return _index;
}
int AntialiasingSetupConfig::none() {
_state = 0;
stop = true;
freeze = false;
setIndex(-1);
return _state;
}
int AntialiasingSetupConfig::pause() {
_state = 1;
stop = false;
freeze = true;
setIndex(0);
return _state;
}
int AntialiasingSetupConfig::play() {
_state = 2;
stop = false;
freeze = false;
setIndex(0);
return _state;
}
void AntialiasingSetupConfig::setAAMode(int mode) {
this->mode = glm::clamp(mode, 0, (int)AntialiasingSetupConfig::MODE_COUNT);
emit dirty();
}
AntialiasingSetup::AntialiasingSetup() {
_sampleSequence.reserve(TAA_JITTER_SEQUENCE_LENGTH + 1);
// Fill in with jitter samples
for (int i = 0; i < TAA_JITTER_SEQUENCE_LENGTH; i++) {
_sampleSequence.emplace_back(glm::vec2(halton::evaluate<2>(i), halton::evaluate<3>(i)) - vec2(0.5f));
}
}
void AntialiasingSetup::configure(const Config& config) {
_isStopped = config.stop;
_isFrozen = config.freeze;
if (config.freeze) {
_freezedSampleIndex = config.getIndex();
}
_scale = config.scale;
_mode = config.mode;
}
void AntialiasingSetup::run(const render::RenderContextPointer& renderContext, Output& output) {
assert(renderContext->args);
if (!_isStopped && _mode == AntialiasingSetupConfig::Mode::TAA) {
RenderArgs* args = renderContext->args;
gpu::doInBatch("AntialiasingSetup::run", args->_context, [&](gpu::Batch& batch) {
auto offset = 0;
auto count = _sampleSequence.size();
if (_isFrozen) {
count = 1;
offset = _freezedSampleIndex;
}
batch.setProjectionJitterSequence(_sampleSequence.data() + offset, count);
batch.setProjectionJitterScale(_scale);
});
}
output = _mode;
}
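The jitter samples above come from the (2,3) Halton sequence recentered around zero; assuming halton::evaluate<B>(i) is the standard base-B radical inverse, a plain C++ equivalent is:

    // Radical inverse of i in the given base, in [0, 1).
    float radicalInverse(int base, int i) {
        float invBase = 1.0f / base;
        float f = invBase;
        float result = 0.0f;
        while (i > 0) {
            result += f * (i % base);
            i /= base;
            f *= invBase;
        }
        return result;
    }

    // Sample i of the TAA jitter sequence, centered on the pixel:
    // glm::vec2(radicalInverse(2, i), radicalInverse(3, i)) - glm::vec2(0.5f)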
Antialiasing::Antialiasing(bool isSharpenEnabled) :
_isSharpenEnabled{ isSharpenEnabled } {
}
Antialiasing::~Antialiasing() {
_antialiasingBuffers.reset();
_antialiasingTextures[0].reset();
_antialiasingTextures[1].reset();
_antialiasingBuffers.clear();
}
const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline(const render::RenderContextPointer& renderContext) {
const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
if (!_antialiasingPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::taa);
gpu::StatePointer state = std::make_shared<gpu::State>();
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_antialiasingPipeline = gpu::Pipeline::create(program, state);
@ -169,11 +184,24 @@ const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline(const render::
return _antialiasingPipeline;
}
const gpu::PipelinePointer& Antialiasing::getIntensityPipeline() {
if (!_intensityPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::drawWhite);
gpu::StatePointer state = std::make_shared<gpu::State>();
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_intensityPipeline = gpu::Pipeline::create(program, state);
}
return _intensityPipeline;
}
const gpu::PipelinePointer& Antialiasing::getBlendPipeline() {
if (!_blendPipeline) {
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::fxaa_blend);
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::render_utils::program::aa_blend);
gpu::StatePointer state = std::make_shared<gpu::State>();
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_blendPipeline = gpu::Pipeline::create(program, state);
}
@ -186,7 +214,6 @@ const gpu::PipelinePointer& Antialiasing::getDebugBlendPipeline() {
gpu::StatePointer state = std::make_shared<gpu::State>();
PrepareStencil::testNoAA(*state);
// Good to go add the brand new pipeline
_debugBlendPipeline = gpu::Pipeline::create(program, state);
}
@ -194,12 +221,11 @@ const gpu::PipelinePointer& Antialiasing::getDebugBlendPipeline() {
}
void Antialiasing::configure(const Config& config) {
_mode = (AntialiasingConfig::Mode) config.getAAMode();
_sharpen = config.sharpen * 0.25f;
if (!_isSharpenEnabled) {
_sharpen = 0.0f;
}
_params.edit().setSharpenedOutput(_sharpen > 0.0f);
_params.edit().blend = config.blend * config.blend;
_params.edit().covarianceGamma = config.covarianceGamma;
@ -209,7 +235,9 @@ void Antialiasing::configure(const Config& config) {
_params.edit().debugShowVelocityThreshold = config.debugShowVelocityThreshold;
_params.edit().regionInfo.x = config.debugX;
_params.edit().regionInfo.z = config.debugFXAAX;
_debugFXAAX = config.debugFXAAX;
_params.edit().setBicubicHistoryFetch(config.bicubicHistoryFetch);
_params.edit().setDebug(config.debug);
_params.edit().setShowDebugCursor(config.showCursorPixel);
@ -220,58 +248,83 @@ void Antialiasing::configure(const Config& config) {
}
void Antialiasing::run(const render::RenderContextPointer& renderContext, const Inputs& inputs) {
void Antialiasing::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& output) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
RenderArgs* args = renderContext->args;
auto& deferredFrameTransform = inputs.get0();
auto& sourceBuffer = inputs.get1();
auto& linearDepthBuffer = inputs.get2();
auto& velocityBuffer = inputs.get3();
const auto& deferredFrameBuffer = inputs.get1();
const auto& sourceBuffer = deferredFrameBuffer->getLightingFramebuffer();
const auto& linearDepthBuffer = inputs.get2();
const auto& velocityTexture = deferredFrameBuffer->getDeferredVelocityTexture();
const auto& mode = inputs.get3();
_params.edit().regionInfo.z = mode == AntialiasingSetupConfig::Mode::TAA ? _debugFXAAX : 0.0f;
int width = sourceBuffer->getWidth();
int height = sourceBuffer->getHeight();
if (_antialiasingBuffers && _antialiasingBuffers->get(0) && _antialiasingBuffers->get(0)->getSize() != uvec2(width, height)) {
_antialiasingBuffers.reset();
_antialiasingTextures[0].reset();
_antialiasingTextures[1].reset();
if (_antialiasingBuffers._swapChain && _antialiasingBuffers._swapChain->get(0) && _antialiasingBuffers._swapChain->get(0)->getSize() != uvec2(width, height)) {
_antialiasingBuffers.clear();
}
if (!_antialiasingBuffers) {
if (!_antialiasingBuffers._swapChain || !_intensityFramebuffer) {
std::vector<gpu::FramebufferPointer> antiAliasingBuffers;
// Link the antialiasing FBO to texture
auto format = sourceBuffer->getRenderBuffer(0)->getTexelFormat();
auto format = gpu::Element(gpu::VEC4, gpu::HALF, gpu::RGBA);
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP);
for (int i = 0; i < 2; i++) {
antiAliasingBuffers.emplace_back(gpu::Framebuffer::create("antialiasing"));
const auto& antiAliasingBuffer = antiAliasingBuffers.back();
_antialiasingTextures[i] = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
antiAliasingBuffer->setRenderBuffer(0, _antialiasingTextures[i]);
_antialiasingBuffers._textures[i] = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
antiAliasingBuffer->setRenderBuffer(0, _antialiasingBuffers._textures[i]);
}
_antialiasingBuffers = std::make_shared<gpu::FramebufferSwapChain>(antiAliasingBuffers);
_antialiasingBuffers._swapChain = std::make_shared<gpu::FramebufferSwapChain>(antiAliasingBuffers);
_intensityTexture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_R_8, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_intensityFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("taaIntensity"));
_intensityFramebuffer->setRenderBuffer(0, _intensityTexture);
_intensityFramebuffer->setStencilBuffer(deferredFrameBuffer->getDeferredFramebuffer()->getDepthStencilBuffer(), deferredFrameBuffer->getDeferredFramebuffer()->getDepthStencilBufferFormat());
}
output = _intensityTexture;
gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) {
PROFILE_RANGE_BATCH(batch, "TAA");
batch.enableStereo(false);
batch.setViewportTransform(args->_viewport);
// Set the intensity buffer to 1 except when the stencil is masked as NoAA, where it should be 0
// This is a bit of a hack, as it is neither possible nor portable to sample
// the stencil value directly as a texture
batch.setFramebuffer(_intensityFramebuffer);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, gpu::Vec4(0.0f));
batch.setResourceTexture(0, nullptr);
batch.setPipeline(getIntensityPipeline());
batch.draw(gpu::TRIANGLE_STRIP, 4);
// TAA step
getAntialiasingPipeline(renderContext);
batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaHistory, _antialiasingBuffers, 0);
if (!_params->isFXAAEnabled()) {
batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaHistory, _antialiasingBuffers._swapChain, 0);
batch.setResourceTexture(ru::Texture::TaaVelocity, velocityTexture);
} else {
batch.setResourceTexture(ru::Texture::TaaHistory, nullptr);
batch.setResourceTexture(ru::Texture::TaaVelocity, nullptr);
}
batch.setResourceTexture(ru::Texture::TaaSource, sourceBuffer->getRenderBuffer(0));
batch.setResourceTexture(ru::Texture::TaaVelocity, velocityBuffer->getVelocityTexture());
// This is only used during debug
batch.setResourceTexture(ru::Texture::TaaIntensity, _intensityTexture);
// This is only used during debug
batch.setResourceTexture(ru::Texture::TaaDepth, linearDepthBuffer->getLinearDepthTexture());
batch.setUniformBuffer(ru::Buffer::TaaParams, _params);
batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, deferredFrameTransform->getFrameTransformBuffer());
batch.setFramebufferSwapChain(_antialiasingBuffers, 1);
batch.setPipeline(getAntialiasingPipeline(renderContext));
batch.setFramebufferSwapChain(_antialiasingBuffers._swapChain, 1);
batch.setPipeline(getAntialiasingPipeline());
batch.draw(gpu::TRIANGLE_STRIP, 4);
// Blend step
@ -280,11 +333,11 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const
batch.setFramebuffer(sourceBuffer);
if (_params->isDebug()) {
batch.setPipeline(getDebugBlendPipeline());
batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaNext, _antialiasingBuffers, 1);
} else {
batch.setResourceFramebufferSwapChainTexture(ru::Texture::TaaNext, _antialiasingBuffers._swapChain, 1);
} else {
batch.setPipeline(getBlendPipeline());
// Must match the bindg point in the fxaa_blend.slf shader
batch.setResourceFramebufferSwapChainTexture(0, _antialiasingBuffers, 1);
// Must match the binding point in the aa_blend.slf shader
batch.setResourceFramebufferSwapChainTexture(0, _antialiasingBuffers._swapChain, 1);
// Disable sharpen if FXAA
if (!_blendParamsBuffer) {
_blendParamsBuffer = std::make_shared<gpu::Buffer>(sizeof(glm::vec4), nullptr);
@ -293,8 +346,8 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const
batch.setUniformBuffer(0, _blendParamsBuffer);
}
batch.draw(gpu::TRIANGLE_STRIP, 4);
batch.advance(_antialiasingBuffers);
batch.advance(_antialiasingBuffers._swapChain);
batch.setUniformBuffer(ru::Buffer::TaaParams, nullptr);
batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, nullptr);
@ -302,114 +355,8 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const
batch.setResourceTexture(ru::Texture::TaaHistory, nullptr);
batch.setResourceTexture(ru::Texture::TaaVelocity, nullptr);
batch.setResourceTexture(ru::Texture::TaaNext, nullptr);
// Reset jitter sequence
batch.setProjectionJitterSequence(nullptr, 0);
});
}
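At its core the TAA resolve above is an exponential history blend (the real shader adds velocity reprojection, neighborhood clamping, and the optional bicubic history fetch on top). A hedged per-pixel sketch, with blend being the squared config value set in configure():

    #include <glm/glm.hpp>

    glm::vec3 taaResolve(const glm::vec3& history, const glm::vec3& current, float blend) {
        // small blend -> long history and strong smoothing; blend == 1 -> history ignored
        return glm::mix(history, current, blend);
    }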
void JitterSampleConfig::setIndex(int current) {
_index = (current) % JitterSample::SEQUENCE_LENGTH;
emit dirty();
}
void JitterSampleConfig::setState(int state) {
_state = (state) % 3;
switch (_state) {
case 0: {
none();
break;
}
case 1: {
pause();
break;
}
case 2:
default: {
play();
break;
}
}
emit dirty();
}
int JitterSampleConfig::cycleStopPauseRun() {
setState((_state + 1) % 3);
return _state;
}
int JitterSampleConfig::prev() {
setIndex(_index - 1);
return _index;
}
int JitterSampleConfig::next() {
setIndex(_index + 1);
return _index;
}
int JitterSampleConfig::none() {
_state = 0;
stop = true;
freeze = false;
setIndex(-1);
return _state;
}
int JitterSampleConfig::pause() {
_state = 1;
stop = false;
freeze = true;
setIndex(0);
return _state;
}
int JitterSampleConfig::play() {
_state = 2;
stop = false;
freeze = false;
setIndex(0);
return _state;
}
JitterSample::SampleSequence::SampleSequence(){
// Halton sequence (2,3)
for (int i = 0; i < SEQUENCE_LENGTH; i++) {
offsets[i] = glm::vec2(halton::evaluate<2>(i), halton::evaluate<3>(i));
offsets[i] -= vec2(0.5f);
}
offsets[SEQUENCE_LENGTH] = glm::vec2(0.0f);
}
void JitterSample::configure(const Config& config) {
_freeze = config.stop || config.freeze;
if (config.freeze) {
auto pausedIndex = config.getIndex();
if (_sampleSequence.currentIndex != pausedIndex) {
_sampleSequence.currentIndex = pausedIndex;
}
} else if (config.stop) {
_sampleSequence.currentIndex = -1;
} else {
_sampleSequence.currentIndex = config.getIndex();
}
_scale = config.scale;
}
void JitterSample::run(const render::RenderContextPointer& renderContext, Output& jitter) {
auto& current = _sampleSequence.currentIndex;
if (!_freeze) {
if (current >= 0) {
current = (current + 1) % SEQUENCE_LENGTH;
} else {
current = -1;
}
}
if (current >= 0) {
jitter = _sampleSequence.offsets[current];
} else {
jitter = glm::vec2(0.0f);
}
}
#endif

View file

@ -4,6 +4,7 @@
//
// Created by Raffi Bedikian on 8/30/15
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
// Copyright 2022-2023 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
@ -18,98 +19,20 @@
#include "render/DrawTask.h"
#include "DeferredFrameTransform.h"
#include "VelocityBufferPass.h"
#include "DeferredFramebuffer.h"
#include "SurfaceGeometryPass.h"
class JitterSampleConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(float scale MEMBER scale NOTIFY dirty)
Q_PROPERTY(bool freeze MEMBER freeze NOTIFY dirty)
Q_PROPERTY(bool stop MEMBER stop NOTIFY dirty)
Q_PROPERTY(int index READ getIndex NOTIFY dirty)
Q_PROPERTY(int state READ getState WRITE setState NOTIFY dirty)
public:
JitterSampleConfig() : render::Job::Config(true) {}
float scale{ 0.5f };
bool stop{ false };
bool freeze{ false };
void setIndex(int current);
void setState(int state);
public slots:
int cycleStopPauseRun();
int prev();
int next();
int none();
int pause();
int play();
int getIndex() const { return _index; }
int getState() const { return _state; }
signals:
void dirty();
private:
int _state{ 0 };
int _index{ 0 };
};
class JitterSample {
public:
enum {
SEQUENCE_LENGTH = 64
};
using Config = JitterSampleConfig;
using Output = glm::vec2;
using JobModel = render::Job::ModelO<JitterSample, Output, Config>;
void configure(const Config& config);
void run(const render::RenderContextPointer& renderContext, Output& jitter);
private:
struct SampleSequence {
SampleSequence();
glm::vec2 offsets[SEQUENCE_LENGTH + 1];
int sequenceLength{ SEQUENCE_LENGTH };
int currentIndex{ 0 };
};
SampleSequence _sampleSequence;
float _scale{ 1.0 };
bool _freeze{ false };
};
class AntialiasingConfig : public render::Job::Config {
class AntialiasingSetupConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(float scale MEMBER scale NOTIFY dirty)
Q_PROPERTY(bool freeze MEMBER freeze NOTIFY dirty)
Q_PROPERTY(bool stop MEMBER stop NOTIFY dirty)
Q_PROPERTY(int index READ getIndex NOTIFY dirty)
Q_PROPERTY(int state READ getState WRITE setState NOTIFY dirty)
Q_PROPERTY(int mode READ getAAMode WRITE setAAMode NOTIFY dirty)
Q_PROPERTY(float blend MEMBER blend NOTIFY dirty)
Q_PROPERTY(float sharpen MEMBER sharpen NOTIFY dirty)
Q_PROPERTY(float covarianceGamma MEMBER covarianceGamma NOTIFY dirty)
Q_PROPERTY(bool constrainColor MEMBER constrainColor NOTIFY dirty)
Q_PROPERTY(bool feedbackColor MEMBER feedbackColor NOTIFY dirty)
Q_PROPERTY(bool debug MEMBER debug NOTIFY dirty)
Q_PROPERTY(float debugX MEMBER debugX NOTIFY dirty)
Q_PROPERTY(bool fxaaOnOff READ debugFXAA WRITE setDebugFXAA NOTIFY dirty)
Q_PROPERTY(float debugShowVelocityThreshold MEMBER debugShowVelocityThreshold NOTIFY dirty)
Q_PROPERTY(bool showCursorPixel MEMBER showCursorPixel NOTIFY dirty)
Q_PROPERTY(glm::vec2 debugCursorTexcoord MEMBER debugCursorTexcoord NOTIFY dirty)
Q_PROPERTY(float debugOrbZoom MEMBER debugOrbZoom NOTIFY dirty)
Q_PROPERTY(bool showClosestFragment MEMBER showClosestFragment NOTIFY dirty)
public:
AntialiasingConfig() : render::Job::Config(true) {}
AntialiasingSetupConfig() : render::Job::Config(true) {}
/*@jsdoc
*Antialiasing modes. <table>
@ -133,30 +56,103 @@ public:
};
Q_ENUM(Mode) // Stored as signed int.
float scale { 0.75f };
bool stop { false };
bool freeze { false };
int mode { TAA };
void setIndex(int current);
void setState(int state);
public slots:
int cycleStopPauseRun();
int prev();
int next();
int none();
int pause();
int play();
int getIndex() const { return _index; }
int getState() const { return _state; }
void setAAMode(int mode);
int getAAMode() const { return _mode; }
int getAAMode() const { return mode; }
signals:
void dirty();
private:
int _state { 0 };
int _index { 0 };
};
class AntialiasingSetup {
public:
using Config = AntialiasingSetupConfig;
using Output = int;
using JobModel = render::Job::ModelO<AntialiasingSetup, Output, Config>;
AntialiasingSetup();
void configure(const Config& config);
void run(const render::RenderContextPointer& renderContext, Output& output);
private:
std::vector<glm::vec2> _sampleSequence;
float _scale { 1.0f };
int _freezedSampleIndex { 0 };
bool _isStopped { false };
bool _isFrozen { false };
int _mode { AntialiasingSetupConfig::Mode::TAA };
};
class AntialiasingConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(float blend MEMBER blend NOTIFY dirty)
Q_PROPERTY(float sharpen MEMBER sharpen NOTIFY dirty)
Q_PROPERTY(float covarianceGamma MEMBER covarianceGamma NOTIFY dirty)
Q_PROPERTY(bool constrainColor MEMBER constrainColor NOTIFY dirty)
Q_PROPERTY(bool feedbackColor MEMBER feedbackColor NOTIFY dirty)
Q_PROPERTY(bool bicubicHistoryFetch MEMBER bicubicHistoryFetch NOTIFY dirty)
Q_PROPERTY(bool debug MEMBER debug NOTIFY dirty)
Q_PROPERTY(float debugX MEMBER debugX NOTIFY dirty)
Q_PROPERTY(bool fxaaOnOff READ debugFXAA WRITE setDebugFXAA NOTIFY dirty)
Q_PROPERTY(float debugShowVelocityThreshold MEMBER debugShowVelocityThreshold NOTIFY dirty)
Q_PROPERTY(bool showCursorPixel MEMBER showCursorPixel NOTIFY dirty)
Q_PROPERTY(glm::vec2 debugCursorTexcoord MEMBER debugCursorTexcoord NOTIFY dirty)
Q_PROPERTY(float debugOrbZoom MEMBER debugOrbZoom NOTIFY dirty)
Q_PROPERTY(bool showClosestFragment MEMBER showClosestFragment NOTIFY dirty)
public:
AntialiasingConfig() : render::Job::Config(true) {}
void setDebugFXAA(bool debug) { debugFXAAX = (debug ? 0.0f : 1.0f); emit dirty();}
bool debugFXAA() const { return (debugFXAAX == 0.0f ? true : false); }
int _mode{ TAA }; // '_' prefix but not private?
float blend { 0.2f };
float sharpen { 0.05f };
float blend{ 0.25f };
float sharpen{ 0.05f };
bool constrainColor { true };
float covarianceGamma { 1.15f };
bool feedbackColor { false };
bool bicubicHistoryFetch { true };
bool constrainColor{ true };
float covarianceGamma{ 0.65f };
bool feedbackColor{ false };
float debugX{ 0.0f };
float debugFXAAX{ 1.0f };
float debugShowVelocityThreshold{ 1.0f };
glm::vec2 debugCursorTexcoord{ 0.5f, 0.5f };
float debugOrbZoom{ 2.0f };
float debugX { 0.0f };
float debugFXAAX { 1.0f };
float debugShowVelocityThreshold { 1.0f };
glm::vec2 debugCursorTexcoord { 0.5f, 0.5f };
float debugOrbZoom { 2.0f };
bool debug { false };
bool showCursorPixel { false };
bool showClosestFragment{ false };
bool showClosestFragment { false };
signals:
void dirty();
@ -165,19 +161,15 @@ signals:
#define SET_BIT(bitfield, bitIndex, value) bitfield = ((bitfield) & ~(1 << (bitIndex))) | ((value) << (bitIndex))
#define GET_BIT(bitfield, bitIndex) ((bitfield) & (1 << (bitIndex)))
#define ANTIALIASING_USE_TAA 1
#if ANTIALIASING_USE_TAA
struct TAAParams {
float nope{ 0.0f };
float blend{ 0.15f };
float covarianceGamma{ 1.0f };
float debugShowVelocityThreshold{ 1.0f };
float nope { 0.0f };
float blend { 0.15f };
float covarianceGamma { 0.9f };
float debugShowVelocityThreshold { 1.0f };
glm::ivec4 flags{ 0 };
glm::vec4 pixelInfo{ 0.5f, 0.5f, 2.0f, 0.0f };
glm::vec4 regionInfo{ 0.0f, 0.0f, 1.0f, 0.0f };
glm::ivec4 flags { 0 };
glm::vec4 pixelInfo { 0.5f, 0.5f, 2.0f, 0.0f };
glm::vec4 regionInfo { 0.0f, 0.0f, 1.0f, 0.0f };
void setConstrainColor(bool enabled) { SET_BIT(flags.y, 1, enabled); }
bool isConstrainColor() const { return (bool)GET_BIT(flags.y, 1); }
@ -185,6 +177,12 @@ struct TAAParams {
void setFeedbackColor(bool enabled) { SET_BIT(flags.y, 4, enabled); }
bool isFeedbackColor() const { return (bool)GET_BIT(flags.y, 4); }
void setBicubicHistoryFetch(bool enabled) { SET_BIT(flags.y, 0, enabled); }
bool isBicubicHistoryFetch() const { return (bool)GET_BIT(flags.y, 0); }
void setSharpenedOutput(bool enabled) { SET_BIT(flags.y, 2, enabled); }
bool isSharpenedOutput() const { return (bool)GET_BIT(flags.y, 2); }
void setDebug(bool enabled) { SET_BIT(flags.x, 0, enabled); }
bool isDebug() const { return (bool) GET_BIT(flags.x, 0); }
@ -199,71 +197,52 @@ struct TAAParams {
void setShowClosestFragment(bool enabled) { SET_BIT(flags.x, 3, enabled); }
bool isFXAAEnabled() const { return regionInfo.z == 0.0f; }
};
using TAAParamsBuffer = gpu::StructBuffer<TAAParams>;
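A worked example of the SET_BIT / GET_BIT packing used by TAAParams (pure arithmetic, no assumptions):

    int flags = 0;
    SET_BIT(flags, 0, 1);                     // bicubic history fetch -> flags == 0b001
    SET_BIT(flags, 2, 1);                     // sharpened output      -> flags == 0b101
    bool sharpened = GET_BIT(flags, 2) != 0;  // true
    SET_BIT(flags, 0, 0);                     // clear bit 0           -> flags == 0b100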
class Antialiasing {
public:
using Inputs = render::VaryingSet4 < DeferredFrameTransformPointer, gpu::FramebufferPointer, LinearDepthFramebufferPointer, VelocityFramebufferPointer > ;
using Inputs = render::VaryingSet4<DeferredFrameTransformPointer, DeferredFramebufferPointer, LinearDepthFramebufferPointer, int>;
using Outputs = gpu::TexturePointer;
using Config = AntialiasingConfig;
using JobModel = render::Job::ModelI<Antialiasing, Inputs, Config>;
using JobModel = render::Job::ModelIO<Antialiasing, Inputs, Outputs, Config>;
Antialiasing(bool isSharpenEnabled = true);
~Antialiasing();
void configure(const Config& config);
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs);
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs);
const gpu::PipelinePointer& getAntialiasingPipeline(const render::RenderContextPointer& renderContext);
const gpu::PipelinePointer& getAntialiasingPipeline();
const gpu::PipelinePointer& getIntensityPipeline();
const gpu::PipelinePointer& getBlendPipeline();
const gpu::PipelinePointer& getDebugBlendPipeline();
private:
struct AntialiasingBuffer {
gpu::FramebufferSwapChainPointer _swapChain;
gpu::TexturePointer _textures[2];
gpu::FramebufferSwapChainPointer _antialiasingBuffers;
gpu::TexturePointer _antialiasingTextures[2];
void clear() {
_swapChain.reset();
_textures[0].reset();
_textures[1].reset();
}
};
AntialiasingBuffer _antialiasingBuffers;
gpu::FramebufferPointer _intensityFramebuffer;
gpu::TexturePointer _intensityTexture;
gpu::BufferPointer _blendParamsBuffer;
gpu::PipelinePointer _antialiasingPipeline;
gpu::PipelinePointer _blendPipeline;
gpu::PipelinePointer _debugBlendPipeline;
static gpu::PipelinePointer _antialiasingPipeline;
static gpu::PipelinePointer _intensityPipeline;
static gpu::PipelinePointer _blendPipeline;
static gpu::PipelinePointer _debugBlendPipeline;
TAAParamsBuffer _params;
AntialiasingConfig::Mode _mode{ AntialiasingConfig::TAA };
float _sharpen{ 0.15f };
bool _isSharpenEnabled{ true };
float _sharpen { 0.15f };
bool _isSharpenEnabled { true };
float _debugFXAAX { 0.0f };
};
#else // User setting for antialias mode will probably be broken.
class AntiAliasingConfig : public render::Job::Config { // Not to be confused with AntialiasingConfig...
Q_OBJECT
Q_PROPERTY(bool enabled MEMBER enabled)
public:
AntiAliasingConfig() : render::Job::Config(true) {}
};
class Antialiasing {
public:
using Config = AntiAliasingConfig;
using JobModel = render::Job::ModelI<Antialiasing, gpu::FramebufferPointer, Config>;
Antialiasing();
~Antialiasing();
void configure(const Config& config) {}
void run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer);
const gpu::PipelinePointer& getAntialiasingPipeline();
const gpu::PipelinePointer& getBlendPipeline();
private:
gpu::FramebufferPointer _antialiasingBuffer;
gpu::TexturePointer _antialiasingTexture;
gpu::BufferPointer _paramsBuffer;
gpu::PipelinePointer _antialiasingPipeline;
gpu::PipelinePointer _blendPipeline;
int _geometryId { 0 };
};
#endif
#endif // hifi_AntialiasingEffect_h

View file

@ -3,6 +3,7 @@
//
// Created by Sam Gateau on 5/9/2017.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -80,6 +81,7 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext,
auto args = renderContext->args;
gpu::doInBatch("DrawBackgroundStage::run", args->_context, [&](gpu::Batch& batch) {
PROFILE_RANGE_BATCH(batch, "Background");
args->_batch = &batch;
batch.enableSkybox(true);
@ -87,16 +89,11 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext,
batch.setViewportTransform(args->_viewport);
batch.setStateScissorRect(args->_viewport);
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);
bool forward = args->_renderMethod == render::Args::RenderMethod::FORWARD;
batch.setProjectionJitterEnabled(!forward);
// If we're using forward rendering, we need to calculate haze
if (args->_renderMethod == render::Args::RenderMethod::FORWARD) {
if (forward) {
const auto& hazeStage = args->_scene->getStage<HazeStage>();
if (hazeStage && hazeFrame->_hazes.size() > 0) {
const auto& hazePointer = hazeStage->getHaze(hazeFrame->_hazes.front());
@ -106,7 +103,7 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext,
}
}
skybox->render(batch, args->getViewFrustum(), args->_renderMethod == render::Args::RenderMethod::FORWARD);
skybox->render(batch, args->getViewFrustum(), forward, _transformSlot);
});
args->_batch = nullptr;
}

View file

@ -85,9 +85,12 @@ public:
using Inputs = render::VaryingSet3<LightingModelPointer, BackgroundStage::FramePointer, HazeStage::FramePointer>;
using JobModel = render::Job::ModelI<DrawBackgroundStage, Inputs>;
DrawBackgroundStage() {}
DrawBackgroundStage(uint transformSlot) : _transformSlot(transformSlot) {}
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs);
private:
uint _transformSlot;
};
#endif

View file

@ -16,12 +16,15 @@
#include <render/BlurTask.h>
#include "render-utils/ShaderConstants.h"
#include "StencilMaskPass.h"
#define BLOOM_BLUR_LEVEL_COUNT 3
BloomThreshold::BloomThreshold(unsigned int downsamplingFactor) {
assert(downsamplingFactor > 0);
_parameters.edit()._sampleCount = downsamplingFactor;
auto& params = _parameters.edit();
params._sampleCount = downsamplingFactor;
params._offset = (1.0f - downsamplingFactor) * 0.5f;
}
void BloomThreshold::configure(const Config& config) {}
@ -50,11 +53,6 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons
auto inputBuffer = inputFrameBuffer->getRenderBuffer(0);
auto bufferSize = gpu::Vec2u(inputBuffer->getDimensions());
const auto downSamplingFactor = _parameters.get()._sampleCount;
// Downsample resolution
bufferSize.x /= downSamplingFactor;
bufferSize.y /= downSamplingFactor;
if (!_outputBuffer || _outputBuffer->getSize() != bufferSize) {
auto colorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(inputBuffer->getTexelFormat(), bufferSize.x, bufferSize.y,
@ -62,6 +60,7 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons
_outputBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("BloomThreshold"));
_outputBuffer->setRenderBuffer(0, colorTexture);
_outputBuffer->setStencilBuffer(inputFrameBuffer->getDepthStencilBuffer(), inputFrameBuffer->getDepthStencilBufferFormat());
_parameters.edit()._deltaUV = { 1.0f / bufferSize.x, 1.0f / bufferSize.y };
}
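Worked example for the _offset term above (factor value assumed): with downsamplingFactor = 4, _offset = (1 - 4) * 0.5 = -1.5, so the threshold shader's startUv = (gl_FragCoord.xy - 1.5) * _deltaUV centers the 4x4 block of source texels on the output texel instead of anchoring it at the texel's corner.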

View file

@ -8,8 +8,10 @@
struct Parameters
{
BT_VEC2 _deltaUV;
float _offset;
float _threshold;
int _sampleCount;
float _padding[3];
};
// <@if 1@>

View file

@ -17,11 +17,10 @@ LAYOUT_STD140(binding=RENDER_UTILS_BUFFER_BLOOM_PARAMS) uniform parametersBuffer
Parameters parameters;
};
layout(location=0) in vec2 varTexCoord0;
layout(location=0) out vec4 outFragColor;
void main(void) {
vec2 startUv = varTexCoord0;
vec2 startUv = (vec2(gl_FragCoord.xy) + vec2(parameters._offset)) * parameters._deltaUV;
vec4 maskedColor = vec4(0,0,0,0);
for (int y=0 ; y<parameters._sampleCount ; y++) {

View file

@ -4,6 +4,8 @@
//
// Created by Andrew Meadows 2017.01.17
// Copyright 2017 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
// Copyright 2023 Overte e.V.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -77,7 +79,10 @@ void CauterizedMeshPartPayload::bindTransform(gpu::Batch& batch, const Transform
if (_cauterizedClusterBuffer) {
batch.setUniformBuffer(graphics::slot::buffer::Skinning, _cauterizedClusterBuffer);
}
batch.setModelTransform(_cauterizedTransform);
batch.setModelTransform(_cauterizedTransform, _previousRenderTransform);
if (renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
_previousRenderTransform = _cauterizedTransform;
}
} else {
ModelMeshPartPayload::bindTransform(batch, transform, renderMode);
}

View file

@ -4,6 +4,7 @@
//
// Created by Clement on 12/3/15.
// Copyright 2015 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -131,11 +132,10 @@ static const std::string DEFAULT_SHADOW_DEPTH_SHADER{
static const std::string DEFAULT_SHADOW_CASCADE_SHADER{
"vec3 cascadeColors[4] = vec3[4]( vec3(0,1,0), vec3(0,0,1), vec3(1,0,0), vec3(1) );"
"vec4 getFragmentColor() {"
" DeferredFrameTransform deferredTransform = getDeferredFrameTransform();"
" DeferredFragment frag = unpackDeferredFragment(deferredTransform, uv);"
" DeferredFragment frag = unpackDeferredFragment(uv);"
" vec4 viewPosition = vec4(frag.position.xyz, 1.0);"
" float viewDepth = -viewPosition.z;"
" vec4 worldPosition = getViewInverse() * viewPosition;"
" vec4 worldPosition = getViewInverse(frag.side) * viewPosition;"
" vec4 cascadeShadowCoords[2];"
" ivec2 cascadeIndices;"
" float cascadeMix = determineShadowCascadesOnPixel(worldPosition, viewDepth, cascadeShadowCoords, cascadeIndices);"
@ -231,7 +231,15 @@ static const std::string DEFAULT_AMBIENT_OCCLUSION_BLURRED_SHADER{
static const std::string DEFAULT_VELOCITY_SHADER{
"vec4 getFragmentColor() {"
" return vec4(vec2(texture(debugTexture0, uv).xy), 0.0, 1.0);"
" vec2 velocity = texture(debugTexture0, uv).xy * getWidthHeight(0);"
" vec4 velColor = vec4(0.1f * velocity + 0.5f, 0.0f, 1.0f);"
" return dot(velocity, velocity) > 1e-4 ? velColor : vec4(0.0f, 0.0f, 1.0f, 0.0f);"
"}"
};
static const std::string DEFAULT_ANTIALIASING_INTENSITY_SHADER{
"vec4 getFragmentColor() {"
" return vec4(texture(debugTexture0, uv).rrr, 1.0);"
" }"
};
@ -251,7 +259,7 @@ static std::string getFileContent(const std::string& fileName, const std::string
}
#include <QStandardPaths> // TODO REMOVE: Temporary until UI
DebugDeferredBuffer::DebugDeferredBuffer() {
DebugDeferredBuffer::DebugDeferredBuffer(uint transformSlot) : _transformSlot(transformSlot) {
// TODO REMOVE: Temporary until UI
static const auto DESKTOP_PATH = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation);
static const auto CUSTOM_FILE = DESKTOP_PATH.toStdString() + "/custom.slh";
@ -325,6 +333,8 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Mode mode, const std::strin
return DEFAULT_HALF_NORMAL_SHADER;
case VelocityMode:
return DEFAULT_VELOCITY_SHADER;
case AntialiasingIntensityMode:
return DEFAULT_ANTIALIASING_INTENSITY_SHADER;
case CustomMode:
return getFileContent(customFile, DEFAULT_CUSTOM_SHADER);
default:
@ -401,9 +411,9 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
auto& linearDepthTarget = inputs.get1();
auto& surfaceGeometryFramebuffer = inputs.get2();
auto& ambientOcclusionFramebuffer = inputs.get3();
auto& velocityFramebuffer = inputs.get4();
auto& frameTransform = inputs.get5();
auto& shadowFrame = inputs.get6();
auto& frameTransform = inputs.get4();
auto& shadowFrame = inputs.get5();
const auto& antialiasingIntensityTexture = inputs.get6();
gpu::doInBatch("DebugDeferredBuffer::run", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
@ -412,12 +422,7 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
const auto geometryBuffer = DependencyManager::get<GeometryCache>();
const auto textureCache = DependencyManager::get<TextureCache>();
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat, true);
batch.setSavedViewProjectionTransform(_transformSlot);
batch.setModelTransform(Transform());
using Textures = render_utils::slot::texture::Texture;
@ -435,8 +440,8 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
batch.setResourceTexture(Textures::DeferredDepth, deferredFramebuffer->getPrimaryDepthTexture());
batch.setResourceTexture(Textures::DeferredLighting, deferredFramebuffer->getLightingTexture());
}
if (velocityFramebuffer && _mode == VelocityMode) {
batch.setResourceTexture(Textures::DebugTexture0, velocityFramebuffer->getVelocityTexture());
if (_mode == VelocityMode) {
batch.setResourceTexture(Textures::DebugTexture0, deferredFramebuffer->getDeferredVelocityTexture());
}
if (!shadowFrame->_objects.empty()) {
@ -472,6 +477,10 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
batch.setResourceTexture(Textures::DebugTexture0, ambientOcclusionFramebuffer->getNormalTexture());
}
}
if (antialiasingIntensityTexture && _mode == AntialiasingIntensityMode) {
batch.setResourceTexture(Textures::DebugTexture0, antialiasingIntensityTexture);
}
const glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
const glm::vec2 bottomLeft(_size.x, _size.y);
const glm::vec2 topRight(_size.z, _size.w);

View file

@ -19,7 +19,6 @@
#include "DeferredFramebuffer.h"
#include "SurfaceGeometryPass.h"
#include "AmbientOcclusionEffect.h"
#include "VelocityBufferPass.h"
#include "LightStage.h"
@ -44,13 +43,13 @@ public:
LinearDepthFramebufferPointer,
SurfaceGeometryFramebufferPointer,
AmbientOcclusionFramebufferPointer,
VelocityFramebufferPointer,
DeferredFrameTransformPointer,
LightStage::ShadowFramePointer>;
LightStage::ShadowFramePointer,
gpu::TexturePointer>;
using Config = DebugDeferredBufferConfig;
using JobModel = render::Job::ModelI<DebugDeferredBuffer, Inputs, Config>;
DebugDeferredBuffer();
DebugDeferredBuffer(uint transformSlot);
~DebugDeferredBuffer();
void configure(const Config& config);
@ -92,6 +91,7 @@ protected:
AmbientOcclusionBlurredMode,
AmbientOcclusionNormalMode,
VelocityMode,
AntialiasingIntensityMode,
CustomMode, // Needs to stay last
NumModes,
@ -100,6 +100,7 @@ protected:
private:
Mode _mode{ Off };
glm::vec4 _size;
uint _transformSlot;
#include "debug_deferred_buffer_shared.slh"

View file

@ -45,6 +45,7 @@ struct DeferredFragment {
vec3 fresnel;
float roughness;
int mode;
int side;
float scattering;
float depthVal;
};
@ -58,6 +59,9 @@ vec3 getFresnelF0(float metallic, vec3 metalF0) {
}
<@endif@>
<@include DeferredTransform.slh@>
<$declareDeferredFrameTransform()$>
DeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {
vec4 normalVal;
vec4 diffuseVal;
@ -82,6 +86,8 @@ DeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {
frag.scattering = float(frag.mode == FRAG_MODE_SCATTERING) * specularVal.x;
frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);
frag.side = getStereoSideFromUV(texcoord.x);
return frag;
}
@ -109,18 +115,14 @@ DeferredFragment unpackDeferredFragmentNoPositionNoAmbient(vec2 texcoord) {
frag.fresnel = getFresnelF0(frag.metallic, diffuseVal.xyz);
frag.side = getStereoSideFromUV(texcoord.x);
return frag;
}
<@include DeferredTransform.slh@>
<$declareDeferredFrameTransform()$>
vec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {
vec4 unpackDeferredPosition(int side, float depthValue, vec2 texcoord) {
float check = float(isStereo());
float check2 = check * float(texcoord.x > 0.5);
texcoord.x -= check2 * 0.5;
int side = int(check2);
texcoord.x -= check * 0.5 * float(side);
texcoord.x *= 1.0 + check;
return vec4(evalEyePositionFromZdb(side, depthValue, texcoord), 1.0);
@ -129,7 +131,7 @@ vec4 unpackDeferredPosition(float depthValue, vec2 texcoord) {
// This method to unpack position is the fastest
vec4 unpackDeferredPositionFromZdb(vec2 texcoord) {
float Zdb = texture(depthMap, texcoord).x;
return unpackDeferredPosition(Zdb, texcoord);
return unpackDeferredPosition(getStereoSideFromUV(texcoord.x), Zdb, texcoord);
}
vec4 unpackDeferredPositionFromZeye(vec2 texcoord) {
@ -144,13 +146,13 @@ vec4 unpackDeferredPositionFromZeye(vec2 texcoord) {
return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);
}
DeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {
DeferredFragment unpackDeferredFragment(vec2 texcoord) {
float depthValue = texture(depthMap, texcoord).r;
DeferredFragment frag = unpackDeferredFragmentNoPosition(texcoord);
frag.depthVal = depthValue;
frag.position = unpackDeferredPosition(frag.depthVal, texcoord);
frag.position = unpackDeferredPosition(frag.side, frag.depthVal, texcoord);
return frag;
}
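The new frag.side field makes the stereo eye index explicit; getStereoSideFromUV() presumably recovers it from the side-by-side layout along these lines (hedged sketch; the GLSL version reads the stereo state from the frame transform rather than taking it as a parameter):

    // Left eye renders into u in [0, 0.5), right eye into [0.5, 1.0].
    int getStereoSideFromUV(float u, bool stereo) {
        return (stereo && u > 0.5f) ? 1 : 0;
    }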

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 1/12/15.
// Copyright 2013 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -12,12 +13,14 @@
<@def DEFERRED_BUFFER_WRITE_SLH@>
<@include DeferredBuffer.slh@>
<@include DeferredBufferWrite_shared.slh@>
layout(location=0) out vec4 _fragColor0; // albedo / metallic
layout(location=1) out vec4 _fragColor1; // Normal
layout(location=2) out vec4 _fragColor2; // scattering / emissive / occlusion
layout(location=3) out vec4 _fragColor3; // emissive
// Must match layout in skybox.slh
layout(location = DEFERRED_COLOR_SLOT) out vec4 _albedoMetallic; // albedo / metallic
layout(location = DEFERRED_NORMAL_SLOT) out vec4 _normalRoughness; // normal / roughness
layout(location = DEFERRED_SPECULAR_SLOT) out vec4 _scatteringEmissiveOcclusion; // scattering / emissive / occlusion
layout(location = DEFERRED_VELOCITY_SLOT) out vec4 _velocity; // velocity
layout(location = DEFERRED_LIGHTING_SLOT) out vec4 _lighting; // emissive
// the alpha threshold
const float alphaThreshold = 0.5;
@ -25,51 +28,67 @@ float evalOpaqueFinalAlpha(float alpha, float mapAlpha) {
return mix(alpha, 1.0 - alpha, step(mapAlpha, alphaThreshold));
}
<@include VelocityWrite.slh@>
<@include DefaultMaterials.slh@>
<@include LightingModel.slh@>
void packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {
void packDeferredFragment(vec4 prevPositionCS, vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {
if (alpha < 1.0) {
discard;
}
float check = float(scattering > 0.0);
_fragColor0 = vec4(albedo, mix(packShadedMetallic(metallic), packScatteringMetallic(metallic), check));
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor2 = vec4(mix(emissive, vec3(scattering), check), occlusion);
_fragColor3 = vec4(isEmissiveEnabled() * emissive, 1.0);
_albedoMetallic = vec4(albedo, mix(packShadedMetallic(metallic), packScatteringMetallic(metallic), check));
_normalRoughness = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_scatteringEmissiveOcclusion = vec4(mix(emissive, vec3(scattering), check), occlusion);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(isEmissiveEnabled() * emissive, 1.0);
}
void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 lightmap) {
void packDeferredFragmentLightmap(vec4 prevPositionCS, vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 lightmap) {
if (alpha < 1.0) {
discard;
}
_fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor2 = vec4(isLightmapEnabled() * lightmap, 1.0);
_fragColor3 = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);
_albedoMetallic = vec4(albedo, packLightmappedMetallic(metallic));
_normalRoughness = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_scatteringEmissiveOcclusion = vec4(isLightmapEnabled() * lightmap, 1.0);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(isLightmapEnabled() * lightmap * albedo, 1.0);
}
void packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {
void packDeferredFragmentUnlit(vec4 prevPositionCS, vec3 normal, float alpha, vec3 color) {
// to reduce texel flickering for floating point error we discard when alpha is "almost one"
if (alpha < 0.999999) {
discard;
}
_fragColor0 = vec4(color, packUnlit());
_fragColor1 = vec4(packNormal(normal), 1.0);
_fragColor2 = vec4(vec3(0.0), 1.0);
_fragColor3 = vec4(color, 1.0);
_albedoMetallic = vec4(color, packUnlit());
_normalRoughness = vec4(packNormal(normal), 1.0);
_scatteringEmissiveOcclusion = vec4(vec3(0.0), 1.0);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(color, 1.0);
}
void packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, float roughness) {
void packDeferredFragmentTranslucent(vec4 prevPositionCS, vec3 normal, float alpha, vec3 albedo, float roughness) {
if (alpha <= 0.0) {
discard;
}
_fragColor0 = vec4(albedo.rgb, alpha);
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor2 = vec4(vec3(0.0), 1.0);
_fragColor3 = vec4(0.0);
_albedoMetallic = vec4(albedo.rgb, alpha);
_normalRoughness = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_scatteringEmissiveOcclusion = vec4(vec3(0.0), 1.0);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(0.0);
}
void packDeferredFragmentTranslucentUnlit(vec4 prevPositionCS, vec3 normal, float alpha, vec3 color) {
if (alpha <= 0.0) {
discard;
}
_albedoMetallic = vec4(color, alpha);
_normalRoughness = vec4(packNormal(normal), 1.0);
_scatteringEmissiveOcclusion = vec4(vec3(0.0), 1.0);
_velocity = vec4(packVelocity(prevPositionCS), 0.0, 0.0);
_lighting = vec4(color, 1.0);
}
<@endif@>

View file

@ -0,0 +1,12 @@
// glsl / C++ compatible source as interface for DeferredBuffer layout
#define DEFERRED_COLOR_SLOT 0
#define DEFERRED_NORMAL_SLOT 1
#define DEFERRED_SPECULAR_SLOT 2
#define DEFERRED_VELOCITY_SLOT 3
#define DEFERRED_LIGHTING_SLOT 4
// <@if 1@>
// Trigger Scribe include
// <@endif@> <!def that !>
//

View file

@ -18,73 +18,51 @@ DeferredFrameTransform::DeferredFrameTransform() {
_frameTransformBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(FrameTransform), (const gpu::Byte*) &frameTransform));
}
void DeferredFrameTransform::update(RenderArgs* args, glm::vec2 jitter) {
void DeferredFrameTransform::update(RenderArgs* args) {
// Update the depth info with near and far (same for stereo)
auto nearZ = args->getViewFrustum().getNearClip();
auto farZ = args->getViewFrustum().getFarClip();
auto& frameTransformBuffer = _frameTransformBuffer.edit<FrameTransform>();
frameTransformBuffer.depthInfo = glm::vec4(nearZ*farZ, farZ - nearZ, -farZ, 0.0f);
frameTransformBuffer.infos.depthInfo = glm::vec4(nearZ * farZ, farZ - nearZ, -farZ, 0.0f);
frameTransformBuffer.infos.pixelInfo = args->_viewport;
frameTransformBuffer.pixelInfo = args->_viewport;
//_parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;
Transform cameraTransform;
args->getViewFrustum().evalViewTransform(cameraTransform);
cameraTransform.getMatrix(frameTransformBuffer.invView);
cameraTransform.getInverseMatrix(frameTransformBuffer.view);
args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.projectionMono);
// There may be some sort of mismatch here if the viewport size isn't the same as the frame buffer size as
// jitter is normalized by frame buffer size in TransformCamera. But we should be safe.
jitter.x /= args->_viewport.z;
jitter.y /= args->_viewport.w;
args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.infos.projectionMono);
// Running in stereo ?
bool isStereo = args->isStereo();
if (!isStereo) {
frameTransformBuffer.projectionUnjittered[0] = frameTransformBuffer.projectionMono;
frameTransformBuffer.invProjectionUnjittered[0] = glm::inverse(frameTransformBuffer.projectionUnjittered[0]);
frameTransformBuffer.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
frameTransformBuffer.projection[0] = frameTransformBuffer.projectionUnjittered[0];
frameTransformBuffer.projection[0][2][0] += jitter.x;
frameTransformBuffer.projection[0][2][1] += jitter.y;
frameTransformBuffer.invProjection[0] = glm::inverse(frameTransformBuffer.projection[0]);
frameTransformBuffer.infos.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
frameTransformBuffer.infos.invPixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
} else {
mat4 projMats[2];
mat4 eyeViews[2];
args->_context->getStereoProjections(projMats);
args->_context->getStereoViews(eyeViews);
jitter.x *= 2.0f;
for (int i = 0; i < 2; i++) {
// Compose the mono Eye space to Stereo clip space Projection Matrix
auto sideViewMat = projMats[i] * eyeViews[i];
frameTransformBuffer.projectionUnjittered[i] = sideViewMat;
frameTransformBuffer.invProjectionUnjittered[i] = glm::inverse(sideViewMat);
frameTransformBuffer.projection[i] = frameTransformBuffer.projectionUnjittered[i];
frameTransformBuffer.projection[i][2][0] += jitter.x;
frameTransformBuffer.projection[i][2][1] += jitter.y;
frameTransformBuffer.invProjection[i] = glm::inverse(frameTransformBuffer.projection[i]);
}
frameTransformBuffer.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / (float)(args->_viewport.z >> 1), 1.0f / args->_viewport.w, 0.0f, 0.0f);
frameTransformBuffer.infos.pixelInfo.z *= 0.5f;
frameTransformBuffer.infos.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
frameTransformBuffer.infos.invPixelInfo = glm::vec4(2.0f / (float)(args->_viewport.z), 1.0f / args->_viewport.w, 0.0f, 0.0f);
}
}
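Worked example for the stereo branch above (window size assumed): with a 2048-pixel-wide viewport, each eye renders at 1024 wide, so infos.pixelInfo.z halves to 1024 and infos.invPixelInfo.x = 2.0 / 2048 = 1.0 / 1024, the reciprocal of the per-eye width.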
void GenerateDeferredFrameTransform::run(const render::RenderContextPointer& renderContext, const Input& jitter, Output& frameTransform) {
void GenerateDeferredFrameTransform::run(const render::RenderContextPointer& renderContext, Output& frameTransform) {
if (!frameTransform) {
frameTransform = std::make_shared<DeferredFrameTransform>();
}
frameTransform->update(renderContext->args, jitter);
RenderArgs* args = renderContext->args;
frameTransform->update(args);
gpu::doInBatch("GenerateDeferredFrameTransform::run", args->_context, [&](gpu::Batch& batch) {
args->_batch = &batch;
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);
// This is the main view / projection transform that will be reused later on
batch.saveViewProjectionTransform(_transformSlot);
// Copy it to the deferred transform for the lighting pass
batch.copySavedViewProjectionTransformToBuffer(_transformSlot, frameTransform->getFrameTransformBuffer()._buffer,
sizeof(DeferredFrameTransform::DeferredFrameInfo));
});
}
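The pattern established here, and repeated by the jobs below, is that the main view/projection is evaluated once, saved in a numbered batch transform slot, and copied into the frame-transform uniform buffer; downstream jobs then re-bind the slot instead of re-evaluating the frustum (each view, e.g. main vs. background, gets its own slot via a transform offset). A hedged sketch of a consumer job, with the job name illustrative:

gpu::doInBatch("ExampleConsumerJob", args->_context, [&](gpu::Batch& batch) {
    batch.setViewportTransform(args->_viewport);
    // Re-bind the view/projection captured by saveViewProjectionTransform() above
    batch.setSavedViewProjectionTransform(_transformSlot);
    // Jitter is now toggled per batch rather than passed around as a per-job vec2
    batch.setProjectionJitterEnabled(true);
});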

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau 6/3/2016.
// Copyright 2016 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -20,12 +21,13 @@
// DeferredFrameTransform is a helper class gathering in one place the camera transform
// and frame resolution needed by all the deferred rendering passes that use the deferred buffers
class DeferredFrameTransform {
friend class GenerateDeferredFrameTransform;
public:
using UniformBufferView = gpu::BufferView;
DeferredFrameTransform();
void update(RenderArgs* args, glm::vec2 jitter);
void update(RenderArgs* args);
UniformBufferView getFrameTransformBuffer() const { return _frameTransformBuffer; }
@ -34,54 +36,29 @@ protected:
// Class describing the uniform buffer with the transform info common to the AO shaders
// It is updated every frame
class FrameTransform {
#include "DeferredTransform_shared.slh"
class FrameTransform : public _DeferredFrameTransform {
public:
// Pixel info is { viewport width, height }
glm::vec4 pixelInfo;
glm::vec4 invpixelInfo;
// Depth info is { n.f, f - n, -f}
glm::vec4 depthInfo;
// Stereo info is { isStereoFrame, halfWidth }
glm::vec4 stereoInfo{ 0.0 };
// Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
glm::mat4 projection[2];
// Inverse proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
glm::mat4 invProjection[2];
// The mono projection
glm::mat4 projectionMono;
// Inv View matrix from eye space (mono) to world space
glm::mat4 invView;
// View matrix from world space to eye space (mono)
glm::mat4 view;
// Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space without jittering
glm::mat4 projectionUnjittered[2];
// Inverse proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space without jittering
glm::mat4 invProjectionUnjittered[2];
FrameTransform() {}
FrameTransform() { infos.stereoInfo = glm::vec4(0.0f); }
};
UniformBufferView _frameTransformBuffer;
UniformBufferView _frameTransformBuffer;
};
using DeferredFrameTransformPointer = std::shared_ptr<DeferredFrameTransform>;
class GenerateDeferredFrameTransform {
public:
using Input = glm::vec2;
using Output = DeferredFrameTransformPointer;
using JobModel = render::Job::ModelIO<GenerateDeferredFrameTransform, Input, Output>;
using JobModel = render::Job::ModelO<GenerateDeferredFrameTransform, Output>;
GenerateDeferredFrameTransform() {}
GenerateDeferredFrameTransform(unsigned int transformSlot) : _transformSlot{ transformSlot } {}
void run(const render::RenderContextPointer& renderContext, const Input& jitter, Output& frameTransform);
void run(const render::RenderContextPointer& renderContext, Output& frameTransform);
private:
unsigned int _transformSlot;
};
#endif // hifi_DeferredFrameTransform_h
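For context, this commit wires the job up in RenderDeferredTask::build (shown later in this diff) with the slot holding the saved main-view transform:

const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform", mainViewTransformSlot);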

View file

@ -10,6 +10,11 @@
//
#include "DeferredFramebuffer.h"
#include "DeferredBufferWrite_shared.slh"
#include "gpu/Batch.h"
#include "gpu/Context.h"
DeferredFramebuffer::DeferredFramebuffer() {
}
@ -36,8 +41,10 @@ void DeferredFramebuffer::updatePrimaryDepth(const gpu::TexturePointer& depthBuf
_deferredColorTexture.reset();
_deferredNormalTexture.reset();
_deferredSpecularTexture.reset();
_deferredVelocityTexture.reset();
_lightingTexture.reset();
_lightingFramebuffer.reset();
_lightingWithVelocityFramebuffer.reset();
}
}
@ -46,8 +53,9 @@ void DeferredFramebuffer::allocate() {
_deferredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("deferred"));
_deferredFramebufferDepthColor = gpu::FramebufferPointer(gpu::Framebuffer::create("deferredDepthColor"));
auto colorFormat = gpu::Element::COLOR_SRGBA_32;
auto linearFormat = gpu::Element::COLOR_RGBA_32;
const auto colorFormat = gpu::Element::COLOR_SRGBA_32;
const auto linearFormat = gpu::Element::COLOR_RGBA_32;
const auto halfFormat = gpu::Element(gpu::VEC2, gpu::HALF, gpu::XY);
auto width = _frameSize.x;
auto height = _frameSize.y;
@ -56,10 +64,12 @@ void DeferredFramebuffer::allocate() {
_deferredColorTexture = gpu::Texture::createRenderBuffer(colorFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_deferredNormalTexture = gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_deferredSpecularTexture = gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_deferredVelocityTexture = gpu::Texture::createRenderBuffer(halfFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
_deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture);
_deferredFramebuffer->setRenderBuffer(1, _deferredNormalTexture);
_deferredFramebuffer->setRenderBuffer(2, _deferredSpecularTexture);
_deferredFramebuffer->setRenderBuffer(DEFERRED_COLOR_SLOT, _deferredColorTexture);
_deferredFramebuffer->setRenderBuffer(DEFERRED_NORMAL_SLOT, _deferredNormalTexture);
_deferredFramebuffer->setRenderBuffer(DEFERRED_SPECULAR_SLOT, _deferredSpecularTexture);
_deferredFramebuffer->setRenderBuffer(DEFERRED_VELOCITY_SLOT, _deferredVelocityTexture);
_deferredFramebufferDepthColor->setRenderBuffer(0, _deferredColorTexture);
@ -80,8 +90,12 @@ void DeferredFramebuffer::allocate() {
_lightingFramebuffer->setRenderBuffer(0, _lightingTexture);
_lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
_deferredFramebuffer->setRenderBuffer(3, _lightingTexture);
_lightingWithVelocityFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("lighting_velocity"));
_lightingWithVelocityFramebuffer->setRenderBuffer(0, _lightingTexture);
_lightingWithVelocityFramebuffer->setRenderBuffer(1, _deferredVelocityTexture);
_lightingWithVelocityFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
_deferredFramebuffer->setRenderBuffer(DEFERRED_LIGHTING_SLOT, _lightingTexture);
}
@ -127,6 +141,13 @@ gpu::TexturePointer DeferredFramebuffer::getDeferredSpecularTexture() {
return _deferredSpecularTexture;
}
gpu::TexturePointer DeferredFramebuffer::getDeferredVelocityTexture() {
if (!_deferredVelocityTexture) {
allocate();
}
return _deferredVelocityTexture;
}
gpu::FramebufferPointer DeferredFramebuffer::getLightingFramebuffer() {
if (!_lightingFramebuffer) {
allocate();
@ -134,6 +155,13 @@ gpu::FramebufferPointer DeferredFramebuffer::getLightingFramebuffer() {
return _lightingFramebuffer;
}
gpu::FramebufferPointer DeferredFramebuffer::getLightingWithVelocityFramebuffer() {
if (!_lightingWithVelocityFramebuffer) {
allocate();
}
return _lightingWithVelocityFramebuffer;
}
gpu::TexturePointer DeferredFramebuffer::getLightingTexture() {
if (!_lightingTexture) {
allocate();

View file

@ -15,10 +15,10 @@
#include "gpu/Resource.h"
#include "gpu/Framebuffer.h"
// DeferredFramebuffer is a helper class gathering in one place the GBuffer (Framebuffer) and lighting framebuffer
class DeferredFramebuffer {
public:
DeferredFramebuffer();
gpu::FramebufferPointer getDeferredFramebuffer();
@ -27,8 +27,10 @@ public:
gpu::TexturePointer getDeferredColorTexture();
gpu::TexturePointer getDeferredNormalTexture();
gpu::TexturePointer getDeferredSpecularTexture();
gpu::TexturePointer getDeferredVelocityTexture();
gpu::FramebufferPointer getLightingFramebuffer();
gpu::FramebufferPointer getLightingWithVelocityFramebuffer();
gpu::TexturePointer getLightingTexture();
// Update the depth buffer which will drive the allocation of all the other resources according to its size.
@ -47,13 +49,15 @@ protected:
gpu::TexturePointer _deferredColorTexture;
gpu::TexturePointer _deferredNormalTexture;
gpu::TexturePointer _deferredSpecularTexture;
gpu::TexturePointer _deferredVelocityTexture;
gpu::TexturePointer _lightingTexture;
gpu::FramebufferPointer _lightingFramebuffer;
gpu::FramebufferPointer _lightingWithVelocityFramebuffer;
glm::ivec2 _frameSize;
};
using DeferredFramebufferPointer = std::shared_ptr<DeferredFramebuffer>;
#endif // hifi_DeferredFramebuffer_h
#endif // hifi_DeferredFramebuffer_h

View file

@ -286,6 +286,7 @@ void PrepareDeferred::run(const RenderContextPointer& renderContext, const Input
outputs.edit0() = _deferredFramebuffer;
outputs.edit1() = _deferredFramebuffer->getLightingFramebuffer();
outputs.edit2() = _deferredFramebuffer->getLightingWithVelocityFramebuffer();
gpu::doInBatch("PrepareDeferred::run", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
@ -298,8 +299,9 @@ void PrepareDeferred::run(const RenderContextPointer& renderContext, const Input
// Clear Color, Depth and Stencil for deferred buffer
batch.clearFramebuffer(
gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_COLOR1 | gpu::Framebuffer::BUFFER_COLOR2 | gpu::Framebuffer::BUFFER_COLOR3 |
gpu::Framebuffer::BUFFER_DEPTH |
gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_COLOR1 |
gpu::Framebuffer::BUFFER_COLOR2 | gpu::Framebuffer::BUFFER_COLOR3 |
gpu::Framebuffer::BUFFER_COLOR4 | gpu::Framebuffer::BUFFER_DEPTH |
gpu::Framebuffer::BUFFER_STENCIL,
vec4(vec3(0), 0), 1.0, 0, true);

View file

@ -78,8 +78,7 @@ class PrepareDeferred {
public:
// Inputs: primaryFramebuffer and lightingModel
using Inputs = render::VaryingSet2 <gpu::FramebufferPointer, LightingModelPointer>;
// Output: DeferredFramebuffer, LightingFramebuffer
using Outputs = render::VaryingSet2<DeferredFramebufferPointer, gpu::FramebufferPointer>;
using Outputs = render::VaryingSet3<DeferredFramebufferPointer, gpu::FramebufferPointer, gpu::FramebufferPointer>;
using JobModel = render::Job::ModelIO<PrepareDeferred, Inputs, Outputs>;

View file

@ -4,6 +4,7 @@
//
// Created by Sam Gateau on 6/2/16.
// Copyright 2016 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -16,109 +17,103 @@
<@func declareDeferredFrameTransform()@>
struct CameraCorrection {
mat4 _correction;
mat4 _correctionInverse;
mat4 _prevView;
mat4 _prevViewInverse;
};
LAYOUT(binding=GPU_BUFFER_CAMERA_CORRECTION) uniform cameraCorrectionBuffer {
CameraCorrection cameraCorrection;
};
<@include DeferredTransform_shared.slh@>
struct DeferredFrameTransform {
vec4 _pixelInfo;
vec4 _invPixelInfo;
vec4 _depthInfo;
vec4 _stereoInfo;
mat4 _projection[2];
mat4 _invProjection[2];
mat4 _projectionMono;
mat4 _viewInverse;
mat4 _view;
mat4 _projectionUnJittered[2];
mat4 _invProjectionUnJittered[2];
};
#define DeferredFrameTransform _DeferredFrameTransform
#define TransformCamera _TransformCamera
LAYOUT(binding=RENDER_UTILS_BUFFER_DEFERRED_FRAME_TRANSFORM) uniform deferredFrameTransformBuffer {
LAYOUT_STD140(binding=RENDER_UTILS_BUFFER_DEFERRED_FRAME_TRANSFORM) uniform deferredFrameTransformBuffer {
DeferredFrameTransform frameTransform;
};
vec2 getWidthHeight(int resolutionLevel) {
return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);
return vec2(ivec2(frameTransform.infos.pixelInfo.zw) >> resolutionLevel);
}
vec2 getInvWidthHeight() {
return frameTransform._invPixelInfo.xy;
return frameTransform.infos.invPixelInfo.xy;
}
mat4 getProjection(int side) {
return frameTransform.cameras[side]._projection;
}
mat4 getProjectionInverse(int side) {
return frameTransform.cameras[side]._projectionInverse;
}
float getProjScaleEye() {
return frameTransform._projection[0][1][1];
return getProjection(0)[1][1];
}
float getProjScale(int resolutionLevel) {
return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;
}
mat4 getProjection(int side) {
return frameTransform._projection[side];
return getWidthHeight(resolutionLevel).y * getProjScaleEye() * 0.5;
}
mat4 getProjectionMono() {
return frameTransform._projectionMono;
}
mat4 getUnjitteredProjection(int side) {
return frameTransform._projectionUnJittered[side];
}
mat4 getUnjitteredInvProjection(int side) {
return frameTransform._invProjectionUnJittered[side];
return frameTransform.infos.projectionMono;
}
// positive near distance of the projection
float getProjectionNear() {
float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];
float planeD = frameTransform._projection[0][3][2];
mat4 projection = getProjection(0);
float planeC = projection[2][3] + projection[2][2];
float planeD = projection[3][2];
return planeD / planeC;
}
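Why planeD / planeC recovers the near distance (a worked check assuming a standard OpenGL-style projection, where P[2][2] = -(f+n)/(f-n), P[2][3] = -1, and P[3][2] = -2fn/(f-n)): planeC = -1 - (f+n)/(f-n) = -2f/(f-n) and planeD = -2fn/(f-n), so planeD / planeC = n. In standalone C++:

#include <glm/gtc/matrix_transform.hpp>
#include <cassert>
#include <cmath>

int main() {
    const float n = 0.1f, f = 1000.0f;
    const glm::mat4 P = glm::perspective(glm::radians(60.0f), 16.0f / 9.0f, n, f);
    const float planeC = P[2][3] + P[2][2];  // column-major: same indexing as the GLSL above
    const float planeD = P[3][2];
    assert(std::abs(planeD / planeC - n) < 1e-4f);
    return 0;
}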
// positive far distance of the projection
float getPosLinearDepthFar() {
return -frameTransform._depthInfo.z;
return -frameTransform.infos.depthInfo.z;
}
mat4 getViewInverse() {
return frameTransform._viewInverse * cameraCorrection._correctionInverse;
mat4 getViewInverse(int side) {
return frameTransform.cameras[side]._viewInverse;
}
mat4 getView() {
return cameraCorrection._correction * frameTransform._view;
mat4 getView(int side) {
return frameTransform.cameras[side]._view;
}
mat4 getPreviousView() {
return cameraCorrection._prevView;
mat4 getPreviousView(int side) {
return frameTransform.cameras[side]._previousView;
}
mat4 getPreviousViewInverse() {
return cameraCorrection._prevViewInverse;
}
DeferredFrameTransform getDeferredFrameTransform() {
DeferredFrameTransform result = frameTransform;
result._view = getView();
result._viewInverse = getViewInverse();
return result;
mat4 getPreviousViewInverse(int side) {
return frameTransform.cameras[side]._previousViewInverse;
}
bool isStereo() {
return frameTransform._stereoInfo.x > 0.0f;
return frameTransform.infos.stereoInfo.x > 0.0f;
}
float getStereoSideWidth(int resolutionLevel) {
return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);
return float(int(frameTransform.infos.stereoInfo.y) >> resolutionLevel);
}
float getStereoSideHeight(int resolutionLevel) {
return float(int(frameTransform._pixelInfo.w) >> resolutionLevel);
return float(int(frameTransform.infos.pixelInfo.w) >> resolutionLevel);
}
vec2 getSideImageSize(int resolutionLevel) {
return vec2(float(int(frameTransform.infos.stereoInfo.y) >> resolutionLevel), float(int(frameTransform.infos.pixelInfo.w) >> resolutionLevel));
}
int getStereoSideFromPixel(int xPos, int resolutionLevel) {
int sideWidth = int(getStereoSideWidth(resolutionLevel));
return int(xPos >= sideWidth && isStereo());
}
int getStereoSideFromPixel(int xPos) {
return getStereoSideFromPixel(xPos, 0);
}
int getStereoSideFromFragCoord() {
return getStereoSideFromPixel(int(gl_FragCoord.x), 0);
}
int getStereoSideFromUV(float uPos) {
return int(uPos >= 0.5 && isStereo());
}
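Worked example of the side selection above (an illustrative C++ mirror, not engine code): in side-by-side stereo, stereoInfo.y holds the half width, so a 2560-wide target gives a sideWidth of 1280 and any pixel at x >= 1280 belongs to the right eye.

int stereoSideFromPixel(int xPos, int sideWidth, bool stereo) {
    return (xPos >= sideWidth && stereo) ? 1 : 0;  // same test as getStereoSideFromPixel()
}
// stereoSideFromPixel(1900, 1280, true)  -> 1 (right eye)
// stereoSideFromPixel( 600, 1280, true)  -> 0 (left eye)
// stereoSideFromPixel(1900, 1280, false) -> 0 (mono: always side 0)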
vec2 getStereoSideSize(int resolutionLevel) {
@ -134,17 +129,16 @@ ivec4 getStereoSideInfo(int xPos, int resolutionLevel) {
return getStereoSideInfoFromWidth(xPos, sideWidth);
}
int getStereoSide(ivec4 sideInfo) {
return sideInfo.x;
}
float evalZeyeFromZdb(float depth) {
return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);
return frameTransform.infos.depthInfo.x / (depth * frameTransform.infos.depthInfo.y + frameTransform.infos.depthInfo.z);
}
float evalZdbFromZeye(float Zeye) {
return (frameTransform._depthInfo.x - Zeye * frameTransform._depthInfo.z) / (Zeye * frameTransform._depthInfo.y);
return (frameTransform.infos.depthInfo.x - Zeye * frameTransform.infos.depthInfo.z) / (Zeye * frameTransform.infos.depthInfo.y);
}
vec3 evalEyeNormal(vec3 C) {
@ -155,15 +149,7 @@ vec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {
// compute the view space position using the depth
vec3 clipPos;
clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;
vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);
return eyePos.xyz / eyePos.w;
}
vec3 evalUnjitteredEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {
// compute the view space position using the depth
vec3 clipPos;
clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;
vec4 eyePos = frameTransform._invProjectionUnJittered[side] * vec4(clipPos.xyz, 1.0);
vec4 eyePos = getProjectionInverse(side) * vec4(clipPos.xyz, 1.0);
return eyePos.xyz / eyePos.w;
}

View file

@ -0,0 +1,33 @@
// glsl / C++ compatible source as interface for DeferredFrameTransform layout
#ifdef __cplusplus
# define DFT_VEC4 glm::vec4
# define DFT_MAT4 glm::mat4
#include "gpu/TransformCamera_shared.slh"
#else
# define DFT_VEC4 vec4
# define DFT_MAT4 mat4
<@include gpu/TransformCamera_shared.slh@>
#endif
struct DeferredFrameInfo {
// Pixel info is { viewport width, height }
DFT_VEC4 pixelInfo;
DFT_VEC4 invPixelInfo;
// Depth info is { n.f, f - n, -f}
DFT_VEC4 depthInfo;
// Stereo info is { isStereoFrame, halfWidth }
DFT_VEC4 stereoInfo;
// The mono projection
DFT_MAT4 projectionMono;
};
struct _DeferredFrameTransform {
DeferredFrameInfo infos;
// The camera transforms for the two eyes (or only the first one if mono)
_TransformCamera cameras[2];
};
// <@if 1@>
// Trigger Scribe include
// <@endif@> <!def that !>
//
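The value of this shared header is that one struct definition compiles in both languages, so the C++ FrameTransform can be uploaded verbatim into the std140 uniform block declared with LAYOUT_STD140 above. Because every member is a vec4 or mat4 (both 16-byte aligned under std140), the C++ and GPU layouts coincide; a compile-time guard like the following (illustrative, not in this commit) would catch accidental drift:

#include <glm/glm.hpp>

struct DeferredFrameInfo {
    glm::vec4 pixelInfo, invPixelInfo, depthInfo, stereoInfo;
    glm::mat4 projectionMono;
};
static_assert(sizeof(DeferredFrameInfo) == 4 * sizeof(glm::vec4) + sizeof(glm::mat4),
              "no padding expected: struct must match the std140 block byte for byte");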

View file

@ -799,7 +799,7 @@ render::ShapePipelinePointer GeometryCache::getFadingShapePipeline(bool textured
graphics::MaterialKey::CullFaceMode cullFaceMode) {
auto fadeEffect = DependencyManager::get<FadeEffect>();
auto fadeBatchSetter = fadeEffect->getBatchSetter();
auto fadeItemSetter = fadeEffect->getItemUniformSetter();
auto fadeItemSetter = fadeEffect->getItemStoredSetter();
return std::make_shared<render::ShapePipeline>(getSimplePipeline(textured, transparent, unlit, depthBias, true, true, forward, cullFaceMode), nullptr,
[fadeBatchSetter, fadeItemSetter](const render::ShapePipeline& shapePipeline, gpu::Batch& batch, render::Args* args) {
batch.setResourceTexture(gr::Texture::MaterialAlbedo, DependencyManager::get<TextureCache>()->getWhiteTexture());
@ -1995,7 +1995,7 @@ void GeometryCache::useSimpleDrawPipeline(gpu::Batch& batch, bool noBlend) {
// enable decal blend
state->setBlendFunction(true, gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA);
PrepareStencil::testMask(*state);
PrepareStencil::testMaskResetNoAA(*state);
_standardDrawPipeline = gpu::Pipeline::create(program, state);
@ -2027,7 +2027,7 @@ void GeometryCache::useGridPipeline(gpu::Batch& batch, GridBuffer gridBuffer, bo
gpu::StatePointer state = std::make_shared<gpu::State>();
state->setDepthTest(true, !std::get<0>(key), gpu::LESS_EQUAL);
if (std::get<0>(key)) {
PrepareStencil::testMask(*state);
PrepareStencil::testMaskResetNoAA(*state);
} else {
PrepareStencil::testMaskDrawShape(*state);
}
@ -2134,7 +2134,6 @@ gpu::PipelinePointer GeometryCache::getWebBrowserProgram(bool transparent, bool
gpu::StatePointer state = std::make_shared<gpu::State>();
state->setDepthTest(true, !transparent, gpu::LESS_EQUAL);
// FIXME: do we need a testMaskDrawNoAA?
PrepareStencil::testMaskDrawShapeNoAA(*state);
state->setBlendFunction(transparent,
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
@ -2215,7 +2214,7 @@ gpu::PipelinePointer GeometryCache::getSimplePipeline(bool textured, bool transp
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
if (config.isAntiAliased()) {
config.isTransparent() ? PrepareStencil::testMask(*state) : PrepareStencil::testMaskDrawShape(*state);
config.isTransparent() ? PrepareStencil::testMaskResetNoAA(*state) : PrepareStencil::testMaskDrawShape(*state);
} else {
PrepareStencil::testMaskDrawShapeNoAA(*state);
}

View file

@ -23,13 +23,13 @@
LAYOUT(binding=RENDER_UTILS_TEXTURE_HAZE_LINEAR_DEPTH) uniform sampler2D linearDepthMap;
vec4 unpackPositionFromZeye(vec2 texcoord) {
vec4 unpackPositionFromZeyeAndGetSide(vec2 texcoord, out int side) {
float Zeye = -texture(linearDepthMap, texcoord).x;
float check = float(isStereo());
float check2 = check * float(texcoord.x > 0.5);
texcoord.x -= check2 * 0.5;
int side = int(check2);
side = int(check2);
texcoord.x *= 1.0 + check;
return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);
@ -43,9 +43,10 @@ void main(void) {
discard;
}
vec4 fragPositionES = unpackPositionFromZeye(varTexCoord0);
int side;
vec4 fragPositionES = unpackPositionFromZeyeAndGetSide(varTexCoord0, side);
mat4 viewInverse = getViewInverse();
mat4 viewInverse = getViewInverse(side);
vec4 fragPositionWS = viewInverse * fragPositionES;
vec4 eyePositionWS = viewInverse[3];
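Worked example of the remap in unpackPositionFromZeyeAndGetSide (standalone sketch): in stereo, texcoords in [0.5, 1) belong to the right eye and are shifted and rescaled so each eye samples its half as a full [0, 1) range.

float u = 0.75f;                                         // stereo frame, right half
const float check = 1.0f;                                // isStereo()
const float check2 = check * (u > 0.5f ? 1.0f : 0.0f);   // 1 -> right eye
u -= check2 * 0.5f;                                      // 0.25
const int side = int(check2);                            // 1
u *= 1.0f + check;                                       // 0.5: right half remapped to [0, 1)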

View file

@ -124,7 +124,8 @@ gpu::PipelinePointer DrawHighlightMask::_stencilMaskPipeline;
gpu::PipelinePointer DrawHighlightMask::_stencilMaskFillPipeline;
DrawHighlightMask::DrawHighlightMask(unsigned int highlightIndex, render::ShapePlumberPointer shapePlumber,
HighlightSharedParametersPointer parameters) : _highlightPassIndex(highlightIndex), _shapePlumber(shapePlumber), _sharedParameters(parameters) {}
HighlightSharedParametersPointer parameters, uint transformSlot) :
_highlightPassIndex(highlightIndex), _shapePlumber(shapePlumber), _sharedParameters(parameters), _transformSlot(transformSlot) {}
void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
assert(renderContext->args);
@ -177,8 +178,6 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c
batch.clearDepthStencilFramebuffer(1.0f, 0);
});
const auto jitter = inputs.get2();
render::ItemBounds itemBounds;
gpu::doInBatch("DrawHighlightMask::run", args->_context, [&](gpu::Batch& batch) {
@ -190,9 +189,8 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setViewportTransform(args->_viewport);
batch.setProjectionTransform(projMat);
batch.setProjectionJitter(jitter.x, jitter.y);
batch.setViewTransform(viewMat);
batch.setProjectionJitterEnabled(true);
batch.setSavedViewProjectionTransform(_transformSlot);
sortAndRenderZPassShapes(_shapePlumber, renderContext, inShapes, itemBounds);
});
@ -209,6 +207,11 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c
}
gpu::doInBatch("DrawHighlightMask::run::end", args->_context, [&](gpu::Batch& batch) {
// Setup camera, projection and viewport for all items
batch.setViewportTransform(args->_viewport);
batch.setProjectionJitterEnabled(true);
batch.setSavedViewProjectionTransform(_transformSlot);
// Draw stencil mask with object bounding boxes
auto stencilPipeline = highlight._style.isFilled() ? _stencilMaskFillPipeline : _stencilMaskPipeline;
batch.setPipeline(stencilPipeline);
@ -269,7 +272,6 @@ void DrawHighlight::run(const render::RenderContextPointer& renderContext, const
shaderParameters._size.y = size;
}
auto primaryFramebuffer = inputs.get4();
gpu::doInBatch("DrawHighlight::run", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setFramebuffer(destinationFrameBuffer);
@ -285,9 +287,6 @@ void DrawHighlight::run(const render::RenderContextPointer& renderContext, const
batch.setResourceTexture(ru::Texture::HighlightSceneDepth, sceneDepthBuffer->getPrimaryDepthTexture());
batch.setResourceTexture(ru::Texture::HighlightDepth, highlightedDepthTexture);
batch.draw(gpu::TRIANGLE_STRIP, 4);
// Reset the framebuffer for overlay drawing
batch.setFramebuffer(primaryFramebuffer);
});
}
}
@ -311,7 +310,7 @@ const gpu::PipelinePointer& DrawHighlight::getPipeline(const render::HighlightSt
return style.isFilled() ? _pipelineFilled : _pipeline;
}
DebugHighlight::DebugHighlight() {
DebugHighlight::DebugHighlight(uint transformSlot) : _transformSlot(transformSlot) {
_geometryDepthId = DependencyManager::get<GeometryCache>()->allocateID();
}
@ -334,22 +333,15 @@ void DebugHighlight::run(const render::RenderContextPointer& renderContext, cons
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
RenderArgs* args = renderContext->args;
const auto jitter = input.get2();
auto primaryFramebuffer = input.get3();
gpu::doInBatch("DebugHighlight::run", args->_context, [&](gpu::Batch& batch) {
batch.setViewportTransform(args->_viewport);
batch.setFramebuffer(highlightResources->getColorFramebuffer());
const auto geometryBuffer = DependencyManager::get<GeometryCache>();
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setProjectionJitter(jitter.x, jitter.y);
batch.setViewTransform(viewMat);
batch.setProjectionJitterEnabled(true);
batch.setSavedViewProjectionTransform(_transformSlot);
batch.setModelTransform(Transform());
const glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
@ -361,9 +353,6 @@ void DebugHighlight::run(const render::RenderContextPointer& renderContext, cons
geometryBuffer->renderQuad(batch, bottomLeft, topRight, color, _geometryDepthId);
batch.setResourceTexture(0, nullptr);
// Reset the framebuffer for overlay drawing
batch.setFramebuffer(primaryFramebuffer);
});
}
}
@ -465,13 +454,12 @@ void DrawHighlightTask::configure(const Config& config) {
}
void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs) {
void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, uint transformSlot) {
const auto items = inputs.getN<Inputs>(0).get<RenderFetchCullSortTask::BucketList>();
const auto& outlines = items[RenderFetchCullSortTask::OUTLINE];
const auto sceneFrameBuffer = inputs.getN<Inputs>(1);
const auto primaryFramebuffer = inputs.getN<Inputs>(2);
const auto deferredFrameTransform = inputs.getN<Inputs>(3);
const auto jitter = inputs.getN<Inputs>(4);
// Prepare the ShapePipeline
auto shapePlumber = std::make_shared<ShapePlumber>();
@ -514,8 +502,8 @@ void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, ren
stream << "HighlightMask" << i;
name = stream.str();
}
const auto drawMaskInputs = DrawHighlightMask::Inputs(sortedBounds, highlightResources, jitter).asVarying();
const auto highlightedRect = task.addJob<DrawHighlightMask>(name, drawMaskInputs, i, shapePlumber, sharedParameters);
const auto drawMaskInputs = DrawHighlightMask::Inputs(sortedBounds, highlightResources).asVarying();
const auto highlightedRect = task.addJob<DrawHighlightMask>(name, drawMaskInputs, i, shapePlumber, sharedParameters, transformSlot);
if (i == 0) {
highlight0Rect = highlightedRect;
}
@ -526,7 +514,7 @@ void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, ren
stream << "HighlightEffect" << i;
name = stream.str();
}
const auto drawHighlightInputs = DrawHighlight::Inputs(deferredFrameTransform, highlightResources, sceneFrameBuffer, highlightedRect, primaryFramebuffer).asVarying();
const auto drawHighlightInputs = DrawHighlight::Inputs(deferredFrameTransform, highlightResources, sceneFrameBuffer, highlightedRect).asVarying();
task.addJob<DrawHighlight>(name, drawHighlightInputs, i, sharedParameters);
}
@ -535,8 +523,8 @@ void DrawHighlightTask::build(JobModel& task, const render::Varying& inputs, ren
task.addJob<HighlightCleanup>("HighlightCleanup", cleanupInput);
// Debug highlight
const auto debugInputs = DebugHighlight::Inputs(highlightResources, const_cast<const render::Varying&>(highlight0Rect), jitter, primaryFramebuffer).asVarying();
task.addJob<DebugHighlight>("HighlightDebug", debugInputs);
const auto debugInputs = DebugHighlight::Inputs(highlightResources, const_cast<const render::Varying&>(highlight0Rect)).asVarying();
task.addJob<DebugHighlight>("HighlightDebug", debugInputs, transformSlot);
}
const render::Varying DrawHighlightTask::addSelectItemJobs(JobModel& task, const render::Varying& selectionName,

View file

@ -114,11 +114,10 @@ private:
class DrawHighlightMask {
public:
using Inputs = render::VaryingSet3<render::ShapeBounds, HighlightResourcesPointer, glm::vec2>;
using Outputs = glm::ivec4;
using Inputs = render::VaryingSet2<render::ShapeBounds, HighlightResourcesPointer>;
using Outputs = glm::ivec4;
using JobModel = render::Job::ModelIO<DrawHighlightMask, Inputs, Outputs>;
DrawHighlightMask(unsigned int highlightIndex, render::ShapePlumberPointer shapePlumber, HighlightSharedParametersPointer parameters);
DrawHighlightMask(unsigned int highlightIndex, render::ShapePlumberPointer shapePlumber, HighlightSharedParametersPointer parameters, uint transformSlot);
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs);
@ -128,6 +127,7 @@ protected:
HighlightSharedParametersPointer _sharedParameters;
gpu::BufferPointer _boundsBuffer;
gpu::StructBuffer<glm::vec2> _outlineWidth;
uint _transformSlot { 0 };
static gpu::PipelinePointer _stencilMaskPipeline;
static gpu::PipelinePointer _stencilMaskFillPipeline;
@ -136,7 +136,7 @@ protected:
class DrawHighlight {
public:
using Inputs = render::VaryingSet5<DeferredFrameTransformPointer, HighlightResourcesPointer, DeferredFramebufferPointer, glm::ivec4, gpu::FramebufferPointer>;
using Inputs = render::VaryingSet4<DeferredFrameTransformPointer, HighlightResourcesPointer, DeferredFramebufferPointer, glm::ivec4>;
using Config = render::Job::Config;
using JobModel = render::Job::ModelI<DrawHighlight, Inputs, Config>;
@ -174,11 +174,10 @@ signals:
class DebugHighlight {
public:
using Inputs = render::VaryingSet4<HighlightResourcesPointer, glm::ivec4, glm::vec2, gpu::FramebufferPointer>;
using Config = DebugHighlightConfig;
using Inputs = render::VaryingSet2<HighlightResourcesPointer, glm::ivec4>;
using Config = DebugHighlightConfig;
using JobModel = render::Job::ModelI<DebugHighlight, Inputs, Config>;
DebugHighlight();
DebugHighlight(uint transformSlot);
~DebugHighlight();
void configure(const Config& config);
@ -187,8 +186,9 @@ public:
private:
gpu::PipelinePointer _depthPipeline;
int _geometryDepthId{ 0 };
bool _isDisplayEnabled{ false };
int _geometryDepthId { 0 };
bool _isDisplayEnabled { false };
uint _transformSlot { 0 };
const gpu::PipelinePointer& getDepthPipeline();
void initializePipelines();
@ -197,14 +197,13 @@ private:
class DrawHighlightTask {
public:
using Inputs = render::VaryingSet5<RenderFetchCullSortTask::BucketList, DeferredFramebufferPointer, gpu::FramebufferPointer, DeferredFrameTransformPointer, glm::vec2>;
using Config = render::Task::Config;
using Inputs = render::VaryingSet4<RenderFetchCullSortTask::BucketList, DeferredFramebufferPointer, gpu::FramebufferPointer, DeferredFrameTransformPointer>;
using Config = render::Task::Config;
using JobModel = render::Task::ModelI<DrawHighlightTask, Inputs, Config>;
DrawHighlightTask();
void configure(const Config& config);
void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs);
void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, uint transformSlot);
private:
static const render::Varying addSelectItemJobs(JobModel& task, const render::Varying& selectionName, const RenderFetchCullSortTask::BucketList& items);

View file

@ -1,7 +1,7 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// <$_SCRIBE_FILENAME$>
// Generated on <$_SCRIBE_DATE$>
// Draw and transform the fed vertex position with the standard MVP stack
// and offset the vertices by a certain amount in the vertex direction
//
@ -12,9 +12,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/ShaderConstants.h@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
struct ItemBound {
@ -107,5 +105,5 @@ void main(void) {
vec4 offsetPosition;
<$transformModelToMonoClipPos(cam, obj, pos, offsetPosition)$>
gl_Position.xy += normalize(offsetPosition.xy-gl_Position.xy) * _parameters.outlineWidth * gl_Position.w;
<$transformStereoClipsSpace(cam, gl_Position)$>
<$transformStereoClipSpace(gl_Position)$>
}

View file

@ -575,7 +575,7 @@ void LightClusteringPass::run(const render::RenderContextPointer& renderContext,
config->setNumClusteredLightReferences(clusteringStats.z);
}
DebugLightClusters::DebugLightClusters() {
DebugLightClusters::DebugLightClusters(uint transformSlot) : _transformSlot(transformSlot) {
}
@ -650,13 +650,7 @@ void DebugLightClusters::run(const render::RenderContextPointer& renderContext,
// Assign the camera transform
batch.setViewportTransform(args->_viewport);
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat, true);
batch.setSavedViewProjectionTransform(_transformSlot);
// Then the actual ClusterGrid attributes
batch.setModelTransform(Transform());
@ -668,8 +662,6 @@ void DebugLightClusters::run(const render::RenderContextPointer& renderContext,
batch.setUniformBuffer(ru::Buffer::LightClusterGrid, lightClusters->_clusterGridBuffer);
batch.setUniformBuffer(ru::Buffer::LightClusterContent, lightClusters->_clusterContentBuffer);
if (doDrawClusterFromDepth) {
batch.setPipeline(getDrawClusterFromDepthPipeline());
batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, deferredTransform->getFrameTransformBuffer());

View file

@ -217,7 +217,7 @@ public:
using Config = DebugLightClustersConfig;
using JobModel = render::Job::ModelI<DebugLightClusters, Inputs, Config>;
DebugLightClusters();
DebugLightClusters(uint transformSlot);
void configure(const Config& config);
@ -228,6 +228,7 @@ protected:
gpu::PipelinePointer _drawClusterGrid;
gpu::PipelinePointer _drawClusterFromDepth;
gpu::PipelinePointer _drawClusterContent;
uint _transformSlot;
const gpu::PipelinePointer getDrawClusterGridPipeline();
const gpu::PipelinePointer getDrawClusterFromDepthPipeline();
const gpu::PipelinePointer getDrawClusterContentPipeline();

View file

@ -193,7 +193,14 @@ void ModelMeshPartPayload::bindTransform(gpu::Batch& batch, const Transform& tra
if (_clusterBuffer) {
batch.setUniformBuffer(graphics::slot::buffer::Skinning, _clusterBuffer);
}
batch.setModelTransform(transform);
// TODO: I'm not sure of this
//batch.setModelTransform(transform, _previousModelTransform);
batch.setModelTransform(transform, _previousRenderTransform);
if (renderMode == Args::RenderMode::DEFAULT_RENDER_MODE || renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
// TODO: I'm not sure of this
//_prevRenderTransform = _drawTransform;
_previousRenderTransform = transform;
}
}
void ModelMeshPartPayload::drawCall(gpu::Batch& batch) const {
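The hunk above is the per-item half of the motion-vector scheme: each render item keeps the transform it was last drawn with and hands both to the batch, so the vertex stage can derive a screen-space velocity. History only advances in the DEFAULT and MIRROR render modes, so shadow and other auxiliary passes cannot corrupt the stored previous frame. A condensed sketch of the pattern (member name as in the diff, surrounding signature illustrative):

void bindTransform(gpu::Batch& batch, const Transform& transform, Args::RenderMode renderMode) {
    // Current + previous transform lets the shader compute per-object motion vectors
    batch.setModelTransform(transform, _previousRenderTransform);
    if (renderMode == Args::RenderMode::DEFAULT_RENDER_MODE ||
        renderMode == Args::RenderMode::MIRROR_RENDER_MODE) {
        _previousRenderTransform = transform;  // advance history for primary views only
    }
}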

View file

@ -70,6 +70,9 @@ public:
static bool enableMaterialProceduralShaders;
protected:
mutable Transform _previousRenderTransform;
private:
void initCache(const ModelPointer& model, int shapeID);

View file

@ -1,6 +1,7 @@
//
// Created by Bradley Austin Davis on 2018/01/09
// Copyright 2013-2018 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -27,17 +28,18 @@ namespace gr {
using namespace render;
extern void initForwardPipelines(ShapePlumber& plumber);
void BeginGPURangeTimer::run(const render::RenderContextPointer& renderContext, gpu::RangeTimerPointer& timer) {
timer = _gpuTimer;
gpu::doInBatch("BeginGPURangeTimer", renderContext->args->_context, [&](gpu::Batch& batch) {
_gpuTimer->begin(batch);
batch.pushProfileRange(timer->name().c_str());
});
}
void EndGPURangeTimer::run(const render::RenderContextPointer& renderContext, const gpu::RangeTimerPointer& timer) {
gpu::doInBatch("EndGPURangeTimer", renderContext->args->_context, [&](gpu::Batch& batch) {
batch.popProfileRange();
timer->end(batch);
});
@ -45,10 +47,11 @@ void EndGPURangeTimer::run(const render::RenderContextPointer& renderContext, co
config->setGPUBatchRunTime(timer->getGPUAverage(), timer->getBatchAverage());
}
DrawLayered3D::DrawLayered3D(bool opaque) :
_shapePlumber(std::make_shared<ShapePlumber>()),
_opaquePass(opaque) {
initForwardPipelines(*_shapePlumber);
DrawLayered3D::DrawLayered3D(const render::ShapePlumberPointer& shapePlumber, bool opaque, bool jitter, unsigned int transformSlot) :
_shapePlumber(shapePlumber),
_transformSlot(transformSlot),
_opaquePass(opaque),
_isJitterEnabled(jitter) {
}
void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs& inputs) {
@ -58,9 +61,9 @@ void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs&
auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
const auto& inItems = inputs.get0();
const auto& lightingModel = inputs.get1();
const auto& hazeFrame = inputs.get2();
const auto jitter = inputs.get3();
const auto& frameTransform = inputs.get1();
const auto& lightingModel = inputs.get2();
const auto& hazeFrame = inputs.get3();
config->setNumDrawn((int)inItems.size());
emit config->numDrawnChanged();
@ -80,29 +83,25 @@ void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs&
if (_opaquePass) {
gpu::doInBatch("DrawLayered3D::run::clear", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.clearFramebuffer(gpu::Framebuffer::BUFFER_DEPTH, glm::vec4(), 1.f, 0, false);
batch.clearDepthFramebuffer(true, false);
});
}
if (!inItems.empty()) {
// Render the items
gpu::doInBatch("DrawLayered3D::main", args->_context, [&](gpu::Batch& batch) {
PROFILE_RANGE_BATCH(batch, "DrawLayered3D::main");
args->_batch = &batch;
batch.setViewportTransform(args->_viewport);
batch.setStateScissorRect(args->_viewport);
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setProjectionJitter(jitter.x, jitter.y);
batch.setViewTransform(viewMat);
batch.setProjectionJitterEnabled(_isJitterEnabled);
batch.setSavedViewProjectionTransform(_transformSlot);
// Set up the lighting model for all items
batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());
batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, frameTransform->getFrameTransformBuffer());
if (haze) {
batch.setUniformBuffer(graphics::slot::buffer::Buffer::HazeParams, haze->getHazeParametersBuffer());

View file

@ -13,6 +13,7 @@
#include "LightStage.h"
#include "HazeStage.h"
#include "LightingModel.h"
#include "DeferredFrameTransform.h"
class BeginGPURangeTimer {
public:
@ -61,11 +62,11 @@ protected:
class DrawLayered3D {
public:
using Inputs = render::VaryingSet4<render::ItemBounds, LightingModelPointer, HazeStage::FramePointer, glm::vec2>;
using Inputs = render::VaryingSet4<render::ItemBounds, DeferredFrameTransformPointer, LightingModelPointer, HazeStage::FramePointer>;
using Config = DrawLayered3DConfig;
using JobModel = render::Job::ModelI<DrawLayered3D, Inputs, Config>;
DrawLayered3D(bool opaque);
DrawLayered3D(const render::ShapePlumberPointer& shapePlumber, bool opaque, bool jitter, unsigned int transformSlot);
void configure(const Config& config) { _maxDrawn = config.maxDrawn; }
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs);
@ -73,7 +74,9 @@ public:
protected:
render::ShapePlumberPointer _shapePlumber;
int _maxDrawn; // initialized by Config
uint _transformSlot;
bool _opaquePass { true };
bool _isJitterEnabled { false };
};
class Blit {

View file

@ -5,6 +5,7 @@
//
// Created by Sam Gateau on 5/29/15.
// Copyright 2016 High Fidelity, Inc.
// Copyright 2020 Vircadia contributors.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -21,6 +22,7 @@
#include <ViewFrustum.h>
#include <gpu/Context.h>
#include <graphics/ShaderConstants.h>
#include <shaders/Shaders.h>
#include <render/CullTask.h>
#include <render/FilterTask.h>
@ -39,7 +41,6 @@
#include "DeferredFramebuffer.h"
#include "DeferredLightingEffect.h"
#include "SurfaceGeometryPass.h"
#include "VelocityBufferPass.h"
#include "FramebufferCache.h"
#include "TextureCache.h"
#include "ZoneRenderer.h"
@ -60,6 +61,7 @@
using namespace render;
extern void initDeferredPipelines(render::ShapePlumber& plumber, const render::ShapePipeline::BatchSetter& batchSetter, const render::ShapePipeline::ItemSetter& itemSetter);
extern void initForwardPipelines(render::ShapePlumber& plumber);
namespace ru {
using render_utils::slot::texture::Texture;
@ -74,25 +76,17 @@ namespace gr {
class RenderDeferredTaskDebug {
public:
using ExtraBuffers = render::VaryingSet6<LinearDepthFramebufferPointer, SurfaceGeometryFramebufferPointer, AmbientOcclusionFramebufferPointer, gpu::BufferView, SubsurfaceScatteringResourcePointer, VelocityFramebufferPointer>;
using ExtraBuffers = render::VaryingSet5<LinearDepthFramebufferPointer, SurfaceGeometryFramebufferPointer, AmbientOcclusionFramebufferPointer, gpu::BufferView, SubsurfaceScatteringResourcePointer>;
using Input = render::VaryingSet9<RenderFetchCullSortTask::Output, RenderShadowTask::Output,
AssembleLightingStageTask::Output, LightClusteringPass::Output,
PrepareDeferred::Outputs, ExtraBuffers, GenerateDeferredFrameTransform::Output,
JitterSample::Output, LightingModel>;
LightingModel, Antialiasing::Outputs>;
using JobModel = render::Task::ModelI<RenderDeferredTaskDebug, Input>;
RenderDeferredTaskDebug();
void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs);
private:
void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, uint mainViewTransformSlot);
};
RenderDeferredTask::RenderDeferredTask()
{
}
void RenderDeferredTask::configure(const Config& config) {
// Propagate resolution scale to sub jobs that need it
auto preparePrimaryBufferConfig = config.getConfig<PreparePrimaryFramebuffer>("PreparePrimaryBufferDeferred");
@ -100,11 +94,16 @@ void RenderDeferredTask::configure(const Config& config) {
preparePrimaryBufferConfig->setResolutionScale(config.resolutionScale);
}
void RenderDeferredTask::build(JobModel& task, const render::Varying& input, render::Varying& output) {
void RenderDeferredTask::build(JobModel& task, const render::Varying& input, render::Varying& output, uint8_t transformOffset) {
auto fadeEffect = DependencyManager::get<FadeEffect>();
// Prepare the ShapePipelines
ShapePlumberPointer shapePlumber = std::make_shared<ShapePlumber>();
initDeferredPipelines(*shapePlumber, fadeEffect->getBatchSetter(), fadeEffect->getItemUniformSetter());
ShapePlumberPointer shapePlumberDeferred = std::make_shared<ShapePlumber>();
initDeferredPipelines(*shapePlumberDeferred, fadeEffect->getBatchSetter(), fadeEffect->getItemUniformSetter());
ShapePlumberPointer shapePlumberForward = std::make_shared<ShapePlumber>();
initForwardPipelines(*shapePlumberForward);
uint backgroundViewTransformSlot = render::RenderEngine::TS_BACKGROUND_VIEW + transformOffset;
uint mainViewTransformSlot = render::RenderEngine::TS_MAIN_VIEW + transformOffset;
const auto& inputs = input.get<Input>();
@ -142,25 +141,26 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
fadeEffect->build(task, opaques);
const auto jitter = task.addJob<JitterSample>("JitterCam");
const auto antialiasingMode = task.addJob<AntialiasingSetup>("AntialiasingSetup");
// GPU jobs: Start preparing the primary, deferred and lighting buffer
const auto scaledPrimaryFramebuffer = task.addJob<PreparePrimaryFramebuffer>("PreparePrimaryBufferDeferred");
// Prepare deferred, generate the shared Deferred Frame Transform. Only valid with the scaled frame buffer
const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform", jitter);
const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform", mainViewTransformSlot);
const auto prepareDeferredInputs = PrepareDeferred::Inputs(scaledPrimaryFramebuffer, lightingModel).asVarying();
const auto prepareDeferredOutputs = task.addJob<PrepareDeferred>("PrepareDeferred", prepareDeferredInputs);
const auto deferredFramebuffer = prepareDeferredOutputs.getN<PrepareDeferred::Outputs>(0);
const auto lightingFramebuffer = prepareDeferredOutputs.getN<PrepareDeferred::Outputs>(1);
const auto lightingWithVelocityFramebuffer = prepareDeferredOutputs.getN<PrepareDeferred::Outputs>(2);
// draw a stencil mask in hidden regions of the framebuffer.
task.addJob<PrepareStencil>("PrepareStencil", scaledPrimaryFramebuffer);
// Render opaque objects in DeferredBuffer
const auto opaqueInputs = DrawStateSortDeferred::Inputs(opaques, lightingModel, jitter).asVarying();
task.addJob<DrawStateSortDeferred>("DrawOpaqueDeferred", opaqueInputs, shapePlumber);
const auto opaqueInputs = DrawStateSortDeferred::Inputs(opaques, lightingModel, deferredFrameTransform).asVarying();
task.addJob<DrawStateSortDeferred>("DrawOpaqueDeferred", opaqueInputs, shapePlumberDeferred, mainViewTransformSlot);
// Opaque all rendered
@ -186,11 +186,6 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
const auto ambientOcclusionFramebuffer = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Output>(0);
const auto ambientOcclusionUniforms = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Output>(1);
// Velocity
const auto velocityBufferInputs = VelocityBufferPass::Inputs(deferredFrameTransform, deferredFramebuffer).asVarying();
const auto velocityBufferOutputs = task.addJob<VelocityBufferPass>("VelocityBuffer", velocityBufferInputs);
const auto velocityBuffer = velocityBufferOutputs.getN<VelocityBufferPass::Outputs>(0);
// Light Clustering
// Create the cluster grid of lights, CPU job for now
const auto lightClusteringPassInputs = LightClusteringPass::Input(deferredFrameTransform, lightingModel, lightFrame, linearDepthTarget).asVarying();
@ -203,28 +198,28 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
// Similar to light stage, background stage has been filled by several potential render items and resolved for the frame in this job
const auto backgroundInputs = DrawBackgroundStage::Inputs(lightingModel, backgroundFrame, hazeFrame).asVarying();
task.addJob<DrawBackgroundStage>("DrawBackgroundDeferred", backgroundInputs);
task.addJob<DrawBackgroundStage>("DrawBackgroundDeferred", backgroundInputs, backgroundViewTransformSlot);
const auto drawHazeInputs = render::Varying(DrawHaze::Inputs(hazeFrame, lightingFramebuffer, linearDepthTarget, deferredFrameTransform, lightingModel, lightFrame));
task.addJob<DrawHaze>("DrawHazeDeferred", drawHazeInputs);
// Render transparent objects forward in LightingBuffer
const auto transparentsInputs = RenderTransparentDeferred::Inputs(transparents, hazeFrame, lightFrame, lightingModel, lightClusters, shadowFrame, jitter).asVarying();
task.addJob<RenderTransparentDeferred>("DrawTransparentDeferred", transparentsInputs, shapePlumber);
const auto transparentsInputs = RenderTransparentDeferred::Inputs(transparents, hazeFrame, lightFrame, lightingModel, lightClusters, shadowFrame, deferredFrameTransform).asVarying();
task.addJob<RenderTransparentDeferred>("DrawTransparentDeferred", transparentsInputs, shapePlumberDeferred, mainViewTransformSlot);
// Highlight
const auto outlineInputs = DrawHighlightTask::Inputs(items, deferredFramebuffer, lightingFramebuffer, deferredFrameTransform, jitter).asVarying();
task.addJob<DrawHighlightTask>("DrawHighlight", outlineInputs);
const auto outlineInputs = DrawHighlightTask::Inputs(items, deferredFramebuffer, lightingFramebuffer, deferredFrameTransform).asVarying();
task.addJob<DrawHighlightTask>("DrawHighlight", outlineInputs, mainViewTransformSlot);
// Layered Over (in front)
const auto inFrontOpaquesInputs = DrawLayered3D::Inputs(inFrontOpaque, lightingModel, hazeFrame, jitter).asVarying();
const auto inFrontTransparentsInputs = DrawLayered3D::Inputs(inFrontTransparent, lightingModel, hazeFrame, jitter).asVarying();
task.addJob<DrawLayered3D>("DrawInFrontOpaque", inFrontOpaquesInputs, true);
task.addJob<DrawLayered3D>("DrawInFrontTransparent", inFrontTransparentsInputs, false);
const auto inFrontOpaquesInputs = DrawLayered3D::Inputs(inFrontOpaque, deferredFrameTransform, lightingModel, hazeFrame).asVarying();
task.addJob<DrawLayered3D>("DrawInFrontOpaque", inFrontOpaquesInputs, shapePlumberForward, true, true, mainViewTransformSlot);
const auto inFrontTransparentsInputs = DrawLayered3D::Inputs(inFrontTransparent, deferredFrameTransform, lightingModel, hazeFrame).asVarying();
task.addJob<DrawLayered3D>("DrawInFrontTransparent", inFrontTransparentsInputs, shapePlumberForward, false, true, mainViewTransformSlot);
// AA job before bloom to limit flickering
const auto antialiasingInputs = Antialiasing::Inputs(deferredFrameTransform, lightingFramebuffer, linearDepthTarget, velocityBuffer).asVarying();
task.addJob<Antialiasing>("Antialiasing", antialiasingInputs);
const auto antialiasingInputs = Antialiasing::Inputs(deferredFrameTransform, deferredFramebuffer, linearDepthTarget, antialiasingMode).asVarying();
const auto antialiasingIntensityTexture = task.addJob<Antialiasing>("Antialiasing", antialiasingInputs);
// Add bloom
const auto bloomInputs = BloomEffect::Inputs(deferredFrameTransform, lightingFramebuffer, bloomFrame, lightingModel).asVarying();
@ -238,21 +233,18 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
// The debugging task happens in the "over" layer, after tone mapping and just before the HUD
{ // Debug the bounds of the rendered items, still look at the zbuffer
const auto extraDebugBuffers = RenderDeferredTaskDebug::ExtraBuffers(linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, ambientOcclusionUniforms, scatteringResource, velocityBuffer);
const auto extraDebugBuffers = RenderDeferredTaskDebug::ExtraBuffers(linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, ambientOcclusionUniforms, scatteringResource);
const auto debugInputs = RenderDeferredTaskDebug::Input(fetchedItems, shadowTaskOutputs, lightingStageInputs, lightClusters, prepareDeferredOutputs, extraDebugBuffers,
deferredFrameTransform, jitter, lightingModel).asVarying();
task.addJob<RenderDeferredTaskDebug>("DebugRenderDeferredTask", debugInputs);
deferredFrameTransform, lightingModel, antialiasingIntensityTexture).asVarying();
task.addJob<RenderDeferredTaskDebug>("DebugRenderDeferredTask", debugInputs, mainViewTransformSlot);
}
// HUD Layer
const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying();
task.addJob<RenderHUDLayerTask>("RenderHUDLayer", renderHUDLayerInputs);
const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame, deferredFrameTransform).asVarying();
task.addJob<RenderHUDLayerTask>("RenderHUDLayer", renderHUDLayerInputs, shapePlumberForward, mainViewTransformSlot);
}
RenderDeferredTaskDebug::RenderDeferredTaskDebug() {
}
void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input, render::Varying& outputs) {
void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input, render::Varying& outputs, uint mainViewTransformSlot) {
const auto& inputs = input.get<Input>();
@ -274,14 +266,13 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input
// RenderShadowTask out
const auto& shadowOut = inputs.get1();
const auto& renderShadowTaskOut = shadowOut[0];
const auto& shadowFrame = shadowOut[1];
const auto& renderShadowTaskOut = shadowOut[0];
const auto& shadowFrame = shadowOut[1];
// Extract the Lighting Stages Current frame ( and zones)
const auto lightingStageInputs = inputs.get2();
// Fetch the current frame stacks from all the stages
const auto stageCurrentFrames = lightingStageInputs.get0();
const auto stageCurrentFrames = lightingStageInputs[0];
const auto lightFrame = stageCurrentFrames[0];
const auto backgroundFrame = stageCurrentFrames[1];
const auto hazeFrame = stageCurrentFrames[2];
@ -304,33 +295,30 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input
const auto& ambientOcclusionFramebuffer = extraDeferredBuffer[2];
const auto& ambientOcclusionUniforms = extraDeferredBuffer[3];
const auto& scatteringResource = extraDeferredBuffer[4];
const auto& velocityBuffer = extraDeferredBuffer[5];
// GenerateDeferredFrameTransform out
const auto& deferredFrameTransform = inputs[6];
// Jitter out
const auto& jitter = inputs[7];
// Lighting Model out
const auto& lightingModel = inputs[8];
const auto& lightingModel = inputs[7];
// Antialiasing out
const auto& antialiasingIntensityTexture = inputs[8];
// Light Cluster Grid Debugging job
{
const auto debugLightClustersInputs = DebugLightClusters::Inputs(deferredFrameTransform, lightingModel, linearDepthTarget, lightClusters).asVarying();
task.addJob<DebugLightClusters>("DebugLightClusters", debugLightClustersInputs);
task.addJob<DebugLightClusters>("DebugLightClusters", debugLightClustersInputs, mainViewTransformSlot);
}
{ // Debug the bounds of the rendered items, still look at the zbuffer
task.addJob<DrawBounds>("DrawMetaBounds", metas);
task.addJob<DrawBounds>("DrawOpaqueBounds", opaques);
task.addJob<DrawBounds>("DrawTransparentBounds", transparents);
task.addJob<DrawBounds>("DrawMetaBounds", metas, mainViewTransformSlot);
task.addJob<DrawBounds>("DrawOpaqueBounds", opaques, mainViewTransformSlot);
task.addJob<DrawBounds>("DrawTransparentBounds", transparents, mainViewTransformSlot);
task.addJob<DrawBounds>("DrawLightBounds", lights);
task.addJob<DrawBounds>("DrawZones", zones);
task.addJob<DrawBounds>("DrawLightBounds", lights, mainViewTransformSlot);
task.addJob<DrawBounds>("DrawZones", zones, mainViewTransformSlot);
const auto frustums = task.addJob<ExtractFrustums>("ExtractFrustums", shadowFrame);
const auto viewFrustum = frustums.getN<ExtractFrustums::Outputs>(ExtractFrustums::VIEW_FRUSTUM);
task.addJob<DrawFrustum>("DrawViewFrustum", viewFrustum, glm::vec3(0.0f, 1.0f, 0.0f));
@ -362,25 +350,25 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input
const auto selectedItems = task.addJob<SelectItems>("TransparentSelection", selectItemInput, selectionBaseName);
// Render.getConfig("RenderMainView.DrawSelectionBounds").enabled = true
task.addJob<DrawBounds>("DrawSelectionBounds", selectedItems);
task.addJob<DrawBounds>("DrawSelectionBounds", selectedItems, mainViewTransformSlot);
}
{ // Debug the bounds of the layered objects, still look at the zbuffer
task.addJob<DrawBounds>("DrawInFrontOpaqueBounds", inFrontOpaque);
task.addJob<DrawBounds>("DrawInFrontTransparentBounds", inFrontTransparent);
task.addJob<DrawBounds>("DrawInFrontOpaqueBounds", inFrontOpaque, mainViewTransformSlot);
task.addJob<DrawBounds>("DrawInFrontTransparentBounds", inFrontTransparent, mainViewTransformSlot);
}
{ // Debug the bounds of the HUD objects, still look at the zbuffer
task.addJob<DrawBounds>("DrawHUDOpaqueBounds", hudOpaque);
task.addJob<DrawBounds>("DrawHUDTransparentBounds", hudTransparent);
task.addJob<DrawBounds>("DrawHUDOpaqueBounds", hudOpaque, mainViewTransformSlot);
task.addJob<DrawBounds>("DrawHUDTransparentBounds", hudTransparent, mainViewTransformSlot);
}
// Debugging stages
{
// Debugging Deferred buffer job
const auto debugFramebuffers = DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, velocityBuffer, deferredFrameTransform, shadowFrame).asVarying();
task.addJob<DebugDeferredBuffer>("DebugDeferredBuffer", debugFramebuffers);
const auto debugFramebuffers = DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, deferredFrameTransform, shadowFrame, antialiasingIntensityTexture).asVarying();
task.addJob<DebugDeferredBuffer>("DebugDeferredBuffer", debugFramebuffers, mainViewTransformSlot);
const auto debugSubsurfaceScatteringInputs = DebugSubsurfaceScattering::Inputs(deferredFrameTransform, deferredFramebuffer, lightingModel,
surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, scatteringResource).asVarying();
@ -391,8 +379,8 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input
// Scene Octree Debugging job
{
task.addJob<DrawSceneOctree>("DrawSceneOctree", spatialSelection);
task.addJob<DrawItemSelection>("DrawItemSelection", spatialSelection);
task.addJob<DrawSceneOctree>("DrawSceneOctree", spatialSelection, mainViewTransformSlot);
task.addJob<DrawItemSelection>("DrawItemSelection", spatialSelection, mainViewTransformSlot);
}
// Status icon rendering job
@@ -400,8 +388,7 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input
// Grab a texture map representing the different status icons and assign that to the drawStatusJob
auto iconMapPath = PathUtils::resourcesPath() + "icons/statusIconAtlas.svg";
auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath, image::TextureUsage::STRICT_TEXTURE);
const auto drawStatusInputs = DrawStatus::Input(opaques, jitter).asVarying();
task.addJob<DrawStatus>("DrawStatus", drawStatusInputs, DrawStatus(statusIconMap));
task.addJob<DrawStatus>("DrawStatus", opaques, DrawStatus(statusIconMap, mainViewTransformSlot));
}
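Note the wiring change for DrawStatus: the jitter vec2 used to arrive as a per-frame varying, whereas the transform slot is known when the task graph is built, so it moves into the job's constructor. A hedged sketch of the job-side storage this implies (member names are assumed, not taken from the real DrawStatus):

// Sketch: construction-time configuration instead of a per-frame varying.
class ExampleStatusJob {
public:
    ExampleStatusJob(const gpu::TexturePointer& iconMap, uint transformSlot)
        : _statusIconMap(iconMap), _transformSlot(transformSlot) {}
protected:
    gpu::TexturePointer _statusIconMap;  // icon atlas, bound when drawing
    uint _transformSlot;                 // fixed slot chosen at build time
};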
const auto debugZoneInputs = DebugZoneLighting::Inputs(deferredFrameTransform, lightFrame, backgroundFrame).asVarying();
@@ -462,7 +449,7 @@ void RenderTransparentDeferred::run(const RenderContextPointer& renderContext, c
const auto& lightingModel = inputs.get3();
const auto& lightClusters = inputs.get4();
// Not needed yet: const auto& shadowFrame = inputs.get5();
const auto jitter = inputs.get6();
const auto& deferredFrameTransform = inputs.get6();
auto deferredLightingEffect = DependencyManager::get<DeferredLightingEffect>();
RenderArgs* args = renderContext->args;
@@ -474,18 +461,13 @@ void RenderTransparentDeferred::run(const RenderContextPointer& renderContext, c
batch.setViewportTransform(args->_viewport);
batch.setStateScissorRect(args->_viewport);
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setProjectionJitter(jitter.x, jitter.y);
batch.setViewTransform(viewMat);
batch.setProjectionJitterEnabled(true);
batch.setSavedViewProjectionTransform(_transformSlot);
// Set up the lighting model for all items
batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());
batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, deferredFrameTransform->getFrameTransformBuffer());
// Set the light
deferredLightingEffect->setupKeyLightBatch(args, batch, *lightFrame);
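This hunk is the heart of the change: instead of every job evaluating the frustum and setting its own projection and view (plus explicit jitter values), the camera transform is saved once into a numbered slot and each job replays it, with jitter reduced to a per-batch enable flag and the frame transform bound as a uniform buffer. The save side is not shown in this diff; the sketch below assumes a matching saveViewProjectionTransform() call, so treat that name as an assumption:

// Sketch: producer side, run once per view early in the frame.
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);
batch.saveViewProjectionTransform(_transformSlot);  // assumed save-side API

// Consumer side, in every downstream job (confirmed by this diff):
batch.setProjectionJitterEnabled(true);
batch.setSavedViewProjectionTransform(_transformSlot);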
@@ -529,7 +511,7 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const
const auto& inItems = inputs.get0();
const auto& lightingModel = inputs.get1();
const auto jitter = inputs.get2();
const auto deferredFrameTransform = inputs.get2();
RenderArgs* args = renderContext->args;
@@ -540,18 +522,13 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const
batch.setViewportTransform(args->_viewport);
batch.setStateScissorRect(args->_viewport);
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setProjectionJitter(jitter.x, jitter.y);
batch.setViewTransform(viewMat);
batch.setProjectionJitterEnabled(true);
batch.setSavedViewProjectionTransform(_transformSlot);
// Set up the lighting model for all items
batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());
batch.setUniformBuffer(ru::Buffer::DeferredFrameTransform, deferredFrameTransform->getFrameTransformBuffer());
// From the lighting model, define a global shapeKey OR'ed with individual keys
ShapeKey::Builder keyBuilder;
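For reference, the comment above describes the usual shape-key pattern: build one key from lighting-model-wide toggles, then OR it with each item's own key when picking a pipeline. A hedged sketch (the accessor and builder method names are assumptions based on the surrounding code, not confirmed by this diff):

// Sketch: global key from the lighting model, ORed per item.
ShapeKey::Builder keyBuilder;
if (lightingModel->isWireframeEnabled()) {  // assumed accessor
    keyBuilder.withWireframe();             // assumed builder method
}
ShapeKey globalKey = keyBuilder.build();
// renderShapes() then combines globalKey with each item's own ShapeKey
// to select the matching pipeline from the shape plumber.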

View file

@@ -43,18 +43,19 @@ protected:
class RenderTransparentDeferred {
public:
using Inputs = render::VaryingSet7<render::ItemBounds, HazeStage::FramePointer, LightStage::FramePointer, LightingModelPointer, LightClustersPointer, LightStage::ShadowFramePointer, glm::vec2>;
using Inputs = render::VaryingSet7<render::ItemBounds, HazeStage::FramePointer, LightStage::FramePointer, LightingModelPointer, LightClustersPointer, LightStage::ShadowFramePointer, DeferredFrameTransformPointer>;
using Config = RenderTransparentDeferredConfig;
using JobModel = render::Job::ModelI<RenderTransparentDeferred, Inputs, Config>;
RenderTransparentDeferred(render::ShapePlumberPointer shapePlumber)
: _shapePlumber{ shapePlumber } {}
RenderTransparentDeferred(render::ShapePlumberPointer shapePlumber, uint transformSlot)
: _shapePlumber(shapePlumber), _transformSlot(transformSlot) {}
void configure(const Config& config) { _maxDrawn = config.maxDrawn; }
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs);
protected:
render::ShapePlumberPointer _shapePlumber;
uint _transformSlot;
int _maxDrawn; // initialized by Config
};
@@ -83,13 +84,13 @@ protected:
class DrawStateSortDeferred {
public:
using Inputs = render::VaryingSet3<render::ItemBounds, LightingModelPointer, glm::vec2>;
using Inputs = render::VaryingSet3<render::ItemBounds, LightingModelPointer, DeferredFrameTransformPointer>;
using Config = DrawStateSortConfig;
using JobModel = render::Job::ModelI<DrawStateSortDeferred, Inputs, Config>;
DrawStateSortDeferred(render::ShapePlumberPointer shapePlumber)
: _shapePlumber{ shapePlumber } {
DrawStateSortDeferred(render::ShapePlumberPointer shapePlumber, uint transformSlot)
: _shapePlumber(shapePlumber), _transformSlot(transformSlot) {
}
void configure(const Config& config) {
@@ -100,6 +101,7 @@ public:
protected:
render::ShapePlumberPointer _shapePlumber;
uint _transformSlot;
int _maxDrawn; // initialized by Config
bool _stateSort;
};
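Both class declarations follow the same recipe: the constructor gains a uint transformSlot, stored alongside the shape plumber. At task-build time that looks roughly like the sketch below (the input and slot variable names are assumed; the addJob pattern matches the calls earlier in this diff):

// Sketch: wiring the jobs up with a transform slot at build time.
const auto transparentInputs = RenderTransparentDeferred::Inputs(
    transparents, hazeFrame, lightFrame, lightingModel,
    lightClusters, shadowFrame, deferredFrameTransform).asVarying();
task.addJob<RenderTransparentDeferred>("DrawTransparentDeferred",
    transparentInputs, shapePlumber, mainViewTransformSlot);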
@@ -141,12 +143,8 @@ public:
using Config = RenderDeferredTaskConfig;
using JobModel = render::Task::ModelI<RenderDeferredTask, Input, Config>;
RenderDeferredTask();
void configure(const Config& config);
void build(JobModel& task, const render::Varying& input, render::Varying& output);
private:
void build(JobModel& task, const render::Varying& input, render::Varying& output, uint8_t transformOffset);
};
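The public build() keeping its old signature while a private overload takes a uint8_t transformOffset suggests simple delegation; a sketch under that assumption (the MAIN_VIEW constant is inferred from TransformOffset::SECONDARY_VIEW seen elsewhere in this commit, so treat it as an assumption):

// Sketch: the public entry point forwards a fixed offset for this view.
void RenderDeferredTask::build(JobModel& task, const render::Varying& input,
                               render::Varying& output) {
    build(task, input, output,
          static_cast<uint8_t>(RenderViewTask::TransformOffset::MAIN_VIEW));  // assumed constant
}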

Some files were not shown because too many files have changed in this diff.