Mirror of https://github.com/JulianGro/overte.git (synced 2025-04-18 06:57:19 +02:00)
Commit 5be2f0acc7: 112 changed files with 2608 additions and 310 deletions
.gitignore
interface/src
libraries
display-plugins/src/display-plugins
entities-renderer/src
gpu-gl/src/gpu/gl
GLBackend.cpp, GLBackend.h, GLBackendOutput.cpp, GLBackendPipeline.cpp, GLBackendTransform.cpp, GLTexelFormat.cpp
gpu-gles/src/gpu/gl
gpu/src/gpu
plugins/src/plugins
render-utils/src
AmbientOcclusionEffect.cpp, AntialiasingEffect.cpp, AntialiasingEffect.h, BackgroundStage.cpp, BloomEffect.cpp, DebugDeferredBuffer.cpp, DebugDeferredBuffer.h, DeferredFrameTransform.cpp, DeferredFrameTransform.h, DeferredLightingEffect.cpp, DeferredTransform.slh, DrawHaze.cpp, HighlightEffect.cpp, RenderCommonTask.cpp, RenderDeferredTask.cpp, RenderForwardTask.cpp, RenderShadowTask.cpp, StencilMaskPass.cpp, SubsurfaceScattering.cpp, SurfaceGeometryPass.cpp, ToneMappingEffect.cpp, VelocityBufferPass.cpp, VelocityBufferPass.h, ZoneRenderer.cpp, animdebugdraw.slv, debug_deferred_buffer.slf, fxaa.slf, fxaa_blend.slf, model.slv, model_fade.slv, model_lightmap.slv, model_lightmap_fade.slv, model_lightmap_normal_map.slv, model_lightmap_normal_map_fade.slv, model_normal_map.slv, model_normal_map_fade.slv, model_translucent.slv, model_translucent_normal_map.slv, overlay3D.slv, simple.slv, simple_fade.slv, simple_opaque_web_browser.slf, simple_textured.slf, simple_textured_fade.slf, simple_textured_unlit.slf, simple_textured_unlit_fade.slf, simple_transparent_textured.slf, simple_transparent_textured_fade.slf, simple_transparent_textured_unlit.slf, simple_transparent_textured_unlit_fade.slf, simple_transparent_web_browser.slf, skin_model.slv, skin_model_dq.slv, skin_model_fade.slv, skin_model_fade_dq.slv, skin_model_normal_map.slv, skin_model_normal_map_dq.slv, skin_model_normal_map_fade.slv, skin_model_normal_map_fade_dq.slv, standardTransformPNTC.slv, taa.slf, taa.slh, taa_blend.slf, velocityBuffer_cameraMotion.slf
render/src/render
.gitignore (vendored): 4 lines changed
@@ -78,6 +78,8 @@ TAGS
 node_modules
 npm-debug.log

 # ignore qmlc files generated from qml as cache
 *.qmlc
 # Android studio files
 *___jb_old___

@@ -88,4 +90,4 @@ android/app/src/main/assets
 interface/compiledResources

 # GPUCache
 interface/resources/GPUCache/*
 interface/resources/GPUCache/*
@@ -5429,7 +5429,7 @@ void Application::update(float deltaTime) {
     editRenderArgs([this, deltaTime](AppRenderArgs& appRenderArgs) {
         PerformanceTimer perfTimer("editRenderArgs");
-        appRenderArgs._headPose= getHMDSensorPose();
+        appRenderArgs._headPose = getHMDSensorPose();

         auto myAvatar = getMyAvatar();

@@ -5449,10 +5449,10 @@ void Application::update(float deltaTime) {
     {
         QMutexLocker viewLocker(&_viewMutex);
         // adjust near clip plane to account for sensor scaling.
-        auto adjustedProjection = glm::perspective(_viewFrustum.getFieldOfView(),
-                                                   _viewFrustum.getAspectRatio(),
-                                                   DEFAULT_NEAR_CLIP * sensorToWorldScale,
-                                                   _viewFrustum.getFarClip());
+        auto adjustedProjection = glm::perspective(glm::radians(_fieldOfView.get()),
+                                                   getActiveDisplayPlugin()->getRecommendedAspectRatio(),
+                                                   DEFAULT_NEAR_CLIP * sensorToWorldScale,
+                                                   DEFAULT_FAR_CLIP);
         _viewFrustum.setProjection(adjustedProjection);
         _viewFrustum.calculate();
     }

@@ -5534,6 +5534,7 @@ void Application::update(float deltaTime) {
     {
         QMutexLocker viewLocker(&_viewMutex);
         _myCamera.loadViewFrustum(_displayViewFrustum);
+        appRenderArgs._view = glm::inverse(_displayViewFrustum.getView());
     }

     {
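For reference, a minimal standalone sketch of the projection math in the hunk above: glm::perspective expects the vertical field of view in radians, which is why the degree-valued setting goes through glm::radians, and only the near plane is scaled by the sensor-to-world scale. The constants and names below are illustrative, not taken from the commit.

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Illustrative defaults; the real values come from application settings.
const float DEFAULT_NEAR_CLIP = 0.08f;
const float DEFAULT_FAR_CLIP = 16384.0f;

glm::mat4 buildAdjustedProjection(float fieldOfViewDegrees, float aspectRatio, float sensorToWorldScale) {
    // Scaling the near clip keeps close geometry from being clipped when the
    // avatar (and therefore sensor space) is scaled up or down.
    return glm::perspective(glm::radians(fieldOfViewDegrees),
                            aspectRatio,
                            DEFAULT_NEAR_CLIP * sensorToWorldScale,
                            DEFAULT_FAR_CLIP);
}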
@@ -619,6 +619,7 @@ private:
     struct AppRenderArgs {
         render::Args _renderArgs;
         glm::mat4 _eyeToWorld;
+        glm::mat4 _view;
         glm::mat4 _eyeOffsets[2];
         glm::mat4 _eyeProjections[2];
         glm::mat4 _headPose;
@@ -90,10 +90,10 @@ void Application::paintGL() {

     {
         PROFILE_RANGE(render, "/gpuContextReset");
-        _gpuContext->beginFrame(HMDSensorPose);
+        _gpuContext->beginFrame(_appRenderArgs._view, HMDSensorPose);
         // Reset the gpu::Context Stages
         // Back to the default framebuffer;
-        gpu::doInBatch(_gpuContext, [&](gpu::Batch& batch) {
+        gpu::doInBatch("Application_render::gpuContextReset", _gpuContext, [&](gpu::Batch& batch) {
             batch.resetStages();
         });
     }
@@ -216,7 +216,7 @@ void Application::runRenderFrame(RenderArgs* renderArgs) {

     // Make sure the WorldBox is in the scene
     // For the record, this one RenderItem is the first one we created and added to the scene.
-    // We could meoee that code elsewhere but you know...
+    // We could move that code elsewhere but you know...
     if (!render::Item::isValidID(WorldBoxRenderData::_item)) {
         auto worldBoxRenderData = std::make_shared<WorldBoxRenderData>();
         auto worldBoxRenderPayload = std::make_shared<WorldBoxRenderData::Payload>(worldBoxRenderData);
@ -107,7 +107,7 @@ public:
|
|||
args->_displayMode = RenderArgs::MONO;
|
||||
args->_renderMode = RenderArgs::RenderMode::SECONDARY_CAMERA_RENDER_MODE;
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("SecondaryCameraJob::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.disableContextStereo();
|
||||
batch.disableContextViewCorrection();
|
||||
});
|
||||
|
@ -196,7 +196,7 @@ public:
|
|||
args->_displayMode = cachedArgs->_displayMode;
|
||||
args->_renderMode = cachedArgs->_renderMode;
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("EndSecondaryCameraFrame::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.restoreContextStereo();
|
||||
batch.restoreContextViewCorrection();
|
||||
});
|
||||
|
|
|
@ -64,7 +64,7 @@ void ApplicationOverlay::renderOverlay(RenderArgs* renderArgs) {
|
|||
}
|
||||
|
||||
// Execute the batch into our framebuffer
|
||||
doInBatch(renderArgs->_context, [&](gpu::Batch& batch) {
|
||||
doInBatch("ApplicationOverlay::render", renderArgs->_context, [&](gpu::Batch& batch) {
|
||||
PROFILE_RANGE_BATCH(batch, "ApplicationOverlayRender");
|
||||
renderArgs->_batch = &batch;
|
||||
batch.enableStereo(false);
|
||||
|
|
|
@ -361,7 +361,7 @@ void OpenGLDisplayPlugin::customizeContext() {
|
|||
auto presentThread = DependencyManager::get<PresentThread>();
|
||||
Q_ASSERT(thread() == presentThread->thread());
|
||||
|
||||
getGLBackend()->setCameraCorrection(mat4());
|
||||
getGLBackend()->setCameraCorrection(mat4(), mat4(), true);
|
||||
|
||||
for (auto& cursorValue : _cursorsData) {
|
||||
auto& cursorData = cursorValue.second;
|
||||
|
@ -692,6 +692,9 @@ void OpenGLDisplayPlugin::present() {
|
|||
incrementPresentCount();
|
||||
|
||||
if (_currentFrame) {
|
||||
auto correction = getViewCorrection();
|
||||
getGLBackend()->setCameraCorrection(correction, _prevRenderView);
|
||||
_prevRenderView = correction * _currentFrame->view;
|
||||
{
|
||||
withPresentThreadLock([&] {
|
||||
_renderRate.increment();
|
||||
|
|
|
@ -118,6 +118,7 @@ protected:
|
|||
void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor, gpu::FramebufferPointer fbo);
|
||||
void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer texture, glm::ivec4 viewport, const glm::ivec4 scissor);
|
||||
virtual void updateFrameData();
|
||||
virtual glm::mat4 getViewCorrection() { return glm::mat4(); }
|
||||
|
||||
void withOtherThreadContext(std::function<void()> f) const;
|
||||
|
||||
|
@ -137,6 +138,7 @@ protected:
|
|||
|
||||
gpu::FramePointer _currentFrame;
|
||||
gpu::Frame* _lastFrame { nullptr };
|
||||
mat4 _prevRenderView;
|
||||
gpu::FramebufferPointer _compositeFramebuffer;
|
||||
gpu::PipelinePointer _hudPipeline;
|
||||
gpu::PipelinePointer _mirrorHUDPipeline;
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
//
|
||||
#include "DebugHmdDisplayPlugin.h"
|
||||
|
||||
#include <ui-plugins/PluginContainer.h>
|
||||
|
||||
#include <QtCore/QProcessEnvironment>
|
||||
|
||||
#include <ViewFrustum.h>
|
||||
|
@ -41,7 +43,15 @@ bool DebugHmdDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
|
|||
}
|
||||
|
||||
bool DebugHmdDisplayPlugin::internalActivate() {
|
||||
_isAutoRotateEnabled = _container->getBoolSetting("autoRotate", true);
|
||||
_container->addMenuItem(PluginType::DISPLAY_PLUGIN, MENU_PATH(), tr("Auto Rotate"),
|
||||
[this](bool clicked) {
|
||||
_isAutoRotateEnabled = clicked;
|
||||
_container->setBoolSetting("autoRotate", _isAutoRotateEnabled);
|
||||
}, true, _isAutoRotateEnabled);
|
||||
|
||||
_ipd = 0.0327499993f * 2.0f;
|
||||
// Would be nice to know why the left and right projection matrices are slightly dissymetrical
|
||||
_eyeProjections[0][0] = vec4{ 0.759056330, 0.000000000, 0.000000000, 0.000000000 };
|
||||
_eyeProjections[0][1] = vec4{ 0.000000000, 0.682773232, 0.000000000, 0.000000000 };
|
||||
_eyeProjections[0][2] = vec4{ -0.0580431037, -0.00619550655, -1.00000489, -1.00000000 };
|
||||
|
@ -50,10 +60,15 @@ bool DebugHmdDisplayPlugin::internalActivate() {
|
|||
_eyeProjections[1][1] = vec4{ 0.000000000, 0.678060353, 0.000000000, 0.000000000 };
|
||||
_eyeProjections[1][2] = vec4{ 0.0578232110, -0.00669418881, -1.00000489, -1.000000000 };
|
||||
_eyeProjections[1][3] = vec4{ 0.000000000, 0.000000000, -0.0800003856, 0.000000000 };
|
||||
_eyeInverseProjections[0] = glm::inverse(_eyeProjections[0]);
|
||||
_eyeInverseProjections[1] = glm::inverse(_eyeProjections[1]);
|
||||
// No need to do so here as this will done in Parent::internalActivate
|
||||
//_eyeInverseProjections[0] = glm::inverse(_eyeProjections[0]);
|
||||
//_eyeInverseProjections[1] = glm::inverse(_eyeProjections[1]);
|
||||
_eyeOffsets[0][3] = vec4{ -0.0327499993, 0.0, 0.0149999997, 1.0 };
|
||||
_eyeOffsets[1][3] = vec4{ 0.0327499993, 0.0, 0.0149999997, 1.0 };
|
||||
_eyeInverseProjections[0] = glm::inverse(_eyeProjections[0]);
|
||||
_eyeInverseProjections[1] = glm::inverse(_eyeProjections[1]);
|
||||
_eyeOffsets[0][3] = vec4{ -0.0327499993, 0.0, -0.0149999997, 1.0 };
|
||||
_eyeOffsets[1][3] = vec4{ 0.0327499993, 0.0, -0.0149999997, 1.0 };
|
||||
_renderTargetSize = { 3024, 1680 };
|
||||
_cullingProjection = _eyeProjections[0];
|
||||
// This must come after the initialization, so that the values calculated
|
||||
|
@ -63,10 +78,13 @@ bool DebugHmdDisplayPlugin::internalActivate() {
|
|||
}
|
||||
|
||||
void DebugHmdDisplayPlugin::updatePresentPose() {
|
||||
float yaw = sinf(secTimestampNow()) * 0.25f;
|
||||
float pitch = cosf(secTimestampNow()) * 0.25f;
|
||||
// Simulates head pose latency correction
|
||||
_currentPresentFrameInfo.presentPose =
|
||||
glm::mat4_cast(glm::angleAxis(yaw, Vectors::UP)) *
|
||||
glm::mat4_cast(glm::angleAxis(pitch, Vectors::RIGHT));
|
||||
Parent::updatePresentPose();
|
||||
if (_isAutoRotateEnabled) {
|
||||
float yaw = sinf(secTimestampNow()) * 0.25f;
|
||||
float pitch = cosf(secTimestampNow()) * 0.25f;
|
||||
// Simulates head pose latency correction
|
||||
_currentPresentFrameInfo.presentPose =
|
||||
glm::mat4_cast(glm::angleAxis(yaw, Vectors::UP)) *
|
||||
glm::mat4_cast(glm::angleAxis(pitch, Vectors::RIGHT)) ;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -28,5 +28,7 @@ protected:
|
|||
bool isHmdMounted() const override { return true; }
|
||||
bool internalActivate() override;
|
||||
private:
|
||||
|
||||
static const QString NAME;
|
||||
bool _isAutoRotateEnabled{ true };
|
||||
};
|
||||
|
|
|
@ -58,6 +58,18 @@ QRect HmdDisplayPlugin::getRecommendedHUDRect() const {
|
|||
return CompositorHelper::VIRTUAL_SCREEN_RECOMMENDED_OVERLAY_RECT;
|
||||
}
|
||||
|
||||
glm::mat4 HmdDisplayPlugin::getEyeToHeadTransform(Eye eye) const {
|
||||
return _eyeOffsets[eye];
|
||||
}
|
||||
|
||||
glm::mat4 HmdDisplayPlugin::getEyeProjection(Eye eye, const glm::mat4& baseProjection) const {
|
||||
return _eyeProjections[eye];
|
||||
}
|
||||
|
||||
glm::mat4 HmdDisplayPlugin::getCullingProjection(const glm::mat4& baseProjection) const {
|
||||
return _cullingProjection;
|
||||
}
|
||||
|
||||
#define DISABLE_PREVIEW_MENU_ITEM_DELAY_MS 500
|
||||
|
||||
bool HmdDisplayPlugin::internalActivate() {
|
||||
|
@ -324,12 +336,14 @@ void HmdDisplayPlugin::updateFrameData() {
|
|||
}
|
||||
|
||||
updatePresentPose();
|
||||
}
|
||||
|
||||
glm::mat4 HmdDisplayPlugin::getViewCorrection() {
|
||||
if (_currentFrame) {
|
||||
auto batchPose = _currentFrame->pose;
|
||||
auto currentPose = _currentPresentFrameInfo.presentPose;
|
||||
auto correction = glm::inverse(batchPose) * currentPose;
|
||||
getGLBackend()->setCameraCorrection(correction);
|
||||
return glm::inverse(_currentPresentFrameInfo.presentPose) * batchPose;
|
||||
} else {
|
||||
return glm::mat4();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -26,9 +26,9 @@ public:
|
|||
~HmdDisplayPlugin();
|
||||
bool isHmd() const override final { return true; }
|
||||
float getIPD() const override final { return _ipd; }
|
||||
glm::mat4 getEyeToHeadTransform(Eye eye) const override final { return _eyeOffsets[eye]; }
|
||||
glm::mat4 getEyeProjection(Eye eye, const glm::mat4& baseProjection) const override { return _eyeProjections[eye]; }
|
||||
glm::mat4 getCullingProjection(const glm::mat4& baseProjection) const override { return _cullingProjection; }
|
||||
glm::mat4 getEyeToHeadTransform(Eye eye) const override final;
|
||||
glm::mat4 getEyeProjection(Eye eye, const glm::mat4& baseProjection) const override;
|
||||
glm::mat4 getCullingProjection(const glm::mat4& baseProjection) const override;
|
||||
glm::uvec2 getRecommendedUiSize() const override final;
|
||||
glm::uvec2 getRecommendedRenderSize() const override final { return _renderTargetSize; }
|
||||
bool isDisplayVisible() const override { return isHmdMounted(); }
|
||||
|
@ -59,6 +59,7 @@ protected:
|
|||
void customizeContext() override;
|
||||
void uncustomizeContext() override;
|
||||
void updateFrameData() override;
|
||||
glm::mat4 getViewCorrection() override;
|
||||
|
||||
std::array<mat4, 2> _eyeOffsets;
|
||||
std::array<mat4, 2> _eyeProjections;
|
||||
|
|
|
@ -101,3 +101,4 @@ void StereoDisplayPlugin::internalDeactivate() {
|
|||
float StereoDisplayPlugin::getRecommendedAspectRatio() const {
|
||||
return aspect(Parent::getRecommendedRenderSize());
|
||||
}
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ public:
|
|||
// the IPD at the Application level, the way we now allow with HMDs.
|
||||
// If that becomes an issue then we'll need to break up the functionality similar
|
||||
// to the HMD plugins.
|
||||
// virtual glm::mat4 getEyeToHeadTransform(Eye eye) const override;
|
||||
//virtual glm::mat4 getEyeToHeadTransform(Eye eye) const override;
|
||||
|
||||
protected:
|
||||
virtual bool internalActivate() override;
|
||||
|
|
|
@@ -30,7 +30,7 @@ void main(void) {
     varTexcoord = inTexCoord0.st;

     // pass along the diffuse color
-    varColor = colorToLinearRGBA(inColor);
+    varColor = color_sRGBAToLinear(inColor);

     // standard transform

@@ -31,7 +31,7 @@ void main(void) {
     varTexcoord = inTexCoord0.st;

     // pass along the diffuse color
-    varColor = colorToLinearRGBA(inColor);
+    varColor = color_sRGBAToLinear(inColor);

     // standard transform
@ -99,12 +99,16 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
|
|||
(&::gpu::gl::GLBackend::do_setUniformBuffer),
|
||||
(&::gpu::gl::GLBackend::do_setResourceBuffer),
|
||||
(&::gpu::gl::GLBackend::do_setResourceTexture),
|
||||
(&::gpu::gl::GLBackend::do_setResourceFramebufferSwapChainTexture),
|
||||
|
||||
(&::gpu::gl::GLBackend::do_setFramebuffer),
|
||||
(&::gpu::gl::GLBackend::do_setFramebufferSwapChain),
|
||||
(&::gpu::gl::GLBackend::do_clearFramebuffer),
|
||||
(&::gpu::gl::GLBackend::do_blit),
|
||||
(&::gpu::gl::GLBackend::do_generateTextureMips),
|
||||
|
||||
(&::gpu::gl::GLBackend::do_advance),
|
||||
|
||||
(&::gpu::gl::GLBackend::do_beginQuery),
|
||||
(&::gpu::gl::GLBackend::do_endQuery),
|
||||
(&::gpu::gl::GLBackend::do_getQuery),
|
||||
|
@@ -756,9 +760,13 @@ void GLBackend::recycle() const {
     Texture::KtxStorage::releaseOpenKtxFiles();
 }

-void GLBackend::setCameraCorrection(const Mat4& correction) {
+void GLBackend::setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset) {
+    auto invCorrection = glm::inverse(correction);
+    auto invPrevView = glm::inverse(prevRenderView);
+    _transform._correction.prevView = (reset ? Mat4() : prevRenderView);
+    _transform._correction.prevViewInverse = (reset ? Mat4() : invPrevView);
     _transform._correction.correction = correction;
-    _transform._correction.correctionInverse = glm::inverse(correction);
+    _transform._correction.correctionInverse = invCorrection;
     _pipeline._cameraCorrectionBuffer._buffer->setSubData(0, _transform._correction);
     _pipeline._cameraCorrectionBuffer._buffer->flush();
 }
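A condensed sketch of how a caller is expected to drive the new overload, mirroring the OpenGLDisplayPlugin::present() change earlier in this commit: the previous frame's corrected view is remembered so the shader side can reproject the current frame against it (for example for TAA velocity). The wrapper function and parameter names below are illustrative, not part of the commit.

// Illustrative sketch only; the real call site lives in the display plugin's present path.
void presentFrame(gpu::gl::GLBackend& backend,
                  const glm::mat4& correction,
                  const glm::mat4& frameView,
                  glm::mat4& prevRenderView) {
    backend.setCameraCorrection(correction, prevRenderView); // reset defaults to false
    prevRenderView = correction * frameView;                 // becomes "previous" for the next frame
}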
@ -68,7 +68,7 @@ public:
|
|||
|
||||
virtual ~GLBackend();
|
||||
|
||||
void setCameraCorrection(const Mat4& correction);
|
||||
void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset = false);
|
||||
void render(const Batch& batch) final override;
|
||||
|
||||
// This call synchronize the Full Backend cache with the current GLState
|
||||
|
@ -126,15 +126,19 @@ public:
|
|||
// Resource Stage
|
||||
virtual void do_setResourceBuffer(const Batch& batch, size_t paramOffset) final;
|
||||
virtual void do_setResourceTexture(const Batch& batch, size_t paramOffset) final;
|
||||
virtual void do_setResourceFramebufferSwapChainTexture(const Batch& batch, size_t paramOffset) final;
|
||||
|
||||
// Pipeline Stage
|
||||
virtual void do_setPipeline(const Batch& batch, size_t paramOffset) final;
|
||||
|
||||
// Output stage
|
||||
virtual void do_setFramebuffer(const Batch& batch, size_t paramOffset) final;
|
||||
virtual void do_setFramebufferSwapChain(const Batch& batch, size_t paramOffset) final;
|
||||
virtual void do_clearFramebuffer(const Batch& batch, size_t paramOffset) final;
|
||||
virtual void do_blit(const Batch& batch, size_t paramOffset) = 0;
|
||||
|
||||
virtual void do_advance(const Batch& batch, size_t paramOffset) final;
|
||||
|
||||
// Query section
|
||||
virtual void do_beginQuery(const Batch& batch, size_t paramOffset) final;
|
||||
virtual void do_endQuery(const Batch& batch, size_t paramOffset) final;
|
||||
|
@ -245,6 +249,8 @@ protected:
|
|||
void setupStereoSide(int side);
|
||||
#endif
|
||||
|
||||
virtual void setResourceTexture(unsigned int slot, const TexturePointer& resourceTexture);
|
||||
virtual void setFramebuffer(const FramebufferPointer& framebuffer);
|
||||
virtual void initInput() final;
|
||||
virtual void killInput() final;
|
||||
virtual void syncInputStateCache() final;
|
||||
|
@@ -303,9 +309,12 @@ protected:
     // Allows for correction of the camera pose to account for changes
     // between the time when a was recorded and the time(s) when it is
     // executed
+    // Prev is the previous correction used at previous frame
     struct CameraCorrection {
-        Mat4 correction;
-        Mat4 correctionInverse;
+        mat4 correction;
+        mat4 correctionInverse;
+        mat4 prevView;
+        mat4 prevViewInverse;
     };

     struct TransformStageState {
@ -37,6 +37,19 @@ void GLBackend::resetOutputStage() {
|
|||
|
||||
void GLBackend::do_setFramebuffer(const Batch& batch, size_t paramOffset) {
|
||||
auto framebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint);
|
||||
setFramebuffer(framebuffer);
|
||||
}
|
||||
|
||||
void GLBackend::do_setFramebufferSwapChain(const Batch& batch, size_t paramOffset) {
|
||||
auto swapChain = batch._swapChains.get(batch._params[paramOffset]._uint);
|
||||
if (swapChain) {
|
||||
auto index = batch._params[paramOffset + 1]._uint;
|
||||
FramebufferPointer framebuffer = static_cast<const FramebufferSwapChain*>(swapChain.get())->get(index);
|
||||
setFramebuffer(framebuffer);
|
||||
}
|
||||
}
|
||||
|
||||
void GLBackend::setFramebuffer(const FramebufferPointer& framebuffer) {
|
||||
if (_output._framebuffer != framebuffer) {
|
||||
auto newFBO = getFramebufferID(framebuffer);
|
||||
if (_output._drawFBO != newFBO) {
|
||||
|
@ -47,6 +60,13 @@ void GLBackend::do_setFramebuffer(const Batch& batch, size_t paramOffset) {
|
|||
}
|
||||
}
|
||||
|
||||
void GLBackend::do_advance(const Batch& batch, size_t paramOffset) {
|
||||
auto ringbuffer = batch._swapChains.get(batch._params[paramOffset]._uint);
|
||||
if (ringbuffer) {
|
||||
ringbuffer->advance();
|
||||
}
|
||||
}
|
||||
|
||||
void GLBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
|
||||
if (_stereo.isStereo() && !_pipeline._stateCache.scissorEnable) {
|
||||
qWarning("Clear without scissor in stereo mode");
|
||||
|
|
|
@ -253,6 +253,31 @@ void GLBackend::do_setResourceTexture(const Batch& batch, size_t paramOffset) {
|
|||
releaseResourceTexture(slot);
|
||||
return;
|
||||
}
|
||||
setResourceTexture(slot, resourceTexture);
|
||||
}
|
||||
|
||||
void GLBackend::do_setResourceFramebufferSwapChainTexture(const Batch& batch, size_t paramOffset) {
|
||||
GLuint slot = batch._params[paramOffset + 1]._uint;
|
||||
if (slot >= (GLuint)MAX_NUM_RESOURCE_TEXTURES) {
|
||||
qCDebug(gpugllogging) << "GLBackend::do_setResourceFramebufferSwapChainTexture: Trying to set a resource Texture at slot #" << slot << " which doesn't exist. MaxNumResourceTextures = " << getMaxNumResourceTextures();
|
||||
return;
|
||||
}
|
||||
|
||||
SwapChainPointer swapChain = batch._swapChains.get(batch._params[paramOffset + 0]._uint);
|
||||
|
||||
if (!swapChain) {
|
||||
releaseResourceTexture(slot);
|
||||
return;
|
||||
}
|
||||
auto index = batch._params[paramOffset + 2]._uint;
|
||||
auto renderBufferSlot = batch._params[paramOffset + 3]._uint;
|
||||
FramebufferPointer resourceFramebuffer = static_cast<const FramebufferSwapChain*>(swapChain.get())->get(index);
|
||||
TexturePointer resourceTexture = resourceFramebuffer->getRenderBuffer(renderBufferSlot);
|
||||
|
||||
setResourceTexture(slot, resourceTexture);
|
||||
}
|
||||
|
||||
void GLBackend::setResourceTexture(unsigned int slot, const TexturePointer& resourceTexture) {
|
||||
// check cache before thinking
|
||||
if (_resource._textures[slot] == resourceTexture) {
|
||||
return;
|
||||
|
@ -269,11 +294,11 @@ void GLBackend::do_setResourceTexture(const Batch& batch, size_t paramOffset) {
|
|||
glActiveTexture(GL_TEXTURE0 + slot);
|
||||
glBindTexture(target, to);
|
||||
|
||||
(void) CHECK_GL_ERROR();
|
||||
(void)CHECK_GL_ERROR();
|
||||
|
||||
_resource._textures[slot] = resourceTexture;
|
||||
|
||||
_stats._RSAmountTextureMemoryBounded += (int) object->size();
|
||||
_stats._RSAmountTextureMemoryBounded += (int)object->size();
|
||||
|
||||
} else {
|
||||
releaseResourceTexture(slot);
|
||||
|
|
|
@ -105,7 +105,7 @@ void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const Stereo
|
|||
if (_viewIsCamera && (_viewCorrectionEnabled && _correction.correction != glm::mat4())) {
|
||||
// FIXME should I switch to using the camera correction buffer in Transform.slf and leave this out?
|
||||
Transform result;
|
||||
_view.mult(result, _view, _correction.correction);
|
||||
_view.mult(result, _view, _correction.correctionInverse);
|
||||
if (_skybox) {
|
||||
result.setTranslation(vec3());
|
||||
}
|
||||
|
|
|
@ -146,7 +146,51 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
|||
case gpu::RGB:
|
||||
case gpu::RGBA:
|
||||
case gpu::XY:
|
||||
result = GL_RG8;
|
||||
switch (dstFormat.getType()) {
|
||||
case gpu::UINT32:
|
||||
result = GL_RG32UI;
|
||||
break;
|
||||
case gpu::INT32:
|
||||
result = GL_RG32I;
|
||||
break;
|
||||
case gpu::FLOAT:
|
||||
result = GL_RG32F;
|
||||
break;
|
||||
case gpu::UINT16:
|
||||
result = GL_RG16UI;
|
||||
break;
|
||||
case gpu::INT16:
|
||||
result = GL_RG16I;
|
||||
break;
|
||||
case gpu::NUINT16:
|
||||
result = GL_RG16;
|
||||
break;
|
||||
case gpu::NINT16:
|
||||
result = GL_RG16_SNORM;
|
||||
break;
|
||||
case gpu::HALF:
|
||||
result = GL_RG16F;
|
||||
break;
|
||||
case gpu::UINT8:
|
||||
result = GL_RG8UI;
|
||||
break;
|
||||
case gpu::INT8:
|
||||
result = GL_RG8I;
|
||||
break;
|
||||
case gpu::NUINT8:
|
||||
result = GL_RG8;
|
||||
break;
|
||||
case gpu::NINT8:
|
||||
result = GL_RG8_SNORM;
|
||||
break;
|
||||
case gpu::NUINT32:
|
||||
case gpu::NINT32:
|
||||
case gpu::NUINT2:
|
||||
case gpu::NINT2_10_10_10:
|
||||
case gpu::COMPRESSED:
|
||||
case gpu::NUM_TYPES: // quiet compiler
|
||||
Q_UNREACHABLE();
|
||||
}
|
||||
break;
|
||||
default:
|
||||
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
||||
|
@ -581,7 +625,52 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
|
|||
case gpu::RGB:
|
||||
case gpu::RGBA:
|
||||
case gpu::XY:
|
||||
texel.internalFormat = GL_RG8;
|
||||
switch (dstFormat.getType()) {
|
||||
case gpu::UINT32:
|
||||
texel.internalFormat = GL_RG32UI;
|
||||
break;
|
||||
case gpu::INT32:
|
||||
texel.internalFormat = GL_RG32I;
|
||||
break;
|
||||
case gpu::FLOAT:
|
||||
texel.internalFormat = GL_RG32F;
|
||||
break;
|
||||
case gpu::UINT16:
|
||||
texel.internalFormat = GL_RG16UI;
|
||||
break;
|
||||
case gpu::INT16:
|
||||
texel.internalFormat = GL_RG16I;
|
||||
break;
|
||||
case gpu::NUINT16:
|
||||
texel.internalFormat = GL_RG16;
|
||||
break;
|
||||
case gpu::NINT16:
|
||||
texel.internalFormat = GL_RG16_SNORM;
|
||||
break;
|
||||
case gpu::HALF:
|
||||
texel.type = GL_FLOAT;
|
||||
texel.internalFormat = GL_RG16F;
|
||||
break;
|
||||
case gpu::UINT8:
|
||||
texel.internalFormat = GL_RG8UI;
|
||||
break;
|
||||
case gpu::INT8:
|
||||
texel.internalFormat = GL_RG8I;
|
||||
break;
|
||||
case gpu::NUINT8:
|
||||
texel.internalFormat = GL_RG8;
|
||||
break;
|
||||
case gpu::NINT8:
|
||||
texel.internalFormat = GL_RG8_SNORM;
|
||||
break;
|
||||
case gpu::NUINT32:
|
||||
case gpu::NINT32:
|
||||
case gpu::NUINT2:
|
||||
case gpu::NINT2_10_10_10:
|
||||
case gpu::COMPRESSED:
|
||||
case gpu::NUM_TYPES: // quiet compiler
|
||||
Q_UNREACHABLE();
|
||||
}
|
||||
break;
|
||||
default:
|
||||
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
||||
|
|
|
@ -749,9 +749,13 @@ void GLBackend::recycle() const {
|
|||
Texture::KtxStorage::releaseOpenKtxFiles();
|
||||
}
|
||||
|
||||
void GLBackend::setCameraCorrection(const Mat4& correction) {
|
||||
void GLBackend::setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset) {
|
||||
auto invCorrection = glm::inverse(correction);
|
||||
auto invPrevView = glm::inverse(prevRenderView);
|
||||
_transform._correction.prevView = (reset ? Mat4() : prevRenderView);
|
||||
_transform._correction.prevViewInverse = (reset ? Mat4() : invPrevView);
|
||||
_transform._correction.correction = correction;
|
||||
_transform._correction.correctionInverse = glm::inverse(correction);
|
||||
_transform._correction.correctionInverse = invCorrection;
|
||||
_pipeline._cameraCorrectionBuffer._buffer->setSubData(0, _transform._correction);
|
||||
_pipeline._cameraCorrectionBuffer._buffer->flush();
|
||||
}
|
||||
|
|
|
@ -65,7 +65,7 @@ public:
|
|||
|
||||
virtual ~GLBackend();
|
||||
|
||||
void setCameraCorrection(const Mat4& correction);
|
||||
void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset = false);
|
||||
void render(const Batch& batch) final override;
|
||||
|
||||
// This call synchronize the Full Backend cache with the current GLState
|
||||
|
@ -303,6 +303,8 @@ protected:
|
|||
struct CameraCorrection {
|
||||
Mat4 correction;
|
||||
Mat4 correctionInverse;
|
||||
Mat4 prevView;
|
||||
Mat4 prevViewInverse;
|
||||
};
|
||||
|
||||
struct TransformStageState {
|
||||
|
|
|
@@ -45,7 +45,12 @@ size_t Batch::_dataMax { BATCH_PREALLOCATE_MIN };
 size_t Batch::_objectsMax { BATCH_PREALLOCATE_MIN };
 size_t Batch::_drawCallInfosMax { BATCH_PREALLOCATE_MIN };

-Batch::Batch() {
+Batch::Batch(const char* name) {
+#ifdef DEBUG
+    if (name) {
+        _name = name;
+    }
+#endif
     _commands.reserve(_commandsMax);
     _commandOffsets.reserve(_commandOffsetsMax);
     _params.reserve(_paramsMax);
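The name parameter defaults to nullptr (see the Batch.h hunk later in this commit) and is stored only in DEBUG builds, so existing call sites keep compiling and release builds carry no extra state. For example:

// Release builds accept the name but drop it; debug builds keep it in _name,
// which makes batches easier to identify while debugging or profiling.
gpu::Batch namedBatch("RenderDeferredTask::run"); // name string is illustrative
gpu::Batch anonymousBatch;                        // still valid: name defaults to nullptr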
@ -56,6 +61,9 @@ Batch::Batch() {
|
|||
|
||||
Batch::Batch(const Batch& batch_) {
|
||||
Batch& batch = *const_cast<Batch*>(&batch_);
|
||||
#ifdef DEBUG
|
||||
_name = batch_._name;
|
||||
#endif
|
||||
_commands.swap(batch._commands);
|
||||
_commandOffsets.swap(batch._commandOffsets);
|
||||
_params.swap(batch._params);
|
||||
|
@ -71,6 +79,7 @@ Batch::Batch(const Batch& batch_) {
|
|||
_transforms._items.swap(batch._transforms._items);
|
||||
_pipelines._items.swap(batch._pipelines._items);
|
||||
_framebuffers._items.swap(batch._framebuffers._items);
|
||||
_swapChains._items.swap(batch._swapChains._items);
|
||||
_drawCallInfos.swap(batch._drawCallInfos);
|
||||
_queries._items.swap(batch._queries._items);
|
||||
_lambdas._items.swap(batch._lambdas._items);
|
||||
|
@ -108,6 +117,7 @@ void Batch::clear() {
|
|||
_transforms.clear();
|
||||
_pipelines.clear();
|
||||
_framebuffers.clear();
|
||||
_swapChains.clear();
|
||||
_objects.clear();
|
||||
_drawCallInfos.clear();
|
||||
}
|
||||
|
@ -327,6 +337,15 @@ void Batch::setResourceTexture(uint32 slot, const TextureView& view) {
|
|||
setResourceTexture(slot, view._texture);
|
||||
}
|
||||
|
||||
void Batch::setResourceFramebufferSwapChainTexture(uint32 slot, const FramebufferSwapChainPointer& framebuffer, unsigned int swapChainIndex, unsigned int renderBufferSlot) {
|
||||
ADD_COMMAND(setResourceFramebufferSwapChainTexture);
|
||||
|
||||
_params.emplace_back(_swapChains.cache(framebuffer));
|
||||
_params.emplace_back(slot);
|
||||
_params.emplace_back(swapChainIndex);
|
||||
_params.emplace_back(renderBufferSlot);
|
||||
}
|
||||
|
||||
void Batch::setFramebuffer(const FramebufferPointer& framebuffer) {
|
||||
ADD_COMMAND(setFramebuffer);
|
||||
|
||||
|
@ -334,6 +353,19 @@ void Batch::setFramebuffer(const FramebufferPointer& framebuffer) {
|
|||
|
||||
}
|
||||
|
||||
void Batch::setFramebufferSwapChain(const FramebufferSwapChainPointer& framebuffer, unsigned int swapChainIndex) {
|
||||
ADD_COMMAND(setFramebufferSwapChain);
|
||||
|
||||
_params.emplace_back(_swapChains.cache(framebuffer));
|
||||
_params.emplace_back(swapChainIndex);
|
||||
}
|
||||
|
||||
void Batch::advance(const SwapChainPointer& swapChain) {
|
||||
ADD_COMMAND(advance);
|
||||
|
||||
_params.emplace_back(_swapChains.cache(swapChain));
|
||||
}
|
||||
|
||||
void Batch::clearFramebuffer(Framebuffer::Masks targets, const Vec4& color, float depth, int stencil, bool enableScissor) {
|
||||
ADD_COMMAND(clearFramebuffer);
|
||||
|
||||
|
|
|
@ -91,7 +91,7 @@ public:
|
|||
void captureDrawCallInfo();
|
||||
void captureNamedDrawCallInfo(std::string name);
|
||||
|
||||
Batch();
|
||||
Batch(const char* name = nullptr);
|
||||
Batch(const Batch& batch);
|
||||
~Batch();
|
||||
|
||||
|
@@ -187,11 +187,14 @@ public:
     void setResourceTexture(uint32 slot, const TexturePointer& texture);
     void setResourceTexture(uint32 slot, const TextureView& view); // not a command, just a shortcut from a TextureView

     void setResourceFramebufferSwapChainTexture(uint32 slot, const FramebufferSwapChainPointer& framebuffer, unsigned int swapChainIndex, unsigned int renderBufferSlot = 0U); // not a command, just a shortcut from a TextureView

     // Output Stage
     void setFramebuffer(const FramebufferPointer& framebuffer);

     void setFramebufferSwapChain(const FramebufferSwapChainPointer& framebuffer, unsigned int swapChainIndex);

     void advance(const SwapChainPointer& swapChain);

     // Clear framebuffer layers
     // Targets can be any of the render buffers contained in the currently bound Framebuffer
     // Optionally the scissor test can be enabled locally for this command and to restrict the clearing command to the pixels contained in the scissor rectangle
@ -299,12 +302,16 @@ public:
|
|||
COMMAND_setUniformBuffer,
|
||||
COMMAND_setResourceBuffer,
|
||||
COMMAND_setResourceTexture,
|
||||
COMMAND_setResourceFramebufferSwapChainTexture,
|
||||
|
||||
COMMAND_setFramebuffer,
|
||||
COMMAND_setFramebufferSwapChain,
|
||||
COMMAND_clearFramebuffer,
|
||||
COMMAND_blit,
|
||||
COMMAND_generateTextureMips,
|
||||
|
||||
COMMAND_advance,
|
||||
|
||||
COMMAND_beginQuery,
|
||||
COMMAND_endQuery,
|
||||
COMMAND_getQuery,
|
||||
|
@ -421,6 +428,7 @@ public:
|
|||
typedef Cache<Transform>::Vector TransformCaches;
|
||||
typedef Cache<PipelinePointer>::Vector PipelineCaches;
|
||||
typedef Cache<FramebufferPointer>::Vector FramebufferCaches;
|
||||
typedef Cache<SwapChainPointer>::Vector SwapChainCaches;
|
||||
typedef Cache<QueryPointer>::Vector QueryCaches;
|
||||
typedef Cache<std::string>::Vector StringCaches;
|
||||
typedef Cache<std::function<void()>>::Vector LambdaCache;
|
||||
|
@ -475,6 +483,7 @@ public:
|
|||
TransformCaches _transforms;
|
||||
PipelineCaches _pipelines;
|
||||
FramebufferCaches _framebuffers;
|
||||
SwapChainCaches _swapChains;
|
||||
QueryCaches _queries;
|
||||
LambdaCache _lambdas;
|
||||
StringCaches _profileRanges;
|
||||
|
@ -486,6 +495,11 @@ public:
|
|||
bool _enableSkybox { false };
|
||||
|
||||
protected:
|
||||
|
||||
#ifdef DEBUG
|
||||
std::string _name;
|
||||
#endif
|
||||
|
||||
friend class Context;
|
||||
friend class Frame;
|
||||
|
||||
|
|
|
@ -98,8 +98,9 @@ Buffer::Update::Update(const Buffer& parent) : buffer(parent) {
|
|||
|
||||
void Buffer::Update::apply() const {
|
||||
// Make sure we're loaded in order
|
||||
++buffer._applyUpdateCount;
|
||||
assert(buffer._applyUpdateCount.load() == updateNumber);
|
||||
buffer._applyUpdateCount++;
|
||||
assert(buffer._applyUpdateCount == updateNumber);
|
||||
|
||||
const auto pageSize = buffer._pages._pageSize;
|
||||
buffer._renderSysmem.resize(size);
|
||||
buffer._renderPages.accommodate(size);
|
||||
|
|
|
@@ -11,18 +11,45 @@
 <@if not GPU_COLOR_SLH@>
 <@def GPU_COLOR_SLH@>

-float sRGBFloatToLinear(float value) {
+// Linear ====> linear RGB
+// sRGB ======> standard RGB with gamma of 2.2
+// YCoCg =====> Luma (Y) chrominance green (Cg) and chrominance orange (Co)
+// https://software.intel.com/en-us/node/503873
+
+float color_scalar_sRGBToLinear(float value) {
     const float SRGB_ELBOW = 0.04045;

     return (value <= SRGB_ELBOW) ? value / 12.92 : pow((value + 0.055) / 1.055, 2.4);
 }

-vec3 colorToLinearRGB(vec3 srgb) {
-    return vec3(sRGBFloatToLinear(srgb.r), sRGBFloatToLinear(srgb.g), sRGBFloatToLinear(srgb.b));
+vec3 color_sRGBToLinear(vec3 srgb) {
+    return vec3(color_scalar_sRGBToLinear(srgb.r), color_scalar_sRGBToLinear(srgb.g), color_scalar_sRGBToLinear(srgb.b));
 }

-vec4 colorToLinearRGBA(vec4 srgba) {
-    return vec4(colorToLinearRGB(srgba.xyz), srgba.w);
+vec4 color_sRGBAToLinear(vec4 srgba) {
+    return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);
 }

+vec3 color_LinearToYCoCg(vec3 rgb) {
+    // Y = R/4 + G/2 + B/4
+    // Co = R/2 - B/2
+    // Cg = -R/4 + G/2 - B/4
+    return vec3(
+        rgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0,
+        rgb.x/2.0 - rgb.z/2.0,
+        -rgb.x/4.0 + rgb.y/2.0 - rgb.z/4.0
+    );
+}
+
+vec3 color_YCoCgToLinear(vec3 ycocg) {
+    // R = Y + Co - Cg
+    // G = Y + Cg
+    // B = Y - Co - Cg
+    return clamp(vec3(
+        ycocg.x + ycocg.y - ycocg.z,
+        ycocg.x + ycocg.z,
+        ycocg.x - ycocg.y - ycocg.z
+    ), vec3(0.0), vec3(1.0));
+}
+
 <@func declareColorWheel()@>
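A CPU-side mirror of the helpers above can be handy for sanity-checking the conversions; this is an illustrative C++ translation, not part of the commit.

#include <glm/glm.hpp>
#include <cmath>

// Illustrative CPU mirror of the GLSL helpers above.
float sRGBToLinearScalar(float v) {
    const float SRGB_ELBOW = 0.04045f;
    return (v <= SRGB_ELBOW) ? v / 12.92f : std::pow((v + 0.055f) / 1.055f, 2.4f);
}

glm::vec3 linearToYCoCg(const glm::vec3& rgb) {
    return glm::vec3(rgb.x / 4.0f + rgb.y / 2.0f + rgb.z / 4.0f,    // Y
                     rgb.x / 2.0f - rgb.z / 2.0f,                   // Co
                     -rgb.x / 4.0f + rgb.y / 2.0f - rgb.z / 4.0f);  // Cg
}

glm::vec3 yCoCgToLinear(const glm::vec3& c) {
    return glm::clamp(glm::vec3(c.x + c.y - c.z,   // R
                                c.x + c.z,         // G
                                c.x - c.y - c.z),  // B
                      glm::vec3(0.0f), glm::vec3(1.0f));
}
// For any rgb in [0, 1], yCoCgToLinear(linearToYCoCg(rgb)) reproduces rgb
// exactly (up to floating-point rounding), so the TAA history can be stored
// and blended in YCoCg without losing color.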
@@ -53,11 +53,12 @@ const std::string& Context::getBackendVersion() const {
     return _backend->getVersion();
 }

-void Context::beginFrame(const glm::mat4& renderPose) {
+void Context::beginFrame(const glm::mat4& renderView, const glm::mat4& renderPose) {
     assert(!_frameActive);
     _frameActive = true;
     _currentFrame = std::make_shared<Frame>();
     _currentFrame->pose = renderPose;
+    _currentFrame->view = renderView;

     if (!_frameRangeTimer) {
         _frameRangeTimer = std::make_shared<RangeTimer>("gpu::Context::Frame");

@@ -108,7 +109,7 @@ void Context::executeFrame(const FramePointer& frame) const {
     consumeFrameUpdates(frame);
     _backend->setStereoState(frame->stereoState);
     {
-        Batch beginBatch;
+        Batch beginBatch("Context::executeFrame::begin");
         _frameRangeTimer->begin(beginBatch);
         _backend->render(beginBatch);

@@ -117,7 +118,7 @@ void Context::executeFrame(const FramePointer& frame) const {
         _backend->render(batch);
     }

-    Batch endBatch;
+    Batch endBatch("Context::executeFrame::end");
     _frameRangeTimer->end(endBatch);
     _backend->render(endBatch);
 }
@@ -161,7 +161,7 @@ public:

     const std::string& getBackendVersion() const;

-    void beginFrame(const glm::mat4& renderPose = glm::mat4());
+    void beginFrame(const glm::mat4& renderView = glm::mat4(), const glm::mat4& renderPose = glm::mat4());
     void appendFrameBatch(Batch& batch);
     FramePointer endFrame();

@@ -274,8 +274,8 @@ protected:
 typedef std::shared_ptr<Context> ContextPointer;

 template<typename F>
-void doInBatch(std::shared_ptr<gpu::Context> context, F f) {
-    gpu::Batch batch;
+void doInBatch(const char* name, std::shared_ptr<gpu::Context> context, F f) {
+    gpu::Batch batch(name);
     f(batch);
     context->appendFrameBatch(batch);
 }
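With the name threaded through the helper, every call site follows the same pattern used throughout this commit; the batch name is retained only in DEBUG builds (see Batch.cpp), so release builds pay no extra cost for the extra diagnostics.

// Typical call site after this change (pattern taken from the call sites
// updated elsewhere in this commit); `args` is the RenderArgs available
// wherever the batch is recorded.
gpu::doInBatch("MyJob::run", args->_context, [&](gpu::Batch& batch) {
    batch.enableStereo(false);
    batch.setViewportTransform(args->_viewport);
    // ... record commands ...
});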
@ -28,6 +28,8 @@ namespace gpu {
|
|||
|
||||
StereoState stereoState;
|
||||
uint32_t frameIndex{ 0 };
|
||||
/// The view matrix used for rendering the frame, only applicable for HMDs
|
||||
Mat4 view;
|
||||
/// The sensor pose used for rendering the frame, only applicable for HMDs
|
||||
Mat4 pose;
|
||||
/// The collection of batches which make up the frame
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#define hifi_gpu_Framebuffer_h
|
||||
|
||||
#include "Texture.h"
|
||||
#include "ResourceSwapChain.h"
|
||||
#include <memory>
|
||||
|
||||
class Transform; // Texcood transform util
|
||||
|
@ -177,6 +178,8 @@ protected:
|
|||
Framebuffer() {}
|
||||
};
|
||||
typedef std::shared_ptr<Framebuffer> FramebufferPointer;
|
||||
typedef ResourceSwapChain<Framebuffer> FramebufferSwapChain;
|
||||
typedef std::shared_ptr<FramebufferSwapChain> FramebufferSwapChainPointer;
|
||||
|
||||
}
|
||||
|
||||
|
|
libraries/gpu/src/gpu/ResourceSwapChain.h (new file, 62 lines)
@@ -0,0 +1,62 @@
//
//  Created by Olivier Prat on 2018/02/19
//  Copyright 2013-2018 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_ResourceSwapChain_h
#define hifi_gpu_ResourceSwapChain_h

#include <memory>
#include <array>

namespace gpu {
    class SwapChain {
    public:
        SwapChain(unsigned int size = 2U) : _size{ size } {}
        virtual ~SwapChain() {}

        void advance() {
            _frontIndex = (_frontIndex + 1) % _size;
        }

        unsigned int getSize() const { return _size; }

    protected:
        unsigned int _size;
        unsigned int _frontIndex{ 0U };
    };
    typedef std::shared_ptr<SwapChain> SwapChainPointer;

    template <class R>
    class ResourceSwapChain : public SwapChain {
    public:
        enum {
            MAX_SIZE = 4
        };

        using Type = R;
        using TypePointer = std::shared_ptr<R>;

        ResourceSwapChain(unsigned int size = 2U) : SwapChain{ size } {}

        void reset() {
            for (auto& ptr : _resources) {
                ptr.reset();
            }
        }

        TypePointer& edit(unsigned int index) { return _resources[(index + _frontIndex) % _size]; }
        const TypePointer& get(unsigned int index) const { return _resources[(index + _frontIndex) % _size]; }

    private:
        std::array<TypePointer, MAX_SIZE> _resources;
    };
}

#endif
|
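A sketch of the intended usage, matching the way the TAA pass later in this commit ping-pongs its history framebuffers; the buffer names are illustrative.

// Index 0 always refers to the current "front" resource (e.g. last frame's
// history), higher indices to the resources behind it; advance() rotates the ring.
auto history = std::make_shared<gpu::FramebufferSwapChain>(2U);
history->edit(0) = gpu::FramebufferPointer(gpu::Framebuffer::create("taaHistory0"));
history->edit(1) = gpu::FramebufferPointer(gpu::Framebuffer::create("taaHistory1"));

// Read from the front buffer, render into the one behind it...
//   batch.setResourceFramebufferSwapChainTexture(historySlot, history, 0);
//   batch.setFramebufferSwapChain(history, 1);
// ...then rotate so the freshly written buffer becomes the new front:
//   batch.advance(history);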
|
@ -42,4 +42,9 @@ std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> Displa
|
|||
hudOperator = _hudOperator;
|
||||
}
|
||||
return hudOperator;
|
||||
}
|
||||
}
|
||||
|
||||
glm::mat4 HmdDisplay::getEyeToHeadTransform(Eye eye) const {
|
||||
static const glm::mat4 xform;
|
||||
return xform;
|
||||
}
|
||||
|
|
|
@ -93,9 +93,7 @@ class HmdDisplay : public StereoDisplay {
|
|||
public:
|
||||
// HMD specific methods
|
||||
// TODO move these into another class?
|
||||
virtual glm::mat4 getEyeToHeadTransform(Eye eye) const {
|
||||
static const glm::mat4 transform; return transform;
|
||||
}
|
||||
virtual glm::mat4 getEyeToHeadTransform(Eye eye) const;
|
||||
|
||||
// returns a copy of the most recent head pose, computed via updateHeadPose
|
||||
virtual glm::mat4 getHeadPose() const {
|
||||
|
|
|
@ -385,7 +385,7 @@ void AmbientOcclusionEffect::run(const render::RenderContextPointer& renderConte
|
|||
auto firstHBlurPipeline = getHBlurPipeline();
|
||||
auto lastVBlurPipeline = getVBlurPipeline();
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
gpu::doInBatch("AmbientOcclusionEffect::run", args->_context, [=](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
_gpuTimer->begin(batch);
|
||||
|
@ -518,7 +518,7 @@ void DebugAmbientOcclusion::run(const render::RenderContextPointer& renderContex
|
|||
|
||||
auto debugPipeline = getDebugPipeline();
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DebugAmbientOcclusion::run", args->_context, [=](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setViewportTransform(sourceViewport);
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <PathUtils.h>
|
||||
#include <SharedUtil.h>
|
||||
#include <gpu/Context.h>
|
||||
#include <gpu/StandardShaderLib.h>
|
||||
|
||||
#include "AntialiasingEffect.h"
|
||||
#include "StencilMaskPass.h"
|
||||
|
@ -22,7 +23,11 @@
|
|||
#include "DependencyManager.h"
|
||||
#include "ViewFrustum.h"
|
||||
#include "GeometryCache.h"
|
||||
#include "FramebufferCache.h"
|
||||
|
||||
#define ANTIALIASING_USE_TAA 1
|
||||
|
||||
#if !ANTIALIASING_USE_TAA
|
||||
#include "fxaa_vert.h"
|
||||
#include "fxaa_frag.h"
|
||||
#include "fxaa_blend_frag.h"
|
||||
|
@ -108,7 +113,7 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const
|
|||
|
||||
RenderArgs* args = renderContext->args;
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
|
||||
|
@ -165,3 +170,372 @@ void Antialiasing::run(const render::RenderContextPointer& renderContext, const
|
|||
DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color, _geometryId);
|
||||
});
|
||||
}
|
||||
#else
|
||||
|
||||
#include "taa_frag.h"
|
||||
#include "fxaa_blend_frag.h"
|
||||
#include "taa_blend_frag.h"
|
||||
|
||||
const int AntialiasingPass_ParamsSlot = 0;
|
||||
const int AntialiasingPass_FrameTransformSlot = 1;
|
||||
|
||||
const int AntialiasingPass_HistoryMapSlot = 0;
|
||||
const int AntialiasingPass_SourceMapSlot = 1;
|
||||
const int AntialiasingPass_VelocityMapSlot = 2;
|
||||
const int AntialiasingPass_DepthMapSlot = 3;
|
||||
|
||||
const int AntialiasingPass_NextMapSlot = 4;
|
||||
|
||||
|
||||
Antialiasing::Antialiasing() {
|
||||
_antialiasingBuffers = std::make_shared<gpu::FramebufferSwapChain>(2U);
|
||||
}
|
||||
|
||||
Antialiasing::~Antialiasing() {
|
||||
_antialiasingBuffers.reset();
|
||||
_antialiasingTextures[0].reset();
|
||||
_antialiasingTextures[1].reset();
|
||||
}
|
||||
|
||||
const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
|
||||
|
||||
if (!_antialiasingPipeline) {
|
||||
|
||||
auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
|
||||
auto ps = taa_frag::getShader();
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("taaParamsBuffer"), AntialiasingPass_ParamsSlot));
|
||||
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), AntialiasingPass_FrameTransformSlot));
|
||||
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("historyMap"), AntialiasingPass_HistoryMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("sourceMap"), AntialiasingPass_SourceMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("velocityMap"), AntialiasingPass_VelocityMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), AntialiasingPass_DepthMapSlot));
|
||||
|
||||
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
PrepareStencil::testNoAA(*state);
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_antialiasingPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
return _antialiasingPipeline;
|
||||
}
|
||||
|
||||
const gpu::PipelinePointer& Antialiasing::getBlendPipeline() {
|
||||
if (!_blendPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
|
||||
auto ps = fxaa_blend_frag::getShader();
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("colorTexture"), AntialiasingPass_NextMapSlot));
|
||||
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
PrepareStencil::testNoAA(*state);
|
||||
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_blendPipeline = gpu::Pipeline::create(program, state);
|
||||
_sharpenLoc = program->getUniforms().findLocation("sharpenIntensity");
|
||||
|
||||
}
|
||||
return _blendPipeline;
|
||||
}
|
||||
|
||||
const gpu::PipelinePointer& Antialiasing::getDebugBlendPipeline() {
|
||||
if (!_debugBlendPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
|
||||
auto ps = taa_blend_frag::getShader();
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("taaParamsBuffer"), AntialiasingPass_ParamsSlot));
|
||||
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), AntialiasingPass_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("nextMap"), AntialiasingPass_NextMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("historyMap"), AntialiasingPass_HistoryMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("sourceMap"), AntialiasingPass_SourceMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("velocityMap"), AntialiasingPass_VelocityMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), AntialiasingPass_DepthMapSlot));
|
||||
|
||||
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
PrepareStencil::testNoAA(*state);
|
||||
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_debugBlendPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
return _debugBlendPipeline;
|
||||
}
|
||||
|
||||
void Antialiasing::configure(const Config& config) {
|
||||
_sharpen = config.sharpen;
|
||||
_params.edit().blend = config.blend;
|
||||
_params.edit().covarianceGamma = config.covarianceGamma;
|
||||
|
||||
_params.edit().setConstrainColor(config.constrainColor);
|
||||
_params.edit().setFeedbackColor(config.feedbackColor);
|
||||
|
||||
_params.edit().debugShowVelocityThreshold = config.debugShowVelocityThreshold;
|
||||
|
||||
_params.edit().regionInfo.x = config.debugX;
|
||||
_params.edit().regionInfo.z = config.debugFXAAX;
|
||||
|
||||
_params.edit().setDebug(config.debug);
|
||||
_params.edit().setShowDebugCursor(config.showCursorPixel);
|
||||
_params.edit().setDebugCursor(config.debugCursorTexcoord);
|
||||
_params.edit().setDebugOrbZoom(config.debugOrbZoom);
|
||||
|
||||
_params.edit().setShowClosestFragment(config.showClosestFragment);
|
||||
}
|
||||
|
||||
|
||||
void Antialiasing::run(const render::RenderContextPointer& renderContext, const Inputs& inputs) {
|
||||
assert(renderContext->args);
|
||||
assert(renderContext->args->hasViewFrustum());
|
||||
|
||||
RenderArgs* args = renderContext->args;
|
||||
|
||||
auto& deferredFrameTransform = inputs.get0();
|
||||
auto& sourceBuffer = inputs.get1();
|
||||
auto& linearDepthBuffer = inputs.get2();
|
||||
auto& velocityBuffer = inputs.get3();
|
||||
|
||||
int width = sourceBuffer->getWidth();
|
||||
int height = sourceBuffer->getHeight();
|
||||
|
||||
if (_antialiasingBuffers->get(0)) {
|
||||
if (_antialiasingBuffers->get(0)->getSize() != uvec2(width, height)) {// || (sourceBuffer && (_antialiasingBuffer->getRenderBuffer(1) != sourceBuffer->getRenderBuffer(0)))) {
|
||||
_antialiasingBuffers->edit(0).reset();
|
||||
_antialiasingBuffers->edit(1).reset();
|
||||
_antialiasingTextures[0].reset();
|
||||
_antialiasingTextures[1].reset();
|
||||
}
|
||||
}
|
||||
|
||||
if (!_antialiasingBuffers->get(0)) {
|
||||
// Link the antialiasing FBO to texture
|
||||
for (int i = 0; i < 2; i++) {
|
||||
auto& antiAliasingBuffer = _antialiasingBuffers->edit(i);
|
||||
antiAliasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("antialiasing"));
|
||||
auto format = gpu::Element::COLOR_SRGBA_32; // DependencyManager::get<FramebufferCache>()->getLightingTexture()->getTexelFormat();
|
||||
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
|
||||
_antialiasingTextures[i] = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
|
||||
antiAliasingBuffer->setRenderBuffer(0, _antialiasingTextures[i]);
|
||||
}
|
||||
}
|
||||
|
||||
gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
|
||||
// TAA step
|
||||
getAntialiasingPipeline();
|
||||
batch.setResourceFramebufferSwapChainTexture(AntialiasingPass_HistoryMapSlot, _antialiasingBuffers, 0);
|
||||
batch.setResourceTexture(AntialiasingPass_SourceMapSlot, sourceBuffer->getRenderBuffer(0));
|
||||
batch.setResourceTexture(AntialiasingPass_VelocityMapSlot, velocityBuffer->getVelocityTexture());
|
||||
// This is only used during debug
|
||||
batch.setResourceTexture(AntialiasingPass_DepthMapSlot, linearDepthBuffer->getLinearDepthTexture());
|
||||
|
||||
batch.setUniformBuffer(AntialiasingPass_ParamsSlot, _params);
|
||||
batch.setUniformBuffer(AntialiasingPass_FrameTransformSlot, deferredFrameTransform->getFrameTransformBuffer());
|
||||
|
||||
batch.setFramebufferSwapChain(_antialiasingBuffers, 1);
|
||||
batch.setPipeline(getAntialiasingPipeline());
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
// Blend step
|
||||
batch.setResourceTexture(AntialiasingPass_SourceMapSlot, nullptr);
|
||||
|
||||
batch.setFramebuffer(sourceBuffer);
|
||||
if (_params->isDebug()) {
|
||||
batch.setPipeline(getDebugBlendPipeline());
|
||||
} else {
|
||||
batch.setPipeline(getBlendPipeline());
|
||||
// Disable sharpen if FXAA
|
||||
batch._glUniform1f(_sharpenLoc, _sharpen * _params.get().regionInfo.z);
|
||||
}
|
||||
batch.setResourceFramebufferSwapChainTexture(AntialiasingPass_NextMapSlot, _antialiasingBuffers, 1);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
batch.advance(_antialiasingBuffers);
|
||||
|
||||
batch.setUniformBuffer(AntialiasingPass_ParamsSlot, nullptr);
|
||||
batch.setUniformBuffer(AntialiasingPass_FrameTransformSlot, nullptr);
|
||||
|
||||
batch.setResourceTexture(AntialiasingPass_DepthMapSlot, nullptr);
|
||||
batch.setResourceTexture(AntialiasingPass_HistoryMapSlot, nullptr);
|
||||
batch.setResourceTexture(AntialiasingPass_VelocityMapSlot, nullptr);
|
||||
batch.setResourceTexture(AntialiasingPass_NextMapSlot, nullptr);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
void JitterSampleConfig::setIndex(int current) {
|
||||
_index = (current) % JitterSample::SEQUENCE_LENGTH;
|
||||
emit dirty();
|
||||
}
|
||||
|
||||
int JitterSampleConfig::cycleStopPauseRun() {
|
||||
_state = (_state + 1) % 3;
|
||||
switch (_state) {
|
||||
case 0: {
|
||||
return none();
|
||||
break;
|
||||
}
|
||||
case 1: {
|
||||
return pause();
|
||||
break;
|
||||
}
|
||||
case 2:
|
||||
default: {
|
||||
return play();
|
||||
break;
|
||||
}
|
||||
}
|
||||
return _state;
|
||||
}
|
||||
|
||||
int JitterSampleConfig::prev() {
|
||||
setIndex(_index - 1);
|
||||
return _index;
|
||||
}
|
||||
|
||||
int JitterSampleConfig::next() {
|
||||
setIndex(_index + 1);
|
||||
return _index;
|
||||
}
|
||||
|
||||
int JitterSampleConfig::none() {
|
||||
_state = 0;
|
||||
stop = true;
|
||||
freeze = false;
|
||||
setIndex(-1);
|
||||
return _state;
|
||||
}
|
||||
|
||||
int JitterSampleConfig::pause() {
|
||||
_state = 1;
|
||||
stop = false;
|
||||
freeze = true;
|
||||
setIndex(0);
|
||||
return _state;
|
||||
}
|
||||
|
||||
|
||||
int JitterSampleConfig::play() {
|
||||
_state = 2;
|
||||
stop = false;
|
||||
freeze = false;
|
||||
setIndex(0);
|
||||
return _state;
|
||||
}
|
||||
|
||||
template <int B>
class Halton {
public:

    float eval(int index) const {
        float f = 1.0f;
        float r = 0.0f;
        float invB = 1.0f / (float)B;
        index++; // Indices start at 1, not 0

        while (index > 0) {
            f = f * invB;
            r = r + f * (float)(index % B);
            index = index / B;
        }

        return r;
    }

};
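For orientation, the first few values produced by the two sequences used for the (2,3) jitter pattern below; this snippet is illustrative and not part of the commit.

// Quick check of the sequence values. The jitter pairs base 2 on X with
// base 3 on Y and then recenters by subtracting 0.5, giving well distributed
// sub-pixel offsets in [-0.5, 0.5).
Halton<2> genX;   // eval(0..3) = 0.5, 0.25, 0.75, 0.125, ...
Halton<3> genY;   // eval(0..3) = 0.333..., 0.666..., 0.111..., 0.444..., ...
glm::vec2 firstOffset = glm::vec2(genX.eval(0), genY.eval(0)) - glm::vec2(0.5f);
// firstOffset == (0.0, -0.1666...), i.e. well inside a single pixel.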
|
||||
|
||||
JitterSample::SampleSequence::SampleSequence(){
|
||||
// Halton sequence (2,3)
|
||||
Halton<2> genX;
|
||||
Halton<3> genY;
|
||||
|
||||
for (int i = 0; i < SEQUENCE_LENGTH; i++) {
|
||||
offsets[i] = glm::vec2(genX.eval(i), genY.eval(i));
|
||||
offsets[i] -= vec2(0.5f);
|
||||
}
|
||||
offsets[SEQUENCE_LENGTH] = glm::vec2(0.0f);
|
||||
}
|
||||
|
||||
void JitterSample::configure(const Config& config) {
|
||||
_freeze = config.freeze;
|
||||
if (config.stop || _freeze) {
|
||||
auto pausedIndex = config.getIndex();
|
||||
if (_sampleSequence.currentIndex != pausedIndex) {
|
||||
_sampleSequence.currentIndex = pausedIndex;
|
||||
}
|
||||
} else {
|
||||
if (_sampleSequence.currentIndex < 0) {
|
||||
_sampleSequence.currentIndex = config.getIndex();
|
||||
}
|
||||
}
|
||||
_scale = config.scale;
|
||||
}
|
||||
|
||||
void JitterSample::run(const render::RenderContextPointer& renderContext) {
|
||||
auto& current = _sampleSequence.currentIndex;
|
||||
if (!_freeze) {
|
||||
if (current >= 0) {
|
||||
current = (current + 1) % SEQUENCE_LENGTH;
|
||||
} else {
|
||||
current = -1;
|
||||
}
|
||||
}
|
||||
auto args = renderContext->args;
|
||||
auto viewFrustum = args->getViewFrustum();
|
||||
|
||||
auto jit = _sampleSequence.offsets[(current < 0 ? SEQUENCE_LENGTH : current)];
|
||||
auto width = (float)args->_viewport.z;
|
||||
auto height = (float)args->_viewport.w;
|
||||
|
||||
auto jx = 2.0f * jit.x / width;
|
||||
auto jy = 2.0f * jit.y / height;
|
||||
|
||||
if (!args->isStereo()) {
|
||||
auto projMat = viewFrustum.getProjection();
|
||||
|
||||
projMat[2][0] += jx;
|
||||
projMat[2][1] += jy;
|
||||
|
||||
viewFrustum.setProjection(projMat);
|
||||
viewFrustum.calculate();
|
||||
args->setViewFrustum(viewFrustum);
|
||||
} else {
|
||||
mat4 projMats[2];
|
||||
args->_context->getStereoProjections(projMats);
|
||||
|
||||
jx *= 2.0f;
|
||||
|
||||
for (int i = 0; i < 2; i++) {
|
||||
auto& projMat = projMats[i];
|
||||
projMat[2][0] += jx;
|
||||
projMat[2][1] += jy;
|
||||
}
|
||||
|
||||
args->_context->setStereoProjections(projMats);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif
|
|
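A minimal standalone sketch of the jitter logic above, assuming only GLM; the radical inverse mirrors Halton<B>::eval and the sub-pixel offset is applied exactly as in JitterSample::run (scale and stereo handling omitted). This is illustrative and not part of the commit.

// Illustrative sketch, not part of this commit.
#include <glm/glm.hpp>

float radicalInverse(int index, int base) {
    float f = 1.0f, r = 0.0f;
    float invB = 1.0f / (float)base;
    index++; // indices start at 1
    while (index > 0) {
        f *= invB;
        r += f * (float)(index % base);
        index /= base;
    }
    return r;
}

// Centered Halton(2,3) offset for frame i, applied as a clip-space translation of the
// projection matrix, like projMat[2][0] += 2 * jitter.x / width above.
glm::mat4 jitterProjection(const glm::mat4& proj, int i, float width, float height) {
    glm::vec2 jitter(radicalInverse(i, 2) - 0.5f, radicalInverse(i, 3) - 0.5f);
    glm::mat4 jittered = proj;
    jittered[2][0] += 2.0f * jitter.x / width;
    jittered[2][1] += 2.0f * jitter.y / height;
    return jittered;
}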
@ -15,7 +15,184 @@
#include <DependencyManager.h>

#include "render/DrawTask.h"
#include "DeferredFrameTransform.h"
#include "VelocityBufferPass.h"


class JitterSampleConfig : public render::Job::Config {
    Q_OBJECT
    Q_PROPERTY(float scale MEMBER scale NOTIFY dirty)
    Q_PROPERTY(bool freeze MEMBER freeze NOTIFY dirty)
    Q_PROPERTY(bool stop MEMBER stop NOTIFY dirty)
    Q_PROPERTY(int index READ getIndex NOTIFY dirty)
public:
    JitterSampleConfig() : render::Job::Config(true) {}

    float scale{ 0.5f };
    bool stop{ false };
    bool freeze{ false };

    void setIndex(int current);

public slots:
    int cycleStopPauseRun();
    int prev();
    int next();
    int none();
    int pause();
    int play();

    int getIndex() const { return _index; }
    int getState() const { return _state; }
signals:
    void dirty();

private:
    int _state{ 0 };
    int _index{ 0 };

};


class JitterSample {
public:

    enum {
        SEQUENCE_LENGTH = 128
    };

    using Config = JitterSampleConfig;
    using JobModel = render::Job::Model<JitterSample, Config>;

    void configure(const Config& config);
    void run(const render::RenderContextPointer& renderContext);

private:

    struct SampleSequence {
        SampleSequence();

        glm::vec2 offsets[SEQUENCE_LENGTH + 1];
        int sequenceLength{ SEQUENCE_LENGTH };
        int currentIndex{ 0 };
    };

    SampleSequence _sampleSequence;
    float _scale{ 1.0 };
    bool _freeze{ false };
};


class AntialiasingConfig : public render::Job::Config {
    Q_OBJECT
    Q_PROPERTY(float blend MEMBER blend NOTIFY dirty)
    Q_PROPERTY(float sharpen MEMBER sharpen NOTIFY dirty)
    Q_PROPERTY(float covarianceGamma MEMBER covarianceGamma NOTIFY dirty)

    Q_PROPERTY(bool constrainColor MEMBER constrainColor NOTIFY dirty)
    Q_PROPERTY(bool feedbackColor MEMBER feedbackColor NOTIFY dirty)

    Q_PROPERTY(bool debug MEMBER debug NOTIFY dirty)
    Q_PROPERTY(float debugX MEMBER debugX NOTIFY dirty)
    Q_PROPERTY(float debugFXAAX MEMBER debugFXAAX NOTIFY dirty)
    Q_PROPERTY(float debugShowVelocityThreshold MEMBER debugShowVelocityThreshold NOTIFY dirty)
    Q_PROPERTY(bool showCursorPixel MEMBER showCursorPixel NOTIFY dirty)
    Q_PROPERTY(glm::vec2 debugCursorTexcoord MEMBER debugCursorTexcoord NOTIFY dirty)
    Q_PROPERTY(float debugOrbZoom MEMBER debugOrbZoom NOTIFY dirty)

    Q_PROPERTY(bool showClosestFragment MEMBER showClosestFragment NOTIFY dirty)

public:
    AntialiasingConfig() : render::Job::Config(true) {}

    float blend{ 0.05f };
    float sharpen{ 0.15f };

    bool constrainColor{ true };
    float covarianceGamma{ 0.9f };
    bool feedbackColor{ false };

    float debugX{ 0.0f };
    float debugFXAAX{ 1.0f };
    float debugShowVelocityThreshold{ 1.0f };
    glm::vec2 debugCursorTexcoord{ 0.5f, 0.5f };
    float debugOrbZoom{ 2.0f };

    bool debug { false };
    bool showCursorPixel { false };
    bool showClosestFragment{ false };

signals:
    void dirty();
};

#define SET_BIT(bitfield, bitIndex, value) bitfield = ((bitfield) & ~(1 << (bitIndex))) | ((value) << (bitIndex))
#define GET_BIT(bitfield, bitIndex) ((bitfield) & (1 << (bitIndex)))
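For reference, a small illustrative example (not from this commit) of how these two macros clear-then-set a single bit and read it back, which is exactly how the TAAParams setters below pack their switches into the flags ivec4. The variable name flagsY is hypothetical.

// Illustrative sketch, not part of this commit.
int flagsY = 0;
SET_BIT(flagsY, 1, 1);                    // constrain-color bit on  -> flagsY == 0b00010
SET_BIT(flagsY, 4, 1);                    // feedback-color bit on   -> flagsY == 0b10010
SET_BIT(flagsY, 1, 0);                    // constrain-color bit off -> flagsY == 0b10000
bool feedback = GET_BIT(flagsY, 4) != 0;  // true; GET_BIT returns the masked bit, not 0/1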


struct TAAParams {
    float nope{ 0.0f };
    float blend{ 0.05f };
    float covarianceGamma{ 1.0f };
    float debugShowVelocityThreshold{ 1.0f };

    glm::ivec4 flags{ 0 };
    glm::vec4 pixelInfo{ 0.5f, 0.5f, 2.0f, 0.0f };
    glm::vec4 regionInfo{ 0.0f, 0.0f, 1.0f, 0.0f };

    void setConstrainColor(bool enabled) { SET_BIT(flags.y, 1, enabled); }
    bool isConstrainColor() const { return (bool)GET_BIT(flags.y, 1); }

    void setFeedbackColor(bool enabled) { SET_BIT(flags.y, 4, enabled); }
    bool isFeedbackColor() const { return (bool)GET_BIT(flags.y, 4); }

    void setDebug(bool enabled) { SET_BIT(flags.x, 0, enabled); }
    bool isDebug() const { return (bool) GET_BIT(flags.x, 0); }

    void setShowDebugCursor(bool enabled) { SET_BIT(flags.x, 1, enabled); }
    bool showDebugCursor() const { return (bool)GET_BIT(flags.x, 1); }

    void setDebugCursor(glm::vec2 debugCursor) { pixelInfo.x = debugCursor.x; pixelInfo.y = debugCursor.y; }
    glm::vec2 getDebugCursor() const { return glm::vec2(pixelInfo.x, pixelInfo.y); }

    void setDebugOrbZoom(float orbZoom) { pixelInfo.z = orbZoom; }
    float getDebugOrbZoom() const { return pixelInfo.z; }

    void setShowClosestFragment(bool enabled) { SET_BIT(flags.x, 3, enabled); }

};
using TAAParamsBuffer = gpu::StructBuffer<TAAParams>;

class Antialiasing {
public:
    using Inputs = render::VaryingSet4 < DeferredFrameTransformPointer, gpu::FramebufferPointer, LinearDepthFramebufferPointer, VelocityFramebufferPointer > ;
    using Config = AntialiasingConfig;
    using JobModel = render::Job::ModelI<Antialiasing, Inputs, Config>;

    Antialiasing();
    ~Antialiasing();
    void configure(const Config& config);
    void run(const render::RenderContextPointer& renderContext, const Inputs& inputs);

    const gpu::PipelinePointer& getAntialiasingPipeline();
    const gpu::PipelinePointer& getBlendPipeline();
    const gpu::PipelinePointer& getDebugBlendPipeline();

private:

    gpu::FramebufferSwapChainPointer _antialiasingBuffers;
    gpu::TexturePointer _antialiasingTextures[2];

    gpu::PipelinePointer _antialiasingPipeline;
    gpu::PipelinePointer _blendPipeline;
    gpu::PipelinePointer _debugBlendPipeline;

    TAAParamsBuffer _params;
    float _sharpen{ 0.15f };
    int _sharpenLoc{ -1 };
};
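As a usage sketch, the job declared above is added to a render task with its four inputs wired in. This mirrors the RenderDeferredTask::build changes later in this diff; the variables deferredFrameTransform, primaryFramebuffer, linearDepthTarget and velocityBuffer are assumed to be outputs of earlier jobs in that task.

// Illustrative sketch, not part of this commit (see RenderDeferredTask.cpp below for the real wiring).
const auto antialiasingInputs = Antialiasing::Inputs(deferredFrameTransform, primaryFramebuffer,
                                                     linearDepthTarget, velocityBuffer).asVarying();
task.addJob<Antialiasing>("Antialiasing", antialiasingInputs);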


/*
class AntiAliasingConfig : public render::Job::Config {
    Q_OBJECT
    Q_PROPERTY(bool enabled MEMBER enabled)

@ -27,27 +204,28 @@ class Antialiasing {
public:
    using Config = AntiAliasingConfig;
    using JobModel = render::Job::ModelI<Antialiasing, gpu::FramebufferPointer, Config>;


    Antialiasing();
    ~Antialiasing();
    void configure(const Config& config) {}
    void run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer);

    const gpu::PipelinePointer& getAntialiasingPipeline(RenderArgs* args);

    const gpu::PipelinePointer& getAntialiasingPipeline();
    const gpu::PipelinePointer& getBlendPipeline();


private:


    // Uniforms for AA
    gpu::int32 _texcoordOffsetLoc;


    gpu::FramebufferPointer _antialiasingBuffer;


    gpu::TexturePointer _antialiasingTexture;


    gpu::PipelinePointer _antialiasingPipeline;
    gpu::PipelinePointer _blendPipeline;
    int _geometryId { 0 };
};
*/

#endif // hifi_AntialiasingEffect_h
@ -93,7 +93,7 @@ void DrawBackgroundStage::run(const render::RenderContextPointer& renderContext,
|
|||
PerformanceTimer perfTimer("skybox");
|
||||
auto args = renderContext->args;
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DrawBackgroundStage::run", args->_context, [&](gpu::Batch& batch) {
|
||||
args->_batch = &batch;
|
||||
|
||||
batch.enableSkybox(true);
|
||||
|
|
|
@ -75,7 +75,7 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons
|
|||
|
||||
glm::ivec4 viewport{ 0, 0, bufferSize.x, bufferSize.y };
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("BloomThreshold::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setViewportTransform(viewport);
|
||||
|
@ -135,7 +135,7 @@ void BloomApply::run(const render::RenderContextPointer& renderContext, const In
|
|||
const auto blur2FB = inputs.get3();
|
||||
const glm::ivec4 viewport{ 0, 0, framebufferSize.x, framebufferSize.y };
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("BloomApply::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setFramebuffer(frameBuffer);
|
||||
|
@ -180,7 +180,7 @@ void BloomDraw::run(const render::RenderContextPointer& renderContext, const Inp
|
|||
_pipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("BloomDraw::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setFramebuffer(frameBuffer);
|
||||
|
@ -238,7 +238,7 @@ void DebugBloom::run(const render::RenderContextPointer& renderContext, const In
|
|||
_pipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DebugBloom::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setFramebuffer(frameBuffer);
|
||||
|
|
|
@ -53,7 +53,8 @@ enum TextureSlot {
|
|||
DiffusedCurvature,
|
||||
Scattering,
|
||||
AmbientOcclusion,
|
||||
AmbientOcclusionBlurred
|
||||
AmbientOcclusionBlurred,
|
||||
Velocity,
|
||||
};
|
||||
|
||||
enum ParamSlot {
|
||||
|
@ -254,6 +255,12 @@ static const std::string DEFAULT_AMBIENT_OCCLUSION_BLURRED_SHADER{
|
|||
" }"
|
||||
};
|
||||
|
||||
static const std::string DEFAULT_VELOCITY_SHADER{
|
||||
"vec4 getFragmentColor() {"
|
||||
" return vec4(vec2(texture(velocityMap, uv).xy), 0.0, 1.0);"
|
||||
" }"
|
||||
};
|
||||
|
||||
static const std::string DEFAULT_CUSTOM_SHADER {
|
||||
"vec4 getFragmentColor() {"
|
||||
" return vec4(1.0, 0.0, 0.0, 1.0);"
|
||||
|
@ -341,6 +348,8 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Mode mode, std::string cust
|
|||
return DEFAULT_AMBIENT_OCCLUSION_SHADER;
|
||||
case AmbientOcclusionBlurredMode:
|
||||
return DEFAULT_AMBIENT_OCCLUSION_BLURRED_SHADER;
|
||||
case VelocityMode:
|
||||
return DEFAULT_VELOCITY_SHADER;
|
||||
case CustomMode:
|
||||
return getFileContent(customFile, DEFAULT_CUSTOM_SHADER);
|
||||
default:
|
||||
|
@ -402,6 +411,7 @@ const gpu::PipelinePointer& DebugDeferredBuffer::getPipeline(Mode mode, std::str
|
|||
slotBindings.insert(gpu::Shader::Binding("diffusedCurvatureMap", DiffusedCurvature));
|
||||
slotBindings.insert(gpu::Shader::Binding("scatteringMap", Scattering));
|
||||
slotBindings.insert(gpu::Shader::Binding("occlusionBlurredMap", AmbientOcclusionBlurred));
|
||||
slotBindings.insert(gpu::Shader::Binding("velocityMap", Velocity));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
auto pipeline = gpu::Pipeline::create(program, std::make_shared<gpu::State>());
|
||||
|
@ -439,9 +449,10 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
|
|||
auto& linearDepthTarget = inputs.get1();
|
||||
auto& surfaceGeometryFramebuffer = inputs.get2();
|
||||
auto& ambientOcclusionFramebuffer = inputs.get3();
|
||||
auto& frameTransform = inputs.get4();
|
||||
auto& velocityFramebuffer = inputs.get4();
|
||||
auto& frameTransform = inputs.get5();
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DebugDeferredBuffer::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
|
||||
|
@ -468,6 +479,9 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
|
|||
batch.setResourceTexture(Depth, deferredFramebuffer->getPrimaryDepthTexture());
|
||||
batch.setResourceTexture(Lighting, deferredFramebuffer->getLightingTexture());
|
||||
}
|
||||
if (velocityFramebuffer) {
|
||||
batch.setResourceTexture(Velocity, velocityFramebuffer->getVelocityTexture());
|
||||
}
|
||||
|
||||
auto lightStage = renderContext->_scene->getStage<LightStage>();
|
||||
assert(lightStage);
|
||||
|
@ -515,5 +529,7 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
|
|||
batch.setResourceTexture(AmbientOcclusion, nullptr);
|
||||
batch.setResourceTexture(AmbientOcclusionBlurred, nullptr);
|
||||
|
||||
batch.setResourceTexture(Velocity, nullptr);
|
||||
|
||||
});
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include "DeferredFramebuffer.h"
|
||||
#include "SurfaceGeometryPass.h"
|
||||
#include "AmbientOcclusionEffect.h"
|
||||
#include "VelocityBufferPass.h"
|
||||
|
||||
class DebugDeferredBufferConfig : public render::Job::Config {
|
||||
Q_OBJECT
|
||||
|
@ -38,7 +39,7 @@ signals:
|
|||
|
||||
class DebugDeferredBuffer {
|
||||
public:
|
||||
using Inputs = render::VaryingSet5<DeferredFramebufferPointer, LinearDepthFramebufferPointer, SurfaceGeometryFramebufferPointer, AmbientOcclusionFramebufferPointer, DeferredFrameTransformPointer>;
|
||||
using Inputs = render::VaryingSet6<DeferredFramebufferPointer, LinearDepthFramebufferPointer, SurfaceGeometryFramebufferPointer, AmbientOcclusionFramebufferPointer, VelocityFramebufferPointer, DeferredFrameTransformPointer>;
|
||||
using Config = DebugDeferredBufferConfig;
|
||||
using JobModel = render::Job::ModelI<DebugDeferredBuffer, Inputs, Config>;
|
||||
|
||||
|
@ -81,6 +82,7 @@ protected:
|
|||
ScatteringDebugMode,
|
||||
AmbientOcclusionMode,
|
||||
AmbientOcclusionBlurredMode,
|
||||
VelocityMode,
|
||||
CustomMode, // Needs to stay last
|
||||
|
||||
NumModes,
|
||||
|
|
|
@ -38,12 +38,13 @@ void DeferredFrameTransform::update(RenderArgs* args) {
|
|||
|
||||
args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.projectionMono);
|
||||
|
||||
// Running in stero ?
|
||||
// Running in stereo ?
|
||||
bool isStereo = args->isStereo();
|
||||
if (!isStereo) {
|
||||
frameTransformBuffer.projection[0] = frameTransformBuffer.projectionMono;
|
||||
frameTransformBuffer.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
|
||||
frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
|
||||
frameTransformBuffer.invProjection[0] = glm::inverse(frameTransformBuffer.projection[0]);
|
||||
} else {
|
||||
|
||||
mat4 projMats[2];
|
||||
|
@ -55,6 +56,7 @@ void DeferredFrameTransform::update(RenderArgs* args) {
|
|||
// Compose the mono Eye space to Stereo clip space Projection Matrix
|
||||
auto sideViewMat = projMats[i] * eyeViews[i];
|
||||
frameTransformBuffer.projection[i] = sideViewMat;
|
||||
frameTransformBuffer.invProjection[i] = glm::inverse(sideViewMat);
|
||||
}
|
||||
|
||||
frameTransformBuffer.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
|
||||
|
|
|
@ -45,6 +45,8 @@ protected:
|
|||
glm::vec4 stereoInfo{ 0.0 };
|
||||
// Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
|
||||
glm::mat4 projection[2];
|
||||
// Inverse proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
|
||||
glm::mat4 invProjection[2];
|
||||
// THe mono projection for sure
|
||||
glm::mat4 projectionMono;
|
||||
// Inv View matrix from eye space (mono) to world space
|
||||
|
|
|
@ -431,7 +431,7 @@ void PrepareDeferred::run(const RenderContextPointer& renderContext, const Input
|
|||
outputs.edit0() = _deferredFramebuffer;
|
||||
outputs.edit1() = _deferredFramebuffer->getLightingFramebuffer();
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("PrepareDeferred::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
batch.setStateScissorRect(args->_viewport);
|
||||
|
|
|
@ -16,6 +16,9 @@
struct CameraCorrection {
    mat4 _correction;
    mat4 _correctionInverse;

    mat4 _prevView;
    mat4 _prevViewInverse;
};

uniform cameraCorrectionBuffer {

@ -28,6 +31,7 @@ struct DeferredFrameTransform {
    vec4 _depthInfo;
    vec4 _stereoInfo;
    mat4 _projection[2];
    mat4 _invProjection[2];
    mat4 _projectionMono;
    mat4 _viewInverse;
    mat4 _view;

@ -37,13 +41,6 @@ uniform deferredFrameTransformBuffer {
    DeferredFrameTransform frameTransform;
};

DeferredFrameTransform getDeferredFrameTransform() {
    DeferredFrameTransform result = frameTransform;
    result._view = result._view * cameraCorrection._correctionInverse;
    result._viewInverse = result._viewInverse * cameraCorrection._correction;
    return result;
}

vec2 getWidthHeight(int resolutionLevel) {
    return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);
}

@ -79,11 +76,26 @@ float getPosLinearDepthFar() {
}

mat4 getViewInverse() {
    return frameTransform._viewInverse * cameraCorrection._correction;
    return frameTransform._viewInverse * cameraCorrection._correctionInverse;
}

mat4 getView() {
    return frameTransform._view * cameraCorrection._correctionInverse;
    return cameraCorrection._correction * frameTransform._view;
}

mat4 getPreviousView() {
    return cameraCorrection._prevView;
}

mat4 getPreviousViewInverse() {
    return cameraCorrection._prevViewInverse;
}

DeferredFrameTransform getDeferredFrameTransform() {
    DeferredFrameTransform result = frameTransform;
    result._view = getView();
    result._viewInverse = getViewInverse();
    return result;
}

bool isStereo() {

@ -123,6 +135,14 @@ vec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {
    return vec3(Xe, Ye, Zeye);
}

vec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {
    // compute the view space position using the depth
    vec3 clipPos;
    clipPos.xyz = vec3(texcoord.xy, Zdb) * 2.0 - 1.0;
    vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);
    return eyePos.xyz / eyePos.w;
}

ivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {
    ivec2 fragPos = ivec2(glFragCoord.xy);
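The new evalEyePositionFromZdb above unprojects a raw depth-buffer value back to eye space through the inverse projection. A CPU-side equivalent, written as a hedged sketch with GLM and assuming the usual [0,1] texcoord/depth to [-1,1] NDC convention:

// Illustrative sketch, not part of this commit: CPU equivalent of evalEyePositionFromZdb.
#include <glm/glm.hpp>

glm::vec3 eyePositionFromDepth(const glm::mat4& invProjection, glm::vec2 texcoord, float depthBufferZ) {
    // [0,1] texcoord/depth -> [-1,1] normalized device coordinates
    glm::vec4 clipPos(glm::vec3(texcoord, depthBufferZ) * 2.0f - 1.0f, 1.0f);
    glm::vec4 eyePos = invProjection * clipPos;  // homogeneous eye-space position
    return glm::vec3(eyePos) / eyePos.w;         // perspective divide back to eye space
}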
|
|
@ -155,7 +155,7 @@ void DrawHaze::run(const render::RenderContextPointer& renderContext, const Inpu
|
|||
|
||||
auto sourceFramebufferSize = glm::ivec2(inputBuffer->getDimensions());
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DrawHaze::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setFramebuffer(outputBuffer);
|
||||
|
||||
|
|
|
@ -161,7 +161,7 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c
|
|||
// Clear the framebuffer without stereo
|
||||
// Needs to be distinct from the other batch because using the clear call
|
||||
// while stereo is enabled triggers a warning
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DrawHighlightMask::run::begin", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setFramebuffer(ressources->getDepthFramebuffer());
|
||||
batch.clearDepthStencilFramebuffer(1.0f, 0);
|
||||
|
@ -174,7 +174,7 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c
|
|||
|
||||
render::ItemBounds itemBounds;
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DrawHighlightMask::run", args->_context, [&](gpu::Batch& batch) {
|
||||
args->_batch = &batch;
|
||||
|
||||
auto maskPipeline = _shapePlumber->pickPipeline(args, defaultKeyBuilder);
|
||||
|
@ -212,7 +212,7 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c
|
|||
|
||||
_boundsBuffer->setData(itemBounds.size() * sizeof(render::ItemBound), (const gpu::Byte*) itemBounds.data());
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DrawHighlightMask::run::end", args->_context, [&](gpu::Batch& batch) {
|
||||
// Setup camera, projection and viewport for all items
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
batch.setProjectionTransform(projMat);
|
||||
|
@ -284,7 +284,7 @@ void DrawHighlight::run(const render::RenderContextPointer& renderContext, const
|
|||
shaderParameters._size.y = size;
|
||||
}
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DrawHighlight::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setFramebuffer(destinationFrameBuffer);
|
||||
|
||||
|
@ -357,7 +357,7 @@ void DebugHighlight::run(const render::RenderContextPointer& renderContext, cons
|
|||
assert(renderContext->args->hasViewFrustum());
|
||||
RenderArgs* args = renderContext->args;
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DebugHighlight::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
batch.setFramebuffer(highlightRessources->getColorFramebuffer());
|
||||
|
||||
|
|
|
@ -8,53 +8,23 @@
|
|||
|
||||
#include "RenderCommonTask.h"
|
||||
|
||||
#include <PerfStat.h>
|
||||
#include <PathUtils.h>
|
||||
#include <ViewFrustum.h>
|
||||
#include <gpu/Context.h>
|
||||
|
||||
#include <render/CullTask.h>
|
||||
#include <render/FilterTask.h>
|
||||
#include <render/SortTask.h>
|
||||
#include <render/DrawTask.h>
|
||||
#include <render/DrawStatus.h>
|
||||
#include <render/DrawSceneOctree.h>
|
||||
#include <render/BlurTask.h>
|
||||
|
||||
#include "LightingModel.h"
|
||||
#include "StencilMaskPass.h"
|
||||
#include "DebugDeferredBuffer.h"
|
||||
#include "DeferredFramebuffer.h"
|
||||
#include "DeferredLightingEffect.h"
|
||||
#include "SurfaceGeometryPass.h"
|
||||
#include "FramebufferCache.h"
|
||||
#include "TextureCache.h"
|
||||
#include "ZoneRenderer.h"
|
||||
#include "FadeEffect.h"
|
||||
#include "RenderUtilsLogging.h"
|
||||
|
||||
#include "AmbientOcclusionEffect.h"
|
||||
#include "AntialiasingEffect.h"
|
||||
#include "ToneMappingEffect.h"
|
||||
#include "SubsurfaceScattering.h"
|
||||
#include "DrawHaze.h"
|
||||
#include "BloomEffect.h"
|
||||
#include "HighlightEffect.h"
|
||||
|
||||
#include <sstream>
|
||||
|
||||
using namespace render;
|
||||
extern void initOverlay3DPipelines(render::ShapePlumber& plumber, bool depthTest = false);
|
||||
|
||||
void BeginGPURangeTimer::run(const render::RenderContextPointer& renderContext, gpu::RangeTimerPointer& timer) {
|
||||
timer = _gpuTimer;
|
||||
gpu::doInBatch(renderContext->args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("BeginGPURangeTimer", renderContext->args->_context, [&](gpu::Batch& batch) {
|
||||
_gpuTimer->begin(batch);
|
||||
});
|
||||
}
|
||||
|
||||
void EndGPURangeTimer::run(const render::RenderContextPointer& renderContext, const gpu::RangeTimerPointer& timer) {
|
||||
gpu::doInBatch(renderContext->args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("EndGPURangeTimer", renderContext->args->_context, [&](gpu::Batch& batch) {
|
||||
timer->end(batch);
|
||||
});
|
||||
|
||||
|
@ -87,14 +57,14 @@ void DrawOverlay3D::run(const RenderContextPointer& renderContext, const Inputs&
|
|||
// Needs to be distinct from the other batch because using the clear call
|
||||
// while stereo is enabled triggers a warning
|
||||
if (_opaquePass) {
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch){
|
||||
gpu::doInBatch("DrawOverlay3D::run::clear", args->_context, [&](gpu::Batch& batch){
|
||||
batch.enableStereo(false);
|
||||
batch.clearFramebuffer(gpu::Framebuffer::BUFFER_DEPTH, glm::vec4(), 1.f, 0, false);
|
||||
});
|
||||
}
|
||||
|
||||
// Render the items
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DrawOverlay3D::main", args->_context, [&](gpu::Batch& batch) {
|
||||
args->_batch = &batch;
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
batch.setStateScissorRect(args->_viewport);
|
||||
|
@ -127,7 +97,7 @@ void CompositeHUD::run(const RenderContextPointer& renderContext) {
|
|||
|
||||
// Grab the HUD texture
|
||||
#if !defined(DISABLE_QML)
|
||||
gpu::doInBatch(renderContext->args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("CompositeHUD", renderContext->args->_context, [&](gpu::Batch& batch) {
|
||||
if (renderContext->args->_hudOperator) {
|
||||
renderContext->args->_hudOperator(batch, renderContext->args->_hudTexture, renderContext->args->_renderMode == RenderArgs::RenderMode::MIRROR_RENDER_MODE);
|
||||
}
|
||||
|
@ -154,7 +124,7 @@ void Blit::run(const RenderContextPointer& renderContext, const gpu::Framebuffer
|
|||
// Blit primary to blit FBO
|
||||
auto primaryFbo = srcFramebuffer;
|
||||
|
||||
gpu::doInBatch(renderArgs->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("Blit", renderArgs->_context, [&](gpu::Batch& batch) {
|
||||
batch.setFramebuffer(blitFbo);
|
||||
|
||||
if (renderArgs->_renderMode == RenderArgs::MIRROR_RENDER_MODE) {
|
||||
|
|
|
@ -34,6 +34,7 @@
#include "DeferredFramebuffer.h"
#include "DeferredLightingEffect.h"
#include "SurfaceGeometryPass.h"
#include "VelocityBufferPass.h"
#include "FramebufferCache.h"
#include "TextureCache.h"
#include "ZoneRenderer.h"

@ -94,9 +95,12 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren

    fadeEffect->build(task, opaques);

    task.addJob<JitterSample>("JitterCam");

    // Prepare deferred, generate the shared Deferred Frame Transform
    const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform");
    const auto lightingModel = task.addJob<MakeLightingModel>("LightingModel");


    // GPU jobs: Start preparing the primary, deferred and lighting buffer
    const auto primaryFramebuffer = task.addJob<PreparePrimaryFramebuffer>("PreparePrimaryBuffer");

@ -142,6 +146,11 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
    const auto ambientOcclusionFramebuffer = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Outputs>(0);
    const auto ambientOcclusionUniforms = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Outputs>(1);

    // Velocity
    const auto velocityBufferInputs = VelocityBufferPass::Inputs(deferredFrameTransform, deferredFramebuffer).asVarying();
    const auto velocityBufferOutputs = task.addJob<VelocityBufferPass>("VelocityBuffer", velocityBufferInputs);
    const auto velocityBuffer = velocityBufferOutputs.getN<VelocityBufferPass::Outputs>(0);

    // Clear Light, Haze and Skybox Stages and render zones from the general metas bucket
    const auto zones = task.addJob<ZoneRendererTask>("ZoneRenderer", metas);


@ -162,6 +171,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren

    task.addJob<RenderDeferred>("RenderDeferred", deferredLightingInputs);


    // Similar to light stage, background stage has been filled by several potential render items and resolved for the frame in this job
    task.addJob<DrawBackgroundStage>("DrawBackgroundDeferred", lightingModel);


@ -220,10 +230,30 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
        task.addJob<DrawBounds>("DrawSelectionBounds", selectedItems);
    }

    // Debugging stages
    // Layered Overlays
    const auto filteredOverlaysOpaque = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredOpaque", overlayOpaques, Item::LAYER_3D_FRONT);
    const auto filteredOverlaysTransparent = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredTransparent", overlayTransparents, Item::LAYER_3D_FRONT);
    const auto overlaysInFrontOpaque = filteredOverlaysOpaque.getN<FilterLayeredItems::Outputs>(0);
    const auto overlaysInFrontTransparent = filteredOverlaysTransparent.getN<FilterLayeredItems::Outputs>(0);

    const auto overlayInFrontOpaquesInputs = DrawOverlay3D::Inputs(overlaysInFrontOpaque, lightingModel).asVarying();
    const auto overlayInFrontTransparentsInputs = DrawOverlay3D::Inputs(overlaysInFrontTransparent, lightingModel).asVarying();
    task.addJob<DrawOverlay3D>("DrawOverlayInFrontOpaque", overlayInFrontOpaquesInputs, true);
    task.addJob<DrawOverlay3D>("DrawOverlayInFrontTransparent", overlayInFrontTransparentsInputs, false);

    { // Debug the bounds of the rendered Overlay items that are marked drawInFront, still look at the zbuffer
        task.addJob<DrawBounds>("DrawOverlayInFrontOpaqueBounds", overlaysInFrontOpaque);
        task.addJob<DrawBounds>("DrawOverlayInFrontTransparentBounds", overlaysInFrontTransparent);
    }

    // AA job
    const auto antialiasingInputs = Antialiasing::Inputs(deferredFrameTransform, primaryFramebuffer, linearDepthTarget, velocityBuffer).asVarying();
    task.addJob<Antialiasing>("Antialiasing", antialiasingInputs);

    // Debugging stages
    {
        // Debugging Deferred buffer job
        const auto debugFramebuffers = render::Varying(DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, deferredFrameTransform));
        const auto debugFramebuffers = render::Varying(DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, velocityBuffer, deferredFrameTransform));
        task.addJob<DebugDeferredBuffer>("DebugDeferredBuffer", debugFramebuffers);

        const auto debugSubsurfaceScatteringInputs = DebugSubsurfaceScattering::Inputs(deferredFrameTransform, deferredFramebuffer, lightingModel,

@ -250,25 +280,6 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
        task.addJob<DebugZoneLighting>("DrawZoneStack", deferredFrameTransform);
    }

    // Layered Overlays
    const auto filteredOverlaysOpaque = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredOpaque", overlayOpaques, Item::LAYER_3D_FRONT);
    const auto filteredOverlaysTransparent = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredTransparent", overlayTransparents, Item::LAYER_3D_FRONT);
    const auto overlaysInFrontOpaque = filteredOverlaysOpaque.getN<FilterLayeredItems::Outputs>(0);
    const auto overlaysInFrontTransparent = filteredOverlaysTransparent.getN<FilterLayeredItems::Outputs>(0);

    const auto overlayInFrontOpaquesInputs = DrawOverlay3D::Inputs(overlaysInFrontOpaque, lightingModel).asVarying();
    const auto overlayInFrontTransparentsInputs = DrawOverlay3D::Inputs(overlaysInFrontTransparent, lightingModel).asVarying();
    task.addJob<DrawOverlay3D>("DrawOverlayInFrontOpaque", overlayInFrontOpaquesInputs, true);
    task.addJob<DrawOverlay3D>("DrawOverlayInFrontTransparent", overlayInFrontTransparentsInputs, false);

    { // Debug the bounds of the rendered Overlay items that are marked drawInFront, still look at the zbuffer
        task.addJob<DrawBounds>("DrawOverlayInFrontOpaqueBounds", overlaysInFrontOpaque);
        task.addJob<DrawBounds>("DrawOverlayInFrontTransparentBounds", overlaysInFrontTransparent);
    }

    // AA job to be revisited
    task.addJob<Antialiasing>("Antialiasing", primaryFramebuffer);

    // Composite the HUD and HUD overlays
    task.addJob<CompositeHUD>("HUD");


@ -304,7 +315,7 @@ void DrawDeferred::run(const RenderContextPointer& renderContext, const Inputs&

    RenderArgs* args = renderContext->args;

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
    gpu::doInBatch("DrawDeferred::run", args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;

        // Setup camera, projection and viewport for all items

@ -371,7 +382,7 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const

    RenderArgs* args = renderContext->args;

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
    gpu::doInBatch("DrawStateSortDeferred::run", args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;

        // Setup camera, projection and viewport for all items

@ -409,4 +420,3 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const

    config->setNumDrawn((int)inItems.size());
}
|
|
@ -136,7 +136,7 @@ void PrepareFramebuffer::run(const RenderContextPointer& renderContext, gpu::Fra
|
|||
}
|
||||
|
||||
auto args = renderContext->args;
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("PrepareFramebuffer::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
batch.setStateScissorRect(args->_viewport);
|
||||
|
@ -152,7 +152,8 @@ void PrepareFramebuffer::run(const RenderContextPointer& renderContext, gpu::Fra
|
|||
|
||||
void PrepareForward::run(const RenderContextPointer& renderContext, const Inputs& inputs) {
|
||||
RenderArgs* args = renderContext->args;
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
|
||||
gpu::doInBatch("RenderForward::Draw::run", args->_context, [&](gpu::Batch& batch) {
|
||||
args->_batch = &batch;
|
||||
|
||||
graphics::LightPointer keySunLight;
|
||||
|
@ -186,7 +187,7 @@ void DrawForward::run(const RenderContextPointer& renderContext, const Inputs& i
|
|||
const auto& inItems = inputs.get0();
|
||||
const auto& lightingModel = inputs.get1();
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DrawForward::run", args->_context, [&](gpu::Batch& batch) {
|
||||
args->_batch = &batch;
|
||||
|
||||
|
||||
|
|
|
@ -140,7 +140,7 @@ void RenderShadowMap::run(const render::RenderContextPointer& renderContext, con
|
|||
args->popViewFrustum();
|
||||
args->pushViewFrustum(adjustedShadowFrustum);
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("RenderShadowMap::run", args->_context, [&](gpu::Batch& batch) {
|
||||
args->_batch = &batch;
|
||||
batch.enableStereo(false);
|
||||
|
||||
|
|
|
@ -81,7 +81,7 @@ void PrepareStencil::run(const RenderContextPointer& renderContext, const gpu::F
|
|||
return;
|
||||
}
|
||||
|
||||
doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
doInBatch("PrepareStencil::run", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
|
|
|
@ -322,7 +322,7 @@ void diffuseProfileGPU(gpu::TexturePointer& profileMap, RenderArgs* args) {
|
|||
auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("diffuseProfile"));
|
||||
makeFramebuffer->setRenderBuffer(0, profileMap);
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
gpu::doInBatch("SubsurfaceScattering::diffuseProfileGPU", args->_context, [=](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setViewportTransform(glm::ivec4(0, 0, width, height));
|
||||
|
@ -359,7 +359,7 @@ void diffuseScatterGPU(const gpu::TexturePointer& profileMap, gpu::TexturePointe
|
|||
auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("diffuseScatter"));
|
||||
makeFramebuffer->setRenderBuffer(0, lut);
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
gpu::doInBatch("SubsurfaceScattering::diffuseScatterGPU", args->_context, [=](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setViewportTransform(glm::ivec4(0, 0, width, height));
|
||||
|
@ -396,7 +396,7 @@ void computeSpecularBeckmannGPU(gpu::TexturePointer& beckmannMap, RenderArgs* ar
|
|||
auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("computeSpecularBeckmann"));
|
||||
makeFramebuffer->setRenderBuffer(0, beckmannMap);
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
gpu::doInBatch("SubsurfaceScattering::computeSpecularBeckmannGPU", args->_context, [=](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setViewportTransform(glm::ivec4(0, 0, width, height));
|
||||
|
@ -537,7 +537,7 @@ void DebugSubsurfaceScattering::run(const render::RenderContextPointer& renderCo
|
|||
// const auto light = DependencyManager::get<DeferredLightingEffect>()->getLightStage()->getLight(0);
|
||||
const auto light = lightStage->getLight(0);
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DebugSubsurfaceScattering::run", args->_context, [=](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
|
||||
|
|
|
@ -174,7 +174,7 @@ void LinearDepthPass::run(const render::RenderContextPointer& renderContext, con
|
|||
auto halfViewport = depthViewport >> 1;
|
||||
float clearLinearDepth = args->getViewFrustum().getFarClip() * 2.0f;
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
gpu::doInBatch("LinearDepthPass::run", args->_context, [=](gpu::Batch& batch) {
|
||||
_gpuTimer->begin(batch);
|
||||
batch.enableStereo(false);
|
||||
|
||||
|
@ -466,7 +466,7 @@ void SurfaceGeometryPass::run(const render::RenderContextPointer& renderContext,
|
|||
_diffusePass.getParameters()->setLinearDepthPosFar(args->getViewFrustum().getFarClip());
|
||||
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
gpu::doInBatch("SurfaceGeometryPass::run", args->_context, [=](gpu::Batch& batch) {
|
||||
_gpuTimer->begin(batch);
|
||||
batch.enableStereo(false);
|
||||
|
||||
|
|
|
@ -68,7 +68,7 @@ void ToneMappingEffect::render(RenderArgs* args, const gpu::TexturePointer& ligh
|
|||
}
|
||||
|
||||
auto framebufferSize = glm::ivec2(lightingBuffer->getDimensions());
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
gpu::doInBatch("ToneMappingEffect::render", args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setFramebuffer(destinationFramebuffer);
|
||||
|
||||
|
|
173  libraries/render-utils/src/VelocityBufferPass.cpp  Normal file
@ -0,0 +1,173 @@
//
//  VelocityBufferPass.cpp
//  libraries/render-utils/src/
//
//  Created by Sam Gateau 8/15/2017.
//  Copyright 2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "VelocityBufferPass.h"

#include <limits>

#include <gpu/Context.h>
#include <gpu/StandardShaderLib.h>
#include "StencilMaskPass.h"

const int VelocityBufferPass_FrameTransformSlot = 0;
const int VelocityBufferPass_DepthMapSlot = 0;


#include "velocityBuffer_cameraMotion_frag.h"

VelocityFramebuffer::VelocityFramebuffer() {
}


void VelocityFramebuffer::updatePrimaryDepth(const gpu::TexturePointer& depthBuffer) {
    //If the depth buffer or size changed, we need to delete our FBOs
    bool reset = false;
    if ((_primaryDepthTexture != depthBuffer)) {
        _primaryDepthTexture = depthBuffer;
        reset = true;
    }
    if (_primaryDepthTexture) {
        auto newFrameSize = glm::ivec2(_primaryDepthTexture->getDimensions());
        if (_frameSize != newFrameSize) {
            _frameSize = newFrameSize;
            _halfFrameSize = newFrameSize >> 1;

            reset = true;
        }
    }

    if (reset) {
        clear();
    }
}

void VelocityFramebuffer::clear() {
    _velocityFramebuffer.reset();
    _velocityTexture.reset();
}

void VelocityFramebuffer::allocate() {

    auto width = _frameSize.x;
    auto height = _frameSize.y;

    // For Velocity Buffer:
    _velocityTexture = gpu::Texture::createRenderBuffer(gpu::Element(gpu::VEC2, gpu::HALF, gpu::RGB), width, height, gpu::Texture::SINGLE_MIP,
        gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR));
    _velocityFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("velocity"));
    _velocityFramebuffer->setRenderBuffer(0, _velocityTexture);
    _velocityFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, _primaryDepthTexture->getTexelFormat());
}

gpu::FramebufferPointer VelocityFramebuffer::getVelocityFramebuffer() {
    if (!_velocityFramebuffer) {
        allocate();
    }
    return _velocityFramebuffer;
}

gpu::TexturePointer VelocityFramebuffer::getVelocityTexture() {
    if (!_velocityTexture) {
        allocate();
    }
    return _velocityTexture;
}

VelocityBufferPass::VelocityBufferPass() {
}

void VelocityBufferPass::configure(const Config& config) {
}

void VelocityBufferPass::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    RenderArgs* args = renderContext->args;

    const auto& frameTransform = inputs.get0();
    const auto& deferredFramebuffer = inputs.get1();

    if (!_gpuTimer) {
        _gpuTimer = std::make_shared < gpu::RangeTimer>(__FUNCTION__);
    }

    if (!_velocityFramebuffer) {
        _velocityFramebuffer = std::make_shared<VelocityFramebuffer>();
    }
    _velocityFramebuffer->updatePrimaryDepth(deferredFramebuffer->getPrimaryDepthTexture());

    auto depthBuffer = deferredFramebuffer->getPrimaryDepthTexture();

    auto velocityFBO = _velocityFramebuffer->getVelocityFramebuffer();
    auto velocityTexture = _velocityFramebuffer->getVelocityTexture();

    outputs.edit0() = _velocityFramebuffer;
    outputs.edit1() = velocityFBO;
    outputs.edit2() = velocityTexture;

    auto cameraMotionPipeline = getCameraMotionPipeline();

    auto fullViewport = args->_viewport;

    gpu::doInBatch("VelocityBufferPass::run", args->_context, [=](gpu::Batch& batch) {
        _gpuTimer->begin(batch);
        batch.enableStereo(false);

        batch.setViewportTransform(fullViewport);
        batch.setProjectionTransform(glm::mat4());
        batch.resetViewTransform();
        batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(_velocityFramebuffer->getDepthFrameSize(), fullViewport));

        batch.setUniformBuffer(VelocityBufferPass_FrameTransformSlot, frameTransform->getFrameTransformBuffer());

        // Velocity buffer camera motion
        batch.setFramebuffer(velocityFBO);
        batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0f, 0.0f, 0.0f, 0.0f));
        batch.setPipeline(cameraMotionPipeline);
        batch.setResourceTexture(VelocityBufferPass_DepthMapSlot, depthBuffer);
        batch.draw(gpu::TRIANGLE_STRIP, 4);

        _gpuTimer->end(batch);
    });

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
    config->setGPUBatchRunTime(_gpuTimer->getGPUAverage(), _gpuTimer->getBatchAverage());
}


const gpu::PipelinePointer& VelocityBufferPass::getCameraMotionPipeline() {
    if (!_cameraMotionPipeline) {
        auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
        auto ps = velocityBuffer_cameraMotion_frag::getShader();
        gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);

        gpu::Shader::BindingSet slotBindings;
        slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), VelocityBufferPass_FrameTransformSlot));
        slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), VelocityBufferPass_DepthMapSlot));
        gpu::Shader::makeProgram(*program, slotBindings);


        gpu::StatePointer state = gpu::StatePointer(new gpu::State());

        // Stencil test the curvature pass for objects pixels only, not the background
        // PrepareStencil::testShape(*state);

        state->setColorWriteMask(true, true, false, false);

        // Good to go add the brand new pipeline
        _cameraMotionPipeline = gpu::Pipeline::create(program, state);
    }

    return _cameraMotionPipeline;
}
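The camera-motion fragment shader referenced above (velocityBuffer_cameraMotion) is not included in this section of the diff. As a hedged sketch of the underlying idea: per-pixel camera velocity is the screen-space difference between where a depth-reconstructed point projects this frame and where the same world point projected under the previous frame's view. Written with GLM (all names and the single-projection assumption are illustrative, not taken from the shader):

// Illustrative sketch, not part of this commit.
#include <glm/glm.hpp>

glm::vec2 ndcFromEye(const glm::mat4& projection, const glm::vec3& eyePos) {
    glm::vec4 clip = projection * glm::vec4(eyePos, 1.0f);
    return glm::vec2(clip) / clip.w; // [-1,1] NDC
}

glm::vec2 cameraMotionVelocity(const glm::vec3& eyePos,            // from the depth buffer
                               const glm::mat4& projection,
                               const glm::mat4& viewInverse,        // current eye -> world
                               const glm::mat4& previousView) {     // world -> previous eye
    glm::vec3 worldPos = glm::vec3(viewInverse * glm::vec4(eyePos, 1.0f));
    glm::vec3 prevEyePos = glm::vec3(previousView * glm::vec4(worldPos, 1.0f));
    // uv-space velocity, the quantity a half-float RG velocity target like the one above would store
    return 0.5f * (ndcFromEye(projection, eyePos) - ndcFromEye(projection, prevEyePos));
}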
89  libraries/render-utils/src/VelocityBufferPass.h  Normal file
@ -0,0 +1,89 @@
//
//  VelocityBufferPass.h
//  libraries/render-utils/src/
//
//  Created by Sam Gateau 8/15/2017.
//  Copyright 2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_VelocityBufferPass_h
#define hifi_VelocityBufferPass_h

#include "SurfaceGeometryPass.h"


// VelocityFramebuffer is a helper class gathering in one place theframebuffers and targets describing the surface geometry linear depth
// from a z buffer
class VelocityFramebuffer {
public:
    VelocityFramebuffer();

    gpu::FramebufferPointer getVelocityFramebuffer();
    gpu::TexturePointer getVelocityTexture();

    // Update the depth buffer which will drive the allocation of all the other resources according to its size.
    void updatePrimaryDepth(const gpu::TexturePointer& depthBuffer);

    gpu::TexturePointer getPrimaryDepthTexture();
    const glm::ivec2& getDepthFrameSize() const { return _frameSize; }

    void setResolutionLevel(int level);
    int getResolutionLevel() const { return _resolutionLevel; }

protected:
    void clear();
    void allocate();

    gpu::TexturePointer _primaryDepthTexture;

    gpu::FramebufferPointer _velocityFramebuffer;
    gpu::TexturePointer _velocityTexture;

    glm::ivec2 _frameSize;
    glm::ivec2 _halfFrameSize;
    int _resolutionLevel{ 0 };
};

using VelocityFramebufferPointer = std::shared_ptr<VelocityFramebuffer>;

class VelocityBufferPassConfig : public render::GPUJobConfig {
    Q_OBJECT
    Q_PROPERTY(float depthThreshold MEMBER depthThreshold NOTIFY dirty)

public:
    VelocityBufferPassConfig() : render::GPUJobConfig(true) {}

    float depthThreshold{ 5.0f };

signals:
    void dirty();
};

class VelocityBufferPass {
public:
    using Inputs = render::VaryingSet2<DeferredFrameTransformPointer, DeferredFramebufferPointer>;
    using Outputs = render::VaryingSet3<VelocityFramebufferPointer, gpu::FramebufferPointer, gpu::TexturePointer>;
    using Config = VelocityBufferPassConfig;
    using JobModel = render::Job::ModelIO<VelocityBufferPass, Inputs, Outputs, Config>;

    VelocityBufferPass();

    void configure(const Config& config);
    void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs);

private:
    typedef gpu::BufferView UniformBufferView;

    VelocityFramebufferPointer _velocityFramebuffer;

    const gpu::PipelinePointer& getCameraMotionPipeline();
    gpu::PipelinePointer _cameraMotionPipeline;

    gpu::RangeTimerPointer _gpuTimer;
};


#endif // hifi_VelocityBufferPass_h
|
@ -171,7 +171,7 @@ void DebugZoneLighting::run(const render::RenderContextPointer& context, const I
|
|||
}
|
||||
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
gpu::doInBatch("DebugZoneLighting::run", args->_context, [=](gpu::Batch& batch) {
|
||||
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
auto viewFrustum = args->getViewFrustum();
|
||||
|
|
|
@ -17,7 +17,7 @@ out vec4 _color;
|
|||
|
||||
void main(void) {
|
||||
// pass along the color
|
||||
_color = colorToLinearRGBA(inColor.rgba);
|
||||
_color = color_sRGBAToLinear(inColor.rgba);
|
||||
|
||||
TransformCamera cam = getTransformCamera();
|
||||
TransformObject obj = getTransformObject();
|
||||
|
|
|
@ -22,6 +22,7 @@ uniform sampler2D halfNormalMap;
|
|||
uniform sampler2D occlusionMap;
|
||||
uniform sampler2D occlusionBlurredMap;
|
||||
uniform sampler2D scatteringMap;
|
||||
uniform sampler2D velocityMap;
|
||||
|
||||
<@include ShadowCore.slh@>
|
||||
|
||||
|
|
|
@ -23,72 +23,112 @@ precision mediump int;
#endif

uniform sampler2D colorTexture;
//uniform sampler2D historyTexture;
uniform vec2 texcoordOffset;

in vec2 varTexcoord;
out vec4 outFragColor;
in vec2 varTexCoord0;
layout(location = 0) out vec4 outFragColor;
//layout(location = 0) out vec4 outFragHistory;

void main() {
    // filter width limit for dependent "two-tap" texture samples
    float FXAA_SPAN_MAX = 8.0;
    outFragColor = vec4(texture(colorTexture, varTexCoord0).xyz, 1.0/8.0);

    // local contrast multiplier for performing AA
    // higher = sharper, but setting this value too high will cause near-vertical and near-horizontal edges to fail
    // see "fxaaQualityEdgeThreshold"
    float FXAA_REDUCE_MUL = 1.0 / 8.0;
    // v2
    /* float ModulationFactor = 1.0 / 8.0;

    // luminance threshold for processing dark colors
    // see "fxaaQualityEdgeThresholdMin"
    float FXAA_REDUCE_MIN = 1.0 / 128.0;
    vec3 History = textureLod(historyTexture, varTexCoord0, 0.0).rgb;
    vec3 CurrentSubpixel = textureLod(colorTexture, varTexCoord0, 0.0).rgb;
    /*
    vec3 NearColor0 = textureLodOffset(colorTexture, varTexCoord0, 0.0, ivec2(1, 0)).xyz;
    vec3 NearColor1 = textureLodOffset(colorTexture, varTexCoord0, 0.0, ivec2(0, 1)).xyz;
    vec3 NearColor2 = textureLodOffset(colorTexture, varTexCoord0, 0.0, ivec2(-1, 0)).xyz;
    vec3 NearColor3 = textureLodOffset(colorTexture, varTexCoord0, 0.0, ivec2(0, -1)).xyz;

    // fetch raw RGB values for nearby locations
    // sampling pattern is "five on a die" (each diagonal direction and the center)
    // computing the coordinates for these texture reads could be moved to the vertex shader for speed if needed
    vec3 rgbNW = texture(colorTexture, varTexcoord + (vec2(-1.0, -1.0) * texcoordOffset)).xyz;
    vec3 rgbNE = texture(colorTexture, varTexcoord + (vec2(+1.0, -1.0) * texcoordOffset)).xyz;
    vec3 rgbSW = texture(colorTexture, varTexcoord + (vec2(-1.0, +1.0) * texcoordOffset)).xyz;
    vec3 rgbSE = texture(colorTexture, varTexcoord + (vec2(+1.0, +1.0) * texcoordOffset)).xyz;
    vec3 rgbM = texture(colorTexture, varTexcoord).xyz;

    // convert RGB values to luminance
    vec3 luma = vec3(0.299, 0.587, 0.114);
    float lumaNW = dot(rgbNW, luma);
    float lumaNE = dot(rgbNE, luma);
    float lumaSW = dot(rgbSW, luma);
    float lumaSE = dot(rgbSE, luma);
    float lumaM = dot( rgbM, luma);

    // luma range of local neighborhood
    float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));
    float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));

    // direction perpendicular to local luma gradient
    vec2 dir;
    dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));
    dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));
    vec3 BoxMin = min(CurrentSubpixel, min(NearColor0, min(NearColor1, min(NearColor2, NearColor3))));
    vec3 BoxMax = max(CurrentSubpixel, max(NearColor0, max(NearColor1, max(NearColor2, NearColor3))));;

    // compute clamped direction offset for additional "two-tap" samples
    // longer vector = blurry, shorter vector = sharp
    float dirReduce = max((lumaNW + lumaNE + lumaSW + lumaSE) * (0.25 * FXAA_REDUCE_MUL), FXAA_REDUCE_MIN);
    float rcpDirMin = 1.0 / (min(abs(dir.x), abs(dir.y)) + dirReduce);
    dir = min(vec2(FXAA_SPAN_MAX, FXAA_SPAN_MAX),
        max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX), dir * rcpDirMin)) * texcoordOffset;

    // perform additional texture sampling perpendicular to gradient
    vec3 rgbA = (1.0 / 2.0) * (
        texture(colorTexture, varTexcoord + dir * (1.0 / 3.0 - 0.5)).xyz +
        texture(colorTexture, varTexcoord + dir * (2.0 / 3.0 - 0.5)).xyz);
    vec3 rgbB = rgbA * (1.0 / 2.0) + (1.0 / 4.0) * (
        texture(colorTexture, varTexcoord + dir * (0.0 / 3.0 - 0.5)).xyz +
        texture(colorTexture, varTexcoord + dir * (3.0 / 3.0 - 0.5)).xyz);
    float lumaB = dot(rgbB, luma);
    if (gl_FragCoord.x > 800) {
        History = clamp(History, BoxMin, BoxMax);
    }

    // compare luma of new samples to the luma range of the original neighborhood
    // if the new samples exceed this range, just use the first two samples instead of all four
    if (lumaB < lumaMin || lumaB > lumaMax) {
        outFragColor.xyz=rgbA;
    } else {
        outFragColor.xyz=rgbB;
        History = mix(CurrentSubpixel, History, ModulationFactor);

    /*  outFragHistory.xyz = History;
        outFragHistory.w = ModulationFactor

        outFragColor.xyz = History;
        outFragColor.w = 1.0;*/


    /*  } else {
        outFragColor.xyz = CurrentSubpixel;
        outFragColor.w = 1.0;

        }*/
    if (gl_FragCoord.x > 800) {
        /*  // filter width limit for dependent "two-tap" texture samples
        float FXAA_SPAN_MAX = 8.0;

        // local contrast multiplier for performing AA
        // higher = sharper, but setting this value too high will cause near-vertical and near-horizontal edges to fail
        // see "fxaaQualityEdgeThreshold"
        float FXAA_REDUCE_MUL = 1.0 / 8.0;

        // luminance threshold for processing dark colors
        // see "fxaaQualityEdgeThresholdMin"
        float FXAA_REDUCE_MIN = 1.0 / 128.0;

        // fetch raw RGB values for nearby locations
        // sampling pattern is "five on a die" (each diagonal direction and the center)
        // computing the coordinates for these texture reads could be moved to the vertex shader for speed if needed
        vec3 rgbNW = texture(colorTexture, varTexCoord0 + (vec2(-1.0, -1.0) * texcoordOffset)).xyz;
        vec3 rgbNE = texture(colorTexture, varTexCoord0 + (vec2(+1.0, -1.0) * texcoordOffset)).xyz;
        vec3 rgbSW = texture(colorTexture, varTexCoord0 + (vec2(-1.0, +1.0) * texcoordOffset)).xyz;
        vec3 rgbSE = texture(colorTexture, varTexCoord0 + (vec2(+1.0, +1.0) * texcoordOffset)).xyz;
        vec3 rgbM = texture(colorTexture, varTexCoord0).xyz;

        // convert RGB values to luminance
        vec3 luma = vec3(0.299, 0.587, 0.114);
        float lumaNW = dot(rgbNW, luma);
        float lumaNE = dot(rgbNE, luma);
        float lumaSW = dot(rgbSW, luma);
        float lumaSE = dot(rgbSE, luma);
        float lumaM = dot(rgbM, luma);

        // luma range of local neighborhood
        float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));
        float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));

        // direction perpendicular to local luma gradient
        vec2 dir;
        dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));
        dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));

        // compute clamped direction offset for additional "two-tap" samples
        // longer vector = blurry, shorter vector = sharp
        float dirReduce = max((lumaNW + lumaNE + lumaSW + lumaSE) * (0.25 * FXAA_REDUCE_MUL), FXAA_REDUCE_MIN);
        float rcpDirMin = 1.0 / (min(abs(dir.x), abs(dir.y)) + dirReduce);
        dir = min(vec2(FXAA_SPAN_MAX, FXAA_SPAN_MAX),
            max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX), dir * rcpDirMin)) * texcoordOffset;

        // perform additional texture sampling perpendicular to gradient
        vec3 rgbA = (1.0 / 2.0) * (
            texture(colorTexture, varTexCoord0 + dir * (1.0 / 3.0 - 0.5)).xyz +
            texture(colorTexture, varTexCoord0 + dir * (2.0 / 3.0 - 0.5)).xyz);
        vec3 rgbB = rgbA * (1.0 / 2.0) + (1.0 / 4.0) * (
            texture(colorTexture, varTexCoord0 + dir * (0.0 / 3.0 - 0.5)).xyz +
            texture(colorTexture, varTexCoord0 + dir * (3.0 / 3.0 - 0.5)).xyz);
        float lumaB = dot(rgbB, luma);

        // compare luma of new samples to the luma range of the original neighborhood
        // if the new samples exceed this range, just use the first two samples instead of all four
        if (lumaB < lumaMin || lumaB > lumaMax) {
            outFragColor.xyz = rgbA;
        }
        else {
            outFragColor.xyz = rgbB;
        }*/
        outFragColor.a = 1.0;
    }
    outFragColor.a = 1.0;
}
|
@@ -14,11 +14,27 @@

<@include DeferredBufferWrite.slh@>

in vec2 varTexcoord;
in vec2 varTexCoord0;
out vec4 outFragColor;

uniform sampler2D colorTexture;
uniform float sharpenIntensity;

void main(void) {
    outFragColor = texture(colorTexture, varTexcoord);
    vec4 pixels[9];
    vec4 sharpenedPixel;
    pixels[0] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(-1, -1), 0);
    pixels[1] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(0, -1), 0);
    pixels[2] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(1, -1), 0);

    pixels[3] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(-1, 0), 0);
    pixels[4] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy), 0);
    pixels[5] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(1, 0), 0);

    pixels[6] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(-1, 1), 0);
    pixels[7] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(0, 1), 0);
    pixels[8] = texelFetch(colorTexture, ivec2(gl_FragCoord.xy) + ivec2(1, 1), 0);

    sharpenedPixel = pixels[4] * 7.8 - (pixels[1] + pixels[3] + pixels[5] + pixels[7]) - (pixels[0] + pixels[2] + pixels[6] + pixels[8]) * 0.7;
    outFragColor = mix(pixels[4], sharpenedPixel, sharpenIntensity);
}
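Note on this hunk: it is an unsharp-mask pass over a 3x3 neighbourhood. The kernel weights sum to one (7.8 for the centre, -1.0 for each of the four edge neighbours, -0.7 for each of the four corners: 7.8 - 4.0 - 2.8 = 1.0), so overall brightness is preserved and sharpenIntensity only controls how far the output is pushed away from the original pixel. Below is a minimal standalone sketch of the same idea; the uniform names uColor and uSharpenIntensity are illustrative and not taken from the commit.

#version 410 core
// Minimal unsharp-mask sketch; kernel weights sum to 1.0, so
// uSharpenIntensity = 0.0 returns the source pixel unchanged.
uniform sampler2D uColor;          // illustrative name, not from the commit
uniform float uSharpenIntensity;   // illustrative name, not from the commit
out vec4 outFragColor;

void main(void) {
    ivec2 pix = ivec2(gl_FragCoord.xy);
    vec4 center  = texelFetch(uColor, pix, 0);
    vec4 edges   = texelFetch(uColor, pix + ivec2( 0, -1), 0) + texelFetch(uColor, pix + ivec2(-1,  0), 0)
                 + texelFetch(uColor, pix + ivec2( 1,  0), 0) + texelFetch(uColor, pix + ivec2( 0,  1), 0);
    vec4 corners = texelFetch(uColor, pix + ivec2(-1, -1), 0) + texelFetch(uColor, pix + ivec2( 1, -1), 0)
                 + texelFetch(uColor, pix + ivec2(-1,  1), 0) + texelFetch(uColor, pix + ivec2( 1,  1), 0);
    vec4 sharpened = center * 7.8 - edges - corners * 0.7;
    outFragColor = mix(center, sharpened, uSharpenIntensity);
}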
|
|
|
@@ -27,7 +27,7 @@ out vec4 _position;
out vec3 _normal;

void main(void) {
    _color = colorToLinearRGB(inColor.xyz);
    _color = color_sRGBToLinear(inColor.xyz);
    _alpha = inColor.w;

    TexMapArray texMapArray = getTexMapArray();
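This and the following hunks are a mechanical rename of the colour helpers: colorToLinearRGB / colorToLinearRGBA become color_sRGBToLinear / color_sRGBAToLinear, and the vertex colours are still converted from sRGB to linear before lighting. The commit does not show the helper's implementation, so the sketch below is only the standard piecewise sRGB decode (IEC 61966-2-1) that such a helper commonly uses, written as paste-in GLSL functions with hypothetical sketch_ names; it is not necessarily the engine's exact code.

// Hedged sketch: standard sRGB -> linear decode; a guess at what
// color_sRGBToLinear does, since the commit does not include it.
vec3 sketch_sRGBToLinear(vec3 srgb) {
    vec3 low  = srgb / 12.92;
    vec3 high = pow((srgb + vec3(0.055)) / vec3(1.055), vec3(2.4));
    return mix(low, high, step(vec3(0.04045), srgb));
}

vec4 sketch_sRGBAToLinear(vec4 srgba) {
    return vec4(sketch_sRGBToLinear(srgba.rgb), srgba.a); // alpha stays linear
}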
|
@ -28,7 +28,7 @@ out vec3 _normal;
|
|||
out vec3 _color;
|
||||
|
||||
void main(void) {
|
||||
_color = colorToLinearRGB(inColor.xyz);
|
||||
_color = color_sRGBToLinear(inColor.xyz);
|
||||
_alpha = inColor.w;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -28,7 +28,7 @@ out vec3 _color;
|
|||
|
||||
void main(void) {
|
||||
// pass along the color in linear space
|
||||
_color = colorToLinearRGB(inColor.xyz);
|
||||
_color = color_sRGBToLinear(inColor.xyz);
|
||||
|
||||
// and the texture coordinates
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -29,7 +29,7 @@ out vec4 _worldPosition;
|
|||
|
||||
void main(void) {
|
||||
// pass along the color in linear space
|
||||
_color = colorToLinearRGB(inColor.xyz);
|
||||
_color = color_sRGBToLinear(inColor.xyz);
|
||||
|
||||
// and the texture coordinates
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -29,7 +29,7 @@ out vec3 _color;
|
|||
|
||||
void main(void) {
|
||||
// pass along the color in linear space
|
||||
_color = colorToLinearRGB(inColor.xyz);
|
||||
_color = color_sRGBToLinear(inColor.xyz);
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
<$evalTexMapArrayTexcoord0(texMapArray, inTexCoord0, _texCoord0)$>
|
||||
|
|
|
@ -30,7 +30,7 @@ out vec4 _worldPosition;
|
|||
|
||||
void main(void) {
|
||||
// pass along the color in linear space
|
||||
_color = colorToLinearRGB(inColor.xyz);
|
||||
_color = color_sRGBToLinear(inColor.xyz);
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
<$evalTexMapArrayTexcoord0(texMapArray, inTexCoord0, _texCoord0)$>
|
||||
|
|
|
@ -30,7 +30,7 @@ out float _alpha;
|
|||
|
||||
void main(void) {
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -31,7 +31,7 @@ out float _alpha;
|
|||
|
||||
void main(void) {
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -28,7 +28,7 @@ out vec3 _normal;
|
|||
out vec3 _color;
|
||||
|
||||
void main(void) {
|
||||
_color = colorToLinearRGB(inColor.xyz);
|
||||
_color = color_sRGBToLinear(inColor.xyz);
|
||||
_alpha = inColor.w;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -29,7 +29,7 @@ out vec3 _tangent;
|
|||
out vec3 _color;
|
||||
|
||||
void main(void) {
|
||||
_color = colorToLinearRGB(inColor.xyz);
|
||||
_color = color_sRGBToLinear(inColor.xyz);
|
||||
_alpha = inColor.w;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -23,7 +23,7 @@ out vec4 _position;
|
|||
out vec3 _normal;
|
||||
|
||||
void main(void) {
|
||||
_color = colorToLinearRGB(inColor.xyz);
|
||||
_color = color_sRGBToLinear(inColor.xyz);
|
||||
_alpha = inColor.w;
|
||||
|
||||
_texCoord0 = inTexCoord0.st;
|
||||
|
|
|
@ -25,7 +25,7 @@ out vec2 _texCoord0;
|
|||
out vec4 _position;
|
||||
|
||||
void main(void) {
|
||||
_color = colorToLinearRGBA(inColor);
|
||||
_color = color_sRGBAToLinear(inColor);
|
||||
_texCoord0 = inTexCoord0.st;
|
||||
_position = inPosition;
|
||||
_modelNormal = inNormal.xyz;
|
||||
|
|
|
@ -29,7 +29,7 @@ out vec4 _position;
|
|||
out vec4 _worldPosition;
|
||||
|
||||
void main(void) {
|
||||
_color = colorToLinearRGBA(inColor);
|
||||
_color = color_sRGBAToLinear(inColor);
|
||||
_texCoord0 = inTexCoord0.st;
|
||||
_position = inPosition;
|
||||
_modelNormal = inNormal.xyz;
|
||||
|
|
|
@ -25,6 +25,6 @@ in vec2 _texCoord0;
|
|||
|
||||
void main(void) {
|
||||
vec4 texel = texture(originalTexture, _texCoord0.st);
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
packDeferredFragmentUnlit(normalize(_normal), 1.0, _color.rgb * texel.rgb);
|
||||
}
|
||||
|
|
|
@ -28,7 +28,7 @@ void main(void) {
|
|||
vec4 texel = texture(originalTexture, _texCoord0);
|
||||
float colorAlpha = _color.a;
|
||||
if (_color.a <= 0.0) {
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
colorAlpha = -_color.a;
|
||||
}
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ void main(void) {
|
|||
vec4 texel = texture(originalTexture, _texCoord0);
|
||||
float colorAlpha = _color.a;
|
||||
if (_color.a <= 0.0) {
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
colorAlpha = -_color.a;
|
||||
}
|
||||
|
||||
|
|
|
@ -27,7 +27,7 @@ void main(void) {
|
|||
vec4 texel = texture(originalTexture, _texCoord0.st);
|
||||
float colorAlpha = _color.a;
|
||||
if (_color.a <= 0.0) {
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
colorAlpha = -_color.a;
|
||||
}
|
||||
|
||||
|
|
|
@ -39,7 +39,7 @@ void main(void) {
|
|||
vec4 texel = texture(originalTexture, _texCoord0.st);
|
||||
float colorAlpha = _color.a;
|
||||
if (_color.a <= 0.0) {
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
colorAlpha = -_color.a;
|
||||
}
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ void main(void) {
|
|||
vec4 texel = texture(originalTexture, _texCoord0.st);
|
||||
float opacity = _color.a;
|
||||
if (_color.a <= 0.0) {
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
opacity = -_color.a;
|
||||
}
|
||||
opacity *= texel.a;
|
||||
|
|
|
@ -46,7 +46,7 @@ void main(void) {
|
|||
vec4 texel = texture(originalTexture, _texCoord0.st);
|
||||
float opacity = _color.a;
|
||||
if (_color.a <= 0.0) {
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
opacity = -_color.a;
|
||||
}
|
||||
opacity *= texel.a;
|
||||
|
|
|
@ -29,7 +29,7 @@ void main(void) {
|
|||
vec4 texel = texture(originalTexture, _texCoord0.st);
|
||||
float colorAlpha = _color.a;
|
||||
if (_color.a <= 0.0) {
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
colorAlpha = -_color.a;
|
||||
}
|
||||
_fragColor0 = vec4(_color.rgb * texel.rgb, colorAlpha * texel.a);
|
||||
|
|
|
@ -40,7 +40,7 @@ void main(void) {
|
|||
vec4 texel = texture(originalTexture, _texCoord0.st);
|
||||
float colorAlpha = _color.a;
|
||||
if (_color.a <= 0.0) {
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
colorAlpha = -_color.a;
|
||||
}
|
||||
_fragColor0 = vec4(_color.rgb * texel.rgb+fadeEmissive, colorAlpha * texel.a);
|
||||
|
|
|
@ -25,7 +25,7 @@ in vec2 _texCoord0;
|
|||
|
||||
void main(void) {
|
||||
vec4 texel = texture(originalTexture, _texCoord0.st);
|
||||
texel = colorToLinearRGBA(texel);
|
||||
texel = color_sRGBAToLinear(texel);
|
||||
packDeferredFragmentTranslucent(
|
||||
normalize(_normal),
|
||||
_color.a,
|
||||
|
|
|
@ -37,7 +37,7 @@ void main(void) {
|
|||
skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);
|
||||
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -37,7 +37,7 @@ void main(void) {
|
|||
skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);
|
||||
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -38,7 +38,7 @@ void main(void) {
|
|||
skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);
|
||||
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// skin_model_fade.vert
|
||||
// skin_model_fade_dq.vert
|
||||
// vertex shader
|
||||
//
|
||||
// Created by Olivier Prat on 06/045/17.
|
||||
|
@ -38,7 +38,7 @@ void main(void) {
|
|||
skinPositionNormal(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, position, interpolatedNormal);
|
||||
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -39,7 +39,7 @@ void main(void) {
|
|||
skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);
|
||||
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -39,7 +39,7 @@ void main(void) {
|
|||
skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);
|
||||
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -40,7 +40,7 @@ void main(void) {
|
|||
skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);
|
||||
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// skin_model_normal_map.vert
|
||||
// skin_model_normal_map_fade_dq.vert
|
||||
// vertex shader
|
||||
//
|
||||
// Created by Andrzej Kapolka on 10/29/13.
|
||||
|
@ -40,7 +40,7 @@ void main(void) {
|
|||
skinPositionNormalTangent(inSkinClusterIndex, inSkinClusterWeight, inPosition, inNormal.xyz, inTangent.xyz, position, interpolatedNormal.xyz, interpolatedTangent.xyz);
|
||||
|
||||
// pass along the color
|
||||
_color = colorToLinearRGB(inColor.rgb);
|
||||
_color = color_sRGBToLinear(inColor.rgb);
|
||||
_alpha = inColor.a;
|
||||
|
||||
TexMapArray texMapArray = getTexMapArray();
|
||||
|
|
|
@ -24,7 +24,7 @@ out vec4 varColor;
|
|||
|
||||
void main(void) {
|
||||
varTexCoord0 = inTexCoord0.st;
|
||||
varColor = colorToLinearRGBA(inColor);
|
||||
varColor = color_sRGBAToLinear(inColor);
|
||||
|
||||
// standard transform
|
||||
TransformCamera cam = getTransformCamera();
|
||||
|
|
51
libraries/render-utils/src/taa.slf
Normal file

@@ -0,0 +1,51 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// taa.frag
// fragment shader
//
// Created by Sam Gateau on 8/14/2017
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

<@include taa.slh@>

in vec2 varTexCoord0;
layout(location = 0) out vec4 outFragColor;

void main() {
    vec2 fragUV = varTexCoord0;

    // Debug region before debug or fxaa region X
    float distToRegionFXAA = fragUV.x - taa_getRegionFXAA().x;
    if (distToRegionFXAA > 0.0) {
        outFragColor = vec4(taa_evalFXAA(fragUV), 1.0);
        return;
    }

    vec2 fragVel = taa_fetchVelocityMapBest(fragUV).xy;

    vec3 sourceColor;
    vec3 historyColor;
    vec2 prevFragUV = taa_fetchSourceAndHistory(fragUV, fragVel, sourceColor, historyColor);

    vec3 nextColor = sourceColor;

    if (taa_constrainColor()) {
        // clamp history to neighbourhood of current sample
        historyColor = taa_evalConstrainColor(sourceColor, fragUV, fragVel, historyColor);
    }

    if (taa_feedbackColor()) {
        nextColor = taa_evalFeedbackColor(sourceColor, historyColor, params.blend);
    } else {
        nextColor = mix(historyColor, sourceColor, params.blend);
    }

    outFragColor = vec4(taa_resolveColor(nextColor), 1.0);
}
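The default accumulation path in taa.slf is a plain exponential moving average: each frame the new sample is blended into the reprojected history with a fixed weight, so a static pixel converges towards the mean of its jittered samples. Below is a minimal sketch of that accumulation in isolation; the uniform names uSource, uHistory and uBlend are illustrative and not the commit's actual bindings.

#version 410 core
// Hedged sketch of the non-feedback TAA blend:
// history_{n+1} = mix(history_n, source, blend), an exponential moving average.
uniform sampler2D uSource;    // illustrative name
uniform sampler2D uHistory;   // illustrative name; assumed already reprojected
uniform float uBlend;         // smaller = smoother, but more prone to ghosting
in vec2 varTexCoord0;
layout(location = 0) out vec4 outFragColor;

void main(void) {
    vec3 source  = texture(uSource,  varTexCoord0).rgb;
    vec3 history = texture(uHistory, varTexCoord0).rgb;
    outFragColor = vec4(mix(history, source, uBlend), 1.0);
}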
529
libraries/render-utils/src/taa.slh
Normal file

@@ -0,0 +1,529 @@
|
|||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// TAA.slh
|
||||
// Common component needed by TemporalAntialiasing fragment shader
|
||||
//
|
||||
// Created by Sam Gateau on 8/17/2017
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include DeferredTransform.slh@>
|
||||
<$declareDeferredFrameTransform()$>
|
||||
|
||||
<@include gpu/Color.slh@>
|
||||
|
||||
uniform sampler2D depthMap;
|
||||
uniform sampler2D sourceMap;
|
||||
uniform sampler2D historyMap;
|
||||
uniform sampler2D velocityMap;
|
||||
uniform sampler2D nextMap;
|
||||
|
||||
struct TAAParams
|
||||
{
|
||||
float none;
|
||||
float blend;
|
||||
float covarianceGamma;
|
||||
float debugShowVelocityThreshold;
|
||||
ivec4 flags;
|
||||
vec4 pixelInfo_orbZoom;
|
||||
vec4 regionInfo;
|
||||
};
|
||||
|
||||
layout(std140) uniform taaParamsBuffer {
|
||||
TAAParams params;
|
||||
};
|
||||
|
||||
#define GET_BIT(bitfield, bitIndex) bool((bitfield) & (1 << (bitIndex)))
|
||||
|
||||
bool taa_isDebugEnabled() {
|
||||
return GET_BIT(params.flags.x, 0);
|
||||
}
|
||||
|
||||
bool taa_showDebugCursor() {
|
||||
return GET_BIT(params.flags.x, 1);
|
||||
}
|
||||
|
||||
bool taa_showClosestFragment() {
|
||||
return GET_BIT(params.flags.x, 3);
|
||||
}
|
||||
|
||||
bool taa_constrainColor() {
|
||||
return GET_BIT(params.flags.y, 1);
|
||||
}
|
||||
|
||||
bool taa_feedbackColor() {
|
||||
return GET_BIT(params.flags.y, 4);
|
||||
}
|
||||
|
||||
vec2 taa_getDebugCursorTexcoord() {
|
||||
return params.pixelInfo_orbZoom.xy;
|
||||
}
|
||||
|
||||
float taa_getDebugOrbZoom() {
|
||||
return params.pixelInfo_orbZoom.z;
|
||||
}
|
||||
|
||||
vec2 taa_getRegionDebug() {
|
||||
return params.regionInfo.xy;
|
||||
}
|
||||
|
||||
vec2 taa_getRegionFXAA() {
|
||||
return params.regionInfo.zw;
|
||||
}
|
||||
#define USE_YCOCG 1
|
||||
|
||||
vec4 taa_fetchColor(sampler2D map, vec2 uv) {
|
||||
#if USE_YCOCG
|
||||
vec4 c = texture(map, uv);
|
||||
return vec4(color_LinearToYCoCg(c.rgb), c.a);
|
||||
#else
|
||||
return texture(map, uv);
|
||||
#endif
|
||||
}
|
||||
|
||||
vec3 taa_resolveColor(vec3 color) {
|
||||
#if USE_YCOCG
|
||||
return color_YCoCgToLinear(color);
|
||||
#else
|
||||
return color;
|
||||
#endif
|
||||
}
|
||||
|
||||
vec4 taa_fetchSourceMap(vec2 uv) {
|
||||
#if USE_YCOCG
|
||||
vec4 c = texture(sourceMap, uv);
|
||||
return vec4(color_LinearToYCoCg(c.rgb), c.a);
|
||||
#else
|
||||
return texture(sourceMap, uv);
|
||||
#endif
|
||||
}
|
||||
|
||||
vec4 taa_fetchHistoryMap(vec2 uv) {
|
||||
#if USE_YCOCG
|
||||
vec4 c = texture(historyMap, uv);
|
||||
return vec4(color_LinearToYCoCg(c.rgb), c.a);
|
||||
#else
|
||||
return texture(historyMap, uv);
|
||||
#endif
|
||||
}
|
||||
|
||||
vec4 taa_fetchNextMap(vec2 uv) {
|
||||
#if USE_YCOCG
|
||||
vec4 c = texture(nextMap, uv);
|
||||
return vec4(color_LinearToYCoCg(c.rgb), c.a);
|
||||
#else
|
||||
return texture(nextMap, uv);
|
||||
#endif
|
||||
}
|
||||
|
||||
vec2 taa_fetchVelocityMap(vec2 uv) {
|
||||
return texture(velocityMap, uv).xy;
|
||||
}
|
||||
|
||||
float taa_fetchDepth(vec2 uv) {
|
||||
return -texture(depthMap, vec2(uv), 0).x;
|
||||
}
|
||||
|
||||
|
||||
#define ZCMP_GT(a, b) (a > b)
|
||||
|
||||
vec2 taa_getImageSize() {
|
||||
vec2 imageSize = getWidthHeight(0);
|
||||
if (isStereo()) {
|
||||
imageSize.x *= 2.0;
|
||||
}
|
||||
return imageSize;
|
||||
}
|
||||
|
||||
vec2 taa_getTexelSize() {
|
||||
vec2 texelSize = getInvWidthHeight();
|
||||
if (isStereo()) {
|
||||
texelSize.x *= 0.5;
|
||||
}
|
||||
return texelSize;
|
||||
}
|
||||
|
||||
vec3 taa_findClosestFragment3x3(vec2 uv)
|
||||
{
|
||||
vec2 dd = abs(taa_getTexelSize());
|
||||
vec2 du = vec2(dd.x, 0.0);
|
||||
vec2 dv = vec2(0.0, dd.y);
|
||||
|
||||
vec3 dtl = vec3(-1, -1, taa_fetchDepth(uv - dv - du));
|
||||
vec3 dtc = vec3( 0, -1, taa_fetchDepth(uv - dv));
|
||||
vec3 dtr = vec3( 1, -1, taa_fetchDepth(uv - dv + du));
|
||||
|
||||
vec3 dml = vec3(-1, 0, taa_fetchDepth(uv - du));
|
||||
vec3 dmc = vec3( 0, 0, taa_fetchDepth(uv));
|
||||
vec3 dmr = vec3( 1, 0, taa_fetchDepth(uv + du));
|
||||
|
||||
vec3 dbl = vec3(-1, 1, taa_fetchDepth(uv + dv - du));
|
||||
vec3 dbc = vec3( 0, 1, taa_fetchDepth(uv + dv));
|
||||
vec3 dbr = vec3( 1, 1, taa_fetchDepth(uv + dv + du));
|
||||
|
||||
vec3 dmin = dtl;
|
||||
if (ZCMP_GT(dmin.z, dtc.z)) dmin = dtc;
|
||||
if (ZCMP_GT(dmin.z, dtr.z)) dmin = dtr;
|
||||
|
||||
if (ZCMP_GT(dmin.z, dml.z)) dmin = dml;
|
||||
if (ZCMP_GT(dmin.z, dmc.z)) dmin = dmc;
|
||||
if (ZCMP_GT(dmin.z, dmr.z)) dmin = dmr;
|
||||
|
||||
if (ZCMP_GT(dmin.z, dbl.z)) dmin = dbl;
|
||||
if (ZCMP_GT(dmin.z, dbc.z)) dmin = dbc;
|
||||
if (ZCMP_GT(dmin.z, dbr.z)) dmin = dbr;
|
||||
|
||||
return vec3(uv + dd.xy * dmin.xy, dmin.z);
|
||||
}
|
||||
|
||||
vec2 taa_fetchVelocityMapBest(vec2 uv) {
|
||||
vec2 dd = abs(taa_getTexelSize());
|
||||
vec2 du = vec2(dd.x, 0.0);
|
||||
vec2 dv = vec2(0.0, dd.y);
|
||||
|
||||
vec2 dtl = taa_fetchVelocityMap(uv - dv - du);
|
||||
vec2 dtc = taa_fetchVelocityMap(uv - dv);
|
||||
vec2 dtr = taa_fetchVelocityMap(uv - dv + du);
|
||||
|
||||
vec2 dml = taa_fetchVelocityMap(uv - du);
|
||||
vec2 dmc = taa_fetchVelocityMap(uv);
|
||||
vec2 dmr = taa_fetchVelocityMap(uv + du);
|
||||
|
||||
vec2 dbl = taa_fetchVelocityMap(uv + dv - du);
|
||||
vec2 dbc = taa_fetchVelocityMap(uv + dv);
|
||||
vec2 dbr = taa_fetchVelocityMap(uv + dv + du);
|
||||
|
||||
vec3 best = vec3(dtl, dot(dtl,dtl));
|
||||
|
||||
float testSpeed = dot(dtc,dtc);
|
||||
if (testSpeed > best.z) { best = vec3(dtc, testSpeed); }
|
||||
testSpeed = dot(dtr,dtr);
|
||||
if (testSpeed > best.z) { best = vec3(dtr, testSpeed); }
|
||||
|
||||
testSpeed = dot(dml,dml);
|
||||
if (testSpeed > best.z) { best = vec3(dml, testSpeed); }
|
||||
testSpeed = dot(dmc,dmc);
|
||||
if (testSpeed > best.z) { best = vec3(dmc, testSpeed); }
|
||||
testSpeed = dot(dmr,dmr);
|
||||
if (testSpeed > best.z) { best = vec3(dmr, testSpeed); }
|
||||
|
||||
testSpeed = dot(dbl,dbl);
|
||||
if (testSpeed > best.z) { best = vec3(dbl, testSpeed); }
|
||||
testSpeed = dot(dbc,dbc);
|
||||
if (testSpeed > best.z) { best = vec3(dbc, testSpeed); }
|
||||
testSpeed = dot(dbr,dbr);
|
||||
if (testSpeed > best.z) { best = vec3(dbr, testSpeed); }
|
||||
|
||||
return best.xy;
|
||||
}
|
||||
|
||||
vec2 taa_fromFragUVToEyeUVAndSide(vec2 fragUV, out int stereoSide) {
|
||||
vec2 eyeUV = fragUV;
|
||||
stereoSide = 0;
|
||||
if (isStereo()) {
|
||||
if (eyeUV.x > 0.5) {
|
||||
eyeUV.x -= 0.5;
|
||||
stereoSide = 1;
|
||||
}
|
||||
eyeUV.x *= 2.0;
|
||||
}
|
||||
return eyeUV;
|
||||
}
|
||||
|
||||
vec2 taa_fromEyeUVToFragUV(vec2 eyeUV, int stereoSide) {
|
||||
vec2 fragUV = eyeUV;
|
||||
if (isStereo()) {
|
||||
fragUV.x *= 0.5;
|
||||
fragUV.x += stereoSide*0.5;
|
||||
}
|
||||
return fragUV;
|
||||
}
|
||||
|
||||
vec2 taa_computePrevFragAndEyeUV(vec2 fragUV, vec2 fragVelocity, out vec2 prevEyeUV) {
|
||||
int stereoSide = 0;
|
||||
vec2 eyeUV = taa_fromFragUVToEyeUVAndSide(fragUV, stereoSide);
|
||||
prevEyeUV = eyeUV - fragVelocity;
|
||||
return taa_fromEyeUVToFragUV(prevEyeUV, stereoSide);
|
||||
}
|
||||
|
||||
vec2 taa_fetchSourceAndHistory(vec2 fragUV, vec2 fragVelocity, out vec3 sourceColor, out vec3 historyColor) {
|
||||
vec2 prevEyeUV;
|
||||
vec2 prevFragUV = taa_computePrevFragAndEyeUV(fragUV, fragVelocity, prevEyeUV);
|
||||
sourceColor = taa_fetchSourceMap(fragUV).xyz;
|
||||
|
||||
historyColor = sourceColor;
|
||||
if (!(any(lessThan(prevEyeUV, vec2(0.0))) || any(greaterThan(prevEyeUV, vec2(1.0))))) {
|
||||
historyColor = taa_fetchHistoryMap(prevFragUV).xyz;
|
||||
}
|
||||
return prevFragUV;
|
||||
}
|
||||
|
||||
float Luminance(vec3 rgb) {
|
||||
return rgb.x/4.0 + rgb.y/2.0 + rgb.z/4.0;
|
||||
}
|
||||
|
||||
#define MINMAX_3X3_ROUNDED 1
|
||||
|
||||
mat3 taa_evalNeighbourColorVariance(vec3 sourceColor, vec2 fragUV, vec2 fragVelocity) {
|
||||
vec2 texelSize = taa_getTexelSize();
|
||||
|
||||
|
||||
vec2 du = vec2(texelSize.x, 0.0);
|
||||
vec2 dv = vec2(0.0, texelSize.y);
|
||||
|
||||
vec3 sampleColor = taa_fetchSourceMap(fragUV - dv - du).rgb;
|
||||
vec3 sumSamples = sampleColor;
|
||||
vec3 sumSamples2 = sampleColor * sampleColor;
|
||||
|
||||
sampleColor = taa_fetchSourceMap(fragUV - dv).rgb;
|
||||
sumSamples += sampleColor;
|
||||
sumSamples2 += sampleColor * sampleColor;
|
||||
|
||||
sampleColor = taa_fetchSourceMap(fragUV - dv + du).rgb;
|
||||
sumSamples += sampleColor;
|
||||
sumSamples2 += sampleColor * sampleColor;
|
||||
|
||||
sampleColor = taa_fetchSourceMap(fragUV - du).rgb;
|
||||
sumSamples += sampleColor;
|
||||
sumSamples2 += sampleColor * sampleColor;
|
||||
|
||||
sampleColor = sourceColor; // same as taa_fetchSourceMap(fragUV).rgb; reuse the already fetched source sample
|
||||
sumSamples += sampleColor;
|
||||
sumSamples2 += sampleColor * sampleColor;
|
||||
|
||||
sampleColor = taa_fetchSourceMap(fragUV + du).rgb;
|
||||
sumSamples += sampleColor;
|
||||
sumSamples2 += sampleColor * sampleColor;
|
||||
|
||||
sampleColor = taa_fetchSourceMap(fragUV + dv - du).rgb;
|
||||
sumSamples += sampleColor;
|
||||
sumSamples2 += sampleColor * sampleColor;
|
||||
|
||||
sampleColor = taa_fetchSourceMap(fragUV + dv).rgb;
|
||||
sumSamples += sampleColor;
|
||||
sumSamples2 += sampleColor * sampleColor;
|
||||
|
||||
sampleColor = taa_fetchSourceMap(fragUV + dv + du).rgb;
|
||||
sumSamples += sampleColor;
|
||||
sumSamples2 += sampleColor * sampleColor;
|
||||
|
||||
|
||||
vec3 mu = sumSamples / vec3(9.0);
|
||||
vec3 sigma = sqrt(max(sumSamples2 / vec3(9.0) - mu * mu, vec3(0.0)));
|
||||
|
||||
float gamma = params.covarianceGamma;
|
||||
vec3 cmin = mu - gamma * sigma;
|
||||
vec3 cmax = mu + gamma * sigma;
|
||||
|
||||
return mat3(cmin, cmax, mu);
|
||||
}
|
||||
|
||||
mat3 taa_evalNeighbourColorRegion(vec3 sourceColor, vec2 fragUV, vec2 fragVelocity, float fragZe) {
|
||||
vec2 imageSize = taa_getImageSize();
|
||||
vec2 texelSize = taa_getTexelSize();
|
||||
vec3 cmin, cmax, cavg;
|
||||
|
||||
#if MINMAX_3X3_ROUNDED
|
||||
vec2 du = vec2(texelSize.x, 0.0);
|
||||
vec2 dv = vec2(0.0, texelSize.y);
|
||||
|
||||
vec3 ctl = taa_fetchSourceMap(fragUV - dv - du).rgb;
|
||||
vec3 ctc = taa_fetchSourceMap(fragUV - dv).rgb;
|
||||
vec3 ctr = taa_fetchSourceMap(fragUV - dv + du).rgb;
|
||||
vec3 cml = taa_fetchSourceMap(fragUV - du).rgb;
|
||||
vec3 cmc = sourceColor; // same as taa_fetchSourceMap(fragUV).rgb; reuse the already fetched source sample
|
||||
vec3 cmr = taa_fetchSourceMap(fragUV + du).rgb;
|
||||
vec3 cbl = taa_fetchSourceMap(fragUV + dv - du).rgb;
|
||||
vec3 cbc = taa_fetchSourceMap(fragUV + dv).rgb;
|
||||
vec3 cbr = taa_fetchSourceMap(fragUV + dv + du).rgb;
|
||||
|
||||
cmin = min(ctl, min(ctc, min(ctr, min(cml, min(cmc, min(cmr, min(cbl, min(cbc, cbr))))))));
|
||||
cmax = max(ctl, max(ctc, max(ctr, max(cml, max(cmc, max(cmr, max(cbl, max(cbc, cbr))))))));
|
||||
|
||||
#if MINMAX_3X3_ROUNDED || USE_YCOCG || USE_CLIPPING
|
||||
cavg = (ctl + ctc + ctr + cml + cmc + cmr + cbl + cbc + cbr) / 9.0;
|
||||
#else
|
||||
cavg = (cmin + cmax ) * 0.5;
|
||||
#endif
|
||||
|
||||
#if MINMAX_3X3_ROUNDED
|
||||
vec3 cmin5 = min(ctc, min(cml, min(cmc, min(cmr, cbc))));
|
||||
vec3 cmax5 = max(ctc, max(cml, max(cmc, max(cmr, cbc))));
|
||||
vec3 cavg5 = (ctc + cml + cmc + cmr + cbc) / 5.0;
|
||||
cmin = 0.5 * (cmin + cmin5);
|
||||
cmax = 0.5 * (cmax + cmax5);
|
||||
cavg = 0.5 * (cavg + cavg5);
|
||||
#endif
|
||||
#else
|
||||
const float _SubpixelThreshold = 0.5;
|
||||
const float _GatherBase = 0.5;
|
||||
const float _GatherSubpixelMotion = 0.1666;
|
||||
|
||||
vec2 texel_vel = fragVelocity * imageSize;
|
||||
float texel_vel_mag = length(texel_vel) * -fragZe;
|
||||
float k_subpixel_motion = clamp(_SubpixelThreshold / (0.0001 + texel_vel_mag), 0.0, 1.0);
|
||||
float k_min_max_support = _GatherBase + _GatherSubpixelMotion * k_subpixel_motion;
|
||||
|
||||
vec2 ss_offset01 = k_min_max_support * vec2(-texelSize.x, texelSize.y);
|
||||
vec2 ss_offset11 = k_min_max_support * vec2(texelSize.x, texelSize.y);
|
||||
vec3 c00 = taa_fetchSourceMap(fragUV - ss_offset11).rgb;
|
||||
vec3 c10 = taa_fetchSourceMap(fragUV - ss_offset01).rgb;
|
||||
vec3 c01 = taa_fetchSourceMap(fragUV + ss_offset01).rgb;
|
||||
vec3 c11 = taa_fetchSourceMap(fragUV + ss_offset11).rgb;
|
||||
|
||||
cmin = min(c00, min(c10, min(c01, c11)));
|
||||
cmax = max(c00, max(c10, max(c01, c11)));
|
||||
cavg = (cmin + cmax ) * 0.5;
|
||||
|
||||
#if USE_YCOCG || USE_CLIPPING
|
||||
cavg = (c00 + c10 + c01 + c11) / 4.0;
|
||||
#else
|
||||
cavg = (cmin + cmax ) * 0.5;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// shrink chroma min-max
|
||||
#if USE_YCOCG
|
||||
vec2 chroma_extent = vec2(0.25 * 0.5 * (cmax.r - cmin.r));
|
||||
vec2 chroma_center = sourceColor.gb;
|
||||
cmin.yz = chroma_center - chroma_extent;
|
||||
cmax.yz = chroma_center + chroma_extent;
|
||||
cavg.yz = chroma_center;
|
||||
#endif
|
||||
|
||||
return mat3(cmin, cmax, cavg);
|
||||
}
|
||||
|
||||
//#define USE_OPTIMIZATIONS 0
|
||||
|
||||
vec3 taa_clampColor(vec3 colorMin, vec3 colorMax, vec3 colorSource, vec3 color) {
|
||||
const float eps = 0.00001;
|
||||
vec3 p = colorSource;
|
||||
vec3 q = color;
|
||||
// note: only clips towards aabb center (but fast!)
|
||||
vec3 p_clip = 0.5 * (colorMax + colorMin);
|
||||
vec3 e_clip = 0.5 * (colorMax - colorMin) + vec3(eps);
|
||||
|
||||
vec3 v_clip = q - p_clip;
|
||||
vec3 v_unit = v_clip.xyz / e_clip;
|
||||
vec3 a_unit = abs(v_unit);
|
||||
float ma_unit = max(a_unit.x, max(a_unit.y, a_unit.z));
|
||||
|
||||
if (ma_unit > 1.0)
|
||||
return p_clip + v_clip / ma_unit;
|
||||
else
|
||||
return q;// point inside aabb
|
||||
}
|
||||
|
||||
vec3 taa_evalConstrainColor(vec3 sourceColor, vec2 sourceUV, vec2 sourceVel, vec3 candidateColor) {
|
||||
mat3 colorMinMaxAvg;
|
||||
|
||||
colorMinMaxAvg = taa_evalNeighbourColorVariance(sourceColor, sourceUV, sourceVel);
|
||||
|
||||
// clamp history to neighbourhood of current sample
|
||||
return taa_clampColor(colorMinMaxAvg[0], colorMinMaxAvg[1], sourceColor, candidateColor);
|
||||
}
|
||||
|
||||
vec3 taa_evalFeedbackColor(vec3 sourceColor, vec3 historyColor, float blendFactor) {
|
||||
const float _FeedbackMin = 0.1;
|
||||
const float _FeedbackMax = 0.9;
|
||||
// feedback weight from unbiased luminance diff (t.lottes)
|
||||
#if USE_YCOCG
|
||||
float lum0 = sourceColor.r;
|
||||
float lum1 = historyColor.r;
|
||||
#else
|
||||
float lum0 = Luminance(sourceColor.rgb);
|
||||
float lum1 = Luminance(historyColor.rgb);
|
||||
#endif
|
||||
float unbiased_diff = abs(lum0 - lum1) / max(lum0, max(lum1, 0.2));
|
||||
float unbiased_weight = 1.0 - unbiased_diff;
|
||||
float unbiased_weight_sqr = unbiased_weight * unbiased_weight;
|
||||
float k_feedback = mix(_FeedbackMin, _FeedbackMax, unbiased_weight_sqr);
|
||||
|
||||
|
||||
vec3 nextColor = mix(historyColor, sourceColor, k_feedback * blendFactor).xyz;
|
||||
return nextColor;
|
||||
}
|
||||
|
||||
|
||||
<$declareColorWheel()$>
|
||||
|
||||
vec3 taa_getVelocityColorRelative(float velocityPixLength) {
|
||||
return colorRamp(velocityPixLength/params.debugShowVelocityThreshold);
|
||||
}
|
||||
|
||||
vec3 taa_getVelocityColorAboveThreshold(float velocityPixLength) {
|
||||
return colorRamp((velocityPixLength - params.debugShowVelocityThreshold)/params.debugShowVelocityThreshold);
|
||||
}
|
||||
|
||||
|
||||
vec3 taa_evalFXAA(vec2 fragUV) {
|
||||
|
||||
// vec2 texelSize = getInvWidthHeight();
|
||||
vec2 texelSize = taa_getTexelSize();
|
||||
|
||||
// filter width limit for dependent "two-tap" texture samples
|
||||
float FXAA_SPAN_MAX = 8.0;
|
||||
|
||||
// local contrast multiplier for performing AA
|
||||
// higher = sharper, but setting this value too high will cause near-vertical and near-horizontal edges to fail
|
||||
// see "fxaaQualityEdgeThreshold"
|
||||
float FXAA_REDUCE_MUL = 1.0 / 8.0;
|
||||
|
||||
// luminance threshold for processing dark colors
|
||||
// see "fxaaQualityEdgeThresholdMin"
|
||||
float FXAA_REDUCE_MIN = 1.0 / 128.0;
|
||||
|
||||
// fetch raw RGB values for nearby locations
|
||||
// sampling pattern is "five on a die" (each diagonal direction and the center)
|
||||
// computing the coordinates for these texture reads could be moved to the vertex shader for speed if needed
|
||||
vec3 rgbNW = texture(sourceMap, fragUV + (vec2(-1.0, -1.0) * texelSize)).xyz;
|
||||
vec3 rgbNE = texture(sourceMap, fragUV + (vec2(+1.0, -1.0) * texelSize)).xyz;
|
||||
vec3 rgbSW = texture(sourceMap, fragUV + (vec2(-1.0, +1.0) * texelSize)).xyz;
|
||||
vec3 rgbSE = texture(sourceMap, fragUV + (vec2(+1.0, +1.0) * texelSize)).xyz;
|
||||
vec3 rgbM = texture(sourceMap, fragUV).xyz;
|
||||
|
||||
// convert RGB values to luminance
|
||||
vec3 luma = vec3(0.299, 0.587, 0.114);
|
||||
float lumaNW = dot(rgbNW, luma);
|
||||
float lumaNE = dot(rgbNE, luma);
|
||||
float lumaSW = dot(rgbSW, luma);
|
||||
float lumaSE = dot(rgbSE, luma);
|
||||
float lumaM = dot( rgbM, luma);
|
||||
|
||||
// luma range of local neighborhood
|
||||
float lumaMin = min(lumaM, min(min(lumaNW, lumaNE), min(lumaSW, lumaSE)));
|
||||
float lumaMax = max(lumaM, max(max(lumaNW, lumaNE), max(lumaSW, lumaSE)));
|
||||
|
||||
// direction perpendicular to local luma gradient
|
||||
vec2 dir;
|
||||
dir.x = -((lumaNW + lumaNE) - (lumaSW + lumaSE));
|
||||
dir.y = ((lumaNW + lumaSW) - (lumaNE + lumaSE));
|
||||
|
||||
// compute clamped direction offset for additional "two-tap" samples
|
||||
// longer vector = blurry, shorter vector = sharp
|
||||
float dirReduce = max((lumaNW + lumaNE + lumaSW + lumaSE) * (0.25 * FXAA_REDUCE_MUL), FXAA_REDUCE_MIN);
|
||||
float rcpDirMin = 1.0 / (min(abs(dir.x), abs(dir.y)) + dirReduce);
|
||||
dir = min(vec2(FXAA_SPAN_MAX, FXAA_SPAN_MAX),
|
||||
max(vec2(-FXAA_SPAN_MAX, -FXAA_SPAN_MAX), dir * rcpDirMin)) * texelSize;
|
||||
|
||||
// perform additional texture sampling perpendicular to gradient
|
||||
vec3 rgbA = (1.0 / 2.0) * (
|
||||
texture(sourceMap, fragUV + dir * (1.0 / 3.0 - 0.5)).xyz +
|
||||
texture(sourceMap, fragUV + dir * (2.0 / 3.0 - 0.5)).xyz);
|
||||
vec3 rgbB = rgbA * (1.0 / 2.0) + (1.0 / 4.0) * (
|
||||
texture(sourceMap, fragUV + dir * (0.0 / 3.0 - 0.5)).xyz +
|
||||
texture(sourceMap, fragUV + dir * (3.0 / 3.0 - 0.5)).xyz);
|
||||
float lumaB = dot(rgbB, luma);
|
||||
|
||||
// compare luma of new samples to the luma range of the original neighborhood
|
||||
// if the new samples exceed this range, just use the first two samples instead of all four
|
||||
if (lumaB < lumaMin || lumaB > lumaMax) {
|
||||
return rgbA;
|
||||
} else {
|
||||
return rgbB;
|
||||
}
|
||||
}
|
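The history constraint in taa.slh builds a min/max box (optionally variance-based) from the 3x3 neighbourhood of the current sample and then clips the reprojected history towards the centre of that box, which is what suppresses ghosting when the history no longer matches the scene. Below is a compact sketch of just the clipping step, equivalent in spirit to taa_clampColor above; the function name sketch_clipToNeighbourhood is illustrative.

// Hedged sketch: clip a history colour towards the centre of the neighbourhood AABB,
// mirroring the "only clips towards aabb center" approach of taa_clampColor.
vec3 sketch_clipToNeighbourhood(vec3 cmin, vec3 cmax, vec3 history) {
    vec3 center = 0.5 * (cmax + cmin);
    vec3 extent = 0.5 * (cmax - cmin) + vec3(1e-5);
    vec3 offset = history - center;
    float t = max(abs(offset.x / extent.x), max(abs(offset.y / extent.y), abs(offset.z / extent.z)));
    return (t > 1.0) ? center + offset / t : history;   // inside the box: keep history as-is
}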
156
libraries/render-utils/src/taa_blend.slf
Normal file

@@ -0,0 +1,156 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// taa_blend.frag
|
||||
// fragment shader
|
||||
//
|
||||
// Created by Sam Gateau on 8/17/2017
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include taa.slh@>
|
||||
|
||||
in vec2 varTexCoord0;
|
||||
layout(location = 0) out vec4 outFragColor;
|
||||
|
||||
void main(void) {
|
||||
vec3 nextColor = texture(nextMap, varTexCoord0).xyz;
|
||||
outFragColor = vec4(nextColor, 1.0);
|
||||
|
||||
|
||||
// Pixel being shaded
|
||||
vec3 sourceColor = texture(sourceMap, varTexCoord0).xyz;
|
||||
|
||||
vec2 imageSize = getWidthHeight(0);
|
||||
vec2 texelSize = getInvWidthHeight();
|
||||
|
||||
vec2 pixPos = varTexCoord0 * imageSize;
|
||||
vec2 pixVelocity = imageSize * texture(velocityMap, varTexCoord0).xy;
|
||||
float pixVelocityLength = length(pixVelocity);
|
||||
vec2 velocity = pixVelocity * texelSize;
|
||||
int stereoSide = 0;
|
||||
vec2 prevTexCoord = taa_fromFragUVToEyeUVAndSide(varTexCoord0, stereoSide) - velocity;
|
||||
prevTexCoord = taa_fromEyeUVToFragUV(prevTexCoord, stereoSide);
|
||||
vec2 prevPix = prevTexCoord * imageSize;
|
||||
|
||||
// Pixel Debugged
|
||||
if (taa_showDebugCursor()) {
|
||||
vec2 cursorUVRaw = taa_getDebugCursorTexcoord();
|
||||
vec2 cursorPosRaw = floor(cursorUVRaw * imageSize) + vec2(0.5);
|
||||
vec3 cursorFrag = taa_findClosestFragment3x3(cursorUVRaw);
|
||||
vec2 cursorUV = cursorUVRaw;
|
||||
vec2 cursorPos = cursorUV * imageSize;
|
||||
vec2 cursorVelocity = texture(velocityMap, cursorUV).xy;
|
||||
vec2 cursorPrevUV = taa_fromFragUVToEyeUVAndSide(cursorUV, stereoSide) - cursorVelocity;
|
||||
cursorVelocity *= imageSize;
|
||||
float cursorVelocityLength = length(cursorVelocity);
|
||||
vec2 cursorVelocityDir = cursorVelocity / cursorVelocityLength;
|
||||
|
||||
vec2 cursorToFragVec = pixPos - cursorPos;
|
||||
float cursorToFragLength = length(cursorToFragVec);
|
||||
|
||||
if ((cursorToFragLength <= cursorVelocityLength)) {
|
||||
vec2 cursorVelocityDir = cursorVelocity / cursorVelocityLength;
|
||||
vec2 cursorVelocityNor = vec2(cursorVelocityDir.y, -cursorVelocityDir.x);
|
||||
|
||||
if ((dot(cursorVelocityDir, cursorToFragVec) < 0) && abs(dot(cursorVelocityNor, cursorToFragVec)) < 1.0) {
|
||||
|
||||
vec3 speedColor = taa_getVelocityColorRelative(cursorToFragLength);
|
||||
|
||||
outFragColor = vec4(speedColor, 1.0);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
float tenPercentHeight = 0.1 * imageSize.y;
|
||||
float centerWidth = imageSize.x * 0.5;
|
||||
|
||||
//vec2 nextOrbPos = vec2(centerWidth, imageSize.y - 3 * tenPercentHeight);
|
||||
vec2 nextOrbPos = cursorPos;
|
||||
vec2 nextOrbPosToPix = pixPos - nextOrbPos;
|
||||
float nextOrbPosToPixLength = length(nextOrbPosToPix);
|
||||
|
||||
vec2 prevOrbPos = nextOrbPos - cursorVelocityDir * 2.0 * tenPercentHeight;
|
||||
vec2 prevOrbPosToPix = pixPos - prevOrbPos;
|
||||
float prevOrbPosToPixLength = length(prevOrbPosToPix);
|
||||
|
||||
float orbPixThreshold = 2.0 / taa_getDebugOrbZoom();
|
||||
|
||||
if ((prevOrbPosToPixLength < tenPercentHeight) && (cursorVelocityLength > 0.5)) {
|
||||
vec2 prevOrbPosToPix_uv = cursorPrevUV + prevOrbPosToPix * texelSize / taa_getDebugOrbZoom();
|
||||
vec3 preOrbColor = vec3(0.0);
|
||||
if (!(any(lessThan(prevOrbPosToPix_uv, vec2(0.0))) || any(greaterThan(prevOrbPosToPix_uv, vec2(1.0))))) {
|
||||
preOrbColor = texture(historyMap, prevOrbPosToPix_uv).xyz;
|
||||
}
|
||||
if (prevOrbPosToPixLength < orbPixThreshold) {
|
||||
preOrbColor = vec3(1.0, 0.0, 1.0);
|
||||
}
|
||||
float distanceToNext = length(imageSize * (cursorUV - prevOrbPosToPix_uv));
|
||||
if (distanceToNext < orbPixThreshold) {
|
||||
preOrbColor = vec3(1.0, 0.5, 0.0);
|
||||
}
|
||||
outFragColor = vec4(preOrbColor, 1.0);
|
||||
return;
|
||||
}
|
||||
if (nextOrbPosToPixLength < tenPercentHeight) {
|
||||
vec2 nextOrbPosToPix_uv = cursorUV + nextOrbPosToPix * texelSize / taa_getDebugOrbZoom();
|
||||
vec3 nextOrbColor = vec3(0.0);
|
||||
if (!(any(lessThan(nextOrbPosToPix_uv, vec2(0.0))) || any(greaterThan(nextOrbPosToPix_uv, vec2(1.0))))) {
|
||||
nextOrbColor = texture(nextMap, nextOrbPosToPix_uv).xyz;
|
||||
}
|
||||
float distanceToPrev = length(imageSize * (cursorPrevUV - nextOrbPosToPix_uv));
|
||||
if (distanceToPrev < orbPixThreshold) {
|
||||
nextOrbColor = vec3(1.0, 0.0, 1.0);
|
||||
}
|
||||
if (nextOrbPosToPixLength < orbPixThreshold) {
|
||||
nextOrbColor = vec3(1.0, 0.5, 0.0);
|
||||
}
|
||||
|
||||
outFragColor = vec4(nextOrbColor, 1.0);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Debug region before debug or fxaa region X
|
||||
float distToRegionDebug = varTexCoord0.x - taa_getRegionDebug().x;
|
||||
float distToRegionFXAA = varTexCoord0.x - taa_getRegionFXAA().x;
|
||||
if ((distToRegionFXAA < 0.0) && (distToRegionDebug > 0.0)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// draw region splitter
|
||||
if ((abs(distToRegionDebug) < getInvWidthHeight().x) || (abs(distToRegionFXAA) < getInvWidthHeight().x)) {
|
||||
outFragColor.rgb = vec3(1.0, 1.0, 0.0);
|
||||
return;
|
||||
}
|
||||
|
||||
if (distToRegionFXAA > 0.0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (taa_showClosestFragment()) {
|
||||
vec3 fragUV = taa_findClosestFragment3x3(varTexCoord0);
|
||||
outFragColor = vec4((fragUV.xy - varTexCoord0) * imageSize * 0.5 + vec2(0.5), 0.0, 1.0);
|
||||
return;
|
||||
}
|
||||
|
||||
outFragColor = vec4(nextColor, 1.0);
|
||||
|
||||
vec3 prevColor = nextColor;
|
||||
|
||||
if (!(any(lessThan(prevTexCoord, vec2(0.0))) || any(greaterThan(prevTexCoord, vec2(1.0))))) {
|
||||
prevColor = texture(historyMap, prevTexCoord).xyz;
|
||||
}
|
||||
|
||||
outFragColor.xyz = mix(prevColor, vec3(1,0,1), clamp(distance(prevColor, nextColor) - 0.01, 0, 1));
|
||||
|
||||
if (pixVelocityLength > params.debugShowVelocityThreshold) {
|
||||
vec3 speedColor = taa_getVelocityColorAboveThreshold(pixVelocityLength);
|
||||
|
||||
outFragColor = vec4(0.0, 1.0, 1.0, 1.0);
|
||||
}
|
||||
}
|
41
libraries/render-utils/src/velocityBuffer_cameraMotion.slf
Normal file

@@ -0,0 +1,41 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// Created by Sam Gateau on 6/3/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

<@include DeferredTransform.slh@>
<$declareDeferredFrameTransform()$>

in vec2 varTexCoord0;
out vec4 outFragColor;

uniform sampler2D depthMap;

void main(void) {
    // Pixel being shaded
    ivec2 pixelPos;
    vec2 texcoordPos;
    ivec4 stereoSide;
    ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);

    float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;

    // The position of the pixel fragment in Eye space then in world space
    vec3 eyePos = evalEyePositionFromZdb(stereoSide.x, Zdb, texcoordPos);
    vec3 worldPos = (getViewInverse() * vec4(eyePos, 1.0)).xyz;

    vec3 prevEyePos = (getPreviousView() * vec4(worldPos, 1.0)).xyz;
    vec4 prevClipPos = (frameTransform._projection[stereoSide.x] * vec4(prevEyePos, 1.0));
    vec2 prevUV = 0.5 * (prevClipPos.xy / prevClipPos.w) + vec2(0.5);

    //vec2 imageSize = getWidthHeight(0);
    vec2 imageSize = vec2(1.0, 1.0);
    outFragColor = vec4(((texcoordPos - prevUV) * imageSize), 0.0, 0.0);
}
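This shader derives a per-pixel velocity purely from camera motion: the current depth is unprojected to a world position, reprojected with the previous frame's view and projection, and the UV difference is written to the velocity buffer. Below is a standalone sketch of the same reprojection using plain matrices; the uniforms uDepth, uInvViewProj and uPrevViewProj are illustrative stand-ins for the engine's DeferredTransform plumbing.

#version 410 core
// Hedged sketch of camera-motion reprojection with hypothetical uniforms.
uniform sampler2D uDepth;
uniform mat4 uInvViewProj;    // current frame: clip -> world
uniform mat4 uPrevViewProj;   // previous frame: world -> clip
in vec2 varTexCoord0;
out vec4 outFragColor;

void main(void) {
    float depth = texture(uDepth, varTexCoord0).x;
    vec4 clip = vec4(vec3(varTexCoord0, depth) * 2.0 - 1.0, 1.0);
    vec4 world = uInvViewProj * clip;
    world /= world.w;
    vec4 prevClip = uPrevViewProj * world;
    vec2 prevUV = 0.5 * (prevClip.xy / prevClip.w) + vec2(0.5);
    outFragColor = vec4(varTexCoord0 - prevUV, 0.0, 0.0);   // UV-space velocity
}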
|
@@ -286,7 +286,7 @@ void BlurGaussian::run(const RenderContextPointer& renderContext, const gpu::Fra
    _parameters->setWidthHeight(blurredFramebuffer->getWidth(), blurredFramebuffer->getHeight(), args->isStereo());
    _parameters->setTexcoordTransform(gpu::Framebuffer::evalSubregionTexcoordTransformCoefficients(textureSize, viewport));

    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
    gpu::doInBatch("BlurGaussian::run", args->_context, [=](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.setViewportTransform(viewport);

@@ -401,7 +401,7 @@ void BlurGaussianDepthAware::run(const RenderContextPointer& renderContext, cons
    _parameters->setDepthPerspective(args->getViewFrustum().getProjection()[1][1]);
    _parameters->setLinearDepthPosFar(args->getViewFrustum().getFarClip());

    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
    gpu::doInBatch("BlurGaussianDepthAware::run", args->_context, [=](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.setViewportTransform(sourceViewport);