mirror of
https://github.com/lubosz/overte.git
synced 2025-04-25 01:03:59 +02:00
introducing the Velocity buffer pass generation
This commit is contained in:
parent
fe0b597aa1
commit
2f0eee5e4a
13 changed files with 393 additions and 7 deletions
|
@ -351,7 +351,7 @@ public:
|
|||
// Don't actually crash in debug builds, in case this apparent deadlock is simply from
|
||||
// the developer actively debugging code
|
||||
#ifdef NDEBUG
|
||||
deadlockDetectionCrash();
|
||||
// deadlockDetectionCrash();
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
|
|
@ -140,7 +140,49 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
|||
case gpu::RGB:
|
||||
case gpu::RGBA:
|
||||
case gpu::XY:
|
||||
result = GL_RG8;
|
||||
switch (dstFormat.getType()) {
|
||||
case gpu::UINT32:
|
||||
result = GL_RG32UI;
|
||||
break;
|
||||
case gpu::INT32:
|
||||
result = GL_RG32I;
|
||||
break;
|
||||
case gpu::FLOAT:
|
||||
result = GL_RG32F;
|
||||
break;
|
||||
case gpu::UINT16:
|
||||
result = GL_RG16UI;
|
||||
break;
|
||||
case gpu::INT16:
|
||||
result = GL_RG16I;
|
||||
break;
|
||||
case gpu::NUINT16:
|
||||
result = GL_RG16;
|
||||
break;
|
||||
case gpu::NINT16:
|
||||
result = GL_RG16_SNORM;
|
||||
break;
|
||||
case gpu::HALF:
|
||||
result = GL_RG16F;
|
||||
break;
|
||||
case gpu::UINT8:
|
||||
result = GL_RG8UI;
|
||||
break;
|
||||
case gpu::INT8:
|
||||
result = GL_RG8I;
|
||||
break;
|
||||
case gpu::NUINT8:
|
||||
result = GL_RG8;
|
||||
break;
|
||||
case gpu::NINT8:
|
||||
result = GL_RG8_SNORM;
|
||||
break;
|
||||
case gpu::NUINT32:
|
||||
case gpu::NINT32:
|
||||
case gpu::COMPRESSED:
|
||||
case gpu::NUM_TYPES: // quiet compiler
|
||||
Q_UNREACHABLE();
|
||||
}
|
||||
break;
|
||||
default:
|
||||
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
||||
|
|
|
@ -52,7 +52,8 @@ enum Slot {
|
|||
DiffusedCurvature,
|
||||
Scattering,
|
||||
AmbientOcclusion,
|
||||
AmbientOcclusionBlurred
|
||||
AmbientOcclusionBlurred,
|
||||
Velocity,
|
||||
};
|
||||
|
||||
|
||||
|
@ -226,6 +227,12 @@ static const std::string DEFAULT_AMBIENT_OCCLUSION_BLURRED_SHADER{
|
|||
" }"
|
||||
};
|
||||
|
||||
static const std::string DEFAULT_VELOCITY_SHADER{
|
||||
"vec4 getFragmentColor() {"
|
||||
" return vec4(vec2(texture(velocityMap, uv).xy), 0.0, 1.0);"
|
||||
" }"
|
||||
};
|
||||
|
||||
static const std::string DEFAULT_CUSTOM_SHADER {
|
||||
"vec4 getFragmentColor() {"
|
||||
" return vec4(1.0, 0.0, 0.0, 1.0);"
|
||||
|
@ -308,6 +315,8 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Mode mode, std::string cust
|
|||
return DEFAULT_AMBIENT_OCCLUSION_SHADER;
|
||||
case AmbientOcclusionBlurredMode:
|
||||
return DEFAULT_AMBIENT_OCCLUSION_BLURRED_SHADER;
|
||||
case VelocityMode:
|
||||
return DEFAULT_VELOCITY_SHADER;
|
||||
case CustomMode:
|
||||
return getFileContent(customFile, DEFAULT_CUSTOM_SHADER);
|
||||
default:
|
||||
|
@ -366,6 +375,7 @@ const gpu::PipelinePointer& DebugDeferredBuffer::getPipeline(Mode mode, std::str
|
|||
slotBindings.insert(gpu::Shader::Binding("diffusedCurvatureMap", DiffusedCurvature));
|
||||
slotBindings.insert(gpu::Shader::Binding("scatteringMap", Scattering));
|
||||
slotBindings.insert(gpu::Shader::Binding("occlusionBlurredMap", AmbientOcclusionBlurred));
|
||||
slotBindings.insert(gpu::Shader::Binding("velocityMap", Velocity));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
auto pipeline = gpu::Pipeline::create(program, std::make_shared<gpu::State>());
|
||||
|
@ -403,6 +413,7 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
|
|||
auto& linearDepthTarget = inputs.get1();
|
||||
auto& surfaceGeometryFramebuffer = inputs.get2();
|
||||
auto& ambientOcclusionFramebuffer = inputs.get3();
|
||||
auto& velocityFramebuffer = inputs.get4();
|
||||
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
@ -431,6 +442,9 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
|
|||
batch.setResourceTexture(Depth, deferredFramebuffer->getPrimaryDepthTexture());
|
||||
batch.setResourceTexture(Lighting, deferredFramebuffer->getLightingTexture());
|
||||
}
|
||||
if (velocityFramebuffer) {
|
||||
batch.setResourceTexture(Velocity, velocityFramebuffer->getVelocityTexture());
|
||||
}
|
||||
|
||||
auto lightStage = renderContext->_scene->getStage<LightStage>();
|
||||
assert(lightStage);
|
||||
|
@ -476,5 +490,7 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
|
|||
batch.setResourceTexture(AmbientOcclusion, nullptr);
|
||||
batch.setResourceTexture(AmbientOcclusionBlurred, nullptr);
|
||||
|
||||
batch.setResourceTexture(Velocity, nullptr);
|
||||
|
||||
});
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "DeferredFramebuffer.h"
|
||||
#include "SurfaceGeometryPass.h"
|
||||
#include "AmbientOcclusionEffect.h"
|
||||
#include "VelocityBufferPass.h"
|
||||
|
||||
class DebugDeferredBufferConfig : public render::Job::Config {
|
||||
Q_OBJECT
|
||||
|
@ -37,7 +38,7 @@ signals:
|
|||
|
||||
class DebugDeferredBuffer {
|
||||
public:
|
||||
using Inputs = render::VaryingSet4<DeferredFramebufferPointer, LinearDepthFramebufferPointer, SurfaceGeometryFramebufferPointer, AmbientOcclusionFramebufferPointer>;
|
||||
using Inputs = render::VaryingSet5<DeferredFramebufferPointer, LinearDepthFramebufferPointer, SurfaceGeometryFramebufferPointer, AmbientOcclusionFramebufferPointer, VelocityFramebufferPointer>;
|
||||
using Config = DebugDeferredBufferConfig;
|
||||
using JobModel = render::Job::ModelI<DebugDeferredBuffer, Inputs, Config>;
|
||||
|
||||
|
@ -76,6 +77,7 @@ protected:
|
|||
ScatteringDebugMode,
|
||||
AmbientOcclusionMode,
|
||||
AmbientOcclusionBlurredMode,
|
||||
VelocityMode,
|
||||
CustomMode, // Needs to stay last
|
||||
|
||||
NumModes,
|
||||
|
|
|
@ -31,6 +31,10 @@ void DeferredFrameTransform::update(RenderArgs* args) {
|
|||
|
||||
//_parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;
|
||||
|
||||
// Move the current view transform to prev
|
||||
frameTransformBuffer.prevInvView = frameTransformBuffer.invView;
|
||||
frameTransformBuffer.prevView = frameTransformBuffer.view;
|
||||
|
||||
Transform cameraTransform;
|
||||
args->getViewFrustum().evalViewTransform(cameraTransform);
|
||||
cameraTransform.getMatrix(frameTransformBuffer.invView);
|
||||
|
|
|
@ -52,6 +52,11 @@ protected:
|
|||
// View matrix from world space to eye space (mono)
|
||||
glm::mat4 view;
|
||||
|
||||
// Previous Frame Inv View matrix from eye space (mono) to world space
|
||||
glm::mat4 prevInvView;
|
||||
// Previous Frame View matrix from world space to eye space (mono)
|
||||
glm::mat4 prevView;
|
||||
|
||||
FrameTransform() {}
|
||||
};
|
||||
UniformBufferView _frameTransformBuffer;
|
||||
|
|
|
@ -31,6 +31,8 @@ struct DeferredFrameTransform {
|
|||
mat4 _projectionMono;
|
||||
mat4 _viewInverse;
|
||||
mat4 _view;
|
||||
mat4 _prevViewInverse;
|
||||
mat4 _prevView;
|
||||
};
|
||||
|
||||
uniform deferredFrameTransformBuffer {
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include "DeferredFramebuffer.h"
|
||||
#include "DeferredLightingEffect.h"
|
||||
#include "SurfaceGeometryPass.h"
|
||||
#include "VelocityBufferPass.h"
|
||||
#include "FramebufferCache.h"
|
||||
#include "TextureCache.h"
|
||||
#include "ZoneRenderer.h"
|
||||
|
@ -130,7 +131,11 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
|
|||
const auto ambientOcclusionFramebuffer = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Outputs>(0);
|
||||
const auto ambientOcclusionUniforms = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Outputs>(1);
|
||||
|
||||
|
||||
// Velocity
|
||||
const auto velocityBufferInputs = VelocityBufferPass::Inputs(deferredFrameTransform, deferredFramebuffer).asVarying();
|
||||
const auto velocityBufferOutputs = task.addJob<VelocityBufferPass>("VelocityBuffer", velocityBufferInputs);
|
||||
const auto velocityBuffer = velocityBufferOutputs.getN<VelocityBufferPass::Outputs>(0);
|
||||
|
||||
// Draw Lights just add the lights to the current list of lights to deal with. NOt really gpu job for now.
|
||||
task.addJob<DrawLight>("DrawLight", lights);
|
||||
|
||||
|
@ -149,6 +154,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
|
|||
|
||||
task.addJob<RenderDeferred>("RenderDeferred", deferredLightingInputs);
|
||||
|
||||
|
||||
// Similar to light stage, background stage has been filled by several potential render items and resolved for the frame in this job
|
||||
task.addJob<DrawBackgroundStage>("DrawBackgroundDeferred", lightingModel);
|
||||
|
||||
|
@ -191,7 +197,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
|
|||
// Debugging stages
|
||||
{
|
||||
// Debugging Deferred buffer job
|
||||
const auto debugFramebuffers = render::Varying(DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer));
|
||||
const auto debugFramebuffers = render::Varying(DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, velocityBuffer));
|
||||
task.addJob<DebugDeferredBuffer>("DebugDeferredBuffer", debugFramebuffers);
|
||||
|
||||
const auto debugSubsurfaceScatteringInputs = DebugSubsurfaceScattering::Inputs(deferredFrameTransform, deferredFramebuffer, lightingModel,
|
||||
|
@ -217,7 +223,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
|
|||
|
||||
task.addJob<DebugZoneLighting>("DrawZoneStack", deferredFrameTransform);
|
||||
}
|
||||
|
||||
|
||||
|
||||
// AA job to be revisited
|
||||
task.addJob<Antialiasing>("Antialiasing", primaryFramebuffer);
|
||||
|
|
174
libraries/render-utils/src/VelocityBufferPass.cpp
Normal file
174
libraries/render-utils/src/VelocityBufferPass.cpp
Normal file
|
@ -0,0 +1,174 @@
|
|||
//
|
||||
// VelocityBufferPass.cpp
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 8/15/2017.
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "VelocityBufferPass.h"
|
||||
|
||||
#include <limits>
|
||||
|
||||
#include <gpu/Context.h>
|
||||
#include <gpu/StandardShaderLib.h>
|
||||
#include "StencilMaskPass.h"
|
||||
|
||||
const int VelocityBufferPass_FrameTransformSlot = 0;
|
||||
const int VelocityBufferPass_DepthMapSlot = 0;
|
||||
const int VelocityBufferPass_NormalMapSlot = 1;
|
||||
|
||||
|
||||
#include "velocityBuffer_cameraMotion_frag.h"
|
||||
|
||||
VelocityFramebuffer::VelocityFramebuffer() {
|
||||
}
|
||||
|
||||
|
||||
void VelocityFramebuffer::updatePrimaryDepth(const gpu::TexturePointer& depthBuffer) {
|
||||
//If the depth buffer or size changed, we need to delete our FBOs
|
||||
bool reset = false;
|
||||
if ((_primaryDepthTexture != depthBuffer)) {
|
||||
_primaryDepthTexture = depthBuffer;
|
||||
reset = true;
|
||||
}
|
||||
if (_primaryDepthTexture) {
|
||||
auto newFrameSize = glm::ivec2(_primaryDepthTexture->getDimensions());
|
||||
if (_frameSize != newFrameSize) {
|
||||
_frameSize = newFrameSize;
|
||||
_halfFrameSize = newFrameSize >> 1;
|
||||
|
||||
reset = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (reset) {
|
||||
clear();
|
||||
}
|
||||
}
|
||||
|
||||
void VelocityFramebuffer::clear() {
|
||||
_velocityFramebuffer.reset();
|
||||
_velocityTexture.reset();
|
||||
}
|
||||
|
||||
void VelocityFramebuffer::allocate() {
|
||||
|
||||
auto width = _frameSize.x;
|
||||
auto height = _frameSize.y;
|
||||
|
||||
// For Velocity Buffer:
|
||||
_velocityTexture = gpu::Texture::createRenderBuffer(gpu::Element(gpu::VEC2, gpu::HALF, gpu::RGB), width, height, gpu::Texture::SINGLE_MIP,
|
||||
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT));
|
||||
_velocityFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("velocity"));
|
||||
_velocityFramebuffer->setRenderBuffer(0, _velocityTexture);
|
||||
_velocityFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, _primaryDepthTexture->getTexelFormat());
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer VelocityFramebuffer::getVelocityFramebuffer() {
|
||||
if (!_velocityFramebuffer) {
|
||||
allocate();
|
||||
}
|
||||
return _velocityFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer VelocityFramebuffer::getVelocityTexture() {
|
||||
if (!_velocityTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _velocityTexture;
|
||||
}
|
||||
|
||||
VelocityBufferPass::VelocityBufferPass() {
|
||||
}
|
||||
|
||||
void VelocityBufferPass::configure(const Config& config) {
|
||||
}
|
||||
|
||||
void VelocityBufferPass::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
|
||||
assert(renderContext->args);
|
||||
assert(renderContext->args->hasViewFrustum());
|
||||
|
||||
RenderArgs* args = renderContext->args;
|
||||
|
||||
const auto& frameTransform = inputs.get0();
|
||||
const auto& deferredFramebuffer = inputs.get1();
|
||||
|
||||
if (!_gpuTimer) {
|
||||
_gpuTimer = std::make_shared < gpu::RangeTimer>(__FUNCTION__);
|
||||
}
|
||||
|
||||
if (!_velocityFramebuffer) {
|
||||
_velocityFramebuffer = std::make_shared<VelocityFramebuffer>();
|
||||
}
|
||||
_velocityFramebuffer->updatePrimaryDepth(deferredFramebuffer->getPrimaryDepthTexture());
|
||||
|
||||
auto depthBuffer = deferredFramebuffer->getPrimaryDepthTexture();
|
||||
|
||||
auto velocityFBO = _velocityFramebuffer->getVelocityFramebuffer();
|
||||
auto velocityTexture = _velocityFramebuffer->getVelocityTexture();
|
||||
|
||||
outputs.edit0() = _velocityFramebuffer;
|
||||
outputs.edit1() = velocityFBO;
|
||||
outputs.edit2() = velocityTexture;
|
||||
|
||||
auto cameraMotionPipeline = getCameraMotionPipeline();
|
||||
|
||||
auto fullViewport = args->_viewport;
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
_gpuTimer->begin(batch);
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setViewportTransform(fullViewport);
|
||||
batch.setProjectionTransform(glm::mat4());
|
||||
batch.resetViewTransform();
|
||||
batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(_velocityFramebuffer->getDepthFrameSize(), fullViewport));
|
||||
|
||||
batch.setUniformBuffer(VelocityBufferPass_FrameTransformSlot, frameTransform->getFrameTransformBuffer());
|
||||
|
||||
// Velocity buffer camera motion
|
||||
batch.setFramebuffer(velocityFBO);
|
||||
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0f, 0.0f, 0.0f, 0.0f));
|
||||
batch.setPipeline(cameraMotionPipeline);
|
||||
batch.setResourceTexture(VelocityBufferPass_DepthMapSlot, depthBuffer);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
_gpuTimer->end(batch);
|
||||
});
|
||||
|
||||
auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
|
||||
config->setGPUBatchRunTime(_gpuTimer->getGPUAverage(), _gpuTimer->getBatchAverage());
|
||||
}
|
||||
|
||||
|
||||
const gpu::PipelinePointer& VelocityBufferPass::getCameraMotionPipeline() {
|
||||
if (!_cameraMotionPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
|
||||
auto ps = gpu::Shader::createPixel(std::string(velocityBuffer_cameraMotion_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), VelocityBufferPass_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), VelocityBufferPass_DepthMapSlot));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
// Stencil test the curvature pass for objects pixels only, not the background
|
||||
PrepareStencil::testShape(*state);
|
||||
|
||||
state->setColorWriteMask(true, true, false, false);
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_cameraMotionPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
return _cameraMotionPipeline;
|
||||
}
|
||||
|
||||
|
||||
|
89
libraries/render-utils/src/VelocityBufferPass.h
Normal file
89
libraries/render-utils/src/VelocityBufferPass.h
Normal file
|
@ -0,0 +1,89 @@
|
|||
//
|
||||
// VelocityBufferPass.h
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 8/15/2017.
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_VelocityBufferPass_h
|
||||
#define hifi_VelocityBufferPass_h
|
||||
|
||||
#include "SurfaceGeometryPass.h"
|
||||
|
||||
|
||||
// VelocityFramebuffer is a helper class gathering in one place theframebuffers and targets describing the surface geometry linear depth
|
||||
// from a z buffer
|
||||
class VelocityFramebuffer {
|
||||
public:
|
||||
VelocityFramebuffer();
|
||||
|
||||
gpu::FramebufferPointer getVelocityFramebuffer();
|
||||
gpu::TexturePointer getVelocityTexture();
|
||||
|
||||
// Update the depth buffer which will drive the allocation of all the other resources according to its size.
|
||||
void updatePrimaryDepth(const gpu::TexturePointer& depthBuffer);
|
||||
|
||||
gpu::TexturePointer getPrimaryDepthTexture();
|
||||
const glm::ivec2& getDepthFrameSize() const { return _frameSize; }
|
||||
|
||||
void setResolutionLevel(int level);
|
||||
int getResolutionLevel() const { return _resolutionLevel; }
|
||||
|
||||
protected:
|
||||
void clear();
|
||||
void allocate();
|
||||
|
||||
gpu::TexturePointer _primaryDepthTexture;
|
||||
|
||||
gpu::FramebufferPointer _velocityFramebuffer;
|
||||
gpu::TexturePointer _velocityTexture;
|
||||
|
||||
glm::ivec2 _frameSize;
|
||||
glm::ivec2 _halfFrameSize;
|
||||
int _resolutionLevel{ 0 };
|
||||
};
|
||||
|
||||
using VelocityFramebufferPointer = std::shared_ptr<VelocityFramebuffer>;
|
||||
|
||||
class VelocityBufferPassConfig : public render::GPUJobConfig {
|
||||
Q_OBJECT
|
||||
Q_PROPERTY(float depthThreshold MEMBER depthThreshold NOTIFY dirty)
|
||||
|
||||
public:
|
||||
VelocityBufferPassConfig() : render::GPUJobConfig(true) {}
|
||||
|
||||
float depthThreshold{ 5.0f };
|
||||
|
||||
signals:
|
||||
void dirty();
|
||||
};
|
||||
|
||||
class VelocityBufferPass {
|
||||
public:
|
||||
using Inputs = render::VaryingSet2<DeferredFrameTransformPointer, DeferredFramebufferPointer>;
|
||||
using Outputs = render::VaryingSet3<VelocityFramebufferPointer, gpu::FramebufferPointer, gpu::TexturePointer>;
|
||||
using Config = VelocityBufferPassConfig;
|
||||
using JobModel = render::Job::ModelIO<VelocityBufferPass, Inputs, Outputs, Config>;
|
||||
|
||||
VelocityBufferPass();
|
||||
|
||||
void configure(const Config& config);
|
||||
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs);
|
||||
|
||||
private:
|
||||
typedef gpu::BufferView UniformBufferView;
|
||||
|
||||
VelocityFramebufferPointer _velocityFramebuffer;
|
||||
|
||||
const gpu::PipelinePointer& getCameraMotionPipeline();
|
||||
gpu::PipelinePointer _cameraMotionPipeline;
|
||||
|
||||
gpu::RangeTimerPointer _gpuTimer;
|
||||
};
|
||||
|
||||
|
||||
#endif // hifi_VelocityBufferPass_h
|
|
@ -23,6 +23,7 @@ uniform sampler2D halfNormalMap;
|
|||
uniform sampler2D occlusionMap;
|
||||
uniform sampler2D occlusionBlurredMap;
|
||||
uniform sampler2D scatteringMap;
|
||||
uniform sampler2D velocityMap;
|
||||
|
||||
<$declareDeferredCurvature()$>
|
||||
|
||||
|
|
44
libraries/render-utils/src/velocityBuffer_cameraMotion.slf
Normal file
44
libraries/render-utils/src/velocityBuffer_cameraMotion.slf
Normal file
|
@ -0,0 +1,44 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 6/3/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include DeferredTransform.slh@>
|
||||
<$declareDeferredFrameTransform()$>
|
||||
|
||||
in vec2 varTexCoord0;
|
||||
out vec4 outFragColor;
|
||||
|
||||
uniform sampler2D depthMap;
|
||||
|
||||
|
||||
void main(void) {
|
||||
// Pixel being shaded
|
||||
ivec2 pixelPos;
|
||||
vec2 texcoordPos;
|
||||
ivec4 stereoSide;
|
||||
ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);
|
||||
|
||||
float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;
|
||||
float Zeye = -evalZeyeFromZdb(Zdb);
|
||||
if (Zeye <= -getPosLinearDepthFar()) {
|
||||
outFragColor = vec4(0.0, 0.0, 0.0, 0.0);
|
||||
return;
|
||||
}
|
||||
|
||||
// The position of the pixel fragment in Eye space then in world space
|
||||
vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, texcoordPos);
|
||||
vec3 worldPos = (frameTransform._viewInverse * vec4(eyePos, 1.0)).xyz;
|
||||
|
||||
vec3 prevEyePos = (frameTransform._prevView * vec4(worldPos, 1.0)).xyz;
|
||||
vec4 prevClipPos = (frameTransform._projection[stereoSide.x] * vec4(prevEyePos, 1.0));
|
||||
vec2 prevUV = prevClipPos.xy / prevClipPos.w;
|
||||
|
||||
outFragColor = vec4(prevUV, 0.0, 0.0);
|
||||
}
|
|
@ -157,6 +157,7 @@ Column {
|
|||
ListElement { text: "Debug Scattering"; color: "White" }
|
||||
ListElement { text: "Ambient Occlusion"; color: "White" }
|
||||
ListElement { text: "Ambient Occlusion Blurred"; color: "White" }
|
||||
ListElement { text: "Velocity"; color: "White" }
|
||||
ListElement { text: "Custom"; color: "White" }
|
||||
}
|
||||
width: 200
|
||||
|
|
Loading…
Reference in a new issue