Introducing the blurring as a separate, reusable Job, and the first version of the curvature pass

This commit is contained in:
samcake 2016-06-07 18:13:40 -07:00
parent 8d90570f72
commit 75a5f6bd89
20 changed files with 606 additions and 114 deletions

View file

@ -14,11 +14,16 @@
#include <QObject>
#include <QOpenGLDebugLogger>
void OpenGLDebug::log(const QOpenGLDebugMessage & debugMessage) {
qDebug() << debugMessage;
}
void setupDebugLogger(QObject* window) {
QOpenGLDebugLogger* logger = new QOpenGLDebugLogger(window);
logger->initialize(); // initializes in the current context, i.e. ctx
logger->enableMessages();
QObject::connect(logger, &QOpenGLDebugLogger::messageLogged, window, [&](const QOpenGLDebugMessage & debugMessage) {
qDebug() << debugMessage;
OpenGLDebug::log(debugMessage);
});
}

View file

@ -13,7 +13,13 @@
#define hifi_QOpenGLDebugLoggerWrapper_h
class QObject;
class QOpenGLDebugMessage;
void setupDebugLogger(QObject* window);
class OpenGLDebug {
public:
static void log(const QOpenGLDebugMessage & debugMessage);
};
#endif // hifi_QOpenGLDebugLoggerWrapper_h

View file

@ -141,7 +141,8 @@ static const std::string DEFAULT_PYRAMID_DEPTH_SHADER {
static const std::string DEFAULT_CURVATURE_SHADER{
"vec4 getFragmentColor() {"
" return vec4(texture(curvatureMap, uv).xyz, 1.0);"
// " return vec4(pow(vec3(texture(curvatureMap, uv).a), vec3(1.0 / 2.2)), 1.0);"
" return vec4(pow(vec3(texture(curvatureMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
//" return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
" }"
};

View file

@ -23,15 +23,18 @@ void DeferredFrameTransform::update(RenderArgs* args) {
// Update the depth info with near and far (same for stereo)
auto nearZ = args->getViewFrustum().getNearClip();
auto farZ = args->getViewFrustum().getFarClip();
_frameTransformBuffer.edit<FrameTransform>().depthInfo = glm::vec4(nearZ*farZ, farZ - nearZ, -farZ, 0.0f);
_frameTransformBuffer.edit<FrameTransform>().pixelInfo = args->_viewport;
auto& frameTransformBuffer = _frameTransformBuffer.edit<FrameTransform>();
frameTransformBuffer.depthInfo = glm::vec4(nearZ*farZ, farZ - nearZ, -farZ, 0.0f);
frameTransformBuffer.pixelInfo = args->_viewport;
//_parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;
Transform cameraTransform;
args->getViewFrustum().evalViewTransform(cameraTransform);
cameraTransform.getMatrix(_frameTransformBuffer.edit<FrameTransform>().invView);
cameraTransform.getMatrix(frameTransformBuffer.invView);
cameraTransform.getInverseMatrix(frameTransformBuffer.view);
// Running in stereo?
bool isStereo = args->_context->isStereo();
@ -39,10 +42,9 @@ void DeferredFrameTransform::update(RenderArgs* args) {
// Eval the mono projection
mat4 monoProjMat;
args->getViewFrustum().evalProjectionMatrix(monoProjMat);
_frameTransformBuffer.edit<FrameTransform>().projection[0] = monoProjMat;
_frameTransformBuffer.edit<FrameTransform>().stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
_frameTransformBuffer.edit<FrameTransform>().invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
frameTransformBuffer.projection[0] = monoProjMat;
frameTransformBuffer.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
} else {
mat4 projMats[2];
@ -53,11 +55,11 @@ void DeferredFrameTransform::update(RenderArgs* args) {
for (int i = 0; i < 2; i++) {
// Compose the mono Eye space to Stereo clip space Projection Matrix
auto sideViewMat = projMats[i] * eyeViews[i];
_frameTransformBuffer.edit<FrameTransform>().projection[i] = sideViewMat;
frameTransformBuffer.projection[i] = sideViewMat;
}
_frameTransformBuffer.edit<FrameTransform>().stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
_frameTransformBuffer.edit<FrameTransform>().invpixelInfo = glm::vec4(1.0f / (float)(args->_viewport.z >> 1), 1.0f / args->_viewport.w, 0.0f, 0.0f);
frameTransformBuffer.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / (float)(args->_viewport.z >> 1), 1.0f / args->_viewport.w, 0.0f, 0.0f);
}
}

View file

@ -47,6 +47,8 @@ protected:
glm::mat4 projection[2];
// Inv View matrix from eye space (mono) to world space
glm::mat4 invView;
// View matrix from world space to eye space (mono)
glm::mat4 view;
FrameTransform() {}
};

View file

@ -20,6 +20,7 @@ struct DeferredFrameTransform {
vec4 _stereoInfo;
mat4 _projection[2];
mat4 _viewInverse;
mat4 _view;
};
uniform deferredFrameTransformBuffer {
@ -45,6 +46,14 @@ mat4 getProjection(int side) {
return frameTransform._projection[side];
}
mat4 getViewInverse() {
return frameTransform._viewInverse;
}
mat4 getView() {
return frameTransform._view;
}
bool isStereo() {
return frameTransform._stereoInfo.x > 0.0f;
}
@ -53,9 +62,9 @@ float getStereoSideWidth(int resolutionLevel) {
return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);
}
ivec3 getStereoSideInfo(int xPos, int resolutionLevel) {
ivec4 getStereoSideInfo(int xPos, int resolutionLevel) {
int sideWidth = int(getStereoSideWidth(resolutionLevel));
return ivec3(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth);
return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());
}
float evalZeyeFromZdb(float depth) {
@ -75,7 +84,7 @@ vec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {
return vec3(Xe, Ye, Zeye);
}
ivec2 getPixelPosNclipPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 nclipPos, out ivec3 stereoSide) {
ivec2 getPixelPosNclipPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 nclipPos, out ivec4 stereoSide) {
ivec2 fragPos = ivec2(glFragCoord.xy);
stereoSide = getStereoSideInfo(fragPos.x, 0);

View file

@ -47,6 +47,8 @@ void FramebufferCache::setFrameBufferSize(QSize frameBufferSize) {
_lightingFramebuffer.reset();
_depthPyramidFramebuffer.reset();
_depthPyramidTexture.reset();
_curvatureFramebuffer.reset();
_curvatureTexture.reset();
_occlusionFramebuffer.reset();
_occlusionTexture.reset();
_occlusionBlurredFramebuffer.reset();
@ -109,15 +111,12 @@ void FramebufferCache::createPrimaryFramebuffer() {
_depthPyramidFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
_depthPyramidFramebuffer->setRenderBuffer(0, _depthPyramidTexture);
_depthPyramidFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
_curvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
_curvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
_curvatureFramebuffer->setRenderBuffer(0, _curvatureTexture);
_curvatureFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
resizeAmbientOcclusionBuffers();
}
@ -266,7 +265,6 @@ gpu::TexturePointer FramebufferCache::getCurvatureTexture() {
return _curvatureTexture;
}
void FramebufferCache::setAmbientOcclusionResolutionLevel(int level) {
const int MAX_AO_RESOLUTION_LEVEL = 4;
level = std::max(0, std::min(level, MAX_AO_RESOLUTION_LEVEL));

View file

@ -23,6 +23,7 @@
#include <render/DrawTask.h>
#include <render/DrawStatus.h>
#include <render/DrawSceneOctree.h>
#include <render/BlurTask.h>
#include "DebugDeferredBuffer.h"
#include "DeferredLightingEffect.h"
@ -109,7 +110,9 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
addJob<DrawBackgroundDeferred>("DrawBackgroundDeferred", background);
// Opaque all rendered, generate surface geometry buffers
addJob<SurfaceGeometryPass>("SurfaceGeometry", deferredFrameTransform);
const auto curvatureFramebuffer = addJob<SurfaceGeometryPass>("SurfaceGeometry", deferredFrameTransform);
addJob<render::BlurGaussian>("DiffuseCurvature", curvatureFramebuffer);
// AO job
addJob<AmbientOcclusionEffect>("AmbientOcclusion");

View file

@ -13,50 +13,4 @@
<$declareDeferredFrameTransform()$>
uniform sampler2D depthMap;
out vec4 outFragColor;
void main(void) {
// Fetch normal and depth of current pixel
float4 samplePos = sourceTexture.SampleLevel(pointSampler, input.texUV, 0.0f);
float4 sampleNormal = depthTexture.SampleLevel(pointSampler, input.texUV, 0.0f);
// Calculate the width scale.
float distanceToProjectionWindow = 1.0f / tan(0.5f * radians(fov));
float scale = distanceToProjectionWindow / sampleNormal.w;
// Calculate dF/du and dF/dv
float2 du = float2( 1.0f, 0.0f ) * UVfactor.x * screenPixel * scale;
float2 dv = float2( 0.0f, 1.0f ) * UVfactor.x * screenPixel * scale;
float4 dFdu = depthTexture.SampleLevel(linearSampler, input.texUV + du.xy, 0.0f) -
depthTexture.SampleLevel(linearSampler, input.texUV - du.xy, 0.0f);
float4 dFdv = depthTexture.SampleLevel(linearSampler, input.texUV + dv.xy, 0.0f) -
depthTexture.SampleLevel(linearSampler, input.texUV - dv.xy, 0.0f);
dFdu *= step(abs(dFdu.w), 0.1f); dFdv *= step(abs(dFdv.w), 0.1f);
// Calculate ( du/dx, du/dy, du/dz ) and ( dv/dx, dv/dy, dv/dz )
float dist = 1.0f; samplePos.w = 1.0f;
float2 centerOffset = ((input.texUV - 0.5f) * 2.0f);
float4 px = mul( samplePos + float4( dist, 0.0f, 0.0f, 0.0f ), matViewProj );
float4 py = mul( samplePos + float4( 0.0f, dist, 0.0f, 0.0f ), matViewProj );
float4 pz = mul( samplePos + float4( 0.0f, 0.0f, dist, 0.0f ), matViewProj );
#ifdef INVERT_TEXTURE_V
centerOffset.y = -centerOffset.y;
#endif
px.xy = ((px.xy / px.w) - centerOffset) / scale;
py.xy = ((py.xy / py.w) - centerOffset) / scale;
pz.xy = ((pz.xy / pz.w) - centerOffset) / scale;
#ifdef INVERT_TEXTURE_V
px.y = -px.y; py.y = -py.y; pz.y = -pz.y;
#endif
// Calculate dF/dx, dF/dy and dF/dz using chain rule
float4 dFdx = dFdu * px.x + dFdv * px.y;
float4 dFdy = dFdu * py.x + dFdv * py.y;
float4 dFdz = dFdu * pz.x + dFdv * pz.y;
// Calculate the mean curvature
float meanCurvature = ((dFdx.x + dFdy.y + dFdz.z) * 0.33333333333333333f) * 100.0f;
return (float4( sampleNormal.xyz, meanCurvature ) + 1.0f) * 0.5f;
}

View file

@ -24,13 +24,28 @@ const int SurfaceGeometryPass_NormalMapSlot = 1;
#include "surfaceGeometry_makeCurvature_frag.h"
SurfaceGeometryPass::SurfaceGeometryPass() {
Parameters parameters;
_parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) &parameters));
}
void SurfaceGeometryPass::configure(const Config& config) {
if (config.depthThreshold != getCurvatureDepthThreshold()) {
_parametersBuffer.edit<Parameters>().curvatureInfo.x = config.depthThreshold;
}
if (config.basisScale != getCurvatureBasisScale()) {
_parametersBuffer.edit<Parameters>().curvatureInfo.y = config.basisScale;
}
if (config.curvatureScale != getCurvatureScale()) {
_parametersBuffer.edit<Parameters>().curvatureInfo.w = config.curvatureScale;
}
}
void SurfaceGeometryPass::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const DeferredFrameTransformPointer& frameTransform) {
void SurfaceGeometryPass::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const DeferredFrameTransformPointer& frameTransform, gpu::FramebufferPointer& curvatureFramebuffer) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
@ -44,6 +59,9 @@ void SurfaceGeometryPass::run(const render::SceneContextPointer& sceneContext, c
auto pyramidTexture = framebufferCache->getDepthPyramidTexture();
auto curvatureFBO = framebufferCache->getCurvatureFramebuffer();
curvatureFramebuffer = curvatureFBO;
auto curvatureTexture = framebufferCache->getCurvatureTexture();
QSize framebufferSize = framebufferCache->getFrameBufferSize();
float sMin = args->_viewport.x / (float)framebufferSize.width();
@ -68,7 +86,8 @@ void SurfaceGeometryPass::run(const render::SceneContextPointer& sceneContext, c
batch.setModelTransform(model);
batch.setUniformBuffer(SurfaceGeometryPass_FrameTransformSlot, frameTransform->getFrameTransformBuffer());
batch.setUniformBuffer(SurfaceGeometryPass_ParamsSlot, _parametersBuffer);
// Pyramid pass
batch.setFramebuffer(pyramidFBO);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(args->getViewFrustum().getFarClip(), 0.0f, 0.0f, 0.0f));
@ -76,17 +95,15 @@ void SurfaceGeometryPass::run(const render::SceneContextPointer& sceneContext, c
batch.setResourceTexture(SurfaceGeometryPass_DepthMapSlot, depthBuffer);
batch.draw(gpu::TRIANGLE_STRIP, 4);
// Pyramid pass
// Curvature pass
batch.setFramebuffer(curvatureFBO);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0));
batch.setPipeline(curvaturePipeline);
batch.setResourceTexture(SurfaceGeometryPass_DepthMapSlot, pyramidTexture);
batch.setResourceTexture(SurfaceGeometryPass_NormalMapSlot, normalTexture);
batch.draw(gpu::TRIANGLE_STRIP, 4);
batch.setResourceTexture(SurfaceGeometryPass_DepthMapSlot, nullptr);
batch.setResourceTexture(SurfaceGeometryPass_NormalMapSlot, nullptr);
});
}
@ -125,7 +142,7 @@ const gpu::PipelinePointer& SurfaceGeometryPass::getCurvaturePipeline() {
gpu::Shader::BindingSet slotBindings;
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), SurfaceGeometryPass_FrameTransformSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionParamsBuffer"), SurfaceGeometryPass_ParamsSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("surfaceGeometryParamsBuffer"), SurfaceGeometryPass_ParamsSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), SurfaceGeometryPass_DepthMapSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), SurfaceGeometryPass_NormalMapSlot));
gpu::Shader::makeProgram(*program, slotBindings);

View file

@ -19,10 +19,17 @@
class SurfaceGeometryPassConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(float depthThreshold MEMBER depthThreshold NOTIFY dirty)
Q_PROPERTY(float basisScale MEMBER basisScale NOTIFY dirty)
Q_PROPERTY(float curvatureScale MEMBER curvatureScale NOTIFY dirty)
Q_PROPERTY(double gpuTime READ getGpuTime)
public:
SurfaceGeometryPassConfig() : render::Job::Config(true) {}
float depthThreshold{ 0.1f };
float basisScale{ 1.0f };
float curvatureScale{ 100.0f };
double getGpuTime() { return gpuTime; }
double gpuTime{ 0.0 };
@ -34,13 +41,17 @@ signals:
class SurfaceGeometryPass {
public:
using Config = SurfaceGeometryPassConfig;
using JobModel = render::Job::ModelI<SurfaceGeometryPass, DeferredFrameTransformPointer, Config>;
using JobModel = render::Job::ModelIO<SurfaceGeometryPass, DeferredFrameTransformPointer, gpu::FramebufferPointer, Config>;
SurfaceGeometryPass();
void configure(const Config& config);
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const DeferredFrameTransformPointer& frameTransform);
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const DeferredFrameTransformPointer& frameTransform, gpu::FramebufferPointer& curvatureFramebuffer);
float getCurvatureDepthThreshold() const { return _parametersBuffer.get<Parameters>().curvatureInfo.x; }
float getCurvatureBasisScale() const { return _parametersBuffer.get<Parameters>().curvatureInfo.y; }
float getCurvatureScale() const { return _parametersBuffer.get<Parameters>().curvatureInfo.w; }
private:
typedef gpu::BufferView UniformBufferView;
@ -49,8 +60,8 @@ private:
public:
// Resolution info
glm::vec4 resolutionInfo { -1.0f, 0.0f, 0.0f, 0.0f };
// radius info is { R, R^2, 1 / R^6, ObscuranceScale}
glm::vec4 radiusInfo{ 0.5f, 0.5f * 0.5f, 1.0f / (0.25f * 0.25f * 0.25f), 1.0f };
// Curvature algorithm
glm::vec4 curvatureInfo{ 0.0f };
// Dithering info
glm::vec4 ditheringInfo { 0.0f, 0.0f, 0.01f, 1.0f };
// Sampling info
@ -68,7 +79,6 @@ private:
const gpu::PipelinePointer& getLinearDepthPipeline();
const gpu::PipelinePointer& getCurvaturePipeline();
gpu::PipelinePointer _linearDepthPipeline;
gpu::PipelinePointer _curvaturePipeline;

View file

@ -12,6 +12,37 @@
<@include DeferredTransform.slh@>
<$declareDeferredFrameTransform()$>
struct SurfaceGeometryParams {
// Resolution info
vec4 resolutionInfo;
// Curvature algorithm
vec4 curvatureInfo;
// Dithering info
vec4 ditheringInfo;
// Sampling info
vec4 sampleInfo;
// Blurring info
vec4 blurInfo;
// gaussian distribution coefficients first is the sampling radius (max is 6)
vec4 _gaussianCoefs[2];
};
uniform surfaceGeometryParamsBuffer {
SurfaceGeometryParams params;
};
float getCurvatureDepthThreshold() {
return params.curvatureInfo.x;
}
float getCurvatureBasisScale() {
return params.curvatureInfo.y;
}
float getCurvatureScale() {
return params.curvatureInfo.w;
}
uniform sampler2D linearDepthMap;
float getZEye(ivec2 pixel) {
@ -44,9 +75,18 @@ vec3 unpackNormal(in vec3 p) {
return oct_to_float32x3(unorm8x3_to_snorm12x2(p));
}
vec2 sideToFrameNclip(vec2 side, vec2 nclipPos) {
return vec2((nclipPos.x + side.x) * side.y, nclipPos.y);
}
uniform sampler2D normalMap;
vec3 getRawNormal(vec2 texcoord) {
return texture(normalMap, texcoord).xyz;
}
vec3 getWorldNormal(vec2 texcoord) {
vec3 rawNormal = texture(normalMap, texcoord).xyz;
vec3 rawNormal = getRawNormal(texcoord);
return unpackNormal(rawNormal);
}
@ -70,56 +110,62 @@ void main(void) {
// Pixel being shaded
ivec2 pixelPos;
vec2 nclipPos;
ivec3 stereoSide;
ivec4 stereoSide;
ivec2 framePixelPos = getPixelPosNclipPosAndSide(gl_FragCoord.xy, pixelPos, nclipPos, stereoSide);
vec2 stereoSideClip = vec2(stereoSide.x, (isStereo() ? 0.5 : 1.0));
vec2 frameNclipPos = sideToFrameNclip(stereoSideClip, nclipPos);
// Fetch the z under the pixel (stereo or not)
float Zeye = getZEye(framePixelPos);
vec3 worldNormal = getWorldNormal(frameNclipPos);
// The position of the pixel fragment in Eye space then in world space
vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, nclipPos);
vec3 worldPos = (frameTransform._viewInverse * vec4(eyePos, 1.0)).xyz;
vec3 moduloPos = fract(worldPos);
// Calculate the perspective scale.
float perspectiveScale =(-getProjScaleEye() / Zeye);
//outFragColor = vec4(vec3(perspectiveScale * 0.1), 1.0);
outFragColor = vec4(moduloPos, 1.0);
// Calculate the width scale.
// float distanceToProjectionWindow = 1.0f / tan(0.5f * radians(fov));
float scale = -getProjScaleEye() / Zeye;
vec2 viewportScale = scale * getInvWidthHeight();
vec2 viewportScale = perspectiveScale * getInvWidthHeight();
// Calculate dF/du and dF/dv
float threshold = getCurvatureDepthThreshold();
vec2 du = vec2( 1.0f, 0.0f ) * viewportScale.x;
vec2 dv = vec2( 0.0f, 1.0f ) * viewportScale.y;
outFragColor = vec4(du.x, dv.y, scale, 1.0);
vec4 dFdu = vec4(getWorldNormalDiff(nclipPos, du), getEyeDepthDiff(nclipPos, du));
vec4 dFdv = vec4(getWorldNormalDiff(nclipPos, dv), getEyeDepthDiff(nclipPos, dv));
dFdu *= step(abs(dFdu.w), 0.1f); dFdv *= step(abs(dFdv.w), 0.1f);
vec4 dFdu = vec4(getWorldNormalDiff(frameNclipPos, du), getEyeDepthDiff(frameNclipPos, du));
vec4 dFdv = vec4(getWorldNormalDiff(frameNclipPos, dv), getEyeDepthDiff(frameNclipPos, dv));
dFdu *= step(abs(dFdu.w), threshold);
dFdv *= step(abs(dFdv.w), threshold);
outFragColor = vec4(dFdu.xyz, 1.0);
/*
// Calculate ( du/dx, du/dy, du/dz ) and ( dv/dx, dv/dy, dv/dz )
float dist = 1.0f; samplePos.w = 1.0f;
vec2 centerOffset = ((input.texUV - 0.5f) * 2.0f);
vec4 px = mul( samplePos + vec4( dist, 0.0f, 0.0f, 0.0f ), matViewProj );
vec4 py = mul( samplePos + vec4( 0.0f, dist, 0.0f, 0.0f ), matViewProj );
vec4 pz = mul( samplePos + vec4( 0.0f, 0.0f, dist, 0.0f ), matViewProj );
#ifdef INVERT_TEXTURE_V
centerOffset.y = -centerOffset.y;
#endif
px.xy = ((px.xy / px.w) - centerOffset) / scale;
py.xy = ((py.xy / py.w) - centerOffset) / scale;
pz.xy = ((pz.xy / pz.w) - centerOffset) / scale;
#ifdef INVERT_TEXTURE_V
px.y = -px.y; py.y = -py.y; pz.y = -pz.y;
#endif
// Eval px, py, pz world positions of the basis centered on the world pos of the fragment
float dist = getCurvatureBasisScale();
vec4 px = vec4(worldPos, 1.0) + vec4(dist, 0.0f, 0.0f, 0.0f);
vec4 py = vec4(worldPos, 1.0) + vec4(0.0f, dist, 0.0f, 0.0f);
vec4 pz = vec4(worldPos, 1.0) + vec4(0.0f, 0.0f, dist, 0.0f);
// Project px, py pz to homogeneous clip space
mat4 viewProj = getProjection(stereoSide.x) * frameTransform._view;
px = viewProj * px;
py = viewProj * py;
pz = viewProj * pz;
// then to normalized clip space
px.xy /= px.w;
py.xy /= py.w;
pz.xy /= pz.w;
vec2 hclipPos = (nclipPos * 2.0 - 1.0);
px.xy = (px.xy - hclipPos) / perspectiveScale;
py.xy = (py.xy - hclipPos) / perspectiveScale;
pz.xy = (pz.xy - hclipPos) / perspectiveScale;
// Calculate dF/dx, dF/dy and dF/dz using chain rule
vec4 dFdx = dFdu * px.x + dFdv * px.y;
@ -127,7 +173,7 @@ void main(void) {
vec4 dFdz = dFdu * pz.x + dFdv * pz.y;
// Calculate the mean curvature
float meanCurvature = ((dFdx.x + dFdy.y + dFdz.z) * 0.33333333333333333) * 100.0;
outFragColor = vec4( (meanCurvature + 1.0) * 0.5);
*/
float meanCurvature = ((dFdx.x + dFdy.y + dFdz.z) * 0.33333333333333333) * params.curvatureInfo.w;
//outFragColor = vec4(vec3(worldNormal + 1.0) * 0.5, (meanCurvature + 1.0) * 0.5);
outFragColor = vec4((vec3(dFdx.x, dFdy.y, dFdz.z) * params.curvatureInfo.w + 1.0) * 0.5, (meanCurvature + 1.0) * 0.5);
}

View file

@ -0,0 +1,177 @@
//
// BlurTask.cpp
// render/src/render
//
// Created by Sam Gateau on 6/7/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "BlurTask.h"
#include <gpu/Context.h>
#include <gpu/StandardShaderLib.h>
#include "blurGaussianV_frag.h"
#include "blurGaussianH_frag.h"
using namespace render;
// Uniform-buffer binding slots shared by both blur pipelines.
enum BlurShaderBufferSlots {
BlurTask_ParamsSlot = 0,
};
// Texture binding slots shared by both blur pipelines.
enum BlurShaderMapSlots {
BlurTask_SourceSlot = 0,
};
// Tap count of the gaussian filter; must stay in sync with NUM_TAPS in BlurTask.slh.
const float BLUR_NUM_SAMPLES = 7.0f;
// Allocate the GPU uniform buffer holding the blur parameters, seeded with the defaults of Params.
BlurParams::BlurParams() {
Params params;
_parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Params), (const gpu::Byte*) &params));
}
// Record the viewport resolution (and its inverse) in the uniform buffer.
// The buffer is only edited (and thus re-uploaded) when the size actually changed.
void BlurParams::setWidthHeight(int width, int height) {
    auto current = _parametersBuffer.get<Params>().resolutionInfo;
    bool changed = (current.x != width) || (current.y != height);
    if (changed) {
        _parametersBuffer.edit<Params>().resolutionInfo = glm::vec4((float) width, (float) height, 1.0f / (float) width, 1.0f / (float) height);
    }
}
// Record the filter radius scale; y holds the per-tap step (scale spread across the taps).
// The uniform buffer is only edited when the value actually changed.
void BlurParams::setFilterRadiusScale(float scale) {
    auto currentScale = _parametersBuffer.get<Params>().filterInfo.x;
    if (currentScale != scale) {
        auto& filterInfo = _parametersBuffer.edit<Params>().filterInfo;
        filterInfo.x = scale;
        filterInfo.y = scale / BLUR_NUM_SAMPLES;
    }
}
// The parameters object owns the uniform buffer shared by both blur passes; allocated once here.
BlurGaussian::BlurGaussian() {
_parameters = std::make_shared<BlurParams>();
}
// Lazily build and cache the vertical-blur pipeline (fullscreen quad VS + blurGaussianV fragment shader).
gpu::PipelinePointer BlurGaussian::getBlurVPipeline() {
if (!_blurVPipeline) {
auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
auto ps = gpu::Shader::createPixel(std::string(blurGaussianV_frag));
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
gpu::Shader::BindingSet slotBindings;
slotBindings.insert(gpu::Shader::Binding(std::string("blurParamsBuffer"), BlurTask_ParamsSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("sourceMap"), BlurTask_SourceSlot));
gpu::Shader::makeProgram(*program, slotBindings);
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
// Stencil test (ref 0, NOT_EQUAL) so only pixels covered by rendered objects are blurred, not the background.
// NOTE(review): the original comment mentioned "the curvature pass", but this job is generic — any caller gets this stencil behavior.
state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
_blurVPipeline = gpu::Pipeline::create(program, state);
}
return _blurVPipeline;
}
// Lazily build and cache the horizontal-blur pipeline (fullscreen quad VS + blurGaussianH fragment shader).
gpu::PipelinePointer BlurGaussian::getBlurHPipeline() {
if (!_blurHPipeline) {
auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
auto ps = gpu::Shader::createPixel(std::string(blurGaussianH_frag));
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
gpu::Shader::BindingSet slotBindings;
slotBindings.insert(gpu::Shader::Binding(std::string("blurParamsBuffer"), BlurTask_ParamsSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("sourceMap"), BlurTask_SourceSlot));
gpu::Shader::makeProgram(*program, slotBindings);
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
// Stencil test (ref 0, NOT_EQUAL) so only pixels covered by rendered objects are blurred, not the background.
// NOTE(review): the original comment mentioned "the curvature pass", but this job is generic — any caller gets this stencil behavior.
state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
_blurHPipeline = gpu::Pipeline::create(program, state);
}
return _blurHPipeline;
}
// Prepare (or lazily resize) the intermediate framebuffer and fill in the read/write
// handles for the two blur passes. Returns false when there is no source to blur.
bool BlurGaussian::updateBlurringResources(const gpu::FramebufferPointer& sourceFramebuffer, BlurringResources& blurringResources) {
    if (!sourceFramebuffer) {
        return false;
    }

    if (!_blurredFramebuffer) {
        // First use: build the intermediate framebuffer to match the source.
        _blurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());

        // Share the source depth/stencil if present, so the blur pipelines' stencil test still applies.
        if (sourceFramebuffer->hasDepthStencil()) {
            _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
        }
        auto blurringSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT);
        auto blurringTarget = gpu::TexturePointer(gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), blurringSampler));
        _blurredFramebuffer->setRenderBuffer(0, blurringTarget);
    } else if ((_blurredFramebuffer->getWidth() != sourceFramebuffer->getWidth()) || (_blurredFramebuffer->getHeight() != sourceFramebuffer->getHeight())) {
        // It would be easier to just call resize on the blurred framebuffer and let it work if needed,
        // but the source might lose its depth buffer when doing so: resize and re-attach explicitly instead.
        _blurredFramebuffer->resize(sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), sourceFramebuffer->getNumSamples());
        if (sourceFramebuffer->hasDepthStencil()) {
            _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
        }
    }

    // V pass reads sourceTexture and writes blurringFramebuffer; H pass reads blurringTexture
    // and writes back into finalFramebuffer (the source), so the blur happens in place.
    blurringResources.sourceTexture = sourceFramebuffer->getRenderBuffer(0);
    blurringResources.blurringFramebuffer = _blurredFramebuffer;
    blurringResources.blurringTexture = _blurredFramebuffer->getRenderBuffer(0);
    blurringResources.finalFramebuffer = sourceFramebuffer;

    return true;
}
// Push the UI-configurable filter scale into the shared blur parameters.
void BlurGaussian::configure(const Config& config) {
_parameters->setFilterRadiusScale(config.filterScale);
}
// Blur the content of sourceFramebuffer in place: a vertical gaussian pass into the
// intermediate framebuffer, then a horizontal pass back into the source framebuffer.
void BlurGaussian::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceFramebuffer) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
RenderArgs* args = renderContext->args;
BlurringResources blurringResources;
if (!updateBlurringResources(sourceFramebuffer, blurringResources)) {
// early exit if no valid blurring resources
return;
}
auto blurVPipeline = getBlurVPipeline();
auto blurHPipeline = getBlurHPipeline();
// Keep the shader's resolution info in sync with the current viewport.
_parameters->setWidthHeight(args->_viewport.z, args->_viewport.w);
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setViewportTransform(args->_viewport);
batch.setUniformBuffer(BlurTask_ParamsSlot, _parameters->_parametersBuffer);
// Pass 1 (vertical): source texture -> intermediate framebuffer.
batch.setFramebuffer(blurringResources.blurringFramebuffer);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0));
batch.setPipeline(blurVPipeline);
batch.setResourceTexture(BlurTask_SourceSlot, blurringResources.sourceTexture);
batch.draw(gpu::TRIANGLE_STRIP, 4);
// Pass 2 (horizontal): intermediate texture -> final framebuffer (the source).
batch.setFramebuffer(blurringResources.finalFramebuffer);
batch.setPipeline(blurHPipeline);
batch.setResourceTexture(BlurTask_SourceSlot, blurringResources.blurringTexture);
batch.draw(gpu::TRIANGLE_STRIP, 4);
// Unbind so no stale texture/buffer state leaks into subsequent jobs.
batch.setResourceTexture(BlurTask_SourceSlot, nullptr);
batch.setUniformBuffer(BlurTask_ParamsSlot, nullptr);
});
}

View file

@ -0,0 +1,91 @@
//
// BlurTask.h
// render/src/render
//
// Created by Sam Gateau on 6/7/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_render_BlurTask_h
#define hifi_render_BlurTask_h
#include "Engine.h"
namespace render {
// CPU-side owner of the uniform buffer shared by the blur shaders
// (mirrors the blurParamsBuffer block declared in BlurTask.slh).
class BlurParams {
public:
void setWidthHeight(int width, int height);
void setFilterRadiusScale(float scale);
// Class describing the uniform buffer with all the parameters common to the blur shaders
class Params {
public:
// Resolution info (width, height, inverse of width, inverse of height)
glm::vec4 resolutionInfo{ 0.0f, 0.0f, 0.0f, 0.0f };
// Filter info: x = filter radius scale, y = per-tap step (x / number of taps)
glm::vec4 filterInfo{ 1.0f, 0.0f, 0.0f, 0.0f };
Params() {}
};
gpu::BufferView _parametersBuffer;
BlurParams();
};
using BlurParamsPointer = std::shared_ptr<BlurParams>;
class BlurGaussianConfig : public Job::Config {
Q_OBJECT
Q_PROPERTY(bool enabled MEMBER enabled NOTIFY dirty) // expose enabled flag
Q_PROPERTY(float filterScale MEMBER filterScale NOTIFY dirty) // expose the filter radius scale
public:
// Scale applied to the gaussian sampling radius (see BlurParams::setFilterRadiusScale).
float filterScale{ 2.0f };
signals :
void dirty();
protected:
};
// Separable gaussian blur job: blurs the input framebuffer in place using a
// vertical then a horizontal pass through an intermediate framebuffer.
class BlurGaussian {
public:
using Config = BlurGaussianConfig;
using JobModel = Job::ModelI<BlurGaussian, gpu::FramebufferPointer, Config>;
BlurGaussian();
void configure(const Config& config);
void run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceFramebuffer);
protected:
BlurParamsPointer _parameters;
gpu::PipelinePointer _blurVPipeline;
gpu::PipelinePointer _blurHPipeline;
gpu::PipelinePointer getBlurVPipeline();
gpu::PipelinePointer getBlurHPipeline();
// Intermediate target, reused across frames and lazily resized to match the source.
gpu::FramebufferPointer _blurredFramebuffer;
// Read/write handles gathered once per run for the two passes.
struct BlurringResources {
gpu::TexturePointer sourceTexture;
gpu::FramebufferPointer blurringFramebuffer;
gpu::TexturePointer blurringTexture;
gpu::FramebufferPointer finalFramebuffer;
};
bool updateBlurringResources(const gpu::FramebufferPointer& sourceFramebuffer, BlurringResources& blurringResources);
};
}
#endif // hifi_render_BlurTask_h

View file

@ -0,0 +1,65 @@
// Generated on <$_SCRIBE_DATE$>
//
// Created by Sam Gateau on 6/7/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@func declareBlurUniforms()@>
// Number of filter taps, and the offset of the outermost tap in texel steps.
// Must stay in sync with BLUR_NUM_SAMPLES in BlurTask.cpp.
#define NUM_TAPS 7
#define NUM_TAPS_OFFSET 3.0f
// NOTE(review): appears unused in this file — confirm before removing.
float uniformFilterWidth = 0.05f;
// Gaussian weights; index i pairs with gaussianDistributionOffset[i] (weights sum to ~1.0).
const float gaussianDistributionCurve[NUM_TAPS] = float[](
0.383f, 0.006f, 0.061f, 0.242f, 0.242f, 0.061f, 0.006f
);
// Tap offsets in units of the per-tap step (center tap first).
const float gaussianDistributionOffset[NUM_TAPS] = float[](
0.0f, -3.0f, -2.0f, -1.0f, 1.0f, 2.0f, 3.0f
);
// Mirrors render::BlurParams::Params on the CPU side (BlurTask.h).
struct BlurParameters {
vec4 resolutionInfo;
vec4 filterInfo;
};
uniform blurParamsBuffer {
BlurParameters parameters;
};
vec2 getViewportInvWidthHeight() {
return parameters.resolutionInfo.zw;
}
<@endfunc@>
<@func declareBlurGaussian()@>
<$declareBlurUniforms()$>
uniform sampler2D sourceMap;
// One NUM_TAPS-tap gaussian along 'direction'; pixelStep is one texel in UV units,
// scaled by the configured filter radius (filterInfo.x).
vec4 pixelShaderGaussian(vec2 texcoord, vec2 direction, vec2 pixelStep) {
// NOTE(review): sampleCenter is unused — the center tap is fetched again inside the loop (offset 0).
vec4 sampleCenter = texture(sourceMap, texcoord);
vec2 finalStep = parameters.filterInfo.x * direction * pixelStep;
vec4 srcBlurred = vec4(0.0);
for(int i = 0; i < NUM_TAPS; i++) {
// Fetch color and depth for current sample.
vec2 sampleCoord = texcoord + (gaussianDistributionOffset[i] * finalStep);
vec4 srcSample = texture(sourceMap, sampleCoord);
// Accumulate.
srcBlurred += gaussianDistributionCurve[i] * srcSample;
}
return srcBlurred;
}
<@endfunc@>

View file

@ -0,0 +1,23 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// Created by Sam Gateau on 6/7/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include BlurTask.slh@>
<$declareBlurGaussian()$>
in vec2 varTexCoord0;
out vec4 outFragColor;
void main(void) {
// Direction (1.0, 0.0) blurs along X, i.e. horizontally.
// NOTE(review): verify this file's name/pipeline pairing (V vs H) matches the direction.
outFragColor = pixelShaderGaussian(varTexCoord0, vec2(1.0, 0.0), getViewportInvWidthHeight());
}

View file

@ -0,0 +1,22 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// Created by Sam Gateau on 6/7/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include BlurTask.slh@>
<$declareBlurGaussian()$>
in vec2 varTexCoord0;
out vec4 outFragColor;
void main(void) {
// Direction (0.0, 1.0) blurs along Y, i.e. vertically.
// NOTE(review): verify this file's name/pipeline pairing (V vs H) matches the direction.
outFragColor = pixelShaderGaussian(varTexCoord0, vec2(0.0, 1.0), getViewportInvWidthHeight());
}

View file

@ -0,0 +1,20 @@
//
// debugSurfaceGeometryPass.js
//
// Created by Sam Gateau on 6/6/2016
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html
//
// Set up the qml ui
// Resolve the QML UI shipped next to this script.
var qml = Script.resolvePath('surfaceGeometryPass.qml');

// Open the debug UI in an overlay window.
var window = new OverlayWindow({
    title: 'Surface Geometry Pass',
    source: qml,
    width: 400,
    height: 400,
});
window.setPosition(250, 500);

// Stop this script when the user closes the window.
window.closed.connect(function() { Script.stop(); });

View file

@ -22,7 +22,15 @@ Column {
debug.config.mode = mode;
}
Label { text: qsTr("Debug Buffer") }
function setLayout(layout) {
debug.config.size = { x: -1, y: -1, z: 1, w: 1 };
}
Button {
text: "Fullscreen"
onClicked: { debug.setLayout(1); }
}
ExclusiveGroup { id: bufferGroup }
Repeater {
model: [

View file

@ -0,0 +1,33 @@
//
// surfaceGeometryPass.qml
//
// Created by Sam Gateau on 6/6/2016
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html
//
import QtQuick 2.5
import QtQuick.Controls 1.4
import "configSlider"
Column {
spacing: 8
Column {
id: surfaceGeometry
Column{
// One slider per "Label:propertyName:max" entry, each bound to the
// corresponding property of the SurfaceGeometry render-job config.
Repeater {
model: [ "Depth Threshold:depthThreshold:1.0", "Basis Scale:basisScale:1.0", "Curvature Scale:curvatureScale:200.0" ]
ConfigSlider {
label: qsTr(modelData.split(":")[0])
integral: false
config: Render.getConfig("SurfaceGeometry")
property: modelData.split(":")[1]
// NOTE(review): max is assigned the string from split() — relies on QML's implicit string-to-number conversion.
max: modelData.split(":")[2]
min: 0.0
}
}
}
}
}