mirror of https://github.com/overte-org/overte.git (synced 2025-08-17 04:27:13 +02:00)

A lot more stuff to try to see the skin scattering

This commit is contained in:
parent 75a5f6bd89
commit e69022285e

11 changed files with 588 additions and 25 deletions
@@ -141,8 +141,8 @@ static const std::string DEFAULT_PYRAMID_DEPTH_SHADER {

static const std::string DEFAULT_CURVATURE_SHADER{
    "vec4 getFragmentColor() {"
    // " return vec4(pow(vec3(texture(curvatureMap, uv).a), vec3(1.0 / 2.2)), 1.0);"
    " return vec4(pow(vec3(texture(curvatureMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
    " return vec4(pow(vec3(texture(curvatureMap, uv).a), vec3(1.0 / 2.2)), 1.0);"
    // " return vec4(pow(vec3(texture(curvatureMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
    //" return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
    " }"
};
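A note on the pow(..., vec3(1.0 / 2.2)) in these debug shader variants: it is only a display gamma correction of the linear buffer value, roughly

$$c_{\text{display}} = c_{\text{linear}}^{1/2.2}$$

so the edit above switches which channels of curvatureMap are visualized (.a versus .xyz), not how they are computed.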
@@ -84,7 +84,7 @@ vec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {
    return vec3(Xe, Ye, Zeye);
}

ivec2 getPixelPosNclipPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 nclipPos, out ivec4 stereoSide) {
ivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {
    ivec2 fragPos = ivec2(glFragCoord.xy);

    stereoSide = getStereoSideInfo(fragPos.x, 0);

@@ -92,7 +92,7 @@ ivec2 getPixelPosNclipPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out ve
    pixelPos = fragPos;
    pixelPos.x -= stereoSide.y;

    nclipPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();
    texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();

    return fragPos;
}
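A note on the rename: the value computed here is a texture-space coordinate in [0, 1]^2, which is what the new texcoordPos name reflects; normalized clip (nclip) coordinates in [-1, 1]^2 are derived from it only where needed, as in

$$\text{nclip} = 2\,\text{texcoord} - 1$$

which is exactly the vec2 nclipPos = (texcoordPos - 0.5) * 2.0; line that appears further down in the curvature shader.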
@@ -35,6 +35,7 @@
#include "AmbientOcclusionEffect.h"
#include "AntialiasingEffect.h"
#include "ToneMappingEffect.h"
#include "SubsurfaceScattering.h"

using namespace render;

@@ -114,6 +115,7 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {

    addJob<render::BlurGaussian>("DiffuseCurvature", curvatureFramebuffer);

    // AO job
    addJob<AmbientOcclusionEffect>("AmbientOcclusion");

@@ -136,6 +138,7 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
    addJob<DrawOverlay3D>("DrawOverlay3DOpaque", overlayOpaques, true);
    addJob<DrawOverlay3D>("DrawOverlay3DTransparent", overlayTransparents, false);

    addJob<SubsurfaceScattering>("Scattering", deferredFrameTransform);

    // Debugging stages
    {
libraries/render-utils/src/SubsurfaceScattering.cpp (new file, 257 lines)

@@ -0,0 +1,257 @@
//
// SubsurfaceScattering.cpp
// libraries/render-utils/src/
//
// Created by Sam Gateau 6/3/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "SubsurfaceScattering.h"

#include <gpu/Context.h>
#include <gpu/StandardShaderLib.h>

#include "FramebufferCache.h"

const int SubsurfaceScattering_FrameTransformSlot = 0;
const int SubsurfaceScattering_ParamsSlot = 1;
const int SubsurfaceScattering_DepthMapSlot = 0;
const int SubsurfaceScattering_NormalMapSlot = 1;

SubsurfaceScattering::SubsurfaceScattering() {
    Parameters parameters;
    _parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) &parameters));
}

void SubsurfaceScattering::configure(const Config& config) {

    if (config.depthThreshold != getCurvatureDepthThreshold()) {
        _parametersBuffer.edit<Parameters>().curvatureInfo.x = config.depthThreshold;
    }
}


gpu::PipelinePointer SubsurfaceScattering::getScatteringPipeline() {
    if (!_scatteringPipeline) {
        auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
        auto ps = gpu::StandardShaderLib::getDrawTextureOpaquePS();
        gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);

        gpu::Shader::BindingSet slotBindings;
        // slotBindings.insert(gpu::Shader::Binding(std::string("blurParamsBuffer"), BlurTask_ParamsSlot));
        // slotBindings.insert(gpu::Shader::Binding(std::string("sourceMap"), BlurTask_SourceSlot));
        gpu::Shader::makeProgram(*program, slotBindings);

        gpu::StatePointer state = gpu::StatePointer(new gpu::State());

        // Stencil test the curvature pass for objects pixels only, not the background
        // state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));

        _scatteringPipeline = gpu::Pipeline::create(program, state);
    }

    return _scatteringPipeline;
}


void SubsurfaceScattering::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const DeferredFrameTransformPointer& frameTransform, gpu::FramebufferPointer& curvatureFramebuffer) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    RenderArgs* args = renderContext->args;

    if (!_scatteringTable) {
        _scatteringTable = SubsurfaceScattering::generatePreIntegratedScattering();
    }

    auto& pipeline = getScatteringPipeline();

    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        batch.enableStereo(false);

        batch.setViewportTransform(args->_viewport >> 1);
        /* batch.setProjectionTransform(glm::mat4());
        batch.setViewTransform(Transform());

        Transform model;
        model.setTranslation(glm::vec3(sMin, tMin, 0.0f));
        model.setScale(glm::vec3(sWidth, tHeight, 1.0f));
        batch.setModelTransform(model);*/

        batch.setPipeline(pipeline);
        batch.setResourceTexture(0, _scatteringTable);
        batch.draw(gpu::TRIANGLE_STRIP, 4);
    });

}


// Reference: http://www.altdevblogaday.com/2011/12/31/skin-shading-in-unity3d/

#include <cstdio>
#include <cmath>
#include <algorithm>

#define _PI 3.14159265358979523846
#define HEIGHT 512
#define WIDTH 512

using namespace std;


double gaussian(float v, float r) {
    double g = (1.0 / sqrt(2.0 * _PI * v)) * exp(-(r*r) / (2.0 * v));
    return g;
}

vec3 scatter(double r) {
    // Values from GPU Gems 3 "Advanced Skin Rendering".
    // Originally taken from real life samples.
    static const double profile[][4] = {
        { 0.0064, 0.233, 0.455, 0.649 },
        { 0.0484, 0.100, 0.336, 0.344 },
        { 0.1870, 0.118, 0.198, 0.000 },
        { 0.5670, 0.113, 0.007, 0.007 },
        { 1.9900, 0.358, 0.004, 0.000 },
        { 7.4100, 0.078, 0.000, 0.000 }
    };
    static const int profileNum = 6;
    vec3 ret; ret.x = 0.0; ret.y = 0.0; ret.z = 0.0;
    for (int i = 0; i < profileNum; i++) {
        double g = gaussian(profile[i][0] * 1.414f, r);
        ret.x += g * profile[i][1];
        ret.y += g * profile[i][2];
        ret.z += g * profile[i][3];
    }

    return ret;
}
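scatter() evaluates the red, green and blue channels of the sum-of-Gaussians diffusion profile from GPU Gems 3, which can be written as

$$R(r) = \sum_{i=1}^{6} w_i\, G(v_i, r), \qquad G(v, r) = \frac{1}{\sqrt{2\pi v}}\, e^{-r^2/(2v)}$$

where each row of profile[] supplies a variance (scaled by 1.414 in the call to gaussian()) and the per-channel weights in its last three columns.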
vec3 integrate(double cosTheta, double skinRadius) {
    // Angle from lighting direction.
    double theta = acos(cosTheta);
    vec3 totalWeights; totalWeights.x = 0.0; totalWeights.y = 0.0; totalWeights.z = 0.0;
    vec3 totalLight; totalLight.x = 0.0; totalLight.y = 0.0; totalLight.z = 0.0;
    vec3 skinColour; skinColour.x = 1.0; skinColour.y = 1.0; skinColour.z = 1.0;

    double a = -(_PI);

    //const double inc = 0.001;
    const double inc = 0.01;

    while (a <= (_PI)) {
        double sampleAngle = theta + a;
        double diffuse = cos(sampleAngle);
        if (diffuse < 0.0) diffuse = 0.0;
        if (diffuse > 1.0) diffuse = 1.0;

        // Distance.
        double sampleDist = abs(2.0 * skinRadius * sin(a * 0.5));

        // Profile Weight.
        vec3 weights = scatter(sampleDist);

        totalWeights.x += weights.x;
        totalWeights.y += weights.y;
        totalWeights.z += weights.z;
        totalLight.x += diffuse * weights.x * (skinColour.x * skinColour.x);
        totalLight.y += diffuse * weights.y * (skinColour.y * skinColour.y);
        totalLight.z += diffuse * weights.z * (skinColour.z * skinColour.z);
        a += inc;
    }

    vec3 result;
    result.x = totalLight.x / totalWeights.x;
    result.y = totalLight.y / totalWeights.y;
    result.z = totalLight.z / totalWeights.z;

    return result;
}
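Per channel, integrate() is a fixed-step numerical approximation of the pre-integrated diffuse response on a ring of radius skinRadius, roughly

$$D(\theta, r) = \frac{\displaystyle\int_{-\pi}^{\pi} \operatorname{clamp}(\cos(\theta + x), 0, 1)\, R\!\big(2r\sin\tfrac{x}{2}\big)\, dx}{\displaystyle\int_{-\pi}^{\pi} R\!\big(2r\sin\tfrac{x}{2}\big)\, dx}$$

with R the scatter() profile above and step size inc; the skinColour factor is 1.0 here, so it drops out of the ratio.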
void diffuseScatter(gpu::TexturePointer& lut) {
    for (int j = 0; j < HEIGHT; j++) {
        for (int i = 0; i < WIDTH; i++) {
            // Lookup by: x: NDotL y: 1 / r
            float y = 2.0 * 1.0 / ((j + 1.0) / (double)HEIGHT);
            float x = ((i / (double)WIDTH) * 2.0) - 1.0;
            vec3 val = integrate(x, y);

            // Convert to linear
            val.x = sqrt(val.x);
            val.y = sqrt(val.y);
            val.z = sqrt(val.z);

            // Convert to 24-bit image.
            unsigned char valI[3];
            if (val.x > 1.0) val.x = 1.0;
            if (val.y > 1.0) val.y = 1.0;
            if (val.z > 1.0) val.z = 1.0;
            valI[0] = (unsigned char)(val.x * 256.0);
            valI[1] = (unsigned char)(val.y * 256.0);
            valI[2] = (unsigned char)(val.z * 256.0);
            //printf("%u %u %u\n", valI[0], valI[1], valI[2]);

            // Write to file.
            // fwrite(valI, sizeof(unsigned char), 3, output_file);
        }

        printf("%.2lf%% Done...\n", (j / (float)HEIGHT) * 100.0f);
    }
}
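As committed, diffuseScatter() computes valI for every texel but never stores it into the lut argument; the file-writing path is commented out. diffuseProfile() below shows the upload path that is actually used (pack into a byte vector, then assignStoredMip). Purely as a sketch, and assuming the same 3-bytes-per-texel packing, the same wiring could look like this (diffuseScatterToTexture is a hypothetical name, not part of the commit):

void diffuseScatterToTexture(gpu::TexturePointer& lut) {
    // Hypothetical variant mirroring diffuseProfile(): buffer the pre-integrated
    // values, then upload them into the texture in one call.
    std::vector<unsigned char> bytes(3 * HEIGHT * WIDTH);
    int index = 0;
    for (int j = 0; j < HEIGHT; j++) {
        for (int i = 0; i < WIDTH; i++) {
            // Same parameterization as diffuseScatter(): x is NdotL in [-1, 1], y is 1 / r.
            float y = 2.0 * 1.0 / ((j + 1.0) / (double)HEIGHT);
            float x = ((i / (double)WIDTH) * 2.0) - 1.0;
            vec3 val = integrate(x, y);

            // Square root and clamp as above, then quantize to 8 bits per channel
            // (using 255, as diffuseProfile() does).
            val.x = std::min(sqrt((double)val.x), 1.0);
            val.y = std::min(sqrt((double)val.y), 1.0);
            val.z = std::min(sqrt((double)val.z), 1.0);
            bytes[3 * index]     = (unsigned char)(val.x * 255.0);
            bytes[3 * index + 1] = (unsigned char)(val.y * 255.0);
            bytes[3 * index + 2] = (unsigned char)(val.z * 255.0);
            index++;
        }
    }
    lut->assignStoredMip(0, lut->getTexelFormat(), (int)bytes.size(), bytes.data());
}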

void diffuseProfile(gpu::TexturePointer& profile) {
    std::vector<unsigned char> bytes(3 * HEIGHT * WIDTH);
    int size = sizeof(unsigned char) * bytes.size();

    int index = 0;
    for (int j = 0; j < HEIGHT; j++) {
        for (int i = 0; i < WIDTH; i++) {
            float y = (double)(j + 1.0) / (double)HEIGHT;
            vec3 val = scatter(y * 2.0f);

            // Convert to 24-bit image.
            unsigned char valI[3];
            if (val.x > 1.0) val.x = 1.0;
            if (val.y > 1.0) val.y = 1.0;
            if (val.z > 1.0) val.z = 1.0;
            valI[0] = (unsigned char)(val.x * 255.0);
            valI[1] = (unsigned char)(val.y * 255.0);
            valI[2] = (unsigned char)(val.z * 255.0);

            bytes[3 * index] = valI[0];
            bytes[3 * index + 1] = valI[1];
            bytes[3 * index + 2] = valI[2];

            // Write to file.
            // fwrite(valI, sizeof(unsigned char), 3, output_file);

            index++;
        }
    }

    profile->assignStoredMip(0, profile->getTexelFormat(), size, bytes.data());
}

/*int main() {
    diffuseScatter();
    //diffuseProfile();
}*/


gpu::TexturePointer SubsurfaceScattering::generatePreIntegratedScattering() {
    auto scatteringLUT = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, WIDTH, HEIGHT));
    //diffuseScatter(scatteringLUT);
    diffuseProfile(scatteringLUT);
    return scatteringLUT;
}
libraries/render-utils/src/SubsurfaceScattering.h (new file, 71 lines)

@@ -0,0 +1,71 @@
//
// SubsurfaceScattering.h
// libraries/render-utils/src/
//
// Created by Sam Gateau 6/3/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_SubsurfaceScattering_h
#define hifi_SubsurfaceScattering_h

#include <DependencyManager.h>

#include "render/DrawTask.h"
#include "DeferredFrameTransform.h"

class SubsurfaceScatteringConfig : public render::Job::Config {
    Q_OBJECT
    Q_PROPERTY(float depthThreshold MEMBER depthThreshold NOTIFY dirty)
public:
    SubsurfaceScatteringConfig() : render::Job::Config(true) {}

    float depthThreshold{ 0.1f };

signals:
    void dirty();
};

class SubsurfaceScattering {
public:
    using Config = SubsurfaceScatteringConfig;
    using JobModel = render::Job::ModelIO<SubsurfaceScattering, DeferredFrameTransformPointer, gpu::FramebufferPointer, Config>;

    SubsurfaceScattering();

    void configure(const Config& config);
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const DeferredFrameTransformPointer& frameTransform, gpu::FramebufferPointer& curvatureFramebuffer);

    float getCurvatureDepthThreshold() const { return _parametersBuffer.get<Parameters>().curvatureInfo.x; }


    static gpu::TexturePointer generatePreIntegratedScattering();

private:
    typedef gpu::BufferView UniformBufferView;

    // Class describing the uniform buffer with all the parameters common to the AO shaders
    class Parameters {
    public:
        // Resolution info
        glm::vec4 resolutionInfo { -1.0f, 0.0f, 0.0f, 0.0f };
        // Curvature algorithm
        glm::vec4 curvatureInfo{ 0.0f };

        Parameters() {}
    };
    gpu::BufferView _parametersBuffer;

    gpu::TexturePointer _scatteringTable;

    gpu::PipelinePointer _scatteringPipeline;

    gpu::PipelinePointer getScatteringPipeline();
};

#endif // hifi_SubsurfaceScattering_h
@@ -75,8 +75,8 @@ vec3 unpackNormal(in vec3 p) {
    return oct_to_float32x3(unorm8x3_to_snorm12x2(p));
}

vec2 sideToFrameNclip(vec2 side, vec2 nclipPos) {
    return vec2((nclipPos.x + side.x) * side.y, nclipPos.y);
vec2 sideToFrameTexcoord(vec2 side, vec2 texcoordPos) {
    return vec2((texcoordPos.x + side.x) * side.y, texcoordPos.y);
}

uniform sampler2D normalMap;

@@ -109,24 +109,24 @@ out vec4 outFragColor;
void main(void) {
    // Pixel being shaded
    ivec2 pixelPos;
    vec2 nclipPos;
    vec2 texcoordPos;
    ivec4 stereoSide;
    ivec2 framePixelPos = getPixelPosNclipPosAndSide(gl_FragCoord.xy, pixelPos, nclipPos, stereoSide);
    ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);
    vec2 stereoSideClip = vec2(stereoSide.x, (isStereo() ? 0.5 : 1.0));
    vec2 frameNclipPos = sideToFrameNclip(stereoSideClip, nclipPos);
    vec2 frameTexcoordPos = sideToFrameTexcoord(stereoSideClip, texcoordPos);

    // Fetch the z under the pixel (stereo or not)
    float Zeye = getZEye(framePixelPos);

    vec3 worldNormal = getWorldNormal(frameNclipPos);
    vec3 worldNormal = getWorldNormal(frameTexcoordPos);

    // The position of the pixel fragment in Eye space then in world space
    vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, nclipPos);
    vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, texcoordPos);
    vec3 worldPos = (frameTransform._viewInverse * vec4(eyePos, 1.0)).xyz;

    // Calculate the perspective scale.
    float perspectiveScale =(-getProjScaleEye() / Zeye);
    //outFragColor = vec4(vec3(perspectiveScale * 0.1), 1.0);
    float perspectiveScale = (-getProjScaleEye() / Zeye);
    float pixPerspectiveScaleInv = 1.0 / (perspectiveScale);

    vec2 viewportScale = perspectiveScale * getInvWidthHeight();

@@ -135,12 +135,12 @@ void main(void) {
    vec2 du = vec2( 1.0f, 0.0f ) * viewportScale.x;
    vec2 dv = vec2( 0.0f, 1.0f ) * viewportScale.y;

    vec4 dFdu = vec4(getWorldNormalDiff(frameNclipPos, du), getEyeDepthDiff(frameNclipPos, du));
    vec4 dFdv = vec4(getWorldNormalDiff(frameNclipPos, dv), getEyeDepthDiff(frameNclipPos, dv));
    vec4 dFdu = vec4(getWorldNormalDiff(frameTexcoordPos, du), getEyeDepthDiff(frameTexcoordPos, du));
    vec4 dFdv = vec4(getWorldNormalDiff(frameTexcoordPos, dv), getEyeDepthDiff(frameTexcoordPos, dv));
    dFdu *= step(abs(dFdu.w), threshold);
    dFdv *= step(abs(dFdv.w), threshold);

    outFragColor = vec4(dFdu.xyz, 1.0);
    // outFragColor = vec4(dFdu.xyz, 1.0);

    // Calculate ( du/dx, du/dy, du/dz ) and ( dv/dx, dv/dy, dv/dz )

@@ -161,11 +161,11 @@ void main(void) {
    py.xy /= py.w;
    pz.xy /= pz.w;

    vec2 hclipPos = (nclipPos * 2.0 - 1.0);
    vec2 nclipPos = (texcoordPos - 0.5) * 2.0;

    px.xy = (px.xy - hclipPos) / perspectiveScale;
    py.xy = (py.xy - hclipPos) / perspectiveScale;
    pz.xy = (pz.xy - hclipPos) / perspectiveScale;
    px.xy = (px.xy - nclipPos) * pixPerspectiveScaleInv;
    py.xy = (py.xy - nclipPos) * pixPerspectiveScaleInv;
    pz.xy = (pz.xy - nclipPos) * pixPerspectiveScaleInv;

    // Calculate dF/dx, dF/dy and dF/dz using chain rule
    vec4 dFdx = dFdu * px.x + dFdv * px.y;

@@ -174,6 +174,6 @@ void main(void) {

    // Calculate the mean curvature
    float meanCurvature = ((dFdx.x + dFdy.y + dFdz.z) * 0.33333333333333333) * params.curvatureInfo.w;
    //outFragColor = vec4(vec3(worldNormal + 1.0) * 0.5, (meanCurvature + 1.0) * 0.5);
    outFragColor = vec4((vec3(dFdx.x, dFdy.y, dFdz.z) * params.curvatureInfo.w + 1.0) * 0.5, (meanCurvature + 1.0) * 0.5);
    outFragColor = vec4(vec3(worldNormal + 1.0) * 0.5, (meanCurvature + 1.0) * 0.5);
    // outFragColor = vec4((vec3(dFdx.x, dFdy.y, dFdz.z) * params.curvatureInfo.w + 1.0) * 0.5, (meanCurvature + 1.0) * 0.5);
}
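The meanCurvature line averages the three diagonal screen-space derivatives of F (the world normal plus eye depth vector differenced above), i.e. a divergence-style curvature estimate scaled by the tuning factor in curvatureInfo.w:

$$\text{meanCurvature} = \frac{1}{3}\left(\frac{\partial F_x}{\partial x} + \frac{\partial F_y}{\partial y} + \frac{\partial F_z}{\partial z}\right) \cdot \text{curvatureInfo.w}$$

The edited lines only change which debug visualization is written to outFragColor alongside it; both variants keep the curvature estimate in the alpha channel.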
@@ -16,6 +16,9 @@
#include "blurGaussianV_frag.h"
#include "blurGaussianH_frag.h"

#include "blurGaussianDepthAwareV_frag.h"
#include "blurGaussianDepthAwareH_frag.h"

using namespace render;

enum BlurShaderBufferSlots {

@@ -32,11 +35,18 @@ BlurParams::BlurParams() {
    _parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Params), (const gpu::Byte*) &params));
}

void BlurParams::setWidthHeight(int width, int height) {
void BlurParams::setWidthHeight(int width, int height, bool isStereo) {
    auto resolutionInfo = _parametersBuffer.get<Params>().resolutionInfo;
    bool resChanged = false;
    if (width != resolutionInfo.x || height != resolutionInfo.y) {
        resChanged = true;
        _parametersBuffer.edit<Params>().resolutionInfo = glm::vec4((float) width, (float) height, 1.0f / (float) width, 1.0f / (float) height);
    }

    auto stereoInfo = _parametersBuffer.get<Params>().stereoInfo;
    if (isStereo || resChanged) {
        _parametersBuffer.edit<Params>().stereoInfo = glm::vec4((float)width, (float)height, 1.0f / (float)width, 1.0f / (float)height);
    }
}

void BlurParams::setFilterRadiusScale(float scale) {

@@ -150,7 +160,7 @@ void BlurGaussian::run(const SceneContextPointer& sceneContext, const RenderCont
    auto blurVPipeline = getBlurVPipeline();
    auto blurHPipeline = getBlurHPipeline();

    _parameters->setWidthHeight(args->_viewport.z, args->_viewport.w);
    _parameters->setWidthHeight(args->_viewport.z, args->_viewport.w, args->_context->isStereo());

    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        batch.enableStereo(false);
@@ -175,3 +185,133 @@ void BlurGaussian::run(const SceneContextPointer& sceneContext, const RenderCont
    });
}



BlurGaussianDepthAware::BlurGaussianDepthAware() {
    _parameters = std::make_shared<BlurParams>();
}

gpu::PipelinePointer BlurGaussianDepthAware::getBlurVPipeline() {
    if (!_blurVPipeline) {
        auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
        auto ps = gpu::Shader::createPixel(std::string(blurGaussianDepthAwareV_frag));
        gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);

        gpu::Shader::BindingSet slotBindings;
        slotBindings.insert(gpu::Shader::Binding(std::string("blurParamsBuffer"), BlurTask_ParamsSlot));
        slotBindings.insert(gpu::Shader::Binding(std::string("sourceMap"), BlurTask_SourceSlot));
        gpu::Shader::makeProgram(*program, slotBindings);

        gpu::StatePointer state = gpu::StatePointer(new gpu::State());

        // Stencil test the curvature pass for objects pixels only, not the background
        state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));

        _blurVPipeline = gpu::Pipeline::create(program, state);
    }

    return _blurVPipeline;
}

gpu::PipelinePointer BlurGaussianDepthAware::getBlurHPipeline() {
    if (!_blurHPipeline) {
        auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
        auto ps = gpu::Shader::createPixel(std::string(blurGaussianDepthAwareH_frag));
        gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);

        gpu::Shader::BindingSet slotBindings;
        slotBindings.insert(gpu::Shader::Binding(std::string("blurParamsBuffer"), BlurTask_ParamsSlot));
        slotBindings.insert(gpu::Shader::Binding(std::string("sourceMap"), BlurTask_SourceSlot));
        gpu::Shader::makeProgram(*program, slotBindings);

        gpu::StatePointer state = gpu::StatePointer(new gpu::State());

        // Stencil test the curvature pass for objects pixels only, not the background
        state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));

        _blurHPipeline = gpu::Pipeline::create(program, state);
    }

    return _blurHPipeline;
}

bool BlurGaussianDepthAware::updateBlurringResources(const gpu::FramebufferPointer& sourceFramebuffer, BlurringResources& blurringResources) {
    if (!sourceFramebuffer) {
        return false;
    }

    if (!_blurredFramebuffer) {
        _blurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());

        // attach depthStencil if present in source
        if (sourceFramebuffer->hasDepthStencil()) {
            _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
        }
        auto blurringSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT);
        auto blurringTarget = gpu::TexturePointer(gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), blurringSampler));
        _blurredFramebuffer->setRenderBuffer(0, blurringTarget);
    } else {
        // it would be easier to just call resize on the bluredFramebuffer and let it work if needed but the source might loose it's depth buffer when doing so
        if ((_blurredFramebuffer->getWidth() != sourceFramebuffer->getWidth()) || (_blurredFramebuffer->getHeight() != sourceFramebuffer->getHeight())) {
            _blurredFramebuffer->resize(sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), sourceFramebuffer->getNumSamples());
            if (sourceFramebuffer->hasDepthStencil()) {
                _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
            }
        }
    }

    blurringResources.sourceTexture = sourceFramebuffer->getRenderBuffer(0);
    blurringResources.blurringFramebuffer = _blurredFramebuffer;
    blurringResources.blurringTexture = _blurredFramebuffer->getRenderBuffer(0);
    blurringResources.finalFramebuffer = sourceFramebuffer;

    return true;
}

void BlurGaussianDepthAware::configure(const Config& config) {
    _parameters->setFilterRadiusScale(config.filterScale);
}


void BlurGaussianDepthAware::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const InputPair& SourceAndDepth) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    RenderArgs* args = renderContext->args;

    auto& sourceFramebuffer = SourceAndDepth.first.template get<gpu::FramebufferPointer>();
    auto& depthTexture = SourceAndDepth.first.template get<gpu::TexturePointer>();

    BlurringResources blurringResources;
    if (!updateBlurringResources(sourceFramebuffer, blurringResources)) {
        // early exit if no valid blurring resources
        return;
    }

    auto blurVPipeline = getBlurVPipeline();
    auto blurHPipeline = getBlurHPipeline();

    _parameters->setWidthHeight(args->_viewport.z, args->_viewport.w, args->_context->isStereo());

    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.setViewportTransform(args->_viewport);

        batch.setUniformBuffer(BlurTask_ParamsSlot, _parameters->_parametersBuffer);

        batch.setFramebuffer(blurringResources.blurringFramebuffer);
        batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0));

        batch.setPipeline(blurVPipeline);
        batch.setResourceTexture(BlurTask_SourceSlot, blurringResources.sourceTexture);
        batch.draw(gpu::TRIANGLE_STRIP, 4);

        batch.setFramebuffer(blurringResources.finalFramebuffer);
        batch.setPipeline(blurHPipeline);
        batch.setResourceTexture(BlurTask_SourceSlot, blurringResources.blurringTexture);
        batch.draw(gpu::TRIANGLE_STRIP, 4);

        batch.setResourceTexture(BlurTask_SourceSlot, nullptr);
        batch.setUniformBuffer(BlurTask_ParamsSlot, nullptr);
    });
}
@@ -20,7 +20,7 @@ namespace render {
class BlurParams {
public:

    void setWidthHeight(int width, int height);
    void setWidthHeight(int width, int height, bool isStereo);

    void setFilterRadiusScale(float scale);

@@ -33,6 +33,9 @@ public:
        // Filter info (radius scale
        glm::vec4 filterInfo{ 1.0f, 0.0f, 0.0f, 0.0f };

        // stereo info if blurring a stereo render
        glm::vec4 stereoInfo{ 0.0f };

        Params() {}
    };
    gpu::BufferView _parametersBuffer;
@@ -86,6 +89,48 @@ protected:
    bool updateBlurringResources(const gpu::FramebufferPointer& sourceFramebuffer, BlurringResources& blurringResources);
};


template < class T0, class T1 >
class VaryingPair : public std::pair<Varying, Varying> {
public:
    using Parent = std::pair<Varying, Varying>;

    VaryingPair() : Parent(Varying(T0()), T1()) {}
};

class BlurGaussianDepthAware {
public:
    using InputPair = VaryingPair<gpu::FramebufferPointer, gpu::TexturePointer>;
    using Config = BlurGaussianConfig;
    using JobModel = Job::ModelI<BlurGaussianDepthAware, InputPair, Config>;

    BlurGaussianDepthAware();

    void configure(const Config& config);
    void run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const InputPair& SourceAndDepth);

protected:

    BlurParamsPointer _parameters;

    gpu::PipelinePointer _blurVPipeline;
    gpu::PipelinePointer _blurHPipeline;

    gpu::PipelinePointer getBlurVPipeline();
    gpu::PipelinePointer getBlurHPipeline();

    gpu::FramebufferPointer _blurredFramebuffer;

    struct BlurringResources {
        gpu::TexturePointer sourceTexture;
        gpu::FramebufferPointer blurringFramebuffer;
        gpu::TexturePointer blurringTexture;
        gpu::FramebufferPointer finalFramebuffer;
    };
    bool updateBlurringResources(const gpu::FramebufferPointer& sourceFramebuffer, BlurringResources& blurringResources);
};


}

#endif // hifi_render_DrawTask_h
@@ -24,6 +24,7 @@ const float gaussianDistributionOffset[NUM_TAPS] = float[](
struct BlurParameters {
    vec4 resolutionInfo;
    vec4 filterInfo;
    vec4 stereoInfo;
};

uniform blurParamsBuffer {
libraries/render/src/render/blurGaussianDepthAwareH.slf (new file, 23 lines)

@@ -0,0 +1,23 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// Created by Sam Gateau on 6/7/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

<@include BlurTask.slh@>
<$declareBlurGaussian()$>


in vec2 varTexCoord0;

out vec4 outFragColor;

void main(void) {
    outFragColor = pixelShaderGaussianDepthAware(varTexCoord0, vec2(1.0, 0.0), getViewportInvWidthHeight());
}
libraries/render/src/render/blurGaussianDepthAwareV.slf (new file, 23 lines)

@@ -0,0 +1,23 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// Created by Sam Gateau on 6/7/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

<@include BlurTask.slh@>
<$declareBlurGaussian()$>


in vec2 varTexCoord0;

out vec4 outFragColor;

void main(void) {
    outFragColor = pixelShaderGaussianDepthAware(varTexCoord0, vec2(0.0, 1.0), getViewportInvWidthHeight());
}
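The H and V shaders differ only in the direction passed to pixelShaderGaussianDepthAware (vec2(1.0, 0.0) versus vec2(0.0, 1.0)). The two-pass structure relies on the separability of the Gaussian kernel,

$$G_{2D}(x, y) = g(x)\, g(y) \;\Rightarrow\; (I * G_{2D})(p) = \sum_j g(j) \sum_i g(i)\, I(p + (i, j))$$

so BlurGaussianDepthAware::run above draws the vertical pass into the intermediate framebuffer and then the horizontal pass back into the final one. The depth-aware weighting makes the split only approximately equivalent to a single 2D pass, but it keeps the cost at two N-tap passes instead of one N by N pass.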