diff --git a/libraries/render-utils/src/DebugDeferredBuffer.cpp b/libraries/render-utils/src/DebugDeferredBuffer.cpp
index 0e007b8314..076f4a3880 100644
--- a/libraries/render-utils/src/DebugDeferredBuffer.cpp
+++ b/libraries/render-utils/src/DebugDeferredBuffer.cpp
@@ -157,10 +157,26 @@ static const std::string DEFAULT_NORMAL_CURVATURE_SHADER{
     " }"
 };
 
+static const std::string DEFAULT_DIFFUSED_CURVATURE_SHADER{
+    "vec4 getFragmentColor() {"
+    "    return vec4(pow(vec3(texture(diffusedCurvatureMap, uv).a), vec3(1.0 / 2.2)), 1.0);"
+    // "    return vec4(pow(vec3(texture(curvatureMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
+    //"    return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
+    " }"
+};
+
+static const std::string DEFAULT_DIFFUSED_NORMAL_CURVATURE_SHADER{
+    "vec4 getFragmentColor() {"
+    //"    return vec4(pow(vec3(texture(curvatureMap, uv).a), vec3(1.0 / 2.2)), 1.0);"
+    "    return vec4(pow(vec3(texture(diffusedCurvatureMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
+    //"    return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
+    " }"
+};
+
 static const std::string DEFAULT_SCATTERING_SHADER{
     "vec4 getFragmentColor() {"
-  //  "    return vec4(pow(vec3(texture(scatteringMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
-    "    return vec4(vec3(texture(scatteringMap, uv).xyz), 1.0);"
+    "    return vec4(pow(vec3(texture(scatteringMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
+  //  "    return vec4(vec3(texture(scatteringMap, uv).xyz), 1.0);"
     " }"
 };
 
@@ -233,6 +249,10 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Mode mode, std::string cust
             return DEFAULT_CURVATURE_SHADER;
         case NormalCurvatureMode:
             return DEFAULT_NORMAL_CURVATURE_SHADER;
+        case DiffusedCurvatureMode:
+            return DEFAULT_DIFFUSED_CURVATURE_SHADER;
+        case DiffusedNormalCurvatureMode:
+            return DEFAULT_DIFFUSED_NORMAL_CURVATURE_SHADER;
         case ScatteringMode:
             return DEFAULT_SCATTERING_SHADER;
         case AmbientOcclusionMode:
@@ -317,12 +337,13 @@ void DebugDeferredBuffer::configure(const Config& config) {
     _size = config.size;
 }
 
-void DebugDeferredBuffer::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const gpu::FramebufferPointer&  inputBuffer) {
+void DebugDeferredBuffer::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
     assert(renderContext->args);
     assert(renderContext->args->hasViewFrustum());
     RenderArgs* args = renderContext->args;
 
-    auto& scatteringFramebuffer = inputBuffer;
+    auto& diffusedCurvatureFramebuffer = inputs.getFirst();
+    auto& scatteringFramebuffer = inputs.getSecond();
 
     gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
         batch.enableStereo(false);
@@ -354,7 +375,7 @@ void DebugDeferredBuffer::run(const SceneContextPointer& sceneContext, const Ren
         batch.setResourceTexture(Shadow, lightStage.lights[0]->shadow.framebuffer->getDepthStencilBuffer());
         batch.setResourceTexture(Pyramid, framebufferCache->getDepthPyramidTexture());
         batch.setResourceTexture(Curvature, framebufferCache->getCurvatureTexture());
-        //batch.setResourceTexture(DiffusedCurvature, diffusedCurvatureBuffer);
+        batch.setResourceTexture(DiffusedCurvature, diffusedCurvatureFramebuffer->getRenderBuffer(0));
         batch.setResourceTexture(Scattering, scatteringFramebuffer->getRenderBuffer(0));
         if (DependencyManager::get<DeferredLightingEffect>()->isAmbientOcclusionEnabled()) {
             batch.setResourceTexture(AmbientOcclusion, framebufferCache->getOcclusionTexture());
diff --git a/libraries/render-utils/src/DebugDeferredBuffer.h b/libraries/render-utils/src/DebugDeferredBuffer.h
index fc99cae82c..095e0ab9cc 100644
--- a/libraries/render-utils/src/DebugDeferredBuffer.h
+++ b/libraries/render-utils/src/DebugDeferredBuffer.h
@@ -34,13 +34,14 @@ signals:
 
 class DebugDeferredBuffer {
 public:
+    using Inputs = render::VaryingPair<gpu::FramebufferPointer, gpu::FramebufferPointer>;
     using Config = DebugDeferredBufferConfig;
-    using JobModel = render::Job::ModelI<DebugDeferredBuffer, gpu::FramebufferPointer, Config>;
+    using JobModel = render::Job::ModelI<DebugDeferredBuffer, Inputs, Config>;
     
     DebugDeferredBuffer();
 
     void configure(const Config& config);
-    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& inputBuffer);
+    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs);
     
 protected:
     friend class DebugDeferredBufferConfig;
@@ -61,6 +62,8 @@ protected:
         PyramidDepthMode,
         CurvatureMode,
         NormalCurvatureMode,
+        DiffusedCurvatureMode,
+        DiffusedNormalCurvatureMode,
         ScatteringMode,
         AmbientOcclusionMode,
         AmbientOcclusionBlurredMode,
diff --git a/libraries/render-utils/src/RenderDeferredTask.cpp b/libraries/render-utils/src/RenderDeferredTask.cpp
index b4b6a979da..dcdf45e0c9 100755
--- a/libraries/render-utils/src/RenderDeferredTask.cpp
+++ b/libraries/render-utils/src/RenderDeferredTask.cpp
@@ -113,20 +113,22 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
     // Opaque all rendered, generate surface geometry buffers
     const auto curvatureFramebufferAndDepth = addJob<SurfaceGeometryPass>("SurfaceGeometry", deferredFrameTransform);
 
-    addJob<render::BlurGaussianDepthAware>("DiffuseCurvature", curvatureFramebufferAndDepth);
-
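+    // SIMPLE_BLUR == 1 uses the plain Gaussian blur for the curvature buffers; set it to 0 to go back to the depth-aware blur path below.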
+#define SIMPLE_BLUR 1
+#if SIMPLE_BLUR
+    const auto curvatureFramebuffer = addJob<render::BlurGaussian>("DiffuseCurvature", curvatureFramebufferAndDepth.get<SurfaceGeometryPass::Outputs>().first);
+    const auto diffusedCurvatureFramebuffer = addJob<render::BlurGaussian>("DiffuseCurvature2", curvatureFramebufferAndDepth.get<SurfaceGeometryPass::Outputs>().first, true);
+#else
+    const auto curvatureFramebuffer = addJob<render::BlurGaussianDepthAware>("DiffuseCurvature", curvatureFramebufferAndDepth);
     const auto diffusedCurvatureFramebuffer = addJob<render::BlurGaussianDepthAware>("DiffuseCurvature2", curvatureFramebufferAndDepth, true);
-    
+#endif
+
     // AO job
     addJob<AmbientOcclusionEffect>("AmbientOcclusion");
 
     // Draw Lights just add the lights to the current list of lights to deal with. NOt really gpu job for now.
     addJob<DrawLight>("DrawLight", lights);
 
-    curvatureFramebufferAndDepth.get<SurfaceGeometryPass::Outputs>().first;
-
-  //  const auto scatteringInputs = render::Varying(SubsurfaceScattering::Inputs(deferredFrameTransform, curvatureFramebufferAndDepth[0]));
-    const auto scatteringInputs = render::Varying(SubsurfaceScattering::Inputs(deferredFrameTransform, curvatureFramebufferAndDepth.get<SurfaceGeometryPass::Outputs>().first));
+    const auto scatteringInputs = render::Varying(SubsurfaceScattering::Inputs(deferredFrameTransform, curvatureFramebuffer, diffusedCurvatureFramebuffer));
     const auto scatteringFramebuffer = addJob<SubsurfaceScattering>("Scattering", scatteringInputs);
 
     // DeferredBuffer is complete, now let's shade it into the LightingBuffer
@@ -150,7 +152,8 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
     // Debugging stages
     {
         // Debugging Deferred buffer job
-        addJob<DebugDeferredBuffer>("DebugDeferredBuffer", scatteringFramebuffer);
+        const auto debugFramebuffers = render::Varying(DebugDeferredBuffer::Inputs(diffusedCurvatureFramebuffer, scatteringFramebuffer));
+        addJob<DebugDeferredBuffer>("DebugDeferredBuffer", debugFramebuffers);
 
         // Scene Octree Debuging job
         {
diff --git a/libraries/render-utils/src/SubsurfaceScattering.cpp b/libraries/render-utils/src/SubsurfaceScattering.cpp
index 1dcc9c7196..741c563555 100644
--- a/libraries/render-utils/src/SubsurfaceScattering.cpp
+++ b/libraries/render-utils/src/SubsurfaceScattering.cpp
@@ -18,11 +18,20 @@
 #include "subsurfaceScattering_makeLUT_frag.h"
 #include "subsurfaceScattering_drawScattering_frag.h"
 
-const int SubsurfaceScattering_FrameTransformSlot = 0;
-const int SubsurfaceScattering_ParamsSlot = 1;
-const int SubsurfaceScattering_CurvatureMapSlot = 0;
-const int SubsurfaceScattering_NormalMapSlot = 1;
-const int SubsurfaceScattering_ScatteringTableSlot = 2;
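+// Buffer and texture slot indices for the scattering pipeline; they must match the slotBindings declared in getScatteringPipeline() below.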
+enum ScatteringShaderBufferSlots {
+    ScatteringTask_FrameTransformSlot = 0,
+    ScatteringTask_ParamSlot,
+};
+enum ScatteringShaderMapSlots {
+    ScatteringTask_ScatteringTableSlot = 0,
+    ScatteringTask_CurvatureMapSlot,
+    ScatteringTask_DiffusedCurvatureMapSlot,
+    ScatteringTask_NormalMapSlot,
+
+    ScatteringTask_AlbedoMapSlot,
+    ScatteringTask_LinearMapSlot,
+};
 
 SubsurfaceScattering::SubsurfaceScattering() {
     Parameters parameters;
@@ -30,9 +39,19 @@ SubsurfaceScattering::SubsurfaceScattering() {
 }
 
 void SubsurfaceScattering::configure(const Config& config) {
-    
-    if (config.depthThreshold != getCurvatureDepthThreshold()) {
-        _parametersBuffer.edit<Parameters>().curvatureInfo.x = config.depthThreshold;
+    auto& params = _parametersBuffer.get<Parameters>();
+
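+    // Only write back into the parameters buffer when a value actually changed, to avoid redundant GPU updates.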
+    glm::vec4 bentInfo(config.bentRed, config.bentGreen, config.bentBlue, config.bentScale);
+
+    if (bentInfo != params.normalBentInfo) {
+        _parametersBuffer.edit<Parameters>().normalBentInfo = bentInfo;
+    }
+
+    if (config.curvatureOffset != params.curvatureInfo.x) {
+        _parametersBuffer.edit<Parameters>().curvatureInfo.x = config.curvatureOffset;
+    }
+    if (config.curvatureScale != params.curvatureInfo.y) {
+        _parametersBuffer.edit<Parameters>().curvatureInfo.y = config.curvatureScale;
     }
 
     _showLUT = config.showLUT;
@@ -48,12 +67,17 @@ gpu::PipelinePointer SubsurfaceScattering::getScatteringPipeline() {
         gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
 
         gpu::Shader::BindingSet slotBindings;
-        slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), SubsurfaceScattering_FrameTransformSlot));
-        // slotBindings.insert(gpu::Shader::Binding(std::string("sourceMap"), BlurTask_SourceSlot));
+        slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), ScatteringTask_FrameTransformSlot));
+        slotBindings.insert(gpu::Shader::Binding(std::string("scatteringParamsBuffer"), ScatteringTask_ParamSlot));
+
+        slotBindings.insert(gpu::Shader::Binding(std::string("scatteringLUT"), ScatteringTask_ScatteringTableSlot));
+        slotBindings.insert(gpu::Shader::Binding(std::string("curvatureMap"), ScatteringTask_CurvatureMapSlot));
+        slotBindings.insert(gpu::Shader::Binding(std::string("diffusedCurvatureMap"), ScatteringTask_DiffusedCurvatureMapSlot));
+        slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), ScatteringTask_NormalMapSlot));
+
+        slotBindings.insert(gpu::Shader::Binding(std::string("albedoMap"), ScatteringTask_AlbedoMapSlot));
+        slotBindings.insert(gpu::Shader::Binding(std::string("linearDepthMap"), ScatteringTask_LinearMapSlot));
 
-        slotBindings.insert(gpu::Shader::Binding(std::string("curvatureMap"), SubsurfaceScattering_CurvatureMapSlot));
-        slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), SubsurfaceScattering_NormalMapSlot));
-        slotBindings.insert(gpu::Shader::Binding(std::string("scatteringLUT"), SubsurfaceScattering_ScatteringTableSlot));
         gpu::Shader::makeProgram(*program, slotBindings);
 
         gpu::StatePointer state = gpu::StatePointer(new gpu::State());
@@ -112,7 +136,7 @@ bool SubsurfaceScattering::updateScatteringFramebuffer(const gpu::FramebufferPoi
         if ((_scatteringFramebuffer->getWidth() != sourceFramebuffer->getWidth()) || (_scatteringFramebuffer->getHeight() != sourceFramebuffer->getHeight())) {
             _scatteringFramebuffer->resize(sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), sourceFramebuffer->getNumSamples());
             if (sourceFramebuffer->hasDepthStencil()) {
-                _scatteringFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
+           //     _scatteringFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
             }
         }
     }
@@ -139,8 +163,9 @@ void SubsurfaceScattering::run(const render::SceneContextPointer& sceneContext,
 
     auto pipeline = getScatteringPipeline();
     
-    auto& frameTransform = inputs.first. template get<DeferredFrameTransformPointer>();//getFirst();
-    auto& curvatureFramebuffer = inputs.second. template get<gpu::FramebufferPointer>();//getSecond();
+    auto& frameTransform = inputs.getFirst();
+    auto& curvatureFramebuffer = inputs.getSecond();
+    auto& diffusedFramebuffer = inputs.getThird();
     
 
     auto framebufferCache = DependencyManager::get<FramebufferCache>();
@@ -155,13 +180,21 @@ void SubsurfaceScattering::run(const render::SceneContextPointer& sceneContext,
         batch.setViewportTransform(args->_viewport);
 
         batch.setFramebuffer(_scatteringFramebuffer);
-
-        batch.setUniformBuffer(SubsurfaceScattering_FrameTransformSlot, frameTransform->getFrameTransformBuffer());
+     //   batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(vec3(0), 0), false);
 
         batch.setPipeline(pipeline);
-        batch.setResourceTexture(SubsurfaceScattering_NormalMapSlot, framebufferCache->getDeferredNormalTexture());
-        batch.setResourceTexture(SubsurfaceScattering_CurvatureMapSlot, curvatureFramebuffer->getRenderBuffer(0));
-        batch.setResourceTexture(SubsurfaceScattering_ScatteringTableSlot, _scatteringTable);
+
+        batch.setUniformBuffer(ScatteringTask_FrameTransformSlot, frameTransform->getFrameTransformBuffer());
+        batch.setUniformBuffer(ScatteringTask_ParamSlot, _parametersBuffer);
+
+        batch.setResourceTexture(ScatteringTask_ScatteringTableSlot, _scatteringTable);
+        batch.setResourceTexture(ScatteringTask_CurvatureMapSlot, curvatureFramebuffer->getRenderBuffer(0));
+        batch.setResourceTexture(ScatteringTask_DiffusedCurvatureMapSlot, diffusedFramebuffer->getRenderBuffer(0));
+        batch.setResourceTexture(ScatteringTask_NormalMapSlot, framebufferCache->getDeferredNormalTexture());
+        batch.setResourceTexture(ScatteringTask_AlbedoMapSlot, framebufferCache->getDeferredColorTexture());
+        batch.setResourceTexture(ScatteringTask_LinearMapSlot, framebufferCache->getDepthPyramidTexture());
+
         batch.draw(gpu::TRIANGLE_STRIP, 4);
 
         if (_showLUT) {
diff --git a/libraries/render-utils/src/SubsurfaceScattering.h b/libraries/render-utils/src/SubsurfaceScattering.h
index 15369492a0..818c18bcb9 100644
--- a/libraries/render-utils/src/SubsurfaceScattering.h
+++ b/libraries/render-utils/src/SubsurfaceScattering.h
@@ -19,12 +19,27 @@
 
 class SubsurfaceScatteringConfig : public render::Job::Config {
     Q_OBJECT
-    Q_PROPERTY(float depthThreshold MEMBER depthThreshold NOTIFY dirty)
+    Q_PROPERTY(float bentRed MEMBER bentRed NOTIFY dirty)
+    Q_PROPERTY(float bentGreen MEMBER bentGreen NOTIFY dirty)
+    Q_PROPERTY(float bentBlue MEMBER bentBlue NOTIFY dirty)
+    Q_PROPERTY(float bentScale MEMBER bentScale NOTIFY dirty)
+
+    Q_PROPERTY(float curvatureOffset MEMBER curvatureOffset NOTIFY dirty)
+    Q_PROPERTY(float curvatureScale MEMBER curvatureScale NOTIFY dirty)
+
+
     Q_PROPERTY(bool showLUT MEMBER showLUT NOTIFY dirty)
 public:
     SubsurfaceScatteringConfig() : render::Job::Config(true) {}
 
-    float depthThreshold{ 0.1f };
+    float bentRed{ 1.5f };
+    float bentGreen{ 0.8f };
+    float bentBlue{ 0.3f };
+    float bentScale{ 1.0f };
+
+    float curvatureOffset{ 0.012f };
+    float curvatureScale{ 0.25f };
+
     bool showLUT{ true };
 
 signals:
@@ -33,7 +48,7 @@ signals:
 
 class SubsurfaceScattering {
 public:
-    using Inputs = render::VaryingPair<DeferredFrameTransformPointer, gpu::FramebufferPointer>;
+    using Inputs = render::VaryingTrio<DeferredFrameTransformPointer, gpu::FramebufferPointer, gpu::FramebufferPointer>;
     using Config = SubsurfaceScatteringConfig;
     using JobModel = render::Job::ModelIO<SubsurfaceScattering, Inputs, gpu::FramebufferPointer, Config>;
 
@@ -41,9 +56,6 @@ public:
 
     void configure(const Config& config);
     void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs, gpu::FramebufferPointer& scatteringFramebuffer);
-    
-    float getCurvatureDepthThreshold() const { return _parametersBuffer.get<Parameters>().curvatureInfo.x; }
-
 
     static gpu::TexturePointer generatePreIntegratedScattering(RenderArgs* args);
 
@@ -53,9 +65,7 @@ private:
     // Class describing the uniform buffer with all the parameters common to the AO shaders
     class Parameters {
     public:
-        // Resolution info
-        glm::vec4 resolutionInfo { -1.0f, 0.0f, 0.0f, 0.0f };
-        // Curvature algorithm
+        glm::vec4 normalBentInfo { 0.0f };
         glm::vec4 curvatureInfo{ 0.0f };
 
         Parameters() {}
diff --git a/libraries/render-utils/src/debug_deferred_buffer.slf b/libraries/render-utils/src/debug_deferred_buffer.slf
index a6028f3c95..4b8e8d48ce 100644
--- a/libraries/render-utils/src/debug_deferred_buffer.slf
+++ b/libraries/render-utils/src/debug_deferred_buffer.slf
@@ -18,6 +18,7 @@ uniform sampler2D pyramidMap;
 uniform sampler2D occlusionMap;
 uniform sampler2D occlusionBlurredMap;
 uniform sampler2D curvatureMap;
+uniform sampler2D diffusedCurvatureMap;
 uniform sampler2D scatteringMap;
 
 in vec2 uv;
diff --git a/libraries/render-utils/src/subsurfaceScattering_drawScattering.slf b/libraries/render-utils/src/subsurfaceScattering_drawScattering.slf
index ae78ab4577..bbe4af7335 100644
--- a/libraries/render-utils/src/subsurfaceScattering_drawScattering.slf
+++ b/libraries/render-utils/src/subsurfaceScattering_drawScattering.slf
@@ -14,47 +14,49 @@
 <$declareDeferredFrameTransform()$>
 
 
-
-
-vec2 signNotZero(vec2 v) {
-    return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);
+uniform sampler2D linearDepthMap;
+float getZEye(ivec2 pixel) {
+    return -texelFetch(linearDepthMap, pixel, 0).x;
+}
+float getZEyeLinear(vec2 texcoord) {
+    return -texture(linearDepthMap, texcoord).x;
 }
 
-vec3 oct_to_float32x3(in vec2 e) {
-    vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));
-    if (v.z < 0) {
-        v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);
-    }
-    return normalize(v);
+<@include DeferredBufferRead.slh@>
+
+
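+// Schlick's approximation of the Fresnel reflectance term.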
+vec3 fresnelSchlick(vec3 fresnelColor, vec3 lightDir, vec3 halfDir) {
+    return fresnelColor + (1.0 - fresnelColor) * pow(1.0 - clamp(dot(lightDir, halfDir), 0.0, 1.0), 5);
 }
 
-vec2 unorm8x3_to_snorm12x2(vec3 u) {
-    u *= 255.0;
-    u.y *= (1.0 / 16.0);
-    vec2 s = vec2(  u.x * 16.0 + floor(u.y),
-    fract(u.y) * (16.0 * 256.0) + u.z);
-    return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));
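+// GGX-style microfacet distribution term; gloss2 plays the role of alpha squared (roughness to the fourth power).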
+float specularDistribution(float roughness, vec3 normal, vec3 halfDir) {
+    float ndoth = clamp(dot(halfDir, normal), 0.0, 1.0);
+    float gloss2 = pow(0.001 + roughness, 4);
+    float denom = (ndoth * ndoth*(gloss2 - 1) + 1);
+    float power = gloss2 / (3.14159 * denom * denom);
+    return power;
 }
-vec3 unpackNormal(in vec3 p) {
-    return oct_to_float32x3(unorm8x3_to_snorm12x2(p));
+
+// Frag Shading returns the diffuse amount as W and the specular rgb as xyz
+vec4 evalPBRShading(vec3 fragNormal, vec3 fragLightDir, vec3 fragEyeDir, float metallic, vec3 fresnel, float roughness) {
+    // Diffuse Lighting
+    float diffuse = clamp(dot(fragNormal, fragLightDir), 0.0, 1.0);
+    
+    // Specular Lighting
+    vec3 halfDir = normalize(fragEyeDir + fragLightDir);
+    vec3 fresnelColor = fresnelSchlick(fresnel, fragLightDir, halfDir);
+    float power = specularDistribution(roughness, fragNormal, halfDir);
+    vec3 specular = power * fresnelColor * diffuse;
+
+    return vec4(specular, (1.0 - metallic) * diffuse * (1 - fresnelColor.x));
 }
 
+
+
 vec2 sideToFrameTexcoord(vec2 side, vec2 texcoordPos) {
     return vec2((texcoordPos.x + side.x) * side.y, texcoordPos.y);
 }
 
-uniform sampler2D normalMap;
-
-vec3 getRawNormal(vec2 texcoord) {
-    return texture(normalMap, texcoord).xyz;
-}
-
-vec3 getWorldNormal(vec2 texcoord) {
-    vec3 rawNormal = getRawNormal(texcoord);
-    return unpackNormal(rawNormal);
-}
-
-
 // the curvature texture
 uniform sampler2D curvatureMap;
 
@@ -62,6 +64,13 @@ vec4 fetchCurvature(vec2 texcoord) {
     return texture(curvatureMap, texcoord);
 }
 
+// the curvature texture
+uniform sampler2D diffusedCurvatureMap;
+
+vec4 fetchDiffusedCurvature(vec2 texcoord) {
+    return texture(diffusedCurvatureMap, texcoord);
+}
+
 
 uniform sampler2D scatteringLUT;
 
@@ -69,13 +78,33 @@ vec3 fetchBRDF(float LdotN, float curvature) {
     return texture(scatteringLUT, vec2( LdotN * 0.5 + 0.5, curvature)).xyz;
 }
 
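+// Sample the pre-integrated BRDF once per color channel, using a per-channel N.L.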
+vec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {
+    return vec3(
+        fetchBRDF(LdotNSpectrum.r, curvature).r,
+        fetchBRDF(LdotNSpectrum.g, curvature).g,
+        fetchBRDF(LdotNSpectrum.b, curvature).b
+    );
+}
+
 // Scattering parameters
-float normalBendFactor = 1.0f;
-float normalBendR = 1.5f;
-float normalBendG = 0.8f;
-float normalBendB = 0.3f;
-float scatterBase = 0.012f;
-float scatterCurve = 0.25f;
+
+struct ScatteringParameters {
+    vec4 normalBendInfo; // R, G, B, factor
+    vec4 curvatureInfo;// Offset, Scale
+};
+
+uniform scatteringParamsBuffer {
+    ScatteringParameters parameters;
+};
+
+vec3 getBendFactor() {
+    return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;
+}
+
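+// Remap the packed curvature using curvatureInfo = (offset, scale) from the parameters buffer.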
+float unpackCurvature(float packedCurvature) {
+    return abs(packedCurvature * 2 - 1) * 0.5f * parameters.curvatureInfo.y + parameters.curvatureInfo.x;
+}
+
 
 in vec2 varTexCoord0;
 out vec4 _fragColor;
@@ -83,43 +112,69 @@ out vec4 _fragColor;
 uniform vec3 uniformLightVector = vec3(1.0);
 
 void main(void) {
-  //  DeferredTransform deferredTransform = getDeferredTransform();
- //   DeferredFragment frag = unpackDeferredFragment(deferredTransform, varTexCoord0);
 
-    vec3 normal = getWorldNormal(varTexCoord0);
-    vec4 diffusedCurvature = fetchCurvature(varTexCoord0);
+    DeferredFragment fragment = unpackDeferredFragmentNoPosition(varTexCoord0);
+
+    vec3 normal = fragment.normal; // .getWorldNormal(varTexCoord0);
+    vec4 blurredCurvature = fetchCurvature(varTexCoord0);
+    vec4 diffusedCurvature = fetchDiffusedCurvature(varTexCoord0);
+
+    // --> Get curvature data
+    vec3 bentNormalHigh = normalize( (blurredCurvature.xyz - 0.5f) * 2.0f );
+    vec3 bentNormalLow = normalize( (diffusedCurvature.xyz - 0.5f) * 2.0f );
+    float curvature = unpackCurvature(diffusedCurvature.w);
+
 
-    // --> Calculate bent normals.
-    vec3 bentNormalN = normal;
-    vec3 bentNormalR = normalize( (diffusedCurvature.xyz - 0.5f) * 2.0f );
-    float curvature = abs(diffusedCurvature.w * 2 - 1) * 0.5f * scatterCurve + scatterBase;
  //   _fragColor = vec4(vec3(diffusedCurvature.xyz), 1.0);
 
        // --> Calculate the light vector.
     vec3 lightVector = normalize(uniformLightVector); //normalize(lightPos - sourcePos.xyz);
     
- //   _fragColor = vec4(fetchBRDF(dot(bentNormalR, lightVector), abs(diffusedCurvature.w * 2 - 1)), 1.0);
-
- //    _fragColor = vec4(vec3(abs(dot(bentNormalR, lightVector))), 1.0);
-     _fragColor = vec4(vec3(varTexCoord0, 0.0), 1.0);
-
+   // _fragColor = vec4(fetchBRDF(dot(bentNormalR, lightVector), abs(diffusedCurvature.w * 2 - 1)), 1.0);
+   //  _fragColor = vec4(vec3(abs(dot(bentNormalR, lightVector))), 1.0);
+ //    _fragColor = vec4(vec3(varTexCoord0, 0.0), 1.0);
   //  _fragColor = vec4(vec3(bentNormalR * 0.5 + 0.5), 1.0);
 
 
-/*
-        // --> Optimise for skin diffusion profile.
-        float diffuseBlendedR = dot(normalize(mix( bentNormalN.xyz, bentNormalN, normalBendR * normalBendFactor)), lightVector);
-        float diffuseBlendedG = dot(normalize(mix(normal.xyz, bentNormalN, normalBendG * normalBendFactor)), lightVector);
-        float diffuseBlendedB = dot(normalize(mix(normal.xyz, bentNormalN, normalBendB * normalBendFactor)), lightVector);
-
+    vec3 rS = bentNormalHigh;
     
+    vec3 bendFactorSpectrum = getBendFactor();
+    vec3 rN = normalize(mix(normal, bentNormalLow, bendFactorSpectrum.x));
+    vec3 gN = normalize(mix(bentNormalHigh, bentNormalLow, bendFactorSpectrum.y));
+    vec3 bN = normalize(mix(bentNormalHigh, bentNormalLow, bendFactorSpectrum.z));
+
+    vec3 NdotLSpectrum  = vec3(dot(rN, lightVector), dot(gN, lightVector), dot(bN, lightVector));
+
     // --> Look up the pre-integrated curvature-dependent BDRF textures
-    vec3 bdrfR = fetchBRDF(diffuseBlendedR, curvature);
-    vec3 bdrfG = fetchBRDF(diffuseBlendedG, curvature);
-    vec3 bdrfB = fetchBRDF(diffuseBlendedB, curvature);
-    vec3 bdrf = vec3( bdrfR.x, bdrfG.y, bdrfB.z);
-     bdrf *= bdrf;
-    _fragColor = vec4(vec3(bdrf.xyz), 1.0);*/
+    vec3 bdrf = fetchBRDFSpectrum(NdotLSpectrum, curvature);
+
+    // Pixel being shaded
+    ivec2 pixelPos;
+    vec2 texcoordPos;
+    ivec4 stereoSide;
+    ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);
+    vec2 stereoSideClip = vec2(stereoSide.x, (isStereo() ? 0.5 : 1.0));
+    vec2 frameTexcoordPos = sideToFrameTexcoord(stereoSideClip, texcoordPos);
+
+    // Fetch the z under the pixel (stereo or not)
+    float Zeye = getZEye(framePixelPos);
+
+    vec3 worldNormal = getWorldNormal(frameTexcoordPos);
+
+    // The position of the pixel fragment in Eye space then in world space
+    vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, texcoordPos);
+    vec3 fragEyeDir = -(frameTransform._viewInverse * vec4(normalize(eyePos), 0.0)).xyz;
+    vec3 fresnel = vec3(0.03); // Default Di-electric fresnel value
+    if (fragment.metallic > 0.5) {
+        fresnel = albedo;
+        fragment.metallic = 1.0;
+    }
+
+    vec4 shading = evalPBRShading(rS, lightVector, fragEyeDir, fragment.metallic, fresnel, fragment.roughness);
+    _fragColor = vec4(shading.w * albedo * vec3(bdrf.xyz), 1.0);
+
+
 }
 
 
diff --git a/libraries/render/src/render/BlurTask.cpp b/libraries/render/src/render/BlurTask.cpp
index 004ac079c0..3f9a1cf111 100644
--- a/libraries/render/src/render/BlurTask.cpp
+++ b/libraries/render/src/render/BlurTask.cpp
@@ -72,7 +72,76 @@ void BlurParams::setDepthThreshold(float threshold) {
     }
 }
 
-BlurGaussian::BlurGaussian() {
+BlurInOutResource::BlurInOutResource(bool generateOutputFramebuffer) :
+_generateOutputFramebuffer(generateOutputFramebuffer)
+{
+
+}
+
+bool BlurInOutResource::updateResources(const gpu::FramebufferPointer& sourceFramebuffer, Resources& blurringResources) {
+    if (!sourceFramebuffer) {
+        return false;
+    }
+
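+    // Lazily create the intermediate blurred framebuffer, or resize it to match the source.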
+    if (!_blurredFramebuffer) {
+        _blurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+
+        // attach depthStencil if present in source
+        if (sourceFramebuffer->hasDepthStencil()) {
+            _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
+        }
+        auto blurringSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT);
+        auto blurringTarget = gpu::TexturePointer(gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), blurringSampler));
+        _blurredFramebuffer->setRenderBuffer(0, blurringTarget);
+    } else {
+        // It would be easier to just call resize on the blurred framebuffer and let it do the work if needed, but the source might lose its depth buffer when doing so.
+        if ((_blurredFramebuffer->getWidth() != sourceFramebuffer->getWidth()) || (_blurredFramebuffer->getHeight() != sourceFramebuffer->getHeight())) {
+            _blurredFramebuffer->resize(sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), sourceFramebuffer->getNumSamples());
+            if (sourceFramebuffer->hasDepthStencil()) {
+                _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
+            }
+        }
+    }
+
+    blurringResources.sourceTexture = sourceFramebuffer->getRenderBuffer(0);
+    blurringResources.blurringFramebuffer = _blurredFramebuffer;
+    blurringResources.blurringTexture = _blurredFramebuffer->getRenderBuffer(0);
+
+    if (_generateOutputFramebuffer) {
+        // The job outputs the blur result into a new framebuffer spawned here.
+        // Let's make sure it's ready for this.
+        if (!_outputFramebuffer) {
+            _outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+
+            // attach depthStencil if present in source
+            if (sourceFramebuffer->hasDepthStencil()) {
+                _outputFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
+            }
+            auto blurringSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT);
+            auto blurringTarget = gpu::TexturePointer(gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), blurringSampler));
+            _outputFramebuffer->setRenderBuffer(0, blurringTarget);
+        } else {
+            if ((_outputFramebuffer->getWidth() != sourceFramebuffer->getWidth()) || (_outputFramebuffer->getHeight() != sourceFramebuffer->getHeight())) {
+                _outputFramebuffer->resize(sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), sourceFramebuffer->getNumSamples());
+                if (sourceFramebuffer->hasDepthStencil()) {
+                    _outputFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
+                }
+            }
+        }
+
+        // Should be good to use the output Framebuffer as final
+        blurringResources.finalFramebuffer = _outputFramebuffer;
+    } else {
+        // Just reuse the input as the output and blur it in place.
+        blurringResources.finalFramebuffer = sourceFramebuffer;
+    }
+
+    return true;
+}
+
+BlurGaussian::BlurGaussian(bool generateOutputFramebuffer) :
+    _inOutResources(generateOutputFramebuffer)
+{
     _parameters = std::make_shared<BlurParams>();
 }
 
@@ -120,57 +189,24 @@ gpu::PipelinePointer BlurGaussian::getBlurHPipeline() {
     return _blurHPipeline;
 }
 
-bool BlurGaussian::updateBlurringResources(const gpu::FramebufferPointer& sourceFramebuffer, BlurringResources& blurringResources) {
-    if (!sourceFramebuffer) {
-        return false;
-    }
-
-    if (!_blurredFramebuffer) {
-        _blurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
-
-        // attach depthStencil if present in source
-        if (sourceFramebuffer->hasDepthStencil()) {
-            _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
-        }
-        auto blurringSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT);
-        auto blurringTarget = gpu::TexturePointer(gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), blurringSampler));
-        _blurredFramebuffer->setRenderBuffer(0, blurringTarget);
-    }
-    else {
-        // it would be easier to just call resize on the bluredFramebuffer and let it work if needed but the source might loose it's depth buffer when doing so
-        if ((_blurredFramebuffer->getWidth() != sourceFramebuffer->getWidth()) || (_blurredFramebuffer->getHeight() != sourceFramebuffer->getHeight())) {
-            _blurredFramebuffer->resize(sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), sourceFramebuffer->getNumSamples());
-            if (sourceFramebuffer->hasDepthStencil()) {
-                _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
-            }
-        }
-    }
-
-    blurringResources.sourceTexture = sourceFramebuffer->getRenderBuffer(0);
-    blurringResources.blurringFramebuffer = _blurredFramebuffer;
-    blurringResources.blurringTexture = _blurredFramebuffer->getRenderBuffer(0);
-    blurringResources.finalFramebuffer = sourceFramebuffer;
-
-    return true;
-}
-
 void BlurGaussian::configure(const Config& config) {
     _parameters->setFilterRadiusScale(config.filterScale);
 }
 
 
-void BlurGaussian::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceFramebuffer) {
+void BlurGaussian::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceFramebuffer, gpu::FramebufferPointer& blurredFramebuffer) {
     assert(renderContext->args);
     assert(renderContext->args->hasViewFrustum());
 
     RenderArgs* args = renderContext->args;
 
 
-    BlurringResources blurringResources;
-    if (!updateBlurringResources(sourceFramebuffer, blurringResources)) {
+    BlurInOutResource::Resources blurringResources;
+    if (!_inOutResources.updateResources(sourceFramebuffer, blurringResources)) {
         // early exit if no valid blurring resources
         return;
     }
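+    // Expose the final framebuffer (the spawned output framebuffer, or the source itself when blurring in place) as this job's output.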
+    blurredFramebuffer = blurringResources.finalFramebuffer;
 
     auto blurVPipeline = getBlurVPipeline();
     auto blurHPipeline = getBlurHPipeline();
@@ -191,6 +227,10 @@ void BlurGaussian::run(const SceneContextPointer& sceneContext, const RenderCont
         batch.draw(gpu::TRIANGLE_STRIP, 4);
 
         batch.setFramebuffer(blurringResources.finalFramebuffer);
+        if (_inOutResources._generateOutputFramebuffer) {
+            batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0));
+        }
+
         batch.setPipeline(blurHPipeline);
         batch.setResourceTexture(BlurTask_SourceSlot, blurringResources.blurringTexture);
         batch.draw(gpu::TRIANGLE_STRIP, 4);
@@ -203,7 +243,7 @@ void BlurGaussian::run(const SceneContextPointer& sceneContext, const RenderCont
 
 
 BlurGaussianDepthAware::BlurGaussianDepthAware(bool generateOutputFramebuffer) :
-    _generateOutputFramebuffer(generateOutputFramebuffer)
+    _inOutResources(generateOutputFramebuffer)
 {
     _parameters = std::make_shared<BlurParams>();
 }
@@ -254,67 +294,6 @@ gpu::PipelinePointer BlurGaussianDepthAware::getBlurHPipeline() {
     return _blurHPipeline;
 }
 
-bool BlurGaussianDepthAware::updateBlurringResources(const gpu::FramebufferPointer& sourceFramebuffer, BlurringResources& blurringResources) {
-    if (!sourceFramebuffer) {
-        return false;
-    }
-
-    if (!_blurredFramebuffer) {
-        _blurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
-
-        // attach depthStencil if present in source
-        if (sourceFramebuffer->hasDepthStencil()) {
-            _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
-        }
-        auto blurringSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT);
-        auto blurringTarget = gpu::TexturePointer(gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), blurringSampler));
-        _blurredFramebuffer->setRenderBuffer(0, blurringTarget);
-    } else {
-        // it would be easier to just call resize on the bluredFramebuffer and let it work if needed but the source might loose it's depth buffer when doing so
-        if ((_blurredFramebuffer->getWidth() != sourceFramebuffer->getWidth()) || (_blurredFramebuffer->getHeight() != sourceFramebuffer->getHeight())) {
-            _blurredFramebuffer->resize(sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), sourceFramebuffer->getNumSamples());
-            if (sourceFramebuffer->hasDepthStencil()) {
-                _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
-            }
-        }
-    }
-    
-    blurringResources.sourceTexture = sourceFramebuffer->getRenderBuffer(0);
-    blurringResources.blurringFramebuffer = _blurredFramebuffer;
-    blurringResources.blurringTexture = _blurredFramebuffer->getRenderBuffer(0);
-
-    if (_generateOutputFramebuffer) {
-        // The job output the blur result in a new Framebuffer spawning here.
-        // Let s make sure it s ready for this
-        if (!_outputFramebuffer) {
-            _outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
-            
-            // attach depthStencil if present in source
-            if (sourceFramebuffer->hasDepthStencil()) {
-                _outputFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
-            }
-            auto blurringSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT);
-            auto blurringTarget = gpu::TexturePointer(gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), blurringSampler));
-            _outputFramebuffer->setRenderBuffer(0, blurringTarget);
-        } else {
-            if ((_outputFramebuffer->getWidth() != sourceFramebuffer->getWidth()) || (_outputFramebuffer->getHeight() != sourceFramebuffer->getHeight())) {
-                _outputFramebuffer->resize(sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), sourceFramebuffer->getNumSamples());
-                if (sourceFramebuffer->hasDepthStencil()) {
-                    _outputFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
-                }
-            }
-        }
-        
-        // Should be good to use the output Framebuffer as final
-        blurringResources.finalFramebuffer = _outputFramebuffer;
-    } else {
-        // Just the reuse the input as output to blur itself.
-        blurringResources.finalFramebuffer = sourceFramebuffer;
-    }
-    
-    return true;
-}
-
 void BlurGaussianDepthAware::configure(const Config& config) {
     _parameters->setFilterRadiusScale(config.filterScale);
     _parameters->setDepthThreshold(config.depthThreshold);
@@ -330,8 +309,8 @@ void BlurGaussianDepthAware::run(const SceneContextPointer& sceneContext, const
     auto& sourceFramebuffer = SourceAndDepth.first. template get<gpu::FramebufferPointer>();//getFirst();
     auto& depthTexture = SourceAndDepth.second. template get<gpu::TexturePointer>();//getSecond();
 
-    BlurringResources blurringResources;
-    if (!updateBlurringResources(sourceFramebuffer, blurringResources)) {
+    BlurInOutResource::Resources blurringResources;
+    if (!_inOutResources.updateResources(sourceFramebuffer, blurringResources)) {
         // early exit if no valid blurring resources
         return;
     }
diff --git a/libraries/render/src/render/BlurTask.h b/libraries/render/src/render/BlurTask.h
index 899b1ffe12..1f3b1000d7 100644
--- a/libraries/render/src/render/BlurTask.h
+++ b/libraries/render/src/render/BlurTask.h
@@ -50,13 +50,34 @@ public:
 };
 using BlurParamsPointer = std::shared_ptr<BlurParams>;
 
+class BlurInOutResource {
+public:
+    BlurInOutResource(bool generateOutputFramebuffer = false);
+
+    struct Resources {
+        gpu::TexturePointer sourceTexture;
+        gpu::FramebufferPointer blurringFramebuffer;
+        gpu::TexturePointer blurringTexture;
+        gpu::FramebufferPointer finalFramebuffer;
+    };
+
+    bool updateResources(const gpu::FramebufferPointer& sourceFramebuffer, Resources& resources);
+
+    gpu::FramebufferPointer _blurredFramebuffer;
+
+    // The output framebuffer, defined when the job needs to write the result into a new framebuffer rather than in place in the input buffer.
+    gpu::FramebufferPointer _outputFramebuffer;
+    bool _generateOutputFramebuffer{ false };
+};
+
+
 class BlurGaussianConfig : public Job::Config {
     Q_OBJECT
     Q_PROPERTY(bool enabled MEMBER enabled NOTIFY dirty) // expose enabled flag
     Q_PROPERTY(float filterScale MEMBER filterScale NOTIFY dirty) // expose enabled flag
 public:
 
-    float filterScale{ 2.0f };
+    float filterScale{ 1.0f };
 signals :
     void dirty();
 
@@ -67,12 +88,12 @@ protected:
 class BlurGaussian {
 public:
     using Config = BlurGaussianConfig;
-    using JobModel = Job::ModelI<BlurGaussian, gpu::FramebufferPointer, Config>;
+    using JobModel = Job::ModelIO<BlurGaussian, gpu::FramebufferPointer, gpu::FramebufferPointer, Config>;
 
-    BlurGaussian();
+    BlurGaussian(bool generateOutputFramebuffer = false);
 
     void configure(const Config& config);
-    void run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceFramebuffer);
+    void run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceFramebuffer, gpu::FramebufferPointer& blurredFramebuffer);
 
 protected:
 
@@ -84,15 +105,7 @@ protected:
     gpu::PipelinePointer getBlurVPipeline();
     gpu::PipelinePointer getBlurHPipeline();
 
-    gpu::FramebufferPointer _blurredFramebuffer;
-
-    struct BlurringResources {
-        gpu::TexturePointer sourceTexture;
-        gpu::FramebufferPointer blurringFramebuffer;
-        gpu::TexturePointer blurringTexture;
-        gpu::FramebufferPointer finalFramebuffer;
-    };
-    bool updateBlurringResources(const gpu::FramebufferPointer& sourceFramebuffer, BlurringResources& blurringResources);
+    BlurInOutResource _inOutResources;
 };
 
 class BlurGaussianDepthAwareConfig : public BlurGaussianConfig {
@@ -127,22 +140,10 @@ protected:
     gpu::PipelinePointer getBlurVPipeline();
     gpu::PipelinePointer getBlurHPipeline();
 
-    gpu::FramebufferPointer _blurredFramebuffer;
-
-    // the output framebuffer defined if the job needs to output the result in a new framebuffer and not in place in th einput buffer
-    gpu::FramebufferPointer _outputFramebuffer;
-    bool _generateOutputFramebuffer { false };
-    
-    struct BlurringResources {
-        gpu::TexturePointer sourceTexture;
-        gpu::FramebufferPointer blurringFramebuffer;
-        gpu::TexturePointer blurringTexture;
-        gpu::FramebufferPointer finalFramebuffer;
-    };
-    bool updateBlurringResources(const gpu::FramebufferPointer& sourceFramebuffer, BlurringResources& blurringResources);
+    BlurInOutResource _inOutResources;
 };
 
 
 }
 
-#endif // hifi_render_DrawTask_h
+#endif // hifi_render_BlurTask_h
diff --git a/libraries/render/src/render/BlurTask.slh b/libraries/render/src/render/BlurTask.slh
index a8c96c12b2..2ed4021967 100644
--- a/libraries/render/src/render/BlurTask.slh
+++ b/libraries/render/src/render/BlurTask.slh
@@ -111,7 +111,7 @@ vec4 pixelShaderGaussianDepthAware(vec2 texcoord, vec2 direction, vec2 pixelStep
 
         
         // If the difference in depth is huge, we lerp color back.
-        float s = clamp(depthThreshold * distanceToProjectionWindow * filterScale * abs(srcDepth - sampleDepth), 0.0, 1.0);
+        float s = clamp(depthThreshold * distanceToProjectionWindow /* * filterScale*/ * abs(srcDepth - sampleDepth), 0.0, 1.0);
         srcSample = mix(srcSample, sampleCenter, s);
 
         // Accumulate.
diff --git a/libraries/render/src/render/Task.h b/libraries/render/src/render/Task.h
index 44cb10aead..28d8d6151b 100644
--- a/libraries/render/src/render/Task.h
+++ b/libraries/render/src/render/Task.h
@@ -11,6 +11,7 @@
 
 #ifndef hifi_render_Task_h
 #define hifi_render_Task_h
+#include <tuple>
 
 #include <QtCore/qobject.h>
 
@@ -91,19 +92,6 @@ using VaryingPairBase = std::pair<Varying, Varying>;
 template <> void varyingGet(const VaryingPairBase& data, uint8_t index, Varying& var);
 template <> uint8_t varyingLength(const VaryingPairBase& data);
 
-/*
-class VaryingPairBase {
-    public:
-    Varying first;
-    Varying second;
-    
-    
-        //  template < class T0, class T1> VaryingPairBase() : Parent(Varying(T0()), Varying(T1())) {}
-      //  VaryingPairBase(const VaryingPairBase& pair) : Parent(pair.first, pair.second) {}
-        VaryingPairBase(const Varying& _first, const Varying& _second) : first(_first), second(_second) {}
-        
-};
-  */
 template < class T0, class T1 >
 class VaryingPair : public VaryingPairBase {
 public:
@@ -120,48 +108,41 @@ public:
     T1& editSecond() { return second.edit<T1>(); }
 };
 
-    
-    
- /*   template <class T> Varying varyingGet(const T& data, uint8_t index) {
-        return Varying(T());
-    }*/
-
-//template <T0, T1> Varying varyingGet(template VaryingPair<T0, T1>& data, uint8_t index);
-//template <> uint8_t varyingLength(template VaryingPair<T0, T1>& data);
-
-
-/*
-template < class T0, class T1 >
-class VaryingPair : Varying {
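+// A Varying holding three typed sub-varyings, with getFirst/getSecond/getThird accessors mirroring VaryingPair.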
+template <class T0, class T1, class T2>
+class VaryingTrio : public std::tuple<Varying, Varying, Varying> {
 public:
-    using Parent = Varying;
-    using Pair = std::pair<Varying, Varying>;
-    
-    VaryingPair() : Parent(Pair(Varying(T0()), Varying(T1()))) {}
-    VaryingPair(const Varying& first, const Varying& second) : Parent(Pair(first, second)) {}
-    
-    
-    Pair& editPair() { return edit<Pair>(); }
-    const Pair& getPair() const { return get<Pair>(); }
+    using Parent = std::tuple<Varying, Varying, Varying>;
 
-    const T0& getFirst() const { return getPair().first.template get<T0>(); }
-    T0& editFirst() { return editPair().first.template edit<T0>(); }
-    
-    const T1& getSecond() const { return getPair().second.template get<T1>(); }
-    T1& editSecond() { return editPair().second.template edit<T1>(); }
-    
-    // access potential sub varyings contained in this one.
-    virtual Varying operator[] (uint8_t index) const {
-        if (index == 0) {
-            return getPair().first;
-        } else {
-            return getPair().second;
-        } }
-    virtual uint8_t length() const { return 2; }
-    
+    VaryingTrio() : Parent(Varying(T0()), Varying(T1()), Varying(T2())) {}
+    VaryingTrio(const VaryingTrio& trio) : Parent(std::get<0>(trio), std::get<1>(trio), std::get<2>(trio)) {}
+    VaryingTrio(const Varying& first, const Varying& second, const Varying& third) : Parent(first, second, third) {}
+
+    const T0& getFirst() const { return std::get<0>((*this)).get<T0>(); }
+    T0& editFirst() { return std::get<0>((*this)).edit<T0>(); }
+
+    const T1& getSecond() const { return std::get<1>((*this)).get<T1>(); }
+    T1& editSecond() { return std::get<1>((*this)).edit<T1>(); }
+
+    const T2& getThird() const { return std::get<2>((*this)).get<T2>(); }
+    T2& editThird() { return std::get<2>((*this)).edit<T2>(); }
 };
- */
-    
+/*
+template <class... _Types>
+class VaryingTuple : public std::tuple<_Types>{
+public:
+    using Parent = std::tuple<_Types>;
+
+    VaryingPair() : Parent(Varying(T0()), Varying(T1())) {}
+    VaryingPair(const VaryingPair& pair) : Parent(pair.first, pair.second) {}
+    VaryingPair(const Varying& first, const Varying& second) : Parent(first, second) {}
+
+    const T0& getFirst() const { return first.get<T0>(); }
+    T0& editFirst() { return first.edit<T0>(); }
+
+    const T1& getSecond() const { return second.get<T1>(); }
+    T1& editSecond() { return second.edit<T1>(); }
+};*/
+
 template < class T, int NUM >
 class VaryingArray : public std::array<Varying, NUM> {
 public:
diff --git a/scripts/developer/utilities/render/framebuffer.qml b/scripts/developer/utilities/render/framebuffer.qml
index 4ed0b7dcf0..9727829880 100644
--- a/scripts/developer/utilities/render/framebuffer.qml
+++ b/scripts/developer/utilities/render/framebuffer.qml
@@ -49,6 +49,8 @@ Column {
                 "Pyramid Depth",
                 "Curvature",
                 "NormalCurvature",
+                "DiffusedCurvature",
+                "DiffusedNormalCurvature",
                 "Scattering",
                 "Ambient Occlusion",
                 "Ambient Occlusion Blurred",
diff --git a/scripts/developer/utilities/render/surfaceGeometryPass.qml b/scripts/developer/utilities/render/surfaceGeometryPass.qml
index 4ec397addd..608731128b 100644
--- a/scripts/developer/utilities/render/surfaceGeometryPass.qml
+++ b/scripts/developer/utilities/render/surfaceGeometryPass.qml
@@ -32,13 +32,33 @@ Column {
 
         Column{
             Repeater {
-                model: [ "Blur Scale:filterScale:2.0", "Blur Depth Threshold:depthThreshold:100.0" ]
+                model: [ "Blur Scale:DiffuseCurvature:filterScale:2.0", "Blur Depth Threshold:DiffuseCurvature:depthThreshold:10.0", "Blur Scale2:DiffuseCurvature2:filterScale:2.0", "Blur Depth Threshold 2:DiffuseCurvature2:depthThreshold:10.0"]
                 ConfigSlider {
                     label: qsTr(modelData.split(":")[0])
                     integral: false
-                    config: Render.getConfig("DiffuseCurvature")
-                    property: modelData.split(":")[1]
-                    max: modelData.split(":")[2]
+                    config: Render.getConfig(modelData.split(":")[1])
+                    property: modelData.split(":")[2]
+                    max: modelData.split(":")[3]
+                    min: 0.0
+                }
+            }
+        }
+
+        Column{
+            Repeater {
+                model: [ "Scattering Bent Red:Scattering:bentRed:2.0",
+                         "Scattering Bent Green:Scattering:bentGreen:2.0",
+                         "Scattering Bent Blue:Scattering:bentBlue:2.0",
+                         "Scattering Bent Scale:Scattering:bentScale:2.0",
+                         "Scattering Curvature Offset:Scattering:curvatureOffset:1.0",
+                         "Scattering Curvature Scale:Scattering:curvatureScale:1.0",
+                          ]
+                ConfigSlider {
+                    label: qsTr(modelData.split(":")[0])
+                    integral: false
+                    config: Render.getConfig(modelData.split(":")[1])
+                    property: modelData.split(":")[2]
+                    max: modelData.split(":")[3]
                     min: 0.0
                 }
             }