diff --git a/libraries/render-utils/src/DebugDeferredBuffer.cpp b/libraries/render-utils/src/DebugDeferredBuffer.cpp
index 6dfec30b16..5de61df423 100644
--- a/libraries/render-utils/src/DebugDeferredBuffer.cpp
+++ b/libraries/render-utils/src/DebugDeferredBuffer.cpp
@@ -47,6 +47,7 @@ enum Slot {
     Lighting,
     Shadow,
     Pyramid,
+    Curvature,
     AmbientOcclusion,
     AmbientOcclusionBlurred
 };
@@ -138,6 +139,13 @@ static const std::string DEFAULT_PYRAMID_DEPTH_SHADER {
     " }"
 };
 
+static const std::string DEFAULT_CURVATURE_SHADER{
+    "vec4 getFragmentColor() {"
+    "    return vec4(texture(curvatureMap, uv).xyz, 1.0);"
+    //"    return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
+    " }"
+};
+
 static const std::string DEFAULT_AMBIENT_OCCLUSION_SHADER{
     "vec4 getFragmentColor() {"
     "    return vec4(vec3(texture(obscuranceMap, uv).x), 1.0);"
@@ -203,6 +211,8 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Mode mode, std::string cust
             return DEFAULT_SHADOW_SHADER;
         case PyramidDepthMode:
             return DEFAULT_PYRAMID_DEPTH_SHADER;
+        case CurvatureMode:
+            return DEFAULT_CURVATURE_SHADER;
         case AmbientOcclusionMode:
             return DEFAULT_AMBIENT_OCCLUSION_SHADER;
         case AmbientOcclusionBlurredMode:
@@ -257,6 +267,7 @@ const gpu::PipelinePointer& DebugDeferredBuffer::getPipeline(Mode mode, std::str
         slotBindings.insert(gpu::Shader::Binding("lightingMap", Lighting));
         slotBindings.insert(gpu::Shader::Binding("shadowMap", Shadow));
         slotBindings.insert(gpu::Shader::Binding("pyramidMap", Pyramid));
+        slotBindings.insert(gpu::Shader::Binding("curvatureMap", Curvature));
         slotBindings.insert(gpu::Shader::Binding("occlusionBlurredMap", AmbientOcclusionBlurred));
         gpu::Shader::makeProgram(*program, slotBindings);
         
@@ -288,6 +299,8 @@ void DebugDeferredBuffer::run(const SceneContextPointer& sceneContext, const Ren
     RenderArgs* args = renderContext->args;
 
     gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
+        batch.enableStereo(false);
+
         const auto geometryBuffer = DependencyManager::get<GeometryCache>();
         const auto framebufferCache = DependencyManager::get<FramebufferCache>();
         const auto textureCache = DependencyManager::get<TextureCache>();
@@ -313,6 +326,7 @@ void DebugDeferredBuffer::run(const SceneContextPointer& sceneContext, const Ren
         batch.setResourceTexture(Lighting, framebufferCache->getLightingTexture());
         batch.setResourceTexture(Shadow, lightStage.lights[0]->shadow.framebuffer->getDepthStencilBuffer());
         batch.setResourceTexture(Pyramid, framebufferCache->getDepthPyramidTexture());
+        batch.setResourceTexture(Curvature, framebufferCache->getCurvatureTexture());
         if (DependencyManager::get<DeferredLightingEffect>()->isAmbientOcclusionEnabled()) {
             batch.setResourceTexture(AmbientOcclusion, framebufferCache->getOcclusionTexture());
         } else {
diff --git a/libraries/render-utils/src/DebugDeferredBuffer.h b/libraries/render-utils/src/DebugDeferredBuffer.h
index 521dc13e0a..0af6d589e9 100644
--- a/libraries/render-utils/src/DebugDeferredBuffer.h
+++ b/libraries/render-utils/src/DebugDeferredBuffer.h
@@ -59,6 +59,7 @@ protected:
         LightingMode,
         ShadowMode,
         PyramidDepthMode,
+        CurvatureMode,
         AmbientOcclusionMode,
         AmbientOcclusionBlurredMode,
         CustomMode // Needs to stay last
diff --git a/libraries/render-utils/src/DeferredFrameTransform.cpp b/libraries/render-utils/src/DeferredFrameTransform.cpp
new file mode 100644
index 0000000000..1cb85058d8
--- /dev/null
+++ b/libraries/render-utils/src/DeferredFrameTransform.cpp
@@ -0,0 +1,70 @@
+//
+//  DeferredFrameTransform.cpp
+//  libraries/render-utils/src/
+//
+//  Created by Sam Gateau 6/3/2016.
+//  Copyright 2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+#include "DeferredFrameTransform.h"
+
+#include "gpu/Context.h"
+#include "render/Engine.h"
+
+DeferredFrameTransform::DeferredFrameTransform() {
+    FrameTransform frameTransform;
+    _frameTransformBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(FrameTransform), (const gpu::Byte*) &frameTransform));
+}
+
+void DeferredFrameTransform::update(RenderArgs* args) {
+
+    // Update the depth info with near and far (same for stereo)
+    auto nearZ = args->getViewFrustum().getNearClip();
+    auto farZ = args->getViewFrustum().getFarClip();
+    _frameTransformBuffer.edit<FrameTransform>().depthInfo = glm::vec4(nearZ*farZ, farZ - nearZ, -farZ, 0.0f);
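+    // (The shaders recover linear eye-space Z as depthInfo.x / (Zdb * depthInfo.y + depthInfo.z);
+    // see evalZeyeFromZdb in DeferredTransform.slh.)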
+
+    _frameTransformBuffer.edit<FrameTransform>().pixelInfo = args->_viewport;
+
+    //_parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;
+
+    Transform cameraTransform;
+    args->getViewFrustum().evalViewTransform(cameraTransform);
+    cameraTransform.getMatrix(_frameTransformBuffer.edit<FrameTransform>().invView);
+
+    // Running in stereo?
+    bool isStereo = args->_context->isStereo();
+    if (!isStereo) {
+        // Eval the mono projection
+        mat4 monoProjMat;
+        args->getViewFrustum().evalProjectionMatrix(monoProjMat);
+        _frameTransformBuffer.edit<FrameTransform>().projection[0] = monoProjMat;
+        _frameTransformBuffer.edit<FrameTransform>().stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
+        _frameTransformBuffer.edit<FrameTransform>().invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
+
+    } else {
+
+        mat4 projMats[2];
+        mat4 eyeViews[2];
+        args->_context->getStereoProjections(projMats);
+        args->_context->getStereoViews(eyeViews);
+
+        for (int i = 0; i < 2; i++) {
+            // Compose the mono Eye space to Stereo clip space Projection Matrix
+            auto sideViewMat = projMats[i] * eyeViews[i];
+            _frameTransformBuffer.edit<FrameTransform>().projection[i] = sideViewMat;
+        }
+
+        _frameTransformBuffer.edit<FrameTransform>().stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
+        _frameTransformBuffer.edit<FrameTransform>().invpixelInfo = glm::vec4(1.0f / (float)(args->_viewport.z >> 1), 1.0f / args->_viewport.w, 0.0f, 0.0f);
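+        // Note: stereoInfo.y and invpixelInfo.x are per-eye values (half of the full viewport width)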
+
+    }
+}
+
+void GenerateDeferredFrameTransform::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, DeferredFrameTransformPointer& frameTransform) {
+    if (!frameTransform) {
+        frameTransform = std::make_shared<DeferredFrameTransform>();
+    }
+    frameTransform->update(renderContext->args);
+}
diff --git a/libraries/render-utils/src/DeferredFrameTransform.h b/libraries/render-utils/src/DeferredFrameTransform.h
new file mode 100644
index 0000000000..b6c3667c28
--- /dev/null
+++ b/libraries/render-utils/src/DeferredFrameTransform.h
@@ -0,0 +1,74 @@
+//
+//  DeferredFrameTransform.h
+//  libraries/render-utils/src/
+//
+//  Created by Sam Gateau 6/3/2016.
+//  Copyright 2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_DeferredFrameTransform_h
+#define hifi_DeferredFrameTransform_h
+
+#include "gpu/Resource.h"
+#include "render/DrawTask.h"
+
+class RenderArgs;
+
+// DeferredFrameTransform is a helper class gathering in one place the camera transform and frame
+// resolution needed by all the deferred rendering passes that consume the deferred buffers
+class DeferredFrameTransform {
+public:
+    using UniformBufferView = gpu::BufferView;
+
+    DeferredFrameTransform();
+
+    void update(RenderArgs* args);
+
+    UniformBufferView getFrameTransformBuffer() const { return _frameTransformBuffer; }
+
+protected:
+
+
+    // Class describing the uniform buffer with the transform info shared by the deferred rendering passes.
+    // It is updated every frame.
+    class FrameTransform {
+    public:
+        // Pixel info is { viewport origin (xy), viewport width and height (zw) }
+        glm::vec4 pixelInfo;
+        glm::vec4 invpixelInfo;
+        // Depth info is { n.f, f - n, -f}
+        glm::vec4 depthInfo;
+        // Stereo info is { isStereo, per-eye width in pixels }
+        glm::vec4 stereoInfo{ 0.0 };
+        // Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
+        glm::mat4 projection[2];
+        // Inv View matrix from eye space (mono) to world space
+        glm::mat4 invView;
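+        // Note: this layout must match the DeferredFrameTransform uniform block declared in DeferredTransform.slh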
+
+        FrameTransform() {}
+    };
+    UniformBufferView _frameTransformBuffer;
+
+   
+};
+
+using DeferredFrameTransformPointer = std::shared_ptr<DeferredFrameTransform>;
+
+
+
+
+class GenerateDeferredFrameTransform {
+public:
+    using JobModel = render::Job::ModelO<GenerateDeferredFrameTransform, DeferredFrameTransformPointer>;
+
+    GenerateDeferredFrameTransform() {}
+
+    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, DeferredFrameTransformPointer& frameTransform);
+
+private:
+};
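+
+// Typical wiring in a render task (see RenderDeferredTask.cpp in this change):
+//   const auto deferredFrameTransform = addJob<GenerateDeferredFrameTransform>("EvalDeferredFrameTransform");
+//   addJob<SurfaceGeometryPass>("SurfaceGeometry", deferredFrameTransform);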
+
+#endif // hifi_DeferredFrameTransform_h
diff --git a/libraries/render-utils/src/DeferredTransform.slh b/libraries/render-utils/src/DeferredTransform.slh
new file mode 100644
index 0000000000..25a62fca3d
--- /dev/null
+++ b/libraries/render-utils/src/DeferredTransform.slh
@@ -0,0 +1,95 @@
+<!
+//  DeferredTransform.slh
+//  libraries/render-utils/src
+//
+//  Created by Sam Gateau on 6/2/16.
+//  Copyright 2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+!>
+<@if not DEFERRED_TRANSFORM_SLH@>
+<@def DEFERRED_TRANSFORM_SLH@>
+
+<@func declareDeferredFrameTransform()@>
+
+struct DeferredFrameTransform {
+    vec4 _pixelInfo;
+    vec4 _invPixelInfo;
+    vec4 _depthInfo;
+    vec4 _stereoInfo;
+    mat4 _projection[2];
+    mat4 _invView;
+};
+
+uniform deferredFrameTransformBuffer {
+    DeferredFrameTransform frameTransform;
+};
+
+vec2 getWidthHeight(int resolutionLevel) {
+    return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);
+}
+
+vec2 getInvWidthHeight() {
+    return frameTransform._invPixelInfo.xy;
+}
+
+float getProjScaleEye() {
+    return frameTransform._projection[0][1][1];
+}
+
+float getProjScale(int resolutionLevel) {
+    return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;
+}
+mat4 getProjection(int side) {
+    return frameTransform._projection[side];
+}
+
+bool isStereo() {
+    return frameTransform._stereoInfo.x > 0.0f;
+}
+
+float getStereoSideWidth(int resolutionLevel) {
+    return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);
+}
+
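+// Returns the stereo side info for a fragment at x position xPos:
+// x = side index (0 left, 1 right), y = x offset of the side in pixels, z = side width in pixels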
+ivec3 getStereoSideInfo(int xPos, int resolutionLevel) {
+    int sideWidth = int(getStereoSideWidth(resolutionLevel));
+    return ivec3(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth);
+}
+
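+// Reconstruct the linear eye-space Z from a depth buffer value, using depthInfo = { n*f, f - n, -f }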
+float evalZeyeFromZdb(float depth) {
+    return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);
+}
+
+vec3 evalEyeNormal(vec3 C) {
+    //return normalize(cross(dFdy(C), dFdx(C)));
+    return normalize(cross(dFdx(C), dFdy(C)));
+}
+
+vec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {
+    // compute the view space position using the depth
+    // basically manually pick the proj matrix components to do the inverse
+    float Xe = (-Zeye * (texcoord.x * 2.0 - 1.0) - Zeye * frameTransform._projection[side][2][0] - frameTransform._projection[side][3][0]) / frameTransform._projection[side][0][0];
+    float Ye = (-Zeye * (texcoord.y * 2.0 - 1.0) - Zeye * frameTransform._projection[side][2][1] - frameTransform._projection[side][3][1]) / frameTransform._projection[side][1][1];
+    return vec3(Xe, Ye, Zeye);
+}
+
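+// From the raw gl_FragCoord, return the framebuffer pixel position and output the pixel position
+// within the current stereo side, the normalized [0,1] coordinate within that side, and the side info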
+ivec2 getPixelPosNclipPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 nclipPos, out ivec3 stereoSide) {
+    ivec2 fragPos = ivec2(glFragCoord.xy);
+
+    stereoSide = getStereoSideInfo(fragPos.x, 0);
+
+    pixelPos = fragPos;
+    pixelPos.x -= stereoSide.y;
+
+    nclipPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();
+    
+    return fragPos;
+}
+
+<@endfunc@>
+
+
+
+<@endif@>
\ No newline at end of file
diff --git a/libraries/render-utils/src/FramebufferCache.cpp b/libraries/render-utils/src/FramebufferCache.cpp
index 2d322b1726..63ae7e521e 100644
--- a/libraries/render-utils/src/FramebufferCache.cpp
+++ b/libraries/render-utils/src/FramebufferCache.cpp
@@ -111,6 +111,13 @@ void FramebufferCache::createPrimaryFramebuffer() {
     _depthPyramidFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
     
     
+   
+    _curvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+    _curvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+    _curvatureFramebuffer->setRenderBuffer(0, _curvatureTexture);
+    _curvatureFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
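+    // The curvature framebuffer reuses the primary depth/stencil buffer so the curvature pass can
+    // stencil-reject background pixels (see SurfaceGeometryPass::getCurvaturePipeline)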
+
+
     resizeAmbientOcclusionBuffers();
 }
 
@@ -245,6 +252,21 @@ gpu::TexturePointer FramebufferCache::getDepthPyramidTexture() {
     return _depthPyramidTexture;
 }
 
+gpu::FramebufferPointer FramebufferCache::getCurvatureFramebuffer() {
+    if (!_curvatureFramebuffer) {
+        createPrimaryFramebuffer();
+    }
+    return _curvatureFramebuffer;
+}
+
+gpu::TexturePointer FramebufferCache::getCurvatureTexture() {
+    if (!_curvatureTexture) {
+        createPrimaryFramebuffer();
+    }
+    return _curvatureTexture;
+}
+
+
 void FramebufferCache::setAmbientOcclusionResolutionLevel(int level) {
     const int MAX_AO_RESOLUTION_LEVEL = 4;
     level = std::max(0, std::min(level, MAX_AO_RESOLUTION_LEVEL));
diff --git a/libraries/render-utils/src/FramebufferCache.h b/libraries/render-utils/src/FramebufferCache.h
index 7c7c309572..0fb9b9b2ee 100644
--- a/libraries/render-utils/src/FramebufferCache.h
+++ b/libraries/render-utils/src/FramebufferCache.h
@@ -47,6 +47,9 @@ public:
     gpu::FramebufferPointer getDepthPyramidFramebuffer();
     gpu::TexturePointer getDepthPyramidTexture();
 
+    gpu::FramebufferPointer getCurvatureFramebuffer();
+    gpu::TexturePointer getCurvatureTexture();
+
     void setAmbientOcclusionResolutionLevel(int level);
     gpu::FramebufferPointer getOcclusionFramebuffer();
     gpu::TexturePointer getOcclusionTexture();
@@ -95,7 +98,10 @@ private:
     gpu::FramebufferPointer _depthPyramidFramebuffer;
     gpu::TexturePointer _depthPyramidTexture;
     
-    
+
+    gpu::FramebufferPointer _curvatureFramebuffer;
+    gpu::TexturePointer _curvatureTexture;
+
     gpu::FramebufferPointer _occlusionFramebuffer;
     gpu::TexturePointer _occlusionTexture;
     
diff --git a/libraries/render-utils/src/RenderDeferredTask.cpp b/libraries/render-utils/src/RenderDeferredTask.cpp
index 444c52623e..6fa98089be 100755
--- a/libraries/render-utils/src/RenderDeferredTask.cpp
+++ b/libraries/render-utils/src/RenderDeferredTask.cpp
@@ -26,6 +26,7 @@
 
 #include "DebugDeferredBuffer.h"
 #include "DeferredLightingEffect.h"
+#include "SurfaceGeometryPass.h"
 #include "FramebufferCache.h"
 #include "HitEffect.h"
 #include "TextureCache.h"
@@ -92,6 +93,9 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
     const auto overlayTransparents = addJob<DepthSortItems>("DepthSortOverlayTransparent", filteredNonspatialBuckets[TRANSPARENT_SHAPE_BUCKET], DepthSortItems(false));
     const auto background = filteredNonspatialBuckets[BACKGROUND_BUCKET];
 
+    // Prepare deferred, generate the shared Deferred Frame Transform
+    const auto deferredFrameTransform = addJob<GenerateDeferredFrameTransform>("EvalDeferredFrameTransform");
+
     // GPU jobs: Start preparing the deferred and lighting buffer
     addJob<PrepareDeferred>("PrepareDeferred");
 
@@ -104,6 +108,9 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
     // Use Stencil and start drawing background in Lighting buffer
     addJob<DrawBackgroundDeferred>("DrawBackgroundDeferred", background);
 
+    // All opaques are rendered; now generate the surface geometry buffers
+    addJob<SurfaceGeometryPass>("SurfaceGeometry", deferredFrameTransform);
+
     // AO job
     addJob<AmbientOcclusionEffect>("AmbientOcclusion");
 
diff --git a/libraries/render-utils/src/SurfaceGeometry.slh b/libraries/render-utils/src/SurfaceGeometry.slh
new file mode 100644
index 0000000000..cd9e4c8ac4
--- /dev/null
+++ b/libraries/render-utils/src/SurfaceGeometry.slh
@@ -0,0 +1,62 @@
+<@include gpu/Config.slh@>
+<$VERSION_HEADER$>
+//  Generated on <$_SCRIBE_DATE$>
+//
+//  Created by Sam Gateau on 6/3/16.
+//  Copyright 2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+<@include DeferredTransform.slh@>
+<$declareDeferredFrameTransform()$>
+
+
+uniform sampler2D depthMap;
+
+out vec4 outFragColor;
+
+// Reference implementation of the mean-curvature estimation, still in HLSL-style syntax (float4,
+// SampleLevel, mul) and referencing symbols that are not declared in this file. It is kept commented
+// out as a porting reference; a GLSL translation (also commented out for now) lives in
+// surfaceGeometry_makeCurvature.slf.
+/*
+void main(void) {
+    // Fetch normal and depth of current pixel
+    float4 samplePos = sourceTexture.SampleLevel(pointSampler, input.texUV, 0.0f);
+    float4 sampleNormal = depthTexture.SampleLevel(pointSampler, input.texUV, 0.0f);
+
+    // Calculate the width scale.
+    float distanceToProjectionWindow = 1.0f / tan(0.5f * radians(fov));
+    float scale = distanceToProjectionWindow / sampleNormal.w;
+    
+    // Calculate dF/du and dF/dv
+    float2 du = float2( 1.0f, 0.0f ) * UVfactor.x * screenPixel * scale;
+    float2 dv = float2( 0.0f, 1.0f ) * UVfactor.x * screenPixel * scale;
+    float4 dFdu = depthTexture.SampleLevel(linearSampler, input.texUV + du.xy, 0.0f) -
+                  depthTexture.SampleLevel(linearSampler, input.texUV - du.xy, 0.0f);
+    float4 dFdv = depthTexture.SampleLevel(linearSampler, input.texUV + dv.xy, 0.0f) -
+                  depthTexture.SampleLevel(linearSampler, input.texUV - dv.xy, 0.0f);
+    dFdu *= step(abs(dFdu.w), 0.1f); dFdv *= step(abs(dFdv.w), 0.1f); 
+    
+    // Calculate ( du/dx, du/dy, du/dz ) and ( dv/dx, dv/dy, dv/dz )
+    float dist = 1.0f; samplePos.w = 1.0f;
+    float2 centerOffset = ((input.texUV - 0.5f) * 2.0f);
+    float4 px =  mul( samplePos + float4( dist, 0.0f, 0.0f, 0.0f ), matViewProj );
+    float4 py =  mul( samplePos + float4( 0.0f, dist, 0.0f, 0.0f ), matViewProj );
+    float4 pz =  mul( samplePos + float4( 0.0f, 0.0f, dist, 0.0f ), matViewProj );
+    #ifdef INVERT_TEXTURE_V
+        centerOffset.y = -centerOffset.y;
+    #endif
+    px.xy = ((px.xy / px.w) - centerOffset) / scale; 
+    py.xy = ((py.xy / py.w) - centerOffset) / scale; 
+    pz.xy = ((pz.xy / pz.w) - centerOffset) / scale; 
+    #ifdef INVERT_TEXTURE_V
+        px.y = -px.y; py.y = -py.y; pz.y = -pz.y;
+    #endif
+    
+    // Calculate dF/dx, dF/dy and dF/dz using chain rule
+    float4 dFdx = dFdu * px.x + dFdv * px.y;
+    float4 dFdy = dFdu * py.x + dFdv * py.y;
+    float4 dFdz = dFdu * pz.x + dFdv * pz.y;
+
+    // Calculate the mean curvature
+    float meanCurvature = ((dFdx.x + dFdy.y + dFdz.z) * 0.33333333333333333f) * 100.0f;
+    return (float4( sampleNormal.xyz, meanCurvature ) + 1.0f) * 0.5f;
+}
+*/
diff --git a/libraries/render-utils/src/SurfaceGeometryPass.cpp b/libraries/render-utils/src/SurfaceGeometryPass.cpp
new file mode 100644
index 0000000000..ac3f04383b
--- /dev/null
+++ b/libraries/render-utils/src/SurfaceGeometryPass.cpp
@@ -0,0 +1,107 @@
+//
+//  SurfaceGeometryPass.cpp
+//  libraries/render-utils/src/
+//
+//  Created by Sam Gateau 6/3/2016.
+//  Copyright 2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+#include "SurfaceGeometryPass.h"
+
+#include <gpu/Context.h>
+#include <gpu/StandardShaderLib.h>
+
+#include "FramebufferCache.h"
+
+const int SurfaceGeometryPass_FrameTransformSlot = 0;
+const int SurfaceGeometryPass_ParamsSlot = 1;
+const int SurfaceGeometryPass_DepthMapSlot = 0;
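+// These slots must match the slot bindings made in SurfaceGeometryPass::getCurvaturePipeline()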
+
+#include "surfaceGeometry_makeCurvature_frag.h"
+
+SurfaceGeometryPass::SurfaceGeometryPass() {
+}
+
+void SurfaceGeometryPass::configure(const Config& config) {
+}
+
+void SurfaceGeometryPass::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const DeferredFrameTransformPointer& frameTransform) {
+    assert(renderContext->args);
+    assert(renderContext->args->hasViewFrustum());
+
+    RenderArgs* args = renderContext->args;
+
+
+    auto framebufferCache = DependencyManager::get<FramebufferCache>();
+    auto depthBuffer = framebufferCache->getPrimaryDepthTexture();
+  //  auto normalBuffer = framebufferCache->getDeferredNormalTexture();
+ //   auto pyramidFBO = framebufferCache->getDepthPyramidFramebuffer();
+    auto curvatureFBO = framebufferCache->getCurvatureFramebuffer();
+
+    QSize framebufferSize = framebufferCache->getFrameBufferSize();
+    float sMin = args->_viewport.x / (float)framebufferSize.width();
+    float sWidth = args->_viewport.z / (float)framebufferSize.width();
+    float tMin = args->_viewport.y / (float)framebufferSize.height();
+    float tHeight = args->_viewport.w / (float)framebufferSize.height();
+
+
+    auto curvaturePipeline = getCurvaturePipeline();
+
+    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
+        batch.enableStereo(false);
+
+   //     _gpuTimer.begin(batch);
+
+        batch.setViewportTransform(args->_viewport);
+        batch.setProjectionTransform(glm::mat4());
+        batch.setViewTransform(Transform());
+
+        Transform model;
+        model.setTranslation(glm::vec3(sMin, tMin, 0.0f));
+        model.setScale(glm::vec3(sWidth, tHeight, 1.0f));
+        batch.setModelTransform(model);
+
+        batch.setUniformBuffer(SurfaceGeometryPass_FrameTransformSlot, frameTransform->getFrameTransformBuffer());
+        //   batch.setUniformBuffer(SurfaceGeometryPass_ParamsSlot, _parametersBuffer);
+
+
+        // Curvature pass
+        batch.setFramebuffer(curvatureFBO);
+        batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(args->getViewFrustum().getFarClip(), 0.0f, 0.0f, 0.0f));
+        batch.setPipeline(curvaturePipeline);
+        batch.setResourceTexture(SurfaceGeometryPass_DepthMapSlot, depthBuffer);
+        batch.draw(gpu::TRIANGLE_STRIP, 4);
+
+    });
+
+}
+
+const gpu::PipelinePointer& SurfaceGeometryPass::getCurvaturePipeline() {
+    if (!_curvaturePipeline) {
+        auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
+        auto ps = gpu::Shader::createPixel(std::string(surfaceGeometry_makeCurvature_frag));
+        gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
+
+        gpu::Shader::BindingSet slotBindings;
+        slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), SurfaceGeometryPass_FrameTransformSlot));
+        slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionParamsBuffer"), SurfaceGeometryPass_ParamsSlot));
+        slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), SurfaceGeometryPass_DepthMapSlot));
+        gpu::Shader::makeProgram(*program, slotBindings);
+
+
+        gpu::StatePointer state = gpu::StatePointer(new gpu::State());
+
+        // Stencil test the curvature pass to process object pixels only, not the background
+        state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
+
+      //  state->setColorWriteMask(true, false, false, false);
+
+        // Good to go add the brand new pipeline
+        _curvaturePipeline = gpu::Pipeline::create(program, state);
+    }
+
+    return _curvaturePipeline;
+}
+
diff --git a/libraries/render-utils/src/SurfaceGeometryPass.h b/libraries/render-utils/src/SurfaceGeometryPass.h
new file mode 100644
index 0000000000..5501f43659
--- /dev/null
+++ b/libraries/render-utils/src/SurfaceGeometryPass.h
@@ -0,0 +1,75 @@
+//
+//  SurfaceGeometryPass.h
+//  libraries/render-utils/src/
+//
+//  Created by Sam Gateau 6/3/2016.
+//  Copyright 2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_SurfaceGeometryPass_h
+#define hifi_SurfaceGeometryPass_h
+
+#include <DependencyManager.h>
+
+#include "render/DrawTask.h"
+#include "DeferredFrameTransform.h"
+
+class SurfaceGeometryPassConfig : public render::Job::Config {
+    Q_OBJECT
+    Q_PROPERTY(double gpuTime READ getGpuTime)
+public:
+    SurfaceGeometryPassConfig() : render::Job::Config(true) {}
+
+    double getGpuTime() { return gpuTime; }
+
+    double gpuTime{ 0.0 };
+
+signals:
+    void dirty();
+};
+
+class SurfaceGeometryPass {
+public:
+    using Config = SurfaceGeometryPassConfig;
+    using JobModel = render::Job::ModelI<SurfaceGeometryPass, DeferredFrameTransformPointer, Config>;
+
+    SurfaceGeometryPass();
+
+    void configure(const Config& config);
+    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const DeferredFrameTransformPointer& frameTransform);
+    
+private:
+    typedef gpu::BufferView UniformBufferView;
+
+    // Class describing the uniform buffer with the parameters for this pass (currently carried over from the AO shaders)
+    class Parameters {
+    public:
+        // Resolution info
+        glm::vec4 resolutionInfo { -1.0f, 0.0f, 0.0f, 0.0f };
+        // radius info is { R, R^2, 1 / R^6, ObscuranceScale}
+        glm::vec4 radiusInfo{ 0.5f, 0.5f * 0.5f, 1.0f / (0.25f * 0.25f * 0.25f), 1.0f };
+        // Dithering info 
+        glm::vec4 ditheringInfo { 0.0f, 0.0f, 0.01f, 1.0f };
+        // Sampling info
+        glm::vec4 sampleInfo { 11.0f, 1.0f/11.0f, 7.0f, 1.0f };
+        // Blurring info
+        glm::vec4 blurInfo { 1.0f, 3.0f, 2.0f, 0.0f };
+        // Gaussian distribution coefficients; the first entry is the sampling radius (max is 6)
+        const static int GAUSSIAN_COEFS_LENGTH = 8;
+        float _gaussianCoefs[GAUSSIAN_COEFS_LENGTH];
+        
+        Parameters() {}
+    };
+    gpu::BufferView _parametersBuffer;
+
+    const gpu::PipelinePointer& getCurvaturePipeline();
+
+    gpu::PipelinePointer _curvaturePipeline;
+
+    gpu::RangeTimer _gpuTimer;
+};
+
+#endif // hifi_SurfaceGeometryPass_h
diff --git a/libraries/render-utils/src/debug_deferred_buffer.slf b/libraries/render-utils/src/debug_deferred_buffer.slf
index b323836657..4c045b7e99 100644
--- a/libraries/render-utils/src/debug_deferred_buffer.slf
+++ b/libraries/render-utils/src/debug_deferred_buffer.slf
@@ -17,6 +17,7 @@
 uniform sampler2D pyramidMap;
 uniform sampler2D occlusionMap;
 uniform sampler2D occlusionBlurredMap;
+uniform sampler2D curvatureMap;
 
 in vec2 uv;
 out vec4 outFragColor;
diff --git a/libraries/render-utils/src/surfaceGeometry_makeCurvature.slf b/libraries/render-utils/src/surfaceGeometry_makeCurvature.slf
new file mode 100644
index 0000000000..ca221ae84a
--- /dev/null
+++ b/libraries/render-utils/src/surfaceGeometry_makeCurvature.slf
@@ -0,0 +1,107 @@
+<@include gpu/Config.slh@>
+<$VERSION_HEADER$>
+//  Generated on <$_SCRIBE_DATE$>
+//
+//  Created by Sam Gateau on 6/3/16.
+//  Copyright 2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+<@include DeferredTransform.slh@>
+<$declareDeferredFrameTransform()$>
+
+
+uniform sampler2D depthMap;
+
+out vec4 outFragColor;
+/*
+void main(void) {
+    float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;
+    float Zeye = -evalZeyeFromZdb(Zdb);
+    outFragColor = vec4(Zeye, 0.0, 0.0, 1.0);
+}
+*/
+
+void main(void) {
+    // Pixel being shaded
+    ivec2 pixelPos;
+    vec2 nclipPos;
+    ivec3 stereoSide;
+    ivec2 framePixelPos = getPixelPosNclipPosAndSide(gl_FragCoord.xy, pixelPos, nclipPos, stereoSide);
+
+    // Fetch the z under the pixel (stereo or not)
+    float Zdb = texelFetch(depthMap, pixelPos, 0).x;
+    float Zeye = -evalZeyeFromZdb(Zdb);
+
+
+    // The position  and normal of the pixel fragment in Eye space
+    vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, nclipPos);
+
+    vec3 worldPos = (frameTransform._invView * vec4(eyePos, 1.0)).xyz;
+
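+    // For now, visualize the fractional part of the reconstructed world position as a sanity check;
+    // the curvature computation below is still commented out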
+    vec3 moduloPos = fract(worldPos);
+
+    outFragColor = vec4(moduloPos, 1.0);
+    /*
+    return;
+
+    // Choose the screen-space sample radius
+    // proportional to the projected area of the sphere
+    float ssDiskRadius = -getProjScale() * getRadius() / Cp.z;
+
+
+    vec2 texUV = gl_FragCoord.xy * getInvWidthHeight();
+    float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;
+    float Zeye = -evalZeyeFromZdb(Zdb);
+
+    ivec3 stereoInfo = getStereoSideInfo(gl_FragCoord.x, 0);
+
+    // World Pos
+    vec4 samplePos = evalEyePositionFromZeye(stereoInfo.x, )
+
+    // Calculate the width scale.
+    // Choose the screen-space sample radius
+    // proportional to the projected area of the sphere
+   // float ssDiskRadius = -getProjScale() * getRadius() / Cp.z;
+
+  //  float distanceToProjectionWindow = 1.0f / tan(0.5f * radians(fov));
+    float scale = getProjScaleEye() / Zeye;
+    
+    vec2 viewportScale = scale * getInvWidthHeight();
+
+    // Calculate dF/du and dF/dv
+    vec2 du = vec2( 1.0f, 0.0f ) * viewportScale.x;
+    vec2 dv = vec2( 0.0f, 1.0f ) * viewportScale.y;
+
+    vec4 dFdu = texture(depthMap, texUV + du.xy) - texture(depthMap, texUV - du.xy);
+    vec4 dFdv = texture(depthMap, texUV + dv.xy) - texture(depthMap, texUV - dv.xy);
+    dFdu *= step(abs(dFdu.w), 0.1f); dFdv *= step(abs(dFdv.w), 0.1f); 
+    
+    // Calculate ( du/dx, du/dy, du/dz ) and ( dv/dx, dv/dy, dv/dz )
+    float dist = 1.0f; samplePos.w = 1.0f;
+    vec2 centerOffset = ((input.texUV - 0.5f) * 2.0f);
+    vec4 px =  mul( samplePos + vec4( dist, 0.0f, 0.0f, 0.0f ), matViewProj );
+    vec4 py =  mul( samplePos + vec4( 0.0f, dist, 0.0f, 0.0f ), matViewProj );
+    vec4 pz =  mul( samplePos + vec4( 0.0f, 0.0f, dist, 0.0f ), matViewProj );
+    #ifdef INVERT_TEXTURE_V
+        centerOffset.y = -centerOffset.y;
+    #endif
+    px.xy = ((px.xy / px.w) - centerOffset) / scale; 
+    py.xy = ((py.xy / py.w) - centerOffset) / scale; 
+    pz.xy = ((pz.xy / pz.w) - centerOffset) / scale; 
+    #ifdef INVERT_TEXTURE_V
+        px.y = -px.y; py.y = -py.y; pz.y = -pz.y;
+    #endif
+    
+    // Calculate dF/dx, dF/dy and dF/dz using chain rule
+    vec4 dFdx = dFdu * px.x + dFdv * px.y;
+    vec4 dFdy = dFdu * py.x + dFdv * py.y;
+    vec4 dFdz = dFdu * pz.x + dFdv * pz.y;
+
+    // Calculate the mean curvature
+    float meanCurvature = ((dFdx.x + dFdy.y + dFdz.z) * 0.33333333333333333) * 100.0;
+    outFragColor = vec4( (meanCurvature + 1.0) * 0.5);
+    */
+}
diff --git a/scripts/developer/utilities/render/framebuffer.qml b/scripts/developer/utilities/render/framebuffer.qml
index 0d8d85cc32..e8122db8c9 100644
--- a/scripts/developer/utilities/render/framebuffer.qml
+++ b/scripts/developer/utilities/render/framebuffer.qml
@@ -39,6 +39,7 @@ Column {
                 "Lighting",
                 "Shadow",
                 "Pyramid Depth",
+                "Curvature",
                 "Ambient Occlusion",
                 "Ambient Occlusion Blurred",
                 "Custom Shader"