diff --git a/examples/utilities/tools/renderEngineDebug.js b/examples/utilities/tools/renderEngineDebug.js
index ce697481d9..e6d15653a3 100755
--- a/examples/utilities/tools/renderEngineDebug.js
+++ b/examples/utilities/tools/renderEngineDebug.js
@@ -52,11 +52,13 @@ var overlaysCounter = new CounterWidget(panel, "Overlays", Render.overlay3D);
 
 var resizing = false;
 var previousMode = Settings.getValue(SETTINGS_KEY, -1);
-previousMode = 1; // FIXME: just for debug purpose
+previousMode = 8; // FIXME: just for debug purpose
 Menu.addActionGroup(MENU, ACTIONS, ACTIONS[previousMode + 1]);
 Render.deferredDebugMode = previousMode;
 Render.deferredDebugSize = { x: 0.0, y: -1.0, z: 1.0, w: 1.0 }; // Reset to default size
+Render.deferredDebugSize = { x: -0.5, y: -1.0, z: 1.0, w: 1.0 }; // Reset to default size
+
 function setEngineDeferredDebugSize(eventX) {
     var scaledX = (2.0 * (eventX / Window.innerWidth) - 1.0).clamp(-1.0, 1.0);
     Render.deferredDebugSize = { x: scaledX, y: -1.0, z: 1.0, w: 1.0 };
diff --git a/libraries/render-utils/src/AmbientOcclusionEffect.cpp b/libraries/render-utils/src/AmbientOcclusionEffect.cpp
index 87b7a0643d..f7269d8206 100644
--- a/libraries/render-utils/src/AmbientOcclusionEffect.cpp
+++ b/libraries/render-utils/src/AmbientOcclusionEffect.cpp
@@ -178,8 +178,8 @@ void AmbientOcclusionEffect::setLevel(float level) {
 
 void AmbientOcclusionEffect::setDithering(bool enabled) {
     if (enabled != isDitheringEnabled()) {
-        auto& current = _parametersBuffer.edit()._sampleInfo;
-        current.w = (float)enabled;
+        auto& current = _parametersBuffer.edit()._ditheringInfo;
+        current.x = (float)enabled;
     }
 }
 
@@ -331,16 +331,47 @@ void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext
 
     updateDeferredTransformBuffer(renderContext);
 
-    // Eval the mono projection
-    mat4 monoProjMat;
-    args->_viewFrustum->evalProjectionMatrix(monoProjMat);
-
+    // Update the depth info with near and far (same for stereo)
     setDepthInfo(args->_viewFrustum->getNearClip(), args->_viewFrustum->getFarClip());
 
-    _parametersBuffer.edit()._projection[0] = monoProjMat;
-    _parametersBuffer.edit()._pixelInfo = args->_viewport;
-    _parametersBuffer.edit()._ditheringInfo.y += 0.25f;
+    _parametersBuffer.edit()._pixelInfo = args->_viewport;
+    //_parametersBuffer.edit()._ditheringInfo.y += 0.25f;
+
+    // Running in stereo?
+    bool isStereo = args->_context->isStereo();
+    if (!isStereo) {
+        // Eval the mono projection
+        mat4 monoProjMat;
+        args->_viewFrustum->evalProjectionMatrix(monoProjMat);
+        _parametersBuffer.edit()._projection[0] = monoProjMat;
+        _parametersBuffer.edit()._stereoInfo = glm::vec4(0.0f, args->_viewport.z, 0.0f, 0.0f);
+
+    } else {
+
+        mat4 projMats[2];
+        Transform viewTransforms[2];
+
+        DeferredTransform deferredTransforms[2];
+
+
+        mat4 eyeViews[2];
+        args->_context->getStereoProjections(projMats);
+        args->_context->getStereoViews(eyeViews);
+
+        float halfWidth = 0.5f * sWidth;
+
+        for (int i = 0; i < 2; i++) {
+            // Compose the mono Eye space to Stereo clip space Projection Matrix
+            auto sideViewMat = projMats[i] * eyeViews[i];
+
+            _parametersBuffer.edit()._projection[i] = sideViewMat;
+        }
+
+        _parametersBuffer.edit()._stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
+
+    }
+
     auto pyramidPipeline = getPyramidPipeline();
     auto occlusionPipeline = getOcclusionPipeline();
     auto firstHBlurPipeline = getHBlurPipeline();
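
The run() hunk above is the core CPU-side change: when the context is stereo, it composes one matrix per eye that goes from the shared mono eye space to that eye's clip space (projMats[i] * eyeViews[i]) and packs a stereo flag plus the per-side width into _stereoInfo; in mono it keeps the single projection and the full viewport width. A minimal sketch of that composition and packing, using plain glm types instead of the engine's RenderArgs/gpu::Context API (the function names here are hypothetical):

    #include <glm/glm.hpp>

    // One matrix per eye: clipFromMonoEye = eyeProjection * eyeViewFromMonoEye,
    // mirroring "projMats[i] * eyeViews[i]" in the hunk above.
    glm::mat4 clipFromMonoEye(const glm::mat4& eyeProjection, const glm::mat4& eyeViewFromMonoEye) {
        return eyeProjection * eyeViewFromMonoEye;
    }

    // Packing consumed by the shader's _stereoInfo uniform:
    // x = stereo enabled flag, y = width of one side (half the framebuffer when stereo).
    // z and w are not interpreted by this sketch.
    glm::vec4 packStereoInfo(bool isStereo, int fullViewportWidth) {
        float sideWidth = isStereo ? float(fullViewportWidth >> 1) : float(fullViewportWidth);
        return glm::vec4(isStereo ? 1.0f : 0.0f, sideWidth, 0.0f, 0.0f);
    }

Composing on the CPU lets the fragment shaders keep reconstructing positions in mono eye space and simply pick the projection matrix for their side.
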
diff --git a/libraries/render-utils/src/AmbientOcclusionEffect.h b/libraries/render-utils/src/AmbientOcclusionEffect.h
index ce8a0abfe2..fd368682bd 100644
--- a/libraries/render-utils/src/AmbientOcclusionEffect.h
+++ b/libraries/render-utils/src/AmbientOcclusionEffect.h
@@ -31,7 +31,7 @@ public:
     float getLevel() const { return _parametersBuffer.get()._radiusInfo.w; }
 
     void setDithering(bool enabled);
-    bool isDitheringEnabled() const { return _parametersBuffer.get()._ditheringInfo.w; }
+    bool isDitheringEnabled() const { return _parametersBuffer.get()._ditheringInfo.x; }
 
     // Number of samples per pixel to evaluate the Obscurance
     void setNumSamples(int numSamples);
@@ -57,7 +57,7 @@ private:
         // radius info is { R, R^2, 1 / R^6, ObscuranceScale}
        glm::vec4 _radiusInfo{ 0.5, 0.5 * 0.5, 1.0 / (0.25 * 0.25 * 0.25), 1.0 };
        // Dithering info
-        glm::vec4 _ditheringInfo{ 1.0, 0.0, 0.0, 0.0 };
+        glm::vec4 _ditheringInfo{ 0.0, 0.0, 0.0, 0.0 };
        // Sampling info
        glm::vec4 _sampleInfo{ 11.0, 1.0/11.0, 7.0, 1.0 };
        // Blurring info
@@ -66,6 +66,8 @@
         glm::vec4 _pixelInfo;
         // Depth info is { n.f, f - n, -f}
         glm::vec4 _depthInfo;
+        // Stereo info
+        glm::vec4 _stereoInfo{ 0.0 };
 
         // Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
         glm::mat4 _projection[2];
diff --git a/libraries/render-utils/src/ssao.slh b/libraries/render-utils/src/ssao.slh
index 61bc6cbe89..43106e1d14 100644
--- a/libraries/render-utils/src/ssao.slh
+++ b/libraries/render-utils/src/ssao.slh
@@ -38,6 +38,7 @@ struct AmbientOcclusionParams {
     vec4 _blurInfo;
     vec4 _pixelInfo;
     vec4 _depthInfo;
+    vec4 _stereoInfo;
     mat4 _projection[2];
 };
 
@@ -90,16 +91,20 @@ float getBlurEdgeSharpness() {
     return params._blurInfo.x;
 }
 
-float evalZeyeFromZdb(float depth) {
-    return params._depthInfo.x / (depth * params._depthInfo.y + params._depthInfo.z);
+bool isStereo() {
+    return params._stereoInfo.x > 0.0f;
 }
 
-vec3 evalEyePositionFromZeye(float Zeye, vec2 texcoord) {
-    // compute the view space position using the depth
-    // basically manually pick the proj matrix components to do the inverse
-    float Xe = (-Zeye * (texcoord.x * 2.0 - 1.0) - Zeye * params._projection[0][2][0] - params._projection[0][3][0]) / params._projection[0][0][0];
-    float Ye = (-Zeye * (texcoord.y * 2.0 - 1.0) - Zeye * params._projection[0][2][1] - params._projection[0][3][1]) / params._projection[0][1][1];
-    return vec3(Xe, Ye, Zeye);
+ivec2 getStereoSideInfo(int xPos) {
+    return (xPos < params._stereoInfo.y ? ivec2(0, 0) : ivec2(1, int(params._stereoInfo.y)) );
+}
+
+float getStereoSideWidth() {
+    return (params._stereoInfo.y);
+}
+
+float evalZeyeFromZdb(float depth) {
+    return params._depthInfo.x / (depth * params._depthInfo.y + params._depthInfo.z);
 }
 
 vec3 evalEyeNormal(vec3 C) {
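
ssao.slh now tells every SSAO shader how the side-by-side stereo target is laid out: getStereoSideInfo(x) classifies a full-framebuffer x coordinate as left or right and returns that side's x offset, and getStereoSideWidth() gives the width of one side so texcoords are normalized per eye rather than across the whole framebuffer. The same mapping restated as a plain C++ sketch (hypothetical names; the shader packs the result in an ivec2):

    #include <utility>

    // Returns { side index (0 = left, 1 = right), x offset of that side in the full framebuffer },
    // matching getStereoSideInfo() in ssao.slh for a side-by-side stereo target.
    std::pair<int, int> stereoSideInfo(int xPos, int sideWidth) {
        return (xPos < sideWidth) ? std::make_pair(0, 0) : std::make_pair(1, sideWidth);
    }

    // Usage, mirroring ssao_makeOcclusion.slf below: rebase the pixel into its side,
    // then normalize by the side width instead of the full framebuffer width.
    // int side     = stereoSideInfo(x, sideWidth).first;
    // int xInSide  = x - stereoSideInfo(x, sideWidth).second;
    // float u      = (xInSide + 0.5f) / float(sideWidth);
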
diff --git a/libraries/render-utils/src/ssao_makeOcclusion.slf b/libraries/render-utils/src/ssao_makeOcclusion.slf
index 1da18b8ef8..8d72510f36 100644
--- a/libraries/render-utils/src/ssao_makeOcclusion.slf
+++ b/libraries/render-utils/src/ssao_makeOcclusion.slf
@@ -20,9 +20,16 @@ const int MAX_MIP_LEVEL = 5;
 // the depth pyramid texture
 uniform sampler2D pyramidMap;
 
-vec3 evalEyePosition(vec2 texcoord) {
-    float Zeye = -texture(pyramidMap, texcoord, 0).x;
-    return evalEyePositionFromZeye(Zeye, texcoord);
+float getZEye(ivec2 pixel) {
+    return -texelFetch(pyramidMap, pixel, 0).x;
+}
+
+vec3 evalEyePositionFromZeye(ivec2 side, float Zeye, vec2 texcoord) {
+    // compute the view space position using the depth
+    // basically manually pick the proj matrix components to do the inverse
+    float Xe = (-Zeye * (texcoord.x * 2.0 - 1.0) - Zeye * params._projection[side.x][2][0] - params._projection[side.x][3][0]) / params._projection[side.x][0][0];
+    float Ye = (-Zeye * (texcoord.y * 2.0 - 1.0) - Zeye * params._projection[side.x][2][1] - params._projection[side.x][3][1]) / params._projection[side.x][1][1];
+    return vec3(Xe, Ye, Zeye);
 }
 
 in vec2 varTexCoord0;
@@ -44,37 +51,36 @@ vec2 tapLocation(int sampleNumber, float spinAngle, out float ssR){
     return vec2(cos(angle), sin(angle));
 }
 
-vec3 getOffsetPosition(ivec2 ssC, vec2 unitOffset, float ssR) {
+vec3 getOffsetPosition(ivec2 side, ivec2 ssC, vec2 unitOffset, float ssR) {
     // Derivation:
     // mipLevel = floor(log(ssR / MAX_OFFSET));
     int mipLevel = clamp(findMSB(int(ssR)) - LOG_MAX_OFFSET, 0, MAX_MIP_LEVEL);
 
     ivec2 ssP = ivec2(ssR * unitOffset) + ssC;
+    ivec2 ssPFull = ivec2(ssP.x + side.y, ssP.y);
 
     vec3 P;
 
     // We need to divide by 2^mipLevel to read the appropriately scaled coordinate from a MIP-map.
     // Manually clamp to the texture size because texelFetch bypasses the texture unit
-    ivec2 mipP = clamp(ssP >> mipLevel, ivec2(0), textureSize(pyramidMap, mipLevel) - ivec2(1));
+    ivec2 mipP = clamp(ssPFull >> mipLevel, ivec2(0), textureSize(pyramidMap, mipLevel) - ivec2(1));
     P.z = -texelFetch(pyramidMap, mipP, mipLevel).r;
-//    P.z = -texelFetch(pyramidMap, ssP, 0).r;
+//    P.z = -texelFetch(pyramidMap, ssPFull, 0).r;
 
     // Offset to pixel center
-    //P = reconstructCSPosition(vec2(ssP) + vec2(0.5), P.z);
-
-    vec2 tapUV = (vec2(ssP) + vec2(0.5)) / getWidthHeight();
-    P = evalEyePositionFromZeye(P.z, tapUV);
+    vec2 tapUV = (vec2(ssP) + vec2(0.5)) / getStereoSideWidth();
+    P = evalEyePositionFromZeye(side, P.z, tapUV);
     return P;
 }
 
-float sampleAO(in ivec2 ssC, in vec3 C, in vec3 n_C, in float ssDiskRadius, in int tapIndex, in float randomPatternRotationAngle) {
+float sampleAO(in ivec2 side, in ivec2 ssC, in vec3 C, in vec3 n_C, in float ssDiskRadius, in int tapIndex, in float randomPatternRotationAngle) {
     // Offset on the unit disk, spun for this pixel
     float ssR;
     vec2 unitOffset = tapLocation(tapIndex, randomPatternRotationAngle, ssR);
     ssR *= ssDiskRadius;
 
     // The occluding point in camera space
-    vec3 Q = getOffsetPosition(ssC, unitOffset, ssR);
+    vec3 Q = getOffsetPosition(side, ssC, unitOffset, ssR);
 
     vec3 v = Q - C;
 
     float vv = dot(v, v);
@@ -84,11 +90,7 @@ float sampleAO(in ivec2 ssC, in vec3 C, in vec3 n_C, in float ssDiskRadius, in i
     const float epsilon = 0.01;
     float radius2 = getRadius2();
 
-    // A: From the HPG12 paper
-    // Note large epsilon to avoid overdarkening within cracks
-    // return float(vv < radius2) * max((vn - bias) / (epsilon + vv), 0.0) * radius2 * 0.6;
-
-    // B: Smoother transition to zero (lowers contrast, smoothing out corners). [Recommended]
+    // Fall off function as recommended in SAO paper
     float f = max(radius2 - vv, 0.0);
     return f * f * f * max((vn - bias) / (epsilon + vv), 0.0);
 }
@@ -105,19 +107,31 @@ void main(void) {
     // Pixel being shaded
     ivec2 ssC = ivec2(gl_FragCoord.xy);
 
-    vec3 Cp = evalEyePosition(varTexCoord0);
+    // Fetch the z under the pixel (stereo or not)
+    float Zeye = getZEye(ssC);
 
-    float randomPatternRotationAngle = getAngleDithering(ssC);
+    // Stereo side info
+    ivec2 side = getStereoSideInfo(ssC.x);
+    // From now on, ssC is the pixel pos in the side
+    ssC.x -= side.y;
+    vec2 fragPos = (vec2(ssC) + 0.5) / getStereoSideWidth();
+
+    // The position and normal of the pixel fragment in Eye space
+    vec3 Cp = evalEyePositionFromZeye(side, Zeye, fragPos);
     vec3 Cn = evalEyeNormal(Cp);
 
     // Choose the screen-space sample radius
     // proportional to the projected area of the sphere
     float ssDiskRadius = -getProjScale() * getRadius() / Cp.z;
 
+    // Let's make noise
+    float randomPatternRotationAngle = getAngleDithering(ssC);
+
+    // Accumulate the Obscurance for each sample
     float sum = 0.0;
     for (int i = 0; i < getNumSamples(); ++i) {
-        sum += sampleAO(ssC, Cp, Cn, ssDiskRadius, i, randomPatternRotationAngle);
+        sum += sampleAO(side, ssC, Cp, Cn, ssDiskRadius, i, randomPatternRotationAngle);
     }
 
     float A = max(0.0, 1.0 - sum * getObscuranceScaling() * 5.0 * getInvNumSamples());
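
In ssao_makeOcclusion.slf the reconstruction helper now takes the stereo side and indexes params._projection[side.x], but the algebra is unchanged: given the linear depth Zeye, it inverts the x and y rows of that side's projection. The following self-contained check (a sketch using glm only, with an arbitrary off-center frustum so the P[2][0] and P[2][1] terms are non-zero) projects an eye-space point and runs the same inverse to confirm the round trip:

    #include <cstdio>
    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_transform.hpp>

    int main() {
        // Off-center projection, like one eye of a stereo pair.
        glm::mat4 P = glm::frustum(-0.12f, 0.08f, -0.1f, 0.1f, 0.1f, 100.0f);
        glm::vec4 eye(0.3f, -0.2f, -2.5f, 1.0f);   // eye-space point (Zeye = -2.5)

        // Forward: project to NDC, then remap to a texcoord in [0, 1].
        glm::vec4 clip = P * eye;
        glm::vec2 ndc = glm::vec2(clip) / clip.w;
        glm::vec2 uv = ndc * 0.5f + 0.5f;

        // Inverse, written exactly as the shader does it (P[column][row] indexing).
        float Zeye = eye.z;
        float Xe = (-Zeye * (uv.x * 2.0f - 1.0f) - Zeye * P[2][0] - P[3][0]) / P[0][0];
        float Ye = (-Zeye * (uv.y * 2.0f - 1.0f) - Zeye * P[2][1] - P[3][1]) / P[1][1];

        printf("in  = (%f, %f)\nout = (%f, %f)\n", eye.x, eye.y, Xe, Ye);  // should match
        return 0;
    }

glm and GLSL share the P[column][row] indexing, which is why the shader reads params._projection[side.x][2][0] for the third-column, first-row term.
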
diff --git a/libraries/render-utils/src/ssao_makePyramid.slf b/libraries/render-utils/src/ssao_makePyramid.slf
index 577c0fd232..70d46fb432 100644
--- a/libraries/render-utils/src/ssao_makePyramid.slf
+++ b/libraries/render-utils/src/ssao_makePyramid.slf
@@ -12,14 +12,12 @@
 <@include ssao.slh@>
 <$declareAmbientOcclusion()$>
 
-// the depth texture
 uniform sampler2D depthMap;
 
-in vec2 varTexCoord0;
 out vec4 outFragColor;
 
 void main(void) {
-    float Zdb = texture(depthMap, varTexCoord0).x;
+    float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;
     float Zeye = -evalZeyeFromZdb(Zdb);
     outFragColor = vec4(Zeye, 0.0, 0.0, 1.0);
 }
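
ssao_makePyramid.slf now reads the hardware depth with texelFetch at gl_FragCoord instead of sampling with interpolated texcoords, which works unchanged on the side-by-side stereo target, and it still linearizes with evalZeyeFromZdb() using the { n.f, f - n, -f } packing documented in AmbientOcclusionEffect.h. A small sketch that exercises that linearization; the closed-form Zdb expression below assumes a standard OpenGL perspective projection and is not taken from the patch:

    #include <cstdio>

    int main() {
        const float n = 0.1f, f = 100.0f;
        const float depthInfo[3] = { n * f, f - n, -f };   // { n.f, f - n, -f }

        const float Zeye = -7.3f;                          // eye-space Z, negative in front of the camera
        // Hardware depth for a standard GL perspective projection, remapped to [0, 1].
        const float Zdb = f * (Zeye + n) / ((f - n) * Zeye);

        // Same expression as evalZeyeFromZdb() in ssao.slh.
        const float back = depthInfo[0] / (Zdb * depthInfo[1] + depthInfo[2]);
        printf("Zeye = %f, recovered = %f\n", Zeye, back);  // should agree
        return 0;
    }
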