Mirror of https://github.com/overte-org/overte.git (synced 2025-08-09 00:17:25 +02:00)
Fixed the stereo rendering

commit ae18bb8ef7 (parent 577666da6e)
6 changed files with 95 additions and 43 deletions
@@ -52,11 +52,13 @@ var overlaysCounter = new CounterWidget(panel, "Overlays", Render.overlay3D);
 
 var resizing = false;
 var previousMode = Settings.getValue(SETTINGS_KEY, -1);
-previousMode = 1; // FIXME: just for debug purpose
+previousMode = 8; // FIXME: just for debug purpose
 Menu.addActionGroup(MENU, ACTIONS, ACTIONS[previousMode + 1]);
 Render.deferredDebugMode = previousMode;
 Render.deferredDebugSize = { x: 0.0, y: -1.0, z: 1.0, w: 1.0 }; // Reset to default size
+Render.deferredDebugSize = { x: -0.5, y: -1.0, z: 1.0, w: 1.0 }; // Reset to default size
+
 
 function setEngineDeferredDebugSize(eventX) {
     var scaledX = (2.0 * (eventX / Window.innerWidth) - 1.0).clamp(-1.0, 1.0);
     Render.deferredDebugSize = { x: scaledX, y: -1.0, z: 1.0, w: 1.0 };

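For reference, the debug split driven above is a normalized x in [-1, 1]: -1 shows the debug view full-width, +1 hides it, and setEngineDeferredDebugSize() remaps the mouse with 2 * (x / width) - 1, clamped. A minimal sketch of that mapping, written in C++ for illustration (names here are not from the script):

#include <algorithm>
#include <cstdio>

// Map a window-space x (pixels) to the [-1, 1] split coordinate used by
// Render.deferredDebugSize.x: -1 shows only the debug view, +1 hides it.
float debugSplitFromWindowX(float eventX, float windowWidth) {
    float scaledX = 2.0f * (eventX / windowWidth) - 1.0f;
    return std::min(1.0f, std::max(-1.0f, scaledX));
}

int main() {
    // e.g. mouse at 3/4 of a 1920 px window -> split at +0.5
    std::printf("%f\n", debugSplitFromWindowX(1440.0f, 1920.0f));
    return 0;
}
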
@@ -178,8 +178,8 @@ void AmbientOcclusionEffect::setLevel(float level) {
 
 void AmbientOcclusionEffect::setDithering(bool enabled) {
     if (enabled != isDitheringEnabled()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._sampleInfo;
-        current.w = (float)enabled;
+        auto& current = _parametersBuffer.edit<Parameters>()._ditheringInfo;
+        current.x = (float)enabled;
     }
 }
 
@@ -331,15 +331,46 @@ void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext
 
     updateDeferredTransformBuffer(renderContext);
 
+    // Update the depth info with near and far (same for stereo)
+    setDepthInfo(args->_viewFrustum->getNearClip(), args->_viewFrustum->getFarClip());
+
+    _parametersBuffer.edit<Parameters>()._pixelInfo = args->_viewport;
+    //_parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;
+
+    // Running in stero ?
+    bool isStereo = args->_context->isStereo();
+    if (!isStereo) {
         // Eval the mono projection
         mat4 monoProjMat;
         args->_viewFrustum->evalProjectionMatrix(monoProjMat);
 
-    setDepthInfo(args->_viewFrustum->getNearClip(), args->_viewFrustum->getFarClip());
         _parametersBuffer.edit<Parameters>()._projection[0] = monoProjMat;
-    _parametersBuffer.edit<Parameters>()._pixelInfo = args->_viewport;
-    _parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;
+        _parametersBuffer.edit<Parameters>()._stereoInfo = glm::vec4(0.0f, args->_viewport.z, 0.0f, 0.0f);
+    } else {
+
+        mat4 projMats[2];
+        Transform viewTransforms[2];
+
+        DeferredTransform deferredTransforms[2];
+
+        mat4 eyeViews[2];
+        args->_context->getStereoProjections(projMats);
+        args->_context->getStereoViews(eyeViews);
+
+        float halfWidth = 0.5f * sWidth;
+
+        for (int i = 0; i < 2; i++) {
+            // Compose the mono Eye space to Stereo clip space Projection Matrix
+            auto sideViewMat = projMats[i] * eyeViews[i];
+
+            _parametersBuffer.edit<Parameters>()._projection[i] = sideViewMat;
+        }
+
+        _parametersBuffer.edit<Parameters>()._stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
+    }
+
     auto pyramidPipeline = getPyramidPipeline();
     auto occlusionPipeline = getOcclusionPipeline();

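The key stereo change above: instead of one mono projection, the parameters buffer now receives one matrix per eye, composed as the eye's projection times the mono-eye-to-per-eye view offset, plus a _stereoInfo vec4 whose x flags stereo and whose y holds the pixel width of one side (viewport.z >> 1). A minimal sketch of that composition using plain glm (struct and function names are illustrative, not the engine's API):

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Sketch of the per-eye setup done above; the gpu::Context / parameters-buffer
// types from the engine are not reproduced here.
struct AoStereoParams {
    glm::vec4 stereoInfo{ 0.0f };   // x: stereo enabled, y: width of one side in pixels
    glm::mat4 projection[2];        // mono proj, or one "mono eye space -> side clip space" matrix per eye
};

void setupStereo(AoStereoParams& params,
                 const glm::mat4 eyeProjections[2],   // per-eye projection matrices
                 const glm::mat4 eyeViewOffsets[2],   // mono eye space -> per-eye eye space
                 int viewportWidth) {
    for (int i = 0; i < 2; i++) {
        // Compose so the shader can go straight from mono eye space to the side's clip space.
        params.projection[i] = eyeProjections[i] * eyeViewOffsets[i];
    }
    // y holds the pixel width of a single eye's half of the framebuffer (viewport.z >> 1).
    params.stereoInfo = glm::vec4(1.0f, float(viewportWidth >> 1), 0.0f, 1.0f);
}

int main() {
    AoStereoParams params;
    glm::mat4 eyeProj[2] = { glm::perspective(glm::radians(90.0f), 1.0f, 0.1f, 100.0f),
                             glm::perspective(glm::radians(90.0f), 1.0f, 0.1f, 100.0f) };
    glm::mat4 eyeView[2] = { glm::translate(glm::mat4(1.0f), glm::vec3( 0.032f, 0.0f, 0.0f)),
                             glm::translate(glm::mat4(1.0f), glm::vec3(-0.032f, 0.0f, 0.0f)) };
    setupStereo(params, eyeProj, eyeView, 1920);
    return params.stereoInfo.x > 0.0f ? 0 : 1;
}
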
@@ -31,7 +31,7 @@ public:
     float getLevel() const { return _parametersBuffer.get<Parameters>()._radiusInfo.w; }
 
     void setDithering(bool enabled);
-    bool isDitheringEnabled() const { return _parametersBuffer.get<Parameters>()._ditheringInfo.w; }
+    bool isDitheringEnabled() const { return _parametersBuffer.get<Parameters>()._ditheringInfo.x; }
 
     // Number of samples per pixel to evaluate the Obscurance
     void setNumSamples(int numSamples);
@@ -57,7 +57,7 @@ private:
        // radius info is { R, R^2, 1 / R^6, ObscuranceScale}
        glm::vec4 _radiusInfo{ 0.5, 0.5 * 0.5, 1.0 / (0.25 * 0.25 * 0.25), 1.0 };
        // Dithering info
-       glm::vec4 _ditheringInfo{ 1.0, 0.0, 0.0, 0.0 };
+       glm::vec4 _ditheringInfo{ 0.0, 0.0, 0.0, 0.0 };
        // Sampling info
        glm::vec4 _sampleInfo{ 11.0, 1.0/11.0, 7.0, 1.0 };
        // Blurring info
@@ -66,6 +66,8 @@ private:
        glm::vec4 _pixelInfo;
        // Depth info is { n.f, f - n, -f}
        glm::vec4 _depthInfo;
+       // Stereo info
+       glm::vec4 _stereoInfo{ 0.0 };
        // Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
        glm::mat4 _projection[2];
 
@@ -38,6 +38,7 @@ struct AmbientOcclusionParams {
     vec4 _blurInfo;
     vec4 _pixelInfo;
     vec4 _depthInfo;
+    vec4 _stereoInfo;
     mat4 _projection[2];
 };
 
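The GLSL AmbientOcclusionParams block has to stay in lockstep with the C++ Parameters struct that fills the uniform buffer, so the new _stereoInfo vec4 is inserted at the same position on both sides. A small illustrative mirror in C++ with glm, assuming the usual all-vec4/mat4, std140-friendly layout (a sketch, not the engine's actual class):

#include <cstdio>
#include <glm/glm.hpp>

// Host-side mirror of the shader's uniform block (illustrative subset only).
// Every member is a vec4 or mat4, so the std140 layout matches the packed
// C++ layout and no extra padding is introduced.
struct AmbientOcclusionParams {
    glm::vec4 _blurInfo{ 0.0f };
    glm::vec4 _pixelInfo{ 0.0f };
    glm::vec4 _depthInfo{ 0.0f };
    glm::vec4 _stereoInfo{ 0.0f };   // must sit at the same offset as in the GLSL block
    glm::mat4 _projection[2];
};

static_assert(sizeof(AmbientOcclusionParams) == 4 * sizeof(glm::vec4) + 2 * sizeof(glm::mat4),
              "no padding expected between vec4/mat4 members");

int main() {
    std::printf("%zu bytes\n", sizeof(AmbientOcclusionParams));
    return 0;
}
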
@@ -90,16 +91,20 @@ float getBlurEdgeSharpness() {
     return params._blurInfo.x;
 }
 
-float evalZeyeFromZdb(float depth) {
-    return params._depthInfo.x / (depth * params._depthInfo.y + params._depthInfo.z);
+bool isStereo() {
+    return params._stereoInfo.x > 0.0f;
 }
 
-vec3 evalEyePositionFromZeye(float Zeye, vec2 texcoord) {
-    // compute the view space position using the depth
-    // basically manually pick the proj matrix components to do the inverse
-    float Xe = (-Zeye * (texcoord.x * 2.0 - 1.0) - Zeye * params._projection[0][2][0] - params._projection[0][3][0]) / params._projection[0][0][0];
-    float Ye = (-Zeye * (texcoord.y * 2.0 - 1.0) - Zeye * params._projection[0][2][1] - params._projection[0][3][1]) / params._projection[0][1][1];
-    return vec3(Xe, Ye, Zeye);
+ivec2 getStereoSideInfo(int xPos) {
+    return (xPos < params._stereoInfo.y ? ivec2(0, 0) : ivec2(1, int(params._stereoInfo.y)) );
+}
+
+float getStereoSideWidth() {
+    return (params._stereoInfo.y);
+}
+
+float evalZeyeFromZdb(float depth) {
+    return params._depthInfo.x / (depth * params._depthInfo.y + params._depthInfo.z);
 }
 
 vec3 evalEyeNormal(vec3 C) {

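With both eyes rendered side by side into one framebuffer, getStereoSideInfo() classifies a fragment by its x coordinate: the left half is side 0 with no offset, the right half is side 1 offset by one side's width (_stereoInfo.y). A plain C++ sketch of that mapping (illustrative names, not shader code):

#include <cstdio>

// Sketch of getStereoSideInfo()/getStereoSideWidth() from the shader above.
// 'sideWidth' plays the role of params._stereoInfo.y (pixel width of one eye's half).
struct SideInfo {
    int index;    // 0 = left eye, 1 = right eye
    int xOffset;  // pixels to subtract to get a side-local x
};

SideInfo getStereoSideInfo(int xPos, int sideWidth) {
    return (xPos < sideWidth) ? SideInfo{ 0, 0 } : SideInfo{ 1, sideWidth };
}

int main() {
    int sideWidth = 960;  // e.g. a 1920 px framebuffer split into two 960 px eyes
    for (int x : { 100, 959, 960, 1900 }) {
        SideInfo side = getStereoSideInfo(x, sideWidth);
        // side-local pixel x, as done in the occlusion shader: ssC.x -= side.y
        std::printf("x=%d -> eye %d, local x=%d\n", x, side.index, x - side.xOffset);
    }
    return 0;
}
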
@@ -20,9 +20,16 @@ const int MAX_MIP_LEVEL = 5;
 // the depth pyramid texture
 uniform sampler2D pyramidMap;
 
-vec3 evalEyePosition(vec2 texcoord) {
-    float Zeye = -texture(pyramidMap, texcoord, 0).x;
-    return evalEyePositionFromZeye(Zeye, texcoord);
+float getZEye(ivec2 pixel) {
+    return -texelFetch(pyramidMap, pixel, 0).x;
+}
+
+vec3 evalEyePositionFromZeye(ivec2 side, float Zeye, vec2 texcoord) {
+    // compute the view space position using the depth
+    // basically manually pick the proj matrix components to do the inverse
+    float Xe = (-Zeye * (texcoord.x * 2.0 - 1.0) - Zeye * params._projection[side.x][2][0] - params._projection[side.x][3][0]) / params._projection[side.x][0][0];
+    float Ye = (-Zeye * (texcoord.y * 2.0 - 1.0) - Zeye * params._projection[side.x][2][1] - params._projection[side.x][3][1]) / params._projection[side.x][1][1];
+    return vec3(Xe, Ye, Zeye);
 }
 
 in vec2 varTexCoord0;

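evalEyePositionFromZeye() now indexes the projection matrix by side and inverts only the terms that matter: with clip.w = -Zeye for a perspective projection, ndc.x = (P[0][0]*Xe + P[2][0]*Zeye + P[3][0]) / -Zeye, so Xe falls out by rearranging, and likewise for Ye. A round-trip check with glm (a sketch with illustrative values; the engine supplies its own per-side matrices):

#include <cstdio>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Project an eye-space point, derive the texcoord, then invert using only
// P[0][0], P[1][1], P[2][0], P[2][1], P[3][0], P[3][1] as the shader does.
int main() {
    glm::mat4 P = glm::perspective(glm::radians(45.0f), 16.0f / 9.0f, 0.1f, 100.0f);

    glm::vec3 eyePos(0.7f, -0.3f, -5.0f);             // camera looks down -Z
    glm::vec4 clip = P * glm::vec4(eyePos, 1.0f);
    glm::vec2 ndc = glm::vec2(clip) / clip.w;
    glm::vec2 texcoord = ndc * 0.5f + 0.5f;            // [0,1] UV, as sampled in the shader

    float Zeye = eyePos.z;                             // linear eye-space depth (negative)
    float Xe = (-Zeye * (texcoord.x * 2.0f - 1.0f) - Zeye * P[2][0] - P[3][0]) / P[0][0];
    float Ye = (-Zeye * (texcoord.y * 2.0f - 1.0f) - Zeye * P[2][1] - P[3][1]) / P[1][1];

    std::printf("reconstructed (%.3f, %.3f, %.3f) vs original (%.3f, %.3f, %.3f)\n",
                Xe, Ye, Zeye, eyePos.x, eyePos.y, eyePos.z);
    return 0;
}
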
@@ -44,37 +51,36 @@ vec2 tapLocation(int sampleNumber, float spinAngle, out float ssR){
     return vec2(cos(angle), sin(angle));
 }
 
-vec3 getOffsetPosition(ivec2 ssC, vec2 unitOffset, float ssR) {
+vec3 getOffsetPosition(ivec2 side, ivec2 ssC, vec2 unitOffset, float ssR) {
     // Derivation:
     //  mipLevel = floor(log(ssR / MAX_OFFSET));
     int mipLevel = clamp(findMSB(int(ssR)) - LOG_MAX_OFFSET, 0, MAX_MIP_LEVEL);
 
     ivec2 ssP = ivec2(ssR * unitOffset) + ssC;
+    ivec2 ssPFull = ivec2(ssP.x + side.y, ssP.y);
 
     vec3 P;
 
     // We need to divide by 2^mipLevel to read the appropriately scaled coordinate from a MIP-map.
     // Manually clamp to the texture size because texelFetch bypasses the texture unit
-    ivec2 mipP = clamp(ssP >> mipLevel, ivec2(0), textureSize(pyramidMap, mipLevel) - ivec2(1));
+    ivec2 mipP = clamp(ssPFull >> mipLevel, ivec2(0), textureSize(pyramidMap, mipLevel) - ivec2(1));
     P.z = -texelFetch(pyramidMap, mipP, mipLevel).r;
-    // P.z = -texelFetch(pyramidMap, ssP, 0).r;
+    // P.z = -texelFetch(pyramidMap, ssPFull, 0).r;
 
     // Offset to pixel center
-    //P = reconstructCSPosition(vec2(ssP) + vec2(0.5), P.z);
-
-    vec2 tapUV = (vec2(ssP) + vec2(0.5)) / getWidthHeight();
-    P = evalEyePositionFromZeye(P.z, tapUV);
+    vec2 tapUV = (vec2(ssP) + vec2(0.5)) / getStereoSideWidth();
+    P = evalEyePositionFromZeye(side, P.z, tapUV);
 
     return P;
 }
 
-float sampleAO(in ivec2 ssC, in vec3 C, in vec3 n_C, in float ssDiskRadius, in int tapIndex, in float randomPatternRotationAngle) {
+float sampleAO(in ivec2 side, in ivec2 ssC, in vec3 C, in vec3 n_C, in float ssDiskRadius, in int tapIndex, in float randomPatternRotationAngle) {
     // Offset on the unit disk, spun for this pixel
     float ssR;
     vec2 unitOffset = tapLocation(tapIndex, randomPatternRotationAngle, ssR);
     ssR *= ssDiskRadius;
 
     // The occluding point in camera space
-    vec3 Q = getOffsetPosition(ssC, unitOffset, ssR);
+    vec3 Q = getOffsetPosition(side, ssC, unitOffset, ssR);
 
     vec3 v = Q - C;
     float vv = dot(v, v);
@@ -84,11 +90,7 @@ float sampleAO(in ivec2 ssC, in vec3 C, in vec3 n_C, in float ssDiskRadius, in i
     const float epsilon = 0.01;
     float radius2 = getRadius2();
 
-    // A: From the HPG12 paper
-    // Note large epsilon to avoid overdarkening within cracks
-    // return float(vv < radius2) * max((vn - bias) / (epsilon + vv), 0.0) * radius2 * 0.6;
-
-    // B: Smoother transition to zero (lowers contrast, smoothing out corners). [Recommended]
+    // Fall off function as recommended in SAO paper
     float f = max(radius2 - vv, 0.0);
     return f * f * f * max((vn - bias) / (epsilon + vv), 0.0);
 }

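The retained falloff is the variant labeled "B" in the removed comments (the one the SAO paper recommends): a cubic falloff in the squared occluder distance vv, times the clamped angle term. A plain C++ sketch of that per-tap contribution (parameter values below are illustrative):

#include <algorithm>
#include <cstdio>

// Obscurance falloff kept by this commit; parameter names mirror the shader.
float sampleFalloff(float vv, float vn, float radius2, float bias, float epsilon) {
    float f = std::max(radius2 - vv, 0.0f);
    return f * f * f * std::max((vn - bias) / (epsilon + vv), 0.0f);
}

int main() {
    float radius2 = 0.5f * 0.5f;   // matches _radiusInfo.y for R = 0.5
    // The contribution fades smoothly to zero as the occluder distance approaches the radius.
    for (float d = 0.1f; d < 0.6f; d += 0.1f) {
        std::printf("d=%.1f -> %f\n", d, sampleFalloff(d * d, 0.3f, radius2, 0.01f, 0.01f));
    }
    return 0;
}
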
@@ -105,19 +107,31 @@ void main(void) {
     // Pixel being shaded
     ivec2 ssC = ivec2(gl_FragCoord.xy);
 
-    vec3 Cp = evalEyePosition(varTexCoord0);
+    // Fetch the z under the pixel (stereo or not)
+    float Zeye = getZEye(ssC);
 
-    float randomPatternRotationAngle = getAngleDithering(ssC);
+    // Stereo side info
+    ivec2 side = getStereoSideInfo(ssC.x);
+
+    // From now on, ssC is the pixel pos in the side
+    ssC.x -= side.y;
+    vec2 fragPos = (vec2(ssC) + 0.5) / getStereoSideWidth();
+
+    // The position and normal of the pixel fragment in Eye space
+    vec3 Cp = evalEyePositionFromZeye(side, Zeye, fragPos);
     vec3 Cn = evalEyeNormal(Cp);
 
     // Choose the screen-space sample radius
     // proportional to the projected area of the sphere
     float ssDiskRadius = -getProjScale() * getRadius() / Cp.z;
 
+    // Let's make noise
+    float randomPatternRotationAngle = getAngleDithering(ssC);
+
+    // Accumulate the Obscurance for each samples
     float sum = 0.0;
     for (int i = 0; i < getNumSamples(); ++i) {
-        sum += sampleAO(ssC, Cp, Cn, ssDiskRadius, i, randomPatternRotationAngle);
+        sum += sampleAO(side, ssC, Cp, Cn, ssDiskRadius, i, randomPatternRotationAngle);
     }
 
     float A = max(0.0, 1.0 - sum * getObscuranceScaling() * 5.0 * getInvNumSamples());

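After the tap loop, the summed obscurance is turned into an ambient visibility term: A = max(0, 1 - sum * obscuranceScale * 5 / numSamples), with numSamples defaulting to 11 (_sampleInfo) and the scale packed in _radiusInfo (ObscuranceScale). A tiny worked example (illustrative values):

#include <algorithm>
#include <cstdio>

// How the accumulated obscurance becomes the output AO term in main() above.
// getInvNumSamples() corresponds to 1/numSamples (_sampleInfo.y).
float ambientVisibility(float sum, float obscuranceScale, int numSamples) {
    return std::max(0.0f, 1.0f - sum * obscuranceScale * 5.0f * (1.0f / numSamples));
}

int main() {
    // With the default 11 samples and a scale of 1.0, a summed obscurance of 1.1
    // darkens the pixel to half visibility.
    std::printf("%f\n", ambientVisibility(1.1f, 1.0f, 11));
    return 0;
}
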
@@ -12,14 +12,12 @@
 <@include ssao.slh@>
 <$declareAmbientOcclusion()$>
 
-// the depth texture
 uniform sampler2D depthMap;
 
-in vec2 varTexCoord0;
 out vec4 outFragColor;
 
 void main(void) {
-    float Zdb = texture(depthMap, varTexCoord0).x;
+    float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;
     float Zeye = -evalZeyeFromZdb(Zdb);
     outFragColor = vec4(Zeye, 0.0, 0.0, 1.0);
 }

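The pyramid pass linearizes the hardware depth value using _depthInfo = { n*f, f - n, -f }: Zeye = -(n*f) / (Zdb*(f - n) - f), which returns n at Zdb = 0 and f at Zdb = 1. A quick check with glm, assuming the default [0,1] depth range (near/far values are illustrative; the engine takes them from the view frustum):

#include <cstdio>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

int main() {
    float n = 0.1f, f = 100.0f;
    glm::mat4 P = glm::perspective(glm::radians(60.0f), 1.0f, n, f);

    float ze = -10.0f;                                   // eye-space z of some fragment
    glm::vec4 clip = P * glm::vec4(0.0f, 0.0f, ze, 1.0f);
    float Zdb = 0.5f * (clip.z / clip.w) + 0.5f;          // window-space depth in [0,1]

    glm::vec3 depthInfo(n * f, f - n, -f);
    float ZeyeLinear = -(depthInfo.x / (Zdb * depthInfo.y + depthInfo.z));  // as in the pyramid shader

    std::printf("Zdb=%f -> Zeye=%f (expected %f)\n", Zdb, ZeyeLinear, -ze);
    return 0;
}
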