Using proper inverse projection matrix in velocity buffer computation to reproject to world pos
commit b7b478f640
parent 5af4337b0e

9 changed files with 29 additions and 11 deletions
Changed paths:
    libraries/display-plugins/src/display-plugins/stereo
    libraries/plugins/src/plugins
    libraries/render-utils/src

@@ -101,3 +101,4 @@ void StereoDisplayPlugin::internalDeactivate() {
 float StereoDisplayPlugin::getRecommendedAspectRatio() const {
     return aspect(Parent::getRecommendedRenderSize());
 }
+
@@ -26,7 +26,7 @@ public:
     // the IPD at the Application level, the way we now allow with HMDs.
     // If that becomes an issue then we'll need to break up the functionality similar
     // to the HMD plugins.
-    // virtual glm::mat4 getEyeToHeadTransform(Eye eye) const override;
+    //virtual glm::mat4 getEyeToHeadTransform(Eye eye) const override;

protected:
    virtual bool internalActivate() override;
@@ -42,4 +42,9 @@ std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> Displa
         hudOperator = _hudOperator;
     }
     return hudOperator;
 }
+
+glm::mat4 HmdDisplay::getEyeToHeadTransform(Eye eye) const {
+    static const glm::mat4 xform;
+    return xform;
+}
@@ -93,9 +93,7 @@ class HmdDisplay : public StereoDisplay {
 public:
     // HMD specific methods
     // TODO move these into another class?
-    virtual glm::mat4 getEyeToHeadTransform(Eye eye) const {
-        static const glm::mat4 transform; return transform;
-    }
+    virtual glm::mat4 getEyeToHeadTransform(Eye eye) const;

     // returns a copy of the most recent head pose, computed via updateHeadPose
     virtual glm::mat4 getHeadPose() const {
@@ -58,7 +58,7 @@ class JitterSample {
 public:

     enum {
-        SEQUENCE_LENGTH = 16
+        SEQUENCE_LENGTH = 128
     };

     using Config = JitterSampleConfig;
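
For context: SEQUENCE_LENGTH is the number of sub-pixel jitter offsets the TAA stage cycles through before repeating; a longer sequence (128 instead of 16) covers the pixel footprint more evenly and reduces visible repetition in the converged image. A minimal sketch of how such a sequence is commonly generated, assuming the usual Halton(2,3) low-discrepancy series (the generator actually used by JitterSample is not shown in this diff):

#include <glm/glm.hpp>
#include <vector>

// Radical inverse of `index` in base `base`: the building block of a Halton sequence.
static float haltonSample(int index, int base) {
    float f = 1.0f;
    float result = 0.0f;
    for (int i = index; i > 0; i /= base) {
        f /= float(base);
        result += f * float(i % base);
    }
    return result;
}

// Generate `count` jitter offsets in [-0.5, 0.5]^2, to be scaled by one pixel's
// size in clip space before being folded into the projection matrix.
std::vector<glm::vec2> generateJitterSequence(int count) {
    std::vector<glm::vec2> offsets;
    offsets.reserve(count);
    for (int i = 0; i < count; ++i) {
        // Start at index 1: the radical inverse of 0 is 0 in every base.
        offsets.emplace_back(haltonSample(i + 1, 2) - 0.5f,
                             haltonSample(i + 1, 3) - 0.5f);
    }
    return offsets;
}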

@@ -106,12 +106,12 @@ class AntialiasingConfig : public render::Job::Config {
 public:
     AntialiasingConfig() : render::Job::Config(true) {}

-    float blend{ 0.1f };
+    float blend{ 0.075f };


     bool constrainColor{ true };
     bool covarianceClipColor{ true };
-    float covarianceGamma{ 1.0f };
+    float covarianceGamma{ 0.9f };
     bool clipExactColor{ false };
     bool feedbackColor{ false };
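
These defaults drive the temporal resolve: blend is the weight of the current frame in the exponential history average (lowering it from 0.1 to 0.075 keeps more history), and covarianceGamma scales the variance-based color bounds used to clamp the reprojected history (lowering it from 1.0 to 0.9 rejects stale history slightly more aggressively). A sketch of the resolve step these two parameters feed, assuming a conventional variance-clipping TAA formulation rather than this repo's exact shader:

#include <glm/glm.hpp>

// One temporal resolve step for a single pixel. `history` is the reprojected
// color from the previous frame; `mean` and `stddev` are color statistics of
// the current frame's local neighborhood.
glm::vec3 temporalResolve(glm::vec3 history, glm::vec3 current,
                          glm::vec3 mean, glm::vec3 stddev,
                          float blend /* 0.075f */, float covarianceGamma /* 0.9f */) {
    // Clamp history to a box of +/- gamma * sigma around the neighborhood mean;
    // a smaller gamma trusts the history less when it drifts from current content.
    glm::vec3 minColor = mean - covarianceGamma * stddev;
    glm::vec3 maxColor = mean + covarianceGamma * stddev;
    glm::vec3 clampedHistory = glm::clamp(history, minColor, maxColor);

    // Exponential moving average: a lower blend gives a smoother result, but
    // more ghosting when the velocity-based reprojection is wrong, which is
    // exactly what the invProjection fix below improves.
    return glm::mix(clampedHistory, current, blend);
}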

@@ -48,6 +48,7 @@ void DeferredFrameTransform::update(RenderArgs* args) {
         frameTransformBuffer.projection[0] = frameTransformBuffer.projectionMono;
         frameTransformBuffer.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
         frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
+        frameTransformBuffer.invProjection[0] = glm::inverse(frameTransformBuffer.projection[0]);
     } else {

        mat4 projMats[2];

@@ -59,6 +60,7 @@ void DeferredFrameTransform::update(RenderArgs* args) {
             // Compose the mono Eye space to Stereo clip space Projection Matrix
             auto sideViewMat = projMats[i] * eyeViews[i];
             frameTransformBuffer.projection[i] = sideViewMat;
+            frameTransformBuffer.invProjection[i] = glm::inverse(sideViewMat);
         }

         frameTransformBuffer.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
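
This is the heart of the commit: the per-eye inverse projection is computed once per frame on the CPU with glm::inverse and uploaded next to the forward matrix, so shaders can reproject depth samples exactly instead of reconstructing eye-space positions from individual projection terms. A standalone round-trip sketch of the property this guarantees (not the engine's buffer layout):

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

int main() {
    // Forward projection and its cached inverse, mirroring what
    // DeferredFrameTransform::update now stores per eye.
    glm::mat4 proj = glm::perspective(glm::radians(90.0f), 16.0f / 9.0f, 0.1f, 1000.0f);
    glm::mat4 invProj = glm::inverse(proj);

    // Project an eye-space point to NDC, then reproject it back.
    glm::vec4 eyePos(1.0f, 2.0f, -10.0f, 1.0f);
    glm::vec4 clipPos = proj * eyePos;
    glm::vec3 ndc = glm::vec3(clipPos) / clipPos.w;   // perspective divide

    glm::vec4 back = invProj * glm::vec4(ndc, 1.0f);
    glm::vec3 recovered = glm::vec3(back) / back.w;   // == eyePos.xyz up to float error
    return 0;
}

Inverting the actual matrix keeps the reconstruction correct even for asymmetric stereo frusta, where a hand-derived analytic inverse is easy to get subtly wrong.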

@@ -45,6 +45,8 @@ protected:
     glm::vec4 stereoInfo{ 0.0 };
     // Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
     glm::mat4 projection[2];
+    // Inverse proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
+    glm::mat4 invProjection[2];
     // The mono projection for sure
     glm::mat4 projectionMono;
     // Inv View matrix from eye space (mono) to world space

@@ -31,6 +31,7 @@ struct DeferredFrameTransform {
     vec4 _depthInfo;
     vec4 _stereoInfo;
     mat4 _projection[2];
+    mat4 _invProjection[2];
     mat4 _projectionMono;
     mat4 _viewInverse;
     mat4 _view;

@@ -128,6 +129,15 @@ vec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {
     return vec3(Xe, Ye, Zeye);
 }

+vec3 evalEyePositionFromZdb(int side, float Zdb, vec2 texcoord) {
+    // compute the view space position using the depth
+    vec3 clipPos;
+    clipPos.xy = texcoord.xy * 2.0 - 1.0;
+    clipPos.z = Zdb * 2.0 - 1.0;
+    vec4 eyePos = frameTransform._invProjection[side] * vec4(clipPos.xyz, 1.0);
+    return eyePos.xyz / eyePos.w;
+}
+
 ivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {
     ivec2 fragPos = ivec2(glFragCoord.xy);
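
The new evalEyePositionFromZdb maps a window-space depth in [0, 1] and texcoords in [0, 1] to NDC in [-1, 1], multiplies by the cached inverse projection, and applies the homogeneous divide. For reference, the same math on the CPU side in glm (assuming the default [0, 1] GL depth range; glm::unProject does the equivalent if you also want the viewport mapping handled):

#include <glm/glm.hpp>

// CPU-side mirror of the shader's evalEyePositionFromZdb.
glm::vec3 eyePositionFromZdb(const glm::mat4& invProjection, float Zdb, glm::vec2 texcoord) {
    // [0, 1] window coordinates -> [-1, 1] normalized device coordinates.
    glm::vec3 clipPos(texcoord * 2.0f - 1.0f, Zdb * 2.0f - 1.0f);
    glm::vec4 eyePos = invProjection * glm::vec4(clipPos, 1.0f);
    return glm::vec3(eyePos) / eyePos.w;   // homogeneous divide
}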

@@ -26,14 +26,14 @@ void main(void) {
     ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);

     float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;
-    float Zeye = evalZeyeFromZdb(Zdb);
-    /* if (Zeye <= -getPosLinearDepthFar()) {
+    /* float Zeye = evalZeyeFromZdb(Zdb);
+    if (Zeye <= -getPosLinearDepthFar()) {
         outFragColor = vec4(0.5, 0.5, 0.0, 0.0);
         return;
     }*/

     // The position of the pixel fragment in Eye space then in world space
-    vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, texcoordPos);
+    vec3 eyePos = evalEyePositionFromZdb(stereoSide.x, Zdb, texcoordPos);
     vec3 worldPos = (frameTransform._viewInverse * cameraCorrection._correction * vec4(eyePos, 1.0)).xyz;

     vec3 prevEyePos = (cameraCorrection._prevCorrectionInverse * frameTransform._prevView * vec4(worldPos, 1.0)).xyz;
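
Taken together, the velocity shader now reprojects each pixel through the chain depth -> eye -> world -> previous eye, and the screen-space delta between the two frames becomes the motion vector. A condensed sketch of that chain, ignoring the cameraCorrection terms and assuming hypothetical prevView/prevProjection inputs (only _prevView is visible in this hunk):

#include <glm/glm.hpp>

// Screen-space motion vector for one pixel: its texcoord this frame minus the
// texcoord where the same world position landed in the previous frame.
glm::vec2 computeVelocity(const glm::mat4& invProjection, const glm::mat4& viewInverse,
                          const glm::mat4& prevView, const glm::mat4& prevProjection,
                          float Zdb, glm::vec2 texcoord) {
    // Current eye-space position, reconstructed via the exact inverse projection.
    glm::vec3 ndc(texcoord * 2.0f - 1.0f, Zdb * 2.0f - 1.0f);
    glm::vec4 eyePos = invProjection * glm::vec4(ndc, 1.0f);
    eyePos /= eyePos.w;

    // Into world space, then into the previous frame's clip space.
    glm::vec4 worldPos = viewInverse * eyePos;
    glm::vec4 prevClip = prevProjection * prevView * worldPos;
    glm::vec2 prevTexcoord = (glm::vec2(prevClip) / prevClip.w) * 0.5f + 0.5f;

    return texcoord - prevTexcoord;
}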