Mirror of https://github.com/HifiExperiments/overte.git (synced 2025-05-29 18:41:00 +02:00)

Commit c6d598cc88 (parent 57ff1c54e1): Jitter is now set through a batch command

17 changed files with 124 additions and 103 deletions
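The change moves jitter out of gpu::Context state (Context::setProjectionJitter / getProjectionJitter, removed below) and into a regular batch command: render jobs record it with Batch::setProjectionJitter and the GL backend replays it through do_setProjectionJitter. A minimal sketch, not taken verbatim from the commit, of how a render job now records jitter alongside the other transforms; the function name and variables are illustrative:

    // Sketch only: record the unjittered projection plus a sub-pixel jitter into the batch.
    // GLBackend::do_setProjectionJitter later reads the two floats back out of the batch params.
    void recordJitteredCamera(gpu::Batch& batch, const glm::mat4& projMat,
                              const Transform& viewMat, const glm::vec2& jitter) {
        batch.setProjectionTransform(projMat);
        batch.setProjectionJitter(jitter.x, jitter.y);
        batch.setViewTransform(viewMat);
    }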
@@ -44,8 +44,9 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
     (&::gpu::gl::GLBackend::do_setModelTransform),
     (&::gpu::gl::GLBackend::do_setViewTransform),
     (&::gpu::gl::GLBackend::do_setProjectionTransform),
+    (&::gpu::gl::GLBackend::do_setProjectionJitter),
     (&::gpu::gl::GLBackend::do_setViewportTransform),
     (&::gpu::gl::GLBackend::do_setDepthRangeTransform),
 
     (&::gpu::gl::GLBackend::do_setPipeline),
@@ -166,7 +167,18 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
             case Batch::COMMAND_drawIndexedInstanced:
             case Batch::COMMAND_multiDrawIndirect:
             case Batch::COMMAND_multiDrawIndexedIndirect:
-                _transform.preUpdate(_commandIndex, _stereo);
+            {
+                Vec2u outputSize{ 1,1 };
+
+                if (_output._framebuffer) {
+                    outputSize.x = _output._framebuffer->getWidth();
+                    outputSize.y = _output._framebuffer->getHeight();
+                } else if (glm::dot(_transform._projectionJitter, _transform._projectionJitter)>0.0f) {
+                    qCWarning(gpugllogging) << "Jittering needs to have a frame buffer to be set";
+                }
+
+                _transform.preUpdate(_commandIndex, _stereo, outputSize);
+            }
                 break;
 
             case Batch::COMMAND_disableContextStereo:
@@ -179,8 +191,10 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
 
             case Batch::COMMAND_setViewportTransform:
             case Batch::COMMAND_setViewTransform:
-            case Batch::COMMAND_setProjectionTransform: {
-                CommandCall call = _commandCalls[(*command)];
+            case Batch::COMMAND_setProjectionTransform:
+            case Batch::COMMAND_setProjectionJitter:
+            {
+                CommandCall call = _commandCalls[(*command)];
                 (this->*(call))(batch, *offset);
                 break;
             }
@@ -254,6 +268,8 @@ void GLBackend::render(const Batch& batch) {
     if (!batch.isStereoEnabled()) {
         _stereo._enable = false;
     }
+    // Reset jitter
+    _transform._projectionJitter = Vec2(0.0f, 0.0f);
 
     {
         PROFILE_RANGE(render_gpu_gl_detail, "Transfer");
@@ -126,6 +126,7 @@ public:
     virtual void do_setModelTransform(const Batch& batch, size_t paramOffset) final;
     virtual void do_setViewTransform(const Batch& batch, size_t paramOffset) final;
     virtual void do_setProjectionTransform(const Batch& batch, size_t paramOffset) final;
+    virtual void do_setProjectionJitter(const Batch& batch, size_t paramOffset) final;
     virtual void do_setViewportTransform(const Batch& batch, size_t paramOffset) final;
     virtual void do_setDepthRangeTransform(const Batch& batch, size_t paramOffset) final;
 
@@ -367,6 +368,7 @@ protected:
         Mat4 _projection;
         Vec4i _viewport { 0, 0, 1, 1 };
         Vec2 _depthRange { 0.0f, 1.0f };
+        Vec2 _projectionJitter{ 0.0f, 0.0f };
        bool _invalidView { false };
        bool _invalidProj { false };
        bool _invalidViewport { false };
@@ -379,7 +381,7 @@ protected:
         mutable List::const_iterator _camerasItr;
         mutable size_t _currentCameraOffset{ INVALID_OFFSET };
 
-        void preUpdate(size_t commandIndex, const StereoState& stereo);
+        void preUpdate(size_t commandIndex, const StereoState& stereo, Vec2u framebufferSize);
         void update(size_t commandIndex, const StereoState& stereo) const;
         void bindCurrentCamera(int stereoSide) const;
     } _transform;
@@ -28,6 +28,12 @@ void GLBackend::do_setProjectionTransform(const Batch& batch, size_t paramOffset
     _transform._invalidProj = true;
 }
 
+void GLBackend::do_setProjectionJitter(const Batch& batch, size_t paramOffset) {
+    _transform._projectionJitter.x = batch._params[paramOffset]._float;
+    _transform._projectionJitter.y = batch._params[paramOffset+1]._float;
+    _transform._invalidProj = true;
+}
+
 void GLBackend::do_setViewportTransform(const Batch& batch, size_t paramOffset) {
     memcpy(&_transform._viewport, batch.readData(batch._params[paramOffset]._uint), sizeof(Vec4i));
@@ -90,7 +96,7 @@ void GLBackend::syncTransformStateCache() {
     _transform._enabledDrawcallInfoBuffer = false;
 }
 
-void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const StereoState& stereo) {
+void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const StereoState& stereo, Vec2u framebufferSize) {
     // Check all the dirty flags and update the state accordingly
     if (_invalidViewport) {
         _camera._viewport = glm::vec4(_viewport);
@@ -117,18 +123,19 @@ void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const Stereo
 
     if (_invalidView || _invalidProj || _invalidViewport) {
         size_t offset = _cameraUboSize * _cameras.size();
+        Vec2 finalJitter = _projectionJitter / Vec2(framebufferSize);
         _cameraOffsets.push_back(TransformStageState::Pair(commandIndex, offset));
 
         if (stereo.isStereo()) {
 #ifdef GPU_STEREO_CAMERA_BUFFER
-            _cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, _view), _camera.getEyeCamera(1, stereo, _view)));
+            _cameras.push_back(CameraBufferElement(_camera.getEyeCamera(0, stereo, _view, finalJitter), _camera.getEyeCamera(1, stereo, _view, finalJitter)));
 #else
             _cameras.push_back((_camera.getEyeCamera(0, stereo, _view)));
             _cameras.push_back((_camera.getEyeCamera(1, stereo, _view)));
 #endif
         } else {
 #ifdef GPU_STEREO_CAMERA_BUFFER
-            _cameras.push_back(CameraBufferElement(_camera.recomputeDerived(_view)));
+            _cameras.push_back(CameraBufferElement(_camera.getMonoCamera(_view, finalJitter)));
 #else
             _cameras.push_back((_camera.recomputeDerived(_view)));
 #endif
@@ -265,6 +265,12 @@ void Batch::setProjectionTransform(const Mat4& proj) {
     _params.emplace_back(cacheData(sizeof(Mat4), &proj));
 }
 
+void Batch::setProjectionJitter(float jx, float jy) {
+    ADD_COMMAND(setProjectionJitter);
+    _params.emplace_back(jx);
+    _params.emplace_back(jy);
+}
+
 void Batch::setViewportTransform(const Vec4i& viewport) {
     ADD_COMMAND(setViewportTransform);
 
@@ -167,6 +167,7 @@ public:
     void resetViewTransform() { setViewTransform(Transform(), false); }
     void setViewTransform(const Transform& view, bool camera = true);
     void setProjectionTransform(const Mat4& proj);
+    void setProjectionJitter(float jx = 0.0f, float jy = 0.0f);
     // Viewport is xy = low left corner in framebuffer, zw = width height of the viewport, expressed in pixels
     void setViewportTransform(const Vec4i& viewport);
     void setDepthRangeTransform(float nearDepth, float farDepth);
@@ -292,8 +293,9 @@ public:
 
         COMMAND_setModelTransform,
         COMMAND_setViewTransform,
         COMMAND_setProjectionTransform,
+        COMMAND_setProjectionJitter,
         COMMAND_setViewportTransform,
         COMMAND_setDepthRangeTransform,
 
         COMMAND_setPipeline,
@@ -174,11 +174,6 @@ void Context::getStereoViews(mat4* eyeViews) const {
     }
 }
 
-void Context::setProjectionJitter(float jx, float jy) {
-    _projectionJitter.x = jx;
-    _projectionJitter.y = jy;
-}
-
 void Context::downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) {
     _backend->downloadFramebuffer(srcFramebuffer, region, destImage);
 }
@@ -227,7 +222,7 @@ const Backend::TransformCamera& Backend::TransformCamera::recomputeDerived(const
     return *this;
 }
 
-Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const StereoState& _stereo, const Transform& xformView) const {
+Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const StereoState& _stereo, const Transform& xformView, Vec2 normalizedJitter) const {
     TransformCamera result = *this;
     Transform offsetTransform = xformView;
     if (!_stereo._skybox) {
@@ -236,6 +231,9 @@ Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const S
         // FIXME: If "skybox" the ipd is set to 0 for now, let s try to propose a better solution for this in the future
     }
     result._projection = _stereo._eyeProjections[eye];
+    normalizedJitter.x *= 2.0f;
+    result._projection[2][0] += normalizedJitter.x;
+    result._projection[2][1] += normalizedJitter.y;
     result.recomputeDerived(offsetTransform);
 
     result._stereoInfo = Vec4(1.0f, (float)eye, 0.0f, 0.0f);
@@ -243,6 +241,14 @@ Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const S
     return result;
 }
 
+Backend::TransformCamera Backend::TransformCamera::getMonoCamera(const Transform& xformView, Vec2 normalizedJitter) const {
+    TransformCamera result = *this;
+    result._projection[2][0] += normalizedJitter.x;
+    result._projection[2][1] += normalizedJitter.y;
+    result.recomputeDerived(xformView);
+    return result;
+}
+
 // Counters for Buffer and Texture usage in GPU/Context
 
 ContextMetricSize Backend::freeGPUMemSize;
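A note on why the offset is added to _projection[2][0] and _projection[2][1]: with glm's column-major indexing these are the third-column x and y entries, the terms multiplied by eye-space z. A rough sanity check, assuming a standard perspective projection whose clip-space w ends up equal to -z_eye:

    x_clip = P[0][0] * x_eye + P[2][0] * z_eye
    w_clip = -z_eye
    x_ndc  = x_clip / w_clip

so adding a constant j to P[2][0] shifts x_ndc by a fixed amount for every vertex, i.e. the whole image is translated by a sub-pixel offset, which is what temporal antialiasing jitter needs. The jitter handed to these functions has already been divided by the framebuffer size (finalJitter in the preUpdate hunk above). In getEyeCamera the x component is doubled, presumably because each eye covers only half of the shared framebuffer width; the removed JitterSample code further down applied the same jx *= 2.0f in its stereo path.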
@@ -70,7 +70,10 @@ public:
     class TransformCamera : public _TransformCamera {
     public:
         const Backend::TransformCamera& recomputeDerived(const Transform& xformView) const;
-        TransformCamera getEyeCamera(int eye, const StereoState& stereo, const Transform& xformView) const;
+        // Jitter should be divided by framebuffer size
+        TransformCamera getMonoCamera(const Transform& xformView, Vec2 normalizedJitter) const;
+        // Jitter should be divided by framebuffer size
+        TransformCamera getEyeCamera(int eye, const StereoState& stereo, const Transform& xformView, Vec2 normalizedJitter) const;
     };
 
 
@@ -130,7 +133,6 @@ protected:
     friend class Context;
     mutable ContextStats _stats;
     StereoState _stereo;
-
 };
 
 class Context {
@@ -202,8 +204,6 @@ public:
     void setStereoViews(const mat4 eyeViews[2]);
     void getStereoProjections(mat4* eyeProjections) const;
     void getStereoViews(mat4* eyeViews) const;
-    void setProjectionJitter(float jx, float jy);
-    gpu::Vec2 getProjectionJitter() const { return _projectionJitter; }
 
     // Downloading the Framebuffer is a synchronous action that is not efficient.
     // It s here for convenience to easily capture a snapshot
@@ -250,7 +250,6 @@ protected:
     FramePointer _currentFrame;
     RangeTimerPointer _frameRangeTimer;
     StereoState _stereo;
-    gpu::Vec2 _projectionJitter{ 0.0f, 0.0f };
 
     // Sampled at the end of every frame, the stats of all the counters
     mutable ContextStats _frameStats;
@@ -1,22 +1,22 @@
 // glsl / C++ compatible source as interface for FadeEffect
 #ifdef __cplusplus
-# define MAT4 Mat4
-# define VEC4 Vec4
-# define MUTABLE mutable
+# define _MAT4 Mat4
+# define _VEC4 Vec4
+# define _MUTABLE mutable
 #else
-# define MAT4 mat4
-# define VEC4 vec4
-# define MUTABLE
+# define _MAT4 mat4
+# define _VEC4 vec4
+# define _MUTABLE
 #endif
 
 struct _TransformCamera {
-    MUTABLE MAT4 _view;
-    MUTABLE MAT4 _viewInverse;
-    MUTABLE MAT4 _projectionViewUntranslated;
-    MAT4 _projection;
-    MUTABLE MAT4 _projectionInverse;
-    VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.
-    MUTABLE VEC4 _stereoInfo;
+    _MUTABLE _MAT4 _view;
+    _MUTABLE _MAT4 _viewInverse;
+    _MUTABLE _MAT4 _projectionViewUntranslated;
+    _MAT4 _projection;
+    _MUTABLE _MAT4 _projectionInverse;
+    _VEC4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.
+    _MUTABLE _VEC4 _stereoInfo;
 };
 
 // <@if 1@>
@@ -499,7 +499,7 @@ void JitterSample::configure(const Config& config) {
     _scale = config.scale;
 }
 
-void JitterSample::run(const render::RenderContextPointer& renderContext) {
+void JitterSample::run(const render::RenderContextPointer& renderContext, Output& jitter) {
     auto& current = _sampleSequence.currentIndex;
     if (!_freeze) {
         if (current >= 0) {
@@ -508,40 +508,7 @@ void JitterSample::run(const render::RenderContextPointer& renderContext) {
             current = -1;
         }
     }
-    auto args = renderContext->args;
-    auto viewFrustum = args->getViewFrustum();
-
-    auto jit = _sampleSequence.offsets[(current < 0 ? SEQUENCE_LENGTH : current)];
-    auto width = (float)args->_viewport.z;
-    auto height = (float)args->_viewport.w;
-
-    auto jx = jit.x / width;
-    auto jy = jit.y / height;
-
-    if (!args->isStereo()) {
-        auto projMat = viewFrustum.getProjection();
-
-        projMat[2][0] += jx;
-        projMat[2][1] += jy;
-
-        viewFrustum.setProjection(projMat);
-        viewFrustum.calculate();
-        args->pushViewFrustum(viewFrustum);
-    } else {
-        mat4 projMats[2];
-        args->_context->getStereoProjections(projMats);
-
-        jx *= 2.0f;
-
-        for (int i = 0; i < 2; i++) {
-            auto& projMat = projMats[i];
-            projMat[2][0] += jx;
-            projMat[2][1] += jy;
-        }
-
-        args->_context->setStereoProjections(projMats);
-    }
-    args->_context->setProjectionJitter(jx, jy);
+    jitter = _sampleSequence.offsets[(current < 0 ? SEQUENCE_LENGTH : current)];
 }
 
 
@@ -62,10 +62,11 @@ public:
     };
 
     using Config = JitterSampleConfig;
-    using JobModel = render::Job::Model<JitterSample, Config>;
+    using Output = glm::vec2;
+    using JobModel = render::Job::ModelO<JitterSample, Output, Config>;
 
     void configure(const Config& config);
-    void run(const render::RenderContextPointer& renderContext);
+    void run(const render::RenderContextPointer& renderContext, Output& jitter);
 
 private:
 
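JitterSample thus changes from a side-effecting job (render::Job::Model) into one that declares an Output, so the per-frame jitter travels through the task graph instead of being pushed into the Context. A short sketch of the wiring, using the job names that appear in the RenderDeferredTask hunks below:

    // Sketch, mirroring the task-graph changes further down:
    const auto jitter = task.addJob<JitterSample>("JitterCam");        // ModelO job: its Output (glm::vec2) comes back as a Varying
    const auto deferredFrameTransform =
        task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform", jitter);  // ModelIO job: consumes it as Input
    const auto opaqueInputs =
        DrawStateSortDeferred::Inputs(opaques, lightingModel, jitter).asVarying();      // or packed into a VaryingSet3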
@@ -18,7 +18,7 @@ DeferredFrameTransform::DeferredFrameTransform() {
     _frameTransformBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(FrameTransform), (const gpu::Byte*) &frameTransform));
 }
 
-void DeferredFrameTransform::update(RenderArgs* args) {
+void DeferredFrameTransform::update(RenderArgs* args, glm::vec2 jitter) {
 
     // Update the depth info with near and far (same for stereo)
     auto nearZ = args->getViewFrustum().getNearClip();
@@ -38,46 +38,53 @@ void DeferredFrameTransform::update(RenderArgs* args) {
 
     args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.projectionMono);
 
+    // There may be some sort of mismatch here if the viewport size isn't the same as the frame buffer size as
+    // jitter is normalized by frame buffer size in TransformCamera. But we should be safe.
+    jitter.x /= args->_viewport.z;
+    jitter.y /= args->_viewport.w;
+
     // Running in stereo ?
     bool isStereo = args->isStereo();
     if (!isStereo) {
-        frameTransformBuffer.projection[0] = frameTransformBuffer.projectionMono;
+        frameTransformBuffer.projectionUnjittered[0] = frameTransformBuffer.projectionMono;
+        frameTransformBuffer.invProjectionUnjittered[0] = glm::inverse(frameTransformBuffer.projectionUnjittered[0]);
+
         frameTransformBuffer.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
         frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
-        frameTransformBuffer.invProjection[0] = glm::inverse(frameTransformBuffer.projection[0]);
 
-        frameTransformBuffer.projectionUnjittered[0] = frameTransformBuffer.projection[0];
-        frameTransformBuffer.projectionUnjittered[0][2][0] -= args->_context->getProjectionJitter().x;
-        frameTransformBuffer.projectionUnjittered[0][2][1] -= args->_context->getProjectionJitter().y;
-        frameTransformBuffer.invProjectionUnjittered[0] = glm::inverse(frameTransformBuffer.projectionUnjittered[0]);
+        frameTransformBuffer.projection[0] = frameTransformBuffer.projectionUnjittered[0];
+        frameTransformBuffer.projection[0][2][0] += jitter.x;
+        frameTransformBuffer.projection[0][2][1] += jitter.y;
+        frameTransformBuffer.invProjection[0] = glm::inverse(frameTransformBuffer.projection[0]);
     } else {
 
         mat4 projMats[2];
         mat4 eyeViews[2];
         args->_context->getStereoProjections(projMats);
         args->_context->getStereoViews(eyeViews);
 
+        jitter.x *= 2.0f;
+
         for (int i = 0; i < 2; i++) {
             // Compose the mono Eye space to Stereo clip space Projection Matrix
             auto sideViewMat = projMats[i] * eyeViews[i];
-            frameTransformBuffer.projection[i] = sideViewMat;
-            frameTransformBuffer.invProjection[i] = glm::inverse(sideViewMat);
-
-            frameTransformBuffer.projectionUnjittered[i] = frameTransformBuffer.projection[i];
-            frameTransformBuffer.projectionUnjittered[i][2][0] -= args->_context->getProjectionJitter().x;
-            frameTransformBuffer.projectionUnjittered[i][2][1] -= args->_context->getProjectionJitter().y;
-            frameTransformBuffer.invProjectionUnjittered[i] = glm::inverse(frameTransformBuffer.projectionUnjittered[i]);
+            frameTransformBuffer.projectionUnjittered[i] = sideViewMat;
+            frameTransformBuffer.invProjectionUnjittered[i] = glm::inverse(sideViewMat);
+
+            frameTransformBuffer.projection[i] = frameTransformBuffer.projectionUnjittered[i];
+            frameTransformBuffer.projection[i][2][0] += jitter.x;
+            frameTransformBuffer.projection[i][2][1] += jitter.y;
+            frameTransformBuffer.invProjection[i] = glm::inverse(frameTransformBuffer.projection[i]);
         }
 
         frameTransformBuffer.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
         frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / (float)(args->_viewport.z >> 1), 1.0f / args->_viewport.w, 0.0f, 0.0f);
 
     }
 }
 
-void GenerateDeferredFrameTransform::run(const render::RenderContextPointer& renderContext, DeferredFrameTransformPointer& frameTransform) {
+void GenerateDeferredFrameTransform::run(const render::RenderContextPointer& renderContext, const Input& jitter, Output& frameTransform) {
     if (!frameTransform) {
         frameTransform = std::make_shared<DeferredFrameTransform>();
     }
-    frameTransform->update(renderContext->args);
+    frameTransform->update(renderContext->args, jitter);
 }
@@ -25,7 +25,7 @@ public:
 
     DeferredFrameTransform();
 
-    void update(RenderArgs* args);
+    void update(RenderArgs* args, glm::vec2 jitter);
 
     UniformBufferView getFrameTransformBuffer() const { return _frameTransformBuffer; }
 
@@ -72,11 +72,14 @@ using DeferredFrameTransformPointer = std::shared_ptr<DeferredFrameTransform>;
 
 class GenerateDeferredFrameTransform {
 public:
-    using JobModel = render::Job::ModelO<GenerateDeferredFrameTransform, DeferredFrameTransformPointer>;
+
+    using Input = glm::vec2;
+    using Output = DeferredFrameTransformPointer;
+    using JobModel = render::Job::ModelIO<GenerateDeferredFrameTransform, Input, Output>;
 
     GenerateDeferredFrameTransform() {}
 
-    void run(const render::RenderContextPointer& renderContext, DeferredFrameTransformPointer& frameTransform);
+    void run(const render::RenderContextPointer& renderContext, const Input& jitter, Output& frameTransform);
 
 private:
 };
@@ -46,6 +46,7 @@ void DrawOverlay3D::run(const RenderContextPointer& renderContext, const Inputs&
 
     const auto& inItems = inputs.get0();
     const auto& lightingModel = inputs.get1();
+    const auto jitter = inputs.get2();
 
     config->setNumDrawn((int)inItems.size());
     emit config->numDrawnChanged();
@@ -75,7 +76,8 @@ void DrawOverlay3D::run(const RenderContextPointer& renderContext, const Inputs&
         args->getViewFrustum().evalViewTransform(viewMat);
 
         batch.setProjectionTransform(projMat);
-        batch.setViewTransform(viewMat);
+        batch.setProjectionJitter(jitter.x, jitter.y);
+        batch.setViewTransform(viewMat);
 
         // Setup lighting model for all items;
         batch.setUniformBuffer(render::ShapePipeline::Slot::LIGHTING_MODEL, lightingModel->getParametersBuffer());
@@ -60,7 +60,7 @@ protected:
 
 class DrawOverlay3D {
 public:
-    using Inputs = render::VaryingSet2 <render::ItemBounds, LightingModelPointer>;
+    using Inputs = render::VaryingSet3<render::ItemBounds, LightingModelPointer, glm::vec2>;
 
     using Config = DrawOverlay3DConfig;
     using JobModel = render::Job::ModelI<DrawOverlay3D, Inputs, Config>;
@@ -95,10 +95,10 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
 
     fadeEffect->build(task, opaques);
 
-    task.addJob<JitterSample>("JitterCam");
+    const auto jitter = task.addJob<JitterSample>("JitterCam");
 
     // Prepare deferred, generate the shared Deferred Frame Transform
-    const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform");
+    const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform", jitter);
     const auto lightingModel = task.addJob<MakeLightingModel>("LightingModel");
 
 
@@ -116,7 +116,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
     task.addJob<PrepareStencil>("PrepareStencil", primaryFramebuffer);
 
     // Render opaque objects in DeferredBuffer
-    const auto opaqueInputs = DrawStateSortDeferred::Inputs(opaques, lightingModel).asVarying();
+    const auto opaqueInputs = DrawStateSortDeferred::Inputs(opaques, lightingModel, jitter).asVarying();
     task.addJob<DrawStateSortDeferred>("DrawOpaqueDeferred", opaqueInputs, shapePlumber);
 
     task.addJob<EndGPURangeTimer>("OpaqueRangeTimer", opaqueRangeTimer);
@@ -205,8 +205,8 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
     const auto overlaysInFrontOpaque = filteredOverlaysOpaque.getN<FilterLayeredItems::Outputs>(0);
     const auto overlaysInFrontTransparent = filteredOverlaysTransparent.getN<FilterLayeredItems::Outputs>(0);
 
-    const auto overlayInFrontOpaquesInputs = DrawOverlay3D::Inputs(overlaysInFrontOpaque, lightingModel).asVarying();
-    const auto overlayInFrontTransparentsInputs = DrawOverlay3D::Inputs(overlaysInFrontTransparent, lightingModel).asVarying();
+    const auto overlayInFrontOpaquesInputs = DrawOverlay3D::Inputs(overlaysInFrontOpaque, lightingModel, jitter).asVarying();
+    const auto overlayInFrontTransparentsInputs = DrawOverlay3D::Inputs(overlaysInFrontTransparent, lightingModel, jitter).asVarying();
     task.addJob<DrawOverlay3D>("DrawOverlayInFrontOpaque", overlayInFrontOpaquesInputs, true);
     task.addJob<DrawOverlay3D>("DrawOverlayInFrontTransparent", overlayInFrontTransparentsInputs, false);
 
@@ -288,9 +288,10 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
 
     const auto overlaysHUDOpaque = filteredOverlaysOpaque.getN<FilterLayeredItems::Outputs>(1);
     const auto overlaysHUDTransparent = filteredOverlaysTransparent.getN<FilterLayeredItems::Outputs>(1);
+    const auto nullJitter = Varying(glm::vec2(0.0f, 0.0f));
 
-    const auto overlayHUDOpaquesInputs = DrawOverlay3D::Inputs(overlaysHUDOpaque, lightingModel).asVarying();
-    const auto overlayHUDTransparentsInputs = DrawOverlay3D::Inputs(overlaysHUDTransparent, lightingModel).asVarying();
+    const auto overlayHUDOpaquesInputs = DrawOverlay3D::Inputs(overlaysHUDOpaque, lightingModel, nullJitter).asVarying();
+    const auto overlayHUDTransparentsInputs = DrawOverlay3D::Inputs(overlaysHUDTransparent, lightingModel, nullJitter).asVarying();
     task.addJob<DrawOverlay3D>("DrawOverlayHUDOpaque", overlayHUDOpaquesInputs, true);
     task.addJob<DrawOverlay3D>("DrawOverlayHUDTransparent", overlayHUDTransparentsInputs, false);
 
@@ -382,6 +383,7 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const
 
     const auto& inItems = inputs.get0();
     const auto& lightingModel = inputs.get1();
+    const auto jitter = inputs.get2();
 
     RenderArgs* args = renderContext->args;
 
@@ -398,6 +400,7 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const
         args->getViewFrustum().evalViewTransform(viewMat);
 
         batch.setProjectionTransform(projMat);
+        batch.setProjectionJitter(jitter.x, jitter.y);
         batch.setViewTransform(viewMat);
 
         // Setup lighting model for all items;
@@ -81,7 +81,7 @@ protected:
 
 class DrawStateSortDeferred {
 public:
-    using Inputs = render::VaryingSet2<render::ItemBounds, LightingModelPointer>;
+    using Inputs = render::VaryingSet3<render::ItemBounds, LightingModelPointer, glm::vec2>;
 
     using Config = DrawStateSortConfig;
     using JobModel = render::Job::ModelI<DrawStateSortDeferred, Inputs, Config>;
@@ -98,7 +98,7 @@ void TestWindow::beginFrame() {
     _preparePrimaryFramebuffer.run(_renderContext, primaryFramebuffer);
 
     DeferredFrameTransformPointer frameTransform;
-    _generateDeferredFrameTransform.run(_renderContext, frameTransform);
+    _generateDeferredFrameTransform.run(_renderContext, glm::vec2(0.0f, 0.0f), frameTransform);
 
     LightingModelPointer lightingModel;
     _generateLightingModel.run(_renderContext, lightingModel);