Allow the lighting framebuffer to use the camera correction to stabilize lights in the HMD

Brad Davis 2016-08-04 12:46:59 -07:00
parent 94fe2a8824
commit cbe1f6dbf5
8 changed files with 52 additions and 12 deletions

View file

@@ -676,3 +676,10 @@ void GLBackend::cleanupTrash() const {
glDeleteQueries((GLsizei)ids.size(), ids.data());
}
}
void GLBackend::setCameraCorrection(const Mat4& correction) {
_transform._correction._correction = correction;
_transform._correction._correctionInverse = glm::inverse(correction);
_pipeline._cameraCorrectionBuffer.edit<CameraCorrection>() = _transform._correction;
_pipeline._cameraCorrectionBuffer._buffer->flush();
}
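
A minimal usage sketch, not part of this commit: a caller (for instance the display-plugin glue) would push the per-frame HMD pose delta into the backend through the function added above. applyHmdCorrection and hmdPoseDelta are hypothetical names used only for illustration.

#include <glm/glm.hpp>

// Hypothetical per-frame hook. GLBackend::setCameraCorrection (added above) caches
// the matrix and its inverse and flushes both into the shared correction UBO, so
// every subsequent draw in the frame sees the updated values.
void applyHmdCorrection(gpu::gl::GLBackend& backend, const glm::mat4& hmdPoseDelta) {
    backend.setCameraCorrection(hmdPoseDelta);
}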

View file

@@ -250,6 +250,11 @@ protected:
void updateTransform(const Batch& batch);
void resetTransformStage();
struct CameraCorrection {
Mat4 _correction;
Mat4 _correctionInverse;
};
struct TransformStageState {
using CameraBufferElement = TransformCamera;
using TransformCameras = std::vector<CameraBufferElement>;
@@ -267,7 +272,8 @@ protected:
bool _viewIsCamera{ false };
bool _skybox { false };
Transform _view;
Mat4 _correction;
CameraCorrection _correction;
Mat4 _projection;
Vec4i _viewport { 0, 0, 1, 1 };
Vec2 _depthRange { 0.0f, 1.0f };
@@ -321,10 +327,13 @@ protected:
PipelinePointer _pipeline;
GLuint _program { 0 };
GLint _cameraCorrectionLocation { -1 };
GLShader* _programShader { nullptr };
bool _invalidProgram { false };
State::Data _stateCache { State::DEFAULT };
BufferView _cameraCorrectionBuffer { gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(CameraCorrection), nullptr )) };
State::Data _stateCache{ State::DEFAULT };
State::Signature _stateSignatureCache { 0 };
GLState* _state { nullptr };
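
The CameraCorrection mirror declared above has to match, byte for byte, the GLSL cameraCorrectionBuffer block added later in this commit, since the buffer is filled with a straight struct copy. A small standalone layout check, assuming std140 packing for the block (sketch only; the real struct is a protected member of GLBackend):

#include <glm/glm.hpp>

// Under std140 two mat4 members pack back to back, 64 bytes each, so the CPU-side
// mirror must be exactly 128 bytes with no padding for the edit/flush above to line up.
struct CameraCorrectionMirror {
    glm::mat4 _correction;        // applied on top of the frame's view
    glm::mat4 _correctionInverse; // cached so shaders can undo the correction cheaply
};
static_assert(sizeof(CameraCorrectionMirror) == 2 * sizeof(glm::mat4),
              "camera correction struct must match the std140 UBO layout");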

View file

@@ -34,6 +34,7 @@ void GLBackend::do_setPipeline(Batch& batch, size_t paramOffset) {
_pipeline._pipeline.reset();
_pipeline._program = 0;
_pipeline._cameraCorrectionLocation = -1;
_pipeline._programShader = nullptr;
_pipeline._invalidProgram = true;
@@ -53,6 +54,7 @@ void GLBackend::do_setPipeline(Batch& batch, size_t paramOffset) {
_pipeline._program = glprogram;
_pipeline._programShader = pipelineObject->_program;
_pipeline._invalidProgram = true;
_pipeline._cameraCorrectionLocation = pipelineObject->_cameraCorrection;
}
// Now for the state
@@ -68,6 +70,10 @@ void GLBackend::do_setPipeline(Batch& batch, size_t paramOffset) {
// This should be done on Pipeline::update...
if (_pipeline._invalidProgram) {
glUseProgram(_pipeline._program);
if (_pipeline._cameraCorrectionLocation != -1) {
auto cameraCorrectionBuffer = syncGPUObject(*_pipeline._cameraCorrectionBuffer._buffer);
glBindBufferRange(GL_UNIFORM_BUFFER, _pipeline._cameraCorrectionLocation, cameraCorrectionBuffer->_id, 0, sizeof(CameraCorrection));
}
(void) CHECK_GL_ERROR();
_pipeline._invalidProgram = false;
}
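
For readers less familiar with the gpu wrapper, here is roughly what the bind above amounts to in raw OpenGL. This is a sketch with assumed helper and parameter names, not engine code; in the engine the slot comes from the shader reflection (findLocation) rather than a glGetUniformBlockIndex query.

// Assumes an OpenGL 3.1+ loader header and <glm/glm.hpp> are available.
void bindCameraCorrection(GLuint program, GLuint correctionUbo, GLuint bindingPoint) {
    // Route the shader's "cameraCorrectionBuffer" block to a binding point, then
    // attach the correction UBO to that binding point for the two-matrix range.
    GLuint blockIndex = glGetUniformBlockIndex(program, "cameraCorrectionBuffer");
    if (blockIndex != GL_INVALID_INDEX) {
        glUniformBlockBinding(program, blockIndex, bindingPoint);
        glBindBufferRange(GL_UNIFORM_BUFFER, bindingPoint, correctionUbo,
                          0, 2 * sizeof(glm::mat4)); // _correction + _correctionInverse
    }
}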

View file

@@ -13,10 +13,6 @@
using namespace gpu;
using namespace gpu::gl;
void GLBackend::setCameraCorrection(const Mat4& correction) {
_transform._correction = correction;
}
// Transform Stage
void GLBackend::do_setModelTransform(Batch& batch, size_t paramOffset) {
}
@@ -88,10 +84,10 @@ void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const Stereo
if (_invalidView) {
// Apply the correction
if (_viewIsCamera && _correction != glm::mat4()) {
PROFILE_RANGE_EX("Correct Camera!", 0xFFFF0000, 1);
if (_viewIsCamera && _correction._correction != glm::mat4()) {
// FIXME should I switch to using the camera correction buffer in Transform.slf and leave this out?
Transform result;
_view.mult(result, _view, _correction);
_view.mult(result, _view, _correction._correction);
if (_skybox) {
result.setTranslation(vec3());
}
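
In glm terms the composition above is a right-multiplication of the correction onto the camera's view transform; a sketch under the assumption that Transform::mult composes as result = left * right (not code from this commit):

#include <glm/glm.hpp>

// The corrected camera is the original view with the HMD pose delta folded in;
// for the skybox case the engine code above additionally zeroes the translation.
glm::mat4 correctedView(const glm::mat4& view, const glm::mat4& correction) {
    return view * correction;
}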

View file

@@ -48,6 +48,10 @@ GLPipeline* GLPipeline::sync(const GLBackend& backend, const Pipeline& pipeline)
Backend::setGPUObject(pipeline, object);
}
// Special case for view correction matrices: any pipeline that declares the correction buffer
// uniform block will have it provided automatically, with no client code necessary.
// Required for stable lighting in the HMD.
object->_cameraCorrection = shader->getBuffers().findLocation("cameraCorrectionBuffer");
object->_program = programObject;
object->_state = stateObject;

View file

@@ -18,6 +18,9 @@ public:
GLShader* _program { nullptr };
GLState* _state { nullptr };
// A bit of a hack: any pipeline may need the camera correction buffer at execution time, so
// we store whether a given pipeline has declared the uniform buffer for it.
int32 _cameraCorrection { -1 };
};
} }

View file

@@ -67,6 +67,7 @@ enum DeferredShader_MapSlot {
};
enum DeferredShader_BufferSlot {
DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT = 0,
CAMERA_CORRECTION_BUFFER_SLOT,
SCATTERING_PARAMETERS_BUFFER_SLOT,
LIGHTING_MODEL_BUFFER_SLOT = render::ShapePipeline::Slot::LIGHTING_MODEL,
LIGHT_GPU_SLOT = render::ShapePipeline::Slot::LIGHT,
@@ -181,10 +182,12 @@ static void loadLightProgram(const char* vertSource, const char* fragSource, boo
slotBindings.insert(gpu::Shader::Binding(std::string("scatteringSpecularBeckmann"), SCATTERING_SPECULAR_UNIT));
slotBindings.insert(gpu::Shader::Binding(std::string("cameraCorrectionBuffer"), CAMERA_CORRECTION_BUFFER_SLOT));
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT));
slotBindings.insert(gpu::Shader::Binding(std::string("lightingModelBuffer"), LIGHTING_MODEL_BUFFER_SLOT));
slotBindings.insert(gpu::Shader::Binding(std::string("subsurfaceScatteringParametersBuffer"), SCATTERING_PARAMETERS_BUFFER_SLOT));
slotBindings.insert(gpu::Shader::Binding(std::string("lightBuffer"), LIGHT_GPU_SLOT));
gpu::Shader::makeProgram(*program, slotBindings);

View file

@@ -13,6 +13,15 @@
<@func declareDeferredFrameTransform()@>
struct CameraCorrection {
mat4 _correction;
mat4 _correctionInverse;
};
uniform cameraCorrectionBuffer {
CameraCorrection cameraCorrection;
};
struct DeferredFrameTransform {
vec4 _pixelInfo;
vec4 _invPixelInfo;
@@ -29,7 +38,10 @@ uniform deferredFrameTransformBuffer {
};
DeferredFrameTransform getDeferredFrameTransform() {
return frameTransform;
DeferredFrameTransform result = frameTransform;
result._view = result._view * cameraCorrection._correctionInverse;
result._viewInverse = result._viewInverse * cameraCorrection._correction;
return result;
}
vec2 getWidthHeight(int resolutionLevel) {
@@ -67,11 +79,11 @@ float getPosLinearDepthFar() {
}
mat4 getViewInverse() {
return frameTransform._viewInverse;
return frameTransform._viewInverse * cameraCorrection._correction;
}
mat4 getView() {
return frameTransform._view;
return frameTransform._view * cameraCorrection._correctionInverse;
}
bool isStereo() {
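
A CPU-side glm sketch of what the corrected accessors give the deferred lighting shaders (assumptions and hypothetical names, not shader code from this commit): eye-space positions reconstructed from the G-buffer reach world space through the view inverse with the correction applied, so world-space lights stay fixed while the HMD view is re-projected.

#include <glm/glm.hpp>

// Mirrors getViewInverse() above: frameTransform._viewInverse * cameraCorrection._correction.
glm::vec4 eyeToWorld(const glm::mat4& frameViewInverse,
                     const glm::mat4& correction,
                     const glm::vec4& eyePosition) {
    glm::mat4 correctedViewInverse = frameViewInverse * correction;
    return correctedViewInverse * eyePosition;
}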