Merge pull request #15157 from jherico/compositeless

Case 21730: Remove composite framebuffer from display plugins
Sam Gateau 2019-03-23 10:38:15 -07:00 committed by GitHub
commit 69a83e647e
28 changed files with 558 additions and 303 deletions

View file

@ -0,0 +1,39 @@
#version 320 es
precision highp float;
precision highp sampler2D;
layout(location = 0) in vec4 vTexCoordLR;
layout(location = 0) out vec4 FragColorL;
layout(location = 1) out vec4 FragColorR;
uniform sampler2D sampler;
// https://software.intel.com/en-us/node/503873
// sRGB ====> Linear
vec3 color_sRGBToLinear(vec3 srgb) {
return mix(pow((srgb + vec3(0.055)) / vec3(1.055), vec3(2.4)), srgb / vec3(12.92), vec3(lessThanEqual(srgb, vec3(0.04045))));
}
vec4 color_sRGBAToLinear(vec4 srgba) {
return vec4(color_sRGBToLinear(srgba.xyz), srgba.w);
}
// Linear ====> sRGB
vec3 color_LinearTosRGB(vec3 lrgb) {
return mix(vec3(1.055) * pow(vec3(lrgb), vec3(0.41666)) - vec3(0.055), vec3(lrgb) * vec3(12.92), vec3(lessThan(lrgb, vec3(0.0031308))));
}
vec4 color_LinearTosRGBA(vec4 lrgba) {
return vec4(color_LinearTosRGB(lrgba.xyz), lrgba.w);
}
// FIXME switch to texelFetch for getting from the source texture?
void main() {
//FragColorL = color_LinearTosRGBA(texture(sampler, vTexCoordLR.xy));
//FragColorR = color_LinearTosRGBA(texture(sampler, vTexCoordLR.zw));
FragColorL = texture(sampler, vTexCoordLR.xy);
FragColorR = texture(sampler, vTexCoordLR.zw);
}
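
Note: the helpers above implement the standard sRGB transfer function from the linked Intel page. The conversion calls stay commented out in main(), presumably because output gamma on Quest is driven by the sRGB swap chain and the VRAPI_MODE_FLAG_FRONT_BUFFER_SRGB flag set later in this change. A quick numeric check of the forward mapping:

    linear(c) = c / 12.92                     for c <= 0.04045
    linear(c) = ((c + 0.055) / 1.055)^2.4     for c >  0.04045

e.g. linear(0.5) ≈ 0.214; color_LinearTosRGB inverts this using exponent 1/2.4 ≈ 0.41666 and threshold 0.0031308.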

View file

@ -0,0 +1,21 @@
#version 320 es
layout(location = 0) out vec4 vTexCoordLR;
void main(void) {
const float depth = 0.0;
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, depth, 1.0),
vec4(1.0, -1.0, depth, 1.0),
vec4(-1.0, 1.0, depth, 1.0),
vec4(1.0, 1.0, depth, 1.0)
);
vec4 pos = UNIT_QUAD[gl_VertexID];
gl_Position = pos;
vTexCoordLR.xy = pos.xy;
vTexCoordLR.xy += 1.0;
vTexCoordLR.y *= 0.5;
vTexCoordLR.x *= 0.25;
vTexCoordLR.zw = vTexCoordLR.xy;
vTexCoordLR.z += 0.5;
}
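
Note: the vertex stage above packs both eyes' texture coordinates into one varying; xy addresses the left half of the side-by-side source and zw the right half, which the companion fragment shader samples into its two color outputs. Worked through:

    pos.x in [-1, 1]  --(+1)-->  [0, 2]  --(*0.25)-->  vTexCoordLR.x in [0, 0.5]   (left half)
    pos.y in [-1, 1]  --(+1)-->  [0, 2]  --(*0.5)--->  vTexCoordLR.y in [0, 1]
    vTexCoordLR.zw = vTexCoordLR.xy, then z += 0.5  ->  vTexCoordLR.z in [0.5, 1]   (right half)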

View file

@ -7,6 +7,7 @@
//
package io.highfidelity.oculus;
import android.content.res.AssetManager;
import android.os.Bundle;
import android.util.Log;
import android.view.Surface;
@ -24,7 +25,7 @@ public class OculusMobileActivity extends QtActivity implements SurfaceHolder.Ca
private static final String TAG = OculusMobileActivity.class.getSimpleName();
static { System.loadLibrary("oculusMobile"); }
private native void nativeOnCreate();
private native void nativeOnCreate(AssetManager assetManager);
private native static void nativeOnResume();
private native static void nativeOnPause();
private native static void nativeOnSurfaceChanged(Surface s);
@ -53,7 +54,7 @@ public class OculusMobileActivity extends QtActivity implements SurfaceHolder.Ca
mView = new SurfaceView(this);
mView.getHolder().addCallback(this);
nativeOnCreate();
nativeOnCreate(getAssets());
questNativeOnCreate();
}
@ -81,7 +82,7 @@ public class OculusMobileActivity extends QtActivity implements SurfaceHolder.Ca
Log.w(TAG, "QQQ onResume");
super.onResume();
//Reconnect the global reference back to handler
nativeOnCreate();
nativeOnCreate(getAssets());
questNativeOnResume();
nativeOnResume();

View file

@ -109,7 +109,7 @@ bool Basic2DWindowOpenGLDisplayPlugin::internalActivate() {
return Parent::internalActivate();
}
void Basic2DWindowOpenGLDisplayPlugin::compositeExtra() {
void Basic2DWindowOpenGLDisplayPlugin::compositeExtra(const gpu::FramebufferPointer& compositeFramebuffer) {
#if defined(Q_OS_ANDROID)
auto& virtualPadManager = VirtualPad::Manager::instance();
if(virtualPadManager.getLeftVirtualPad()->isShown()) {
@ -121,7 +121,7 @@ void Basic2DWindowOpenGLDisplayPlugin::compositeExtra() {
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setFramebuffer(_compositeFramebuffer);
batch.setFramebuffer(compositeFramebuffer);
batch.resetViewTransform();
batch.setProjectionTransform(mat4());
batch.setPipeline(_cursorPipeline);
@ -140,7 +140,7 @@ void Basic2DWindowOpenGLDisplayPlugin::compositeExtra() {
});
}
#endif
Parent::compositeExtra();
Parent::compositeExtra(compositeFramebuffer);
}
static const uint32_t MIN_THROTTLE_CHECK_FRAMES = 60;

View file

@ -33,7 +33,7 @@ public:
virtual bool isThrottled() const override;
virtual void compositeExtra() override;
virtual void compositeExtra(const gpu::FramebufferPointer&) override;
virtual void pluginUpdate() override {};

View file

@ -379,14 +379,6 @@ void OpenGLDisplayPlugin::customizeContext() {
scissorState->setDepthTest(gpu::State::DepthTest(false));
scissorState->setScissorEnable(true);
{
#ifdef Q_OS_ANDROID
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::DrawTextureGammaLinearToSRGB);
#else
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::DrawTexture);
#endif
_simplePipeline = gpu::Pipeline::create(program, scissorState);
}
{
#ifdef Q_OS_ANDROID
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::DrawTextureGammaLinearToSRGB);
@ -396,29 +388,59 @@ void OpenGLDisplayPlugin::customizeContext() {
_presentPipeline = gpu::Pipeline::create(program, scissorState);
}
// HUD operator
{
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::DrawTexture);
_hudPipeline = gpu::Pipeline::create(program, blendState);
}
{
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::DrawTextureMirroredX);
_mirrorHUDPipeline = gpu::Pipeline::create(program, blendState);
gpu::PipelinePointer hudPipeline;
{
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::DrawTexture);
hudPipeline = gpu::Pipeline::create(program, blendState);
}
gpu::PipelinePointer hudMirrorPipeline;
{
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::DrawTextureMirroredX);
hudMirrorPipeline = gpu::Pipeline::create(program, blendState);
}
_hudOperator = [=](gpu::Batch& batch, const gpu::TexturePointer& hudTexture, const gpu::FramebufferPointer& compositeFramebuffer, bool mirror) {
auto hudStereo = isStereo();
auto hudCompositeFramebufferSize = compositeFramebuffer->getSize();
std::array<glm::ivec4, 2> hudEyeViewports;
for_each_eye([&](Eye eye) {
hudEyeViewports[eye] = eyeViewport(eye);
});
if (hudPipeline && hudTexture) {
batch.enableStereo(false);
batch.setPipeline(mirror ? hudMirrorPipeline : hudPipeline);
batch.setResourceTexture(0, hudTexture);
if (hudStereo) {
for_each_eye([&](Eye eye) {
batch.setViewportTransform(hudEyeViewports[eye]);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
} else {
batch.setViewportTransform(ivec4(uvec2(0), hudCompositeFramebufferSize));
batch.draw(gpu::TRIANGLE_STRIP, 4);
}
}
};
}
{
gpu::ShaderPointer program = gpu::Shader::createProgram(shader::gpu::program::DrawTransformedTexture);
_cursorPipeline = gpu::Pipeline::create(program, blendState);
}
}
updateCompositeFramebuffer();
}
void OpenGLDisplayPlugin::uncustomizeContext() {
_presentPipeline.reset();
_cursorPipeline.reset();
_hudPipeline.reset();
_mirrorHUDPipeline.reset();
_compositeFramebuffer.reset();
_hudOperator = DEFAULT_HUD_OPERATOR;
withPresentThreadLock([&] {
_currentFrame.reset();
_lastFrame = nullptr;
@ -510,24 +532,16 @@ void OpenGLDisplayPlugin::captureFrame(const std::string& filename) const {
});
}
void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor) {
renderFromTexture(batch, texture, viewport, scissor, nullptr);
}
void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor, const gpu::FramebufferPointer& copyFbo /*=gpu::FramebufferPointer()*/) {
auto fbo = gpu::FramebufferPointer();
void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor, const gpu::FramebufferPointer& destFbo, const gpu::FramebufferPointer& copyFbo /*=gpu::FramebufferPointer()*/) {
batch.enableStereo(false);
batch.resetViewTransform();
batch.setFramebuffer(fbo);
batch.setFramebuffer(destFbo);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
batch.setStateScissorRect(scissor);
batch.setViewportTransform(viewport);
batch.setResourceTexture(0, texture);
#ifndef USE_GLES
batch.setPipeline(_presentPipeline);
#else
batch.setPipeline(_simplePipeline);
#endif
batch.draw(gpu::TRIANGLE_STRIP, 4);
if (copyFbo) {
gpu::Vec4i copyFboRect(0, 0, copyFbo->getWidth(), copyFbo->getHeight());
@ -553,7 +567,7 @@ void OpenGLDisplayPlugin::renderFromTexture(gpu::Batch& batch, const gpu::Textur
batch.setViewportTransform(copyFboRect);
batch.setStateScissorRect(copyFboRect);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, {0.0f, 0.0f, 0.0f, 1.0f});
batch.blit(fbo, sourceRect, copyFbo, copyRect);
batch.blit(destFbo, sourceRect, copyFbo, copyRect);
}
}
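
Note: the consolidated renderFromTexture signature (viewport, scissor, optional destFbo and copyFbo) replaces the previous overloads. A sketch of the call forms now used at the various call sites in this change; local names such as finalTexture and previewFbo are illustrative (the HMD code calls its preview target fbo):

    // Default/2D present: draw into whatever framebuffer is currently bound (destFbo defaults to nullptr).
    renderFromTexture(batch, finalTexture, viewport, viewport);

    // HMD preview: draw to the window and additionally blit the result into a preview FBO via copyFbo.
    renderFromTexture(batch, compositeFramebuffer->getRenderBuffer(0), viewport, scissor, nullptr, previewFbo);

    // Oculus PC: render straight into an explicit destination framebuffer.
    renderFromTexture(batch, compositeFramebuffer->getRenderBuffer(0), viewport, viewport, _outputFramebuffer);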
@ -581,41 +595,14 @@ void OpenGLDisplayPlugin::updateFrameData() {
});
}
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> OpenGLDisplayPlugin::getHUDOperator() {
auto hudPipeline = _hudPipeline;
auto hudMirrorPipeline = _mirrorHUDPipeline;
auto hudStereo = isStereo();
auto hudCompositeFramebufferSize = _compositeFramebuffer->getSize();
std::array<glm::ivec4, 2> hudEyeViewports;
for_each_eye([&](Eye eye) {
hudEyeViewports[eye] = eyeViewport(eye);
});
return [=](gpu::Batch& batch, const gpu::TexturePointer& hudTexture, bool mirror) {
if (hudPipeline && hudTexture) {
batch.enableStereo(false);
batch.setPipeline(mirror ? hudMirrorPipeline : hudPipeline);
batch.setResourceTexture(0, hudTexture);
if (hudStereo) {
for_each_eye([&](Eye eye) {
batch.setViewportTransform(hudEyeViewports[eye]);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
} else {
batch.setViewportTransform(ivec4(uvec2(0), hudCompositeFramebufferSize));
batch.draw(gpu::TRIANGLE_STRIP, 4);
}
}
};
}
void OpenGLDisplayPlugin::compositePointer() {
void OpenGLDisplayPlugin::compositePointer(const gpu::FramebufferPointer& compositeFramebuffer) {
auto& cursorManager = Cursor::Manager::instance();
const auto& cursorData = _cursorsData[cursorManager.getCursor()->getIcon()];
auto cursorTransform = DependencyManager::get<CompositorHelper>()->getReticleTransform(glm::mat4());
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setProjectionTransform(mat4());
batch.setFramebuffer(_compositeFramebuffer);
batch.setFramebuffer(compositeFramebuffer);
batch.setPipeline(_cursorPipeline);
batch.setResourceTexture(0, cursorData.texture);
batch.resetViewTransform();
@ -626,34 +613,13 @@ void OpenGLDisplayPlugin::compositePointer() {
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
} else {
batch.setViewportTransform(ivec4(uvec2(0), _compositeFramebuffer->getSize()));
batch.setViewportTransform(ivec4(uvec2(0), compositeFramebuffer->getSize()));
batch.draw(gpu::TRIANGLE_STRIP, 4);
}
});
}
void OpenGLDisplayPlugin::compositeScene() {
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setFramebuffer(_compositeFramebuffer);
batch.setViewportTransform(ivec4(uvec2(), _compositeFramebuffer->getSize()));
batch.setStateScissorRect(ivec4(uvec2(), _compositeFramebuffer->getSize()));
batch.resetViewTransform();
batch.setProjectionTransform(mat4());
batch.setPipeline(_simplePipeline);
batch.setResourceTexture(0, _currentFrame->framebuffer->getRenderBuffer(0));
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
}
void OpenGLDisplayPlugin::compositeLayers() {
updateCompositeFramebuffer();
{
PROFILE_RANGE_EX(render_detail, "compositeScene", 0xff0077ff, (uint64_t)presentCount())
compositeScene();
}
void OpenGLDisplayPlugin::compositeLayers(const gpu::FramebufferPointer& compositeFramebuffer) {
#ifdef HIFI_ENABLE_NSIGHT_DEBUG
if (false) // do not draw the HUD if running nsight debug
#endif
@ -667,23 +633,35 @@ void OpenGLDisplayPlugin::compositeLayers() {
{
PROFILE_RANGE_EX(render_detail, "compositeExtra", 0xff0077ff, (uint64_t)presentCount())
compositeExtra();
compositeExtra(compositeFramebuffer);
}
// Draw the pointer last so it's on top of everything
auto compositorHelper = DependencyManager::get<CompositorHelper>();
if (compositorHelper->getReticleVisible()) {
PROFILE_RANGE_EX(render_detail, "compositePointer", 0xff0077ff, (uint64_t)presentCount())
compositePointer();
compositePointer(compositeFramebuffer);
}
}
void OpenGLDisplayPlugin::internalPresent() {
void OpenGLDisplayPlugin::internalPresent(const gpu::FramebufferPointer& compositeFramebuffer) {
render([&](gpu::Batch& batch) {
// Note: _displayTexture must currently be the same size as the display.
uvec2 dims = _displayTexture ? uvec2(_displayTexture->getDimensions()) : getSurfacePixels();
auto viewport = ivec4(uvec2(0), dims);
renderFromTexture(batch, _displayTexture ? _displayTexture : _compositeFramebuffer->getRenderBuffer(0), viewport, viewport);
gpu::TexturePointer finalTexture;
if (_displayTexture) {
finalTexture = _displayTexture;
} else if (compositeFramebuffer) {
finalTexture = compositeFramebuffer->getRenderBuffer(0);
} else {
qCWarning(displayPlugins) << "No valid texture for output";
}
if (finalTexture) {
renderFromTexture(batch, finalTexture, viewport, viewport);
}
});
swapBuffers();
_presentRate.increment();
@ -700,7 +678,7 @@ void OpenGLDisplayPlugin::present() {
}
incrementPresentCount();
if (_currentFrame) {
if (_currentFrame && _currentFrame->framebuffer) {
auto correction = getViewCorrection();
getGLBackend()->setCameraCorrection(correction, _prevRenderView);
_prevRenderView = correction * _currentFrame->view;
@ -720,18 +698,18 @@ void OpenGLDisplayPlugin::present() {
// Write all layers to a local framebuffer
{
PROFILE_RANGE_EX(render, "composite", 0xff00ffff, frameId)
compositeLayers();
compositeLayers(_currentFrame->framebuffer);
}
// Take the composite framebuffer and send it to the output device
{
PROFILE_RANGE_EX(render, "internalPresent", 0xff00ffff, frameId)
internalPresent();
internalPresent(_currentFrame->framebuffer);
}
gpu::Backend::freeGPUMemSize.set(gpu::gl::getFreeDedicatedMemory());
} else if (alwaysPresent()) {
internalPresent();
internalPresent(nullptr);
}
_movingAveragePresent.addSample((float)(usecTimestampNow() - startPresent));
}
@ -788,7 +766,12 @@ bool OpenGLDisplayPlugin::setDisplayTexture(const QString& name) {
}
QImage OpenGLDisplayPlugin::getScreenshot(float aspectRatio) const {
auto size = _compositeFramebuffer->getSize();
if (!_currentFrame || !_currentFrame->framebuffer) {
return QImage();
}
auto compositeFramebuffer = _currentFrame->framebuffer;
auto size = compositeFramebuffer->getSize();
if (isHmd()) {
size.x /= 2;
}
@ -806,7 +789,7 @@ QImage OpenGLDisplayPlugin::getScreenshot(float aspectRatio) const {
auto glBackend = const_cast<OpenGLDisplayPlugin&>(*this).getGLBackend();
QImage screenshot(bestSize.x, bestSize.y, QImage::Format_ARGB32);
withOtherThreadContext([&] {
glBackend->downloadFramebuffer(_compositeFramebuffer, ivec4(corner, bestSize), screenshot);
glBackend->downloadFramebuffer(compositeFramebuffer, ivec4(corner, bestSize), screenshot);
});
return screenshot.mirrored(false, true);
}
@ -858,7 +841,7 @@ bool OpenGLDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
}
ivec4 OpenGLDisplayPlugin::eyeViewport(Eye eye) const {
uvec2 vpSize = _compositeFramebuffer->getSize();
auto vpSize = glm::uvec2(getRecommendedRenderSize());
vpSize.x /= 2;
uvec2 vpPos;
if (eye == Eye::Right) {
@ -891,14 +874,6 @@ void OpenGLDisplayPlugin::render(std::function<void(gpu::Batch& batch)> f) {
OpenGLDisplayPlugin::~OpenGLDisplayPlugin() {
}
void OpenGLDisplayPlugin::updateCompositeFramebuffer() {
auto renderSize = glm::uvec2(getRecommendedRenderSize());
if (!_compositeFramebuffer || _compositeFramebuffer->getSize() != renderSize) {
_compositeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("OpenGLDisplayPlugin::composite", gpu::Element::COLOR_RGBA_32, renderSize.x, renderSize.y));
// _compositeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("OpenGLDisplayPlugin::composite", gpu::Element::COLOR_SRGBA_32, renderSize.x, renderSize.y));
}
}
void OpenGLDisplayPlugin::copyTextureToQuickFramebuffer(NetworkTexturePointer networkTexture, QOpenGLFramebufferObject* target, GLsync* fenceSync) {
#if !defined(USE_GLES)
auto glBackend = const_cast<OpenGLDisplayPlugin&>(*this).getGLBackend();

View file

@ -94,14 +94,10 @@ protected:
// is not populated
virtual bool alwaysPresent() const { return false; }
void updateCompositeFramebuffer();
virtual QThread::Priority getPresentPriority() { return QThread::HighPriority; }
virtual void compositeLayers();
virtual void compositeScene();
virtual std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> getHUDOperator();
virtual void compositePointer();
virtual void compositeExtra() {};
virtual void compositeLayers(const gpu::FramebufferPointer&);
virtual void compositePointer(const gpu::FramebufferPointer&);
virtual void compositeExtra(const gpu::FramebufferPointer&) {};
// These functions must only be called on the presentation thread
virtual void customizeContext();
@ -116,10 +112,10 @@ protected:
virtual void deactivateSession() {}
// Plugin specific functionality to send the composed scene to the output window or device
virtual void internalPresent();
virtual void internalPresent(const gpu::FramebufferPointer&);
void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor, const gpu::FramebufferPointer& fbo);
void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor);
void renderFromTexture(gpu::Batch& batch, const gpu::TexturePointer& texture, const glm::ivec4& viewport, const glm::ivec4& scissor, const gpu::FramebufferPointer& destFbo = nullptr, const gpu::FramebufferPointer& copyFbo = nullptr);
virtual void updateFrameData();
virtual glm::mat4 getViewCorrection() { return glm::mat4(); }
@ -142,14 +138,8 @@ protected:
gpu::FramePointer _currentFrame;
gpu::Frame* _lastFrame { nullptr };
mat4 _prevRenderView;
gpu::FramebufferPointer _compositeFramebuffer;
gpu::PipelinePointer _hudPipeline;
gpu::PipelinePointer _mirrorHUDPipeline;
gpu::ShaderPointer _mirrorHUDPS;
gpu::PipelinePointer _simplePipeline;
gpu::PipelinePointer _presentPipeline;
gpu::PipelinePointer _cursorPipeline;
gpu::TexturePointer _displayTexture{};
gpu::TexturePointer _displayTexture;
float _compositeHUDAlpha { 1.0f };
struct CursorData {
@ -185,5 +175,9 @@ protected:
// be serialized through this mutex
mutable Mutex _presentMutex;
float _hudAlpha{ 1.0f };
private:
gpu::PipelinePointer _presentPipeline;
};

View file

@ -24,7 +24,7 @@ public:
protected:
void updatePresentPose() override;
void hmdPresent() override {}
void hmdPresent(const gpu::FramebufferPointer&) override {}
bool isHmdMounted() const override { return true; }
bool internalActivate() override;
private:

View file

@ -114,20 +114,23 @@ void HmdDisplayPlugin::internalDeactivate() {
void HmdDisplayPlugin::customizeContext() {
Parent::customizeContext();
_hudRenderer.build();
_hudOperator = _hudRenderer.build();
}
void HmdDisplayPlugin::uncustomizeContext() {
// This stops the weirdness where if the preview was disabled, on switching back to 2D,
// the vsync was stuck in the disabled state. No idea why that happens though.
_disablePreview = false;
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.resetViewTransform();
batch.setFramebuffer(_compositeFramebuffer);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
});
_hudRenderer = HUDRenderer();
if (_currentFrame && _currentFrame->framebuffer) {
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.resetViewTransform();
batch.setFramebuffer(_currentFrame->framebuffer);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
});
}
_hudRenderer = {};
_previewTexture.reset();
Parent::uncustomizeContext();
}
@ -174,11 +177,11 @@ float HmdDisplayPlugin::getLeftCenterPixel() const {
return leftCenterPixel;
}
void HmdDisplayPlugin::internalPresent() {
void HmdDisplayPlugin::internalPresent(const gpu::FramebufferPointer& compositeFramebuffer) {
PROFILE_RANGE_EX(render, __FUNCTION__, 0xff00ff00, (uint64_t)presentCount())
// Composite together the scene, hud and mouse cursor
hmdPresent();
hmdPresent(compositeFramebuffer);
if (_displayTexture) {
// Note: _displayTexture must currently be the same size as the display.
@ -260,7 +263,7 @@ void HmdDisplayPlugin::internalPresent() {
viewport.z *= 2;
}
renderFromTexture(batch, _compositeFramebuffer->getRenderBuffer(0), viewport, scissor, fbo);
renderFromTexture(batch, compositeFramebuffer->getRenderBuffer(0), viewport, scissor, nullptr, fbo);
});
swapBuffers();
@ -345,7 +348,7 @@ glm::mat4 HmdDisplayPlugin::getViewCorrection() {
}
}
void HmdDisplayPlugin::HUDRenderer::build() {
DisplayPlugin::HUDOperator HmdDisplayPlugin::HUDRenderer::build() {
vertices = std::make_shared<gpu::Buffer>();
indices = std::make_shared<gpu::Buffer>();
@ -380,7 +383,7 @@ void HmdDisplayPlugin::HUDRenderer::build() {
indexCount = numberOfRectangles * TRIANGLE_PER_RECTANGLE * VERTEX_PER_TRANGLE;
// Compute indices order
std::vector<GLushort> indices;
std::vector<GLushort> indexData;
for (int i = 0; i < stacks - 1; i++) {
for (int j = 0; j < slices - 1; j++) {
GLushort bottomLeftIndex = i * slices + j;
@ -388,24 +391,21 @@ void HmdDisplayPlugin::HUDRenderer::build() {
GLushort topLeftIndex = bottomLeftIndex + slices;
GLushort topRightIndex = topLeftIndex + 1;
// FIXME make a z-order curve for better vertex cache locality
indices.push_back(topLeftIndex);
indices.push_back(bottomLeftIndex);
indices.push_back(topRightIndex);
indexData.push_back(topLeftIndex);
indexData.push_back(bottomLeftIndex);
indexData.push_back(topRightIndex);
indices.push_back(topRightIndex);
indices.push_back(bottomLeftIndex);
indices.push_back(bottomRightIndex);
indexData.push_back(topRightIndex);
indexData.push_back(bottomLeftIndex);
indexData.push_back(bottomRightIndex);
}
}
this->indices->append(indices);
indices->append(indexData);
format = std::make_shared<gpu::Stream::Format>(); // 1 for everyone
format->setAttribute(gpu::Stream::POSITION, gpu::Stream::POSITION, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
format->setAttribute(gpu::Stream::TEXCOORD, gpu::Stream::TEXCOORD, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
uniformsBuffer = std::make_shared<gpu::Buffer>(sizeof(Uniforms), nullptr);
updatePipeline();
}
void HmdDisplayPlugin::HUDRenderer::updatePipeline() {
if (!pipeline) {
auto program = gpu::Shader::createProgram(shader::render_utils::program::hmd_ui);
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
@ -416,10 +416,6 @@ void HmdDisplayPlugin::HUDRenderer::updatePipeline() {
pipeline = gpu::Pipeline::create(program, state);
}
}
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> HmdDisplayPlugin::HUDRenderer::render(HmdDisplayPlugin& plugin) {
updatePipeline();
auto hudPipeline = pipeline;
auto hudFormat = format;
@ -428,9 +424,9 @@ std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> HmdDis
auto hudUniformBuffer = uniformsBuffer;
auto hudUniforms = uniforms;
auto hudIndexCount = indexCount;
return [=](gpu::Batch& batch, const gpu::TexturePointer& hudTexture, bool mirror) {
if (hudPipeline && hudTexture) {
batch.setPipeline(hudPipeline);
return [=](gpu::Batch& batch, const gpu::TexturePointer& hudTexture, const gpu::FramebufferPointer&, const bool mirror) {
if (pipeline && hudTexture) {
batch.setPipeline(pipeline);
batch.setInputFormat(hudFormat);
gpu::BufferView posView(hudVertices, VERTEX_OFFSET, hudVertices->getSize(), VERTEX_STRIDE, hudFormat->getAttributes().at(gpu::Stream::POSITION)._element);
@ -454,7 +450,7 @@ std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> HmdDis
};
}
void HmdDisplayPlugin::compositePointer() {
void HmdDisplayPlugin::compositePointer(const gpu::FramebufferPointer& compositeFramebuffer) {
auto& cursorManager = Cursor::Manager::instance();
const auto& cursorData = _cursorsData[cursorManager.getCursor()->getIcon()];
auto compositorHelper = DependencyManager::get<CompositorHelper>();
@ -463,7 +459,7 @@ void HmdDisplayPlugin::compositePointer() {
render([&](gpu::Batch& batch) {
// FIXME use standard gpu stereo rendering for this.
batch.enableStereo(false);
batch.setFramebuffer(_compositeFramebuffer);
batch.setFramebuffer(compositeFramebuffer);
batch.setPipeline(_cursorPipeline);
batch.setResourceTexture(0, cursorData.texture);
batch.resetViewTransform();
@ -478,10 +474,6 @@ void HmdDisplayPlugin::compositePointer() {
});
}
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> HmdDisplayPlugin::getHUDOperator() {
return _hudRenderer.render(*this);
}
HmdDisplayPlugin::~HmdDisplayPlugin() {
}

View file

@ -53,16 +53,15 @@ signals:
void hmdVisibleChanged(bool visible);
protected:
virtual void hmdPresent() = 0;
virtual void hmdPresent(const gpu::FramebufferPointer&) = 0;
virtual bool isHmdMounted() const = 0;
virtual void postPreview() {};
virtual void updatePresentPose();
bool internalActivate() override;
void internalDeactivate() override;
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> getHUDOperator() override;
void compositePointer() override;
void internalPresent() override;
void compositePointer(const gpu::FramebufferPointer&) override;
void internalPresent(const gpu::FramebufferPointer&) override;
void customizeContext() override;
void uncustomizeContext() override;
void updateFrameData() override;
@ -120,8 +119,6 @@ private:
static const size_t TEXTURE_OFFSET { offsetof(Vertex, uv) };
static const int VERTEX_STRIDE { sizeof(Vertex) };
void build();
void updatePipeline();
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> render(HmdDisplayPlugin& plugin);
HUDOperator build();
} _hudRenderer;
};

View file

@ -37,13 +37,13 @@ glm::uvec2 InterleavedStereoDisplayPlugin::getRecommendedRenderSize() const {
return result;
}
void InterleavedStereoDisplayPlugin::internalPresent() {
void InterleavedStereoDisplayPlugin::internalPresent(const gpu::FramebufferPointer& compositeFramebuffer) {
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.resetViewTransform();
batch.setFramebuffer(gpu::FramebufferPointer());
batch.setViewportTransform(ivec4(uvec2(0), getSurfacePixels()));
batch.setResourceTexture(0, _currentFrame->framebuffer->getRenderBuffer(0));
batch.setResourceTexture(0, compositeFramebuffer->getRenderBuffer(0));
batch.setPipeline(_interleavedPresentPipeline);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});

View file

@ -21,7 +21,7 @@ protected:
// initialize OpenGL context settings needed by the plugin
void customizeContext() override;
void uncustomizeContext() override;
void internalPresent() override;
void internalPresent(const gpu::FramebufferPointer&) override;
private:
static const QString NAME;

View file

@ -7,59 +7,44 @@
//
#include "Framebuffer.h"
#include <array>
#include <EGL/egl.h>
#include <glad/glad.h>
#include <android/log.h>
#include <VrApi.h>
#include <VrApi_Helpers.h>
#include "Helpers.h"
using namespace ovr;
void Framebuffer::updateLayer(int eye, ovrLayerProjection2& layer, const ovrMatrix4f* projectionMatrix ) const {
auto& layerTexture = layer.Textures[eye];
layerTexture.ColorSwapChain = _swapChain;
layerTexture.SwapChainIndex = _index;
layerTexture.ColorSwapChain = _swapChainInfos[eye].swapChain;
layerTexture.SwapChainIndex = _swapChainInfos[eye].index;
if (projectionMatrix) {
layerTexture.TexCoordsFromTanAngles = ovrMatrix4f_TanAngleMatrixFromProjection( projectionMatrix );
}
layerTexture.TextureRect = { 0, 0, 1, 1 };
}
void Framebuffer::SwapChainInfo::destroy() {
if (swapChain != nullptr) {
vrapi_DestroyTextureSwapChain(swapChain);
swapChain = nullptr;
}
index = -1;
length = -1;
}
void Framebuffer::create(const glm::uvec2& size) {
_size = size;
_index = 0;
_validTexture = false;
// Depth renderbuffer
/* glGenRenderbuffers(1, &_depth);
glBindRenderbuffer(GL_RENDERBUFFER, _depth);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, _size.x, _size.y);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
*/
// Framebuffer
glGenFramebuffers(1, &_fbo);
// glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _fbo);
// glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, _depth);
// glBindFramebuffer(GL_FRAMEBUFFER, 0);
_swapChain = vrapi_CreateTextureSwapChain3(VRAPI_TEXTURE_TYPE_2D, GL_RGBA8, _size.x, _size.y, 1, 3);
_length = vrapi_GetTextureSwapChainLength(_swapChain);
if (!_length) {
__android_log_write(ANDROID_LOG_WARN, "QQQ_OVR", "Unable to count swap chain textures");
return;
}
for (int i = 0; i < _length; ++i) {
GLuint chainTexId = vrapi_GetTextureSwapChainHandle(_swapChain, i);
glBindTexture(GL_TEXTURE_2D, chainTexId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
ovr::for_each_eye([&](ovrEye eye) {
_swapChainInfos[eye].create(size);
});
glBindTexture(GL_TEXTURE_2D, 0);
glGenFramebuffers(1, &_fbo);
}
void Framebuffer::destroy() {
@ -67,28 +52,82 @@ void Framebuffer::destroy() {
glDeleteFramebuffers(1, &_fbo);
_fbo = 0;
}
if (0 != _depth) {
glDeleteRenderbuffers(1, &_depth);
_depth = 0;
}
if (_swapChain != nullptr) {
vrapi_DestroyTextureSwapChain(_swapChain);
_swapChain = nullptr;
}
_index = -1;
_length = -1;
ovr::for_each_eye([&](ovrEye eye) {
_swapChainInfos[eye].destroy();
});
}
void Framebuffer::advance() {
_index = (_index + 1) % _length;
_validTexture = false;
ovr::for_each_eye([&](ovrEye eye) {
_swapChainInfos[eye].advance();
});
}
void Framebuffer::bind() {
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _fbo);
if (!_validTexture) {
GLuint chainTexId = vrapi_GetTextureSwapChainHandle(_swapChain, _index);
glFramebufferTexture(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, chainTexId, 0);
_validTexture = true;
void Framebuffer::bind(GLenum target) {
glBindFramebuffer(target, _fbo);
_swapChainInfos[0].bind(target, GL_COLOR_ATTACHMENT0);
_swapChainInfos[1].bind(target, GL_COLOR_ATTACHMENT1);
}
void Framebuffer::invalidate(GLenum target) {
static const std::array<GLenum, 2> INVALIDATE_ATTACHMENTS {{ GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 }};
glInvalidateFramebuffer(target, static_cast<GLsizei>(INVALIDATE_ATTACHMENTS.size()), INVALIDATE_ATTACHMENTS.data());
}
void Framebuffer::drawBuffers(ovrEye eye) const {
static const std::array<std::array<GLenum, 2>, 3> EYE_DRAW_BUFFERS { {
{GL_COLOR_ATTACHMENT0, GL_NONE},
{GL_NONE, GL_COLOR_ATTACHMENT1},
{GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1}
} };
switch(eye) {
case VRAPI_EYE_LEFT:
case VRAPI_EYE_RIGHT:
case VRAPI_EYE_COUNT: {
const auto& eyeDrawBuffers = EYE_DRAW_BUFFERS[eye];
glDrawBuffers(static_cast<GLsizei>(eyeDrawBuffers.size()), eyeDrawBuffers.data());
}
break;
default:
throw std::runtime_error("Invalid eye for drawBuffers");
}
}
void Framebuffer::SwapChainInfo::create(const glm::uvec2 &size) {
index = 0;
validTexture = false;
// GL_SRGB8_ALPHA8 and GL_RGBA8 appear to behave the same here. The only thing that changes the
// output gamma behavior is VRAPI_MODE_FLAG_FRONT_BUFFER_SRGB passed to vrapi_EnterVrMode
swapChain = vrapi_CreateTextureSwapChain3(VRAPI_TEXTURE_TYPE_2D, GL_SRGB8_ALPHA8, size.x, size.y, 1, 3);
length = vrapi_GetTextureSwapChainLength(swapChain);
if (!length) {
__android_log_write(ANDROID_LOG_WARN, "QQQ_OVR", "Unable to count swap chain textures");
throw std::runtime_error("Unable to create Oculus texture swap chain");
}
for (int i = 0; i < length; ++i) {
GLuint chainTexId = vrapi_GetTextureSwapChainHandle(swapChain, i);
glBindTexture(GL_TEXTURE_2D, chainTexId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
}
void Framebuffer::SwapChainInfo::advance() {
index = (index + 1) % length;
validTexture = false;
}
void Framebuffer::SwapChainInfo::bind(uint32_t target, uint32_t attachment) {
if (!validTexture) {
GLuint chainTexId = vrapi_GetTextureSwapChainHandle(swapChain, index);
glFramebufferTexture(target, attachment, chainTexId, 0);
validTexture = true;
}
}
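
Note: with this rework the two per-eye texture swap chains hang off a single FBO as two color attachments, and drawBuffers() routes output to one or both. A minimal sketch of how the present path drives it (assumes a current GL context; eyeTargetSize, layer and tracking are placeholders for the values presentFrame already has):

    ovr::Framebuffer eyesFbo;
    eyesFbo.create(eyeTargetSize);            // one texture swap chain per eye, one shared FBO

    eyesFbo.bind();                           // binds the FBO and attaches each chain's current texture
    eyesFbo.drawBuffers(VRAPI_EYE_LEFT);      // route writes to GL_COLOR_ATTACHMENT0 only
    // ... blit or draw the left eye ...
    eyesFbo.drawBuffers(VRAPI_EYE_RIGHT);     // route writes to GL_COLOR_ATTACHMENT1 only
    // ... blit or draw the right eye ...
    // (or drawBuffers(VRAPI_EYE_COUNT) to fill both at once from a dual-output fragment shader)

    eyesFbo.updateLayer(VRAPI_EYE_LEFT, layer, &tracking.Eye[VRAPI_EYE_LEFT].ProjectionMatrix);
    eyesFbo.updateLayer(VRAPI_EYE_RIGHT, layer, &tracking.Eye[VRAPI_EYE_RIGHT].ProjectionMatrix);
    eyesFbo.advance();                        // step both swap chains to their next texture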

View file

@ -9,6 +9,7 @@
#include <cstdint>
#include <glm/glm.hpp>
#include <glad/glad.h>
#include <VrApi_Types.h>
@ -20,15 +21,28 @@ public:
void create(const glm::uvec2& size);
void advance();
void destroy();
void bind();
void bind(GLenum target = GL_DRAW_FRAMEBUFFER);
void invalidate(GLenum target = GL_DRAW_FRAMEBUFFER);
void drawBuffers(ovrEye eye) const;
uint32_t _depth { 0 };
const glm::uvec2& size() const { return _size; }
private:
uint32_t _fbo{ 0 };
int _length{ -1 };
int _index{ -1 };
bool _validTexture{ false };
glm::uvec2 _size;
ovrTextureSwapChain* _swapChain{ nullptr };
struct SwapChainInfo {
int length{ -1 };
int index{ -1 };
bool validTexture{ false };
ovrTextureSwapChain* swapChain{ nullptr };
void create(const glm::uvec2& size);
void destroy();
void advance();
void bind(GLenum target, GLenum attachment);
};
SwapChainInfo _swapChainInfos[VRAPI_FRAME_LAYER_EYE_MAX];
};
} // namespace ovr

View file

@ -9,37 +9,186 @@
#include <android/native_window_jni.h>
#include <android/log.h>
#include <android/asset_manager.h>
#include <android/asset_manager_jni.h>
#include <unistd.h>
#include <algorithm>
#include <array>
#include <VrApi.h>
#include <VrApi_Helpers.h>
#include <VrApi_Types.h>
//#include <OVR_Platform.h>
#include "GLContext.h"
#include "Helpers.h"
#include "Framebuffer.h"
static AAssetManager* ASSET_MANAGER = nullptr;
#define USE_BLIT_PRESENT 0
#if !USE_BLIT_PRESENT
static std::string getTextAsset(const char* assetPath) {
if (!ASSET_MANAGER || !assetPath) {
return {};
}
AAsset* asset = AAssetManager_open(ASSET_MANAGER, assetPath, AASSET_MODE_BUFFER);
if (!asset) {
return {};
}
auto length = AAsset_getLength(asset);
if (0 == length) {
AAsset_close(asset);
return {};
}
auto buffer = AAsset_getBuffer(asset);
if (!buffer) {
AAsset_close(asset);
return {};
}
std::string result { static_cast<const char*>(buffer), static_cast<size_t>(length) };
AAsset_close(asset);
return result;
}
static std::string getShaderInfoLog(GLuint glshader) {
std::string result;
GLint infoLength = 0;
glGetShaderiv(glshader, GL_INFO_LOG_LENGTH, &infoLength);
if (infoLength > 0) {
char* temp = new char[infoLength];
glGetShaderInfoLog(glshader, infoLength, NULL, temp);
result = std::string(temp);
delete[] temp;
}
return result;
}
static GLuint buildShader(GLenum shaderDomain, const char* shader) {
GLuint glshader = glCreateShader(shaderDomain);
if (!glshader) {
throw std::runtime_error("Bad shader");
}
glShaderSource(glshader, 1, &shader, NULL);
glCompileShader(glshader);
GLint compiled = 0;
glGetShaderiv(glshader, GL_COMPILE_STATUS, &compiled);
// if compilation fails
if (!compiled) {
std::string compileError = getShaderInfoLog(glshader);
glDeleteShader(glshader);
__android_log_print(ANDROID_LOG_WARN, "QQQ_OVR", "Shader compile error: %s", compileError.c_str());
return 0;
}
return glshader;
}
static std::string getProgramInfoLog(GLuint glprogram) {
std::string result;
GLint infoLength = 0;
glGetProgramiv(glprogram, GL_INFO_LOG_LENGTH, &infoLength);
if (infoLength > 0) {
char* temp = new char[infoLength];
glGetProgramInfoLog(glprogram, infoLength, NULL, temp);
result = std::string(temp);
delete[] temp;
}
return result;
}
static GLuint buildProgram(const char* vertex, const char* fragment) {
// A brand new program:
GLuint glprogram { 0 }, glvertex { 0 }, glfragment { 0 };
try {
glprogram = glCreateProgram();
if (0 == glprogram) {
throw std::runtime_error("Failed to create program, is GL context current?");
}
glvertex = buildShader(GL_VERTEX_SHADER, vertex);
if (0 == glvertex) {
throw std::runtime_error("Failed to create or compile vertex shader");
}
glAttachShader(glprogram, glvertex);
glfragment = buildShader(GL_FRAGMENT_SHADER, fragment);
if (0 == glfragment) {
throw std::runtime_error("Failed to create or compile fragment shader");
}
glAttachShader(glprogram, glfragment);
GLint linked { 0 };
glLinkProgram(glprogram);
glGetProgramiv(glprogram, GL_LINK_STATUS, &linked);
if (!linked) {
std::string linkErrorLog = getProgramInfoLog(glprogram);
__android_log_print(ANDROID_LOG_WARN, "QQQ_OVR", "Program link error: %s", linkErrorLog.c_str());
throw std::runtime_error("Failed to link program, is the interface between the fragment and vertex shaders correct?");
}
} catch(const std::runtime_error& error) {
if (0 != glprogram) {
glDeleteProgram(glprogram);
glprogram = 0;
}
}
if (0 != glvertex) {
glDeleteShader(glvertex);
}
if (0 != glfragment) {
glDeleteShader(glfragment);
}
if (0 == glprogram) {
throw std::runtime_error("Failed to build program");
}
return glprogram;
}
#endif
using namespace ovr;
static thread_local bool isRenderThread { false };
struct VrSurface : public TaskQueue {
using HandlerTask = VrHandler::HandlerTask;
using HandlerTask = ovr::VrHandler::HandlerTask;
JavaVM* vm{nullptr};
jobject oculusActivity{ nullptr };
ANativeWindow* nativeWindow{ nullptr };
VrHandler* handler{nullptr};
ovr::VrHandler* handler{nullptr};
ovrMobile* session{nullptr};
bool resumed { false };
GLContext vrglContext;
Framebuffer eyeFbos[2];
uint32_t readFbo{0};
ovr::GLContext vrglContext;
ovr::Framebuffer eyesFbo;
#if USE_BLIT_PRESENT
GLuint readFbo { 0 };
#else
GLuint renderProgram { 0 };
GLuint renderVao { 0 };
#endif
std::atomic<uint32_t> presentIndex{1};
double displayTime{0};
// Not currently set by anything
@ -76,6 +225,16 @@ struct VrSurface : public TaskQueue {
vrglContext.create(currentDisplay, currentContext, noErrorContext);
vrglContext.makeCurrent();
#if USE_BLIT_PRESENT
glGenFramebuffers(1, &readFbo);
#else
glGenVertexArrays(1, &renderVao);
auto vertexShader = getTextAsset("shaders/present.vert");
auto fragmentShader = getTextAsset("shaders/present.frag");
renderProgram = buildProgram(vertexShader.c_str(), fragmentShader.c_str());
#endif
glm::uvec2 eyeTargetSize;
withEnv([&](JNIEnv* env){
ovrJava java{ vm, env, oculusActivity };
@ -85,10 +244,7 @@ struct VrSurface : public TaskQueue {
};
});
__android_log_print(ANDROID_LOG_WARN, "QQQ_OVR", "QQQ Eye Size %d, %d", eyeTargetSize.x, eyeTargetSize.y);
ovr::for_each_eye([&](ovrEye eye) {
eyeFbos[eye].create(eyeTargetSize);
});
glGenFramebuffers(1, &readFbo);
eyesFbo.create(eyeTargetSize);
vrglContext.doneCurrent();
}
@ -157,6 +313,7 @@ struct VrSurface : public TaskQueue {
ovrJava java{ vm, env, oculusActivity };
ovrModeParms modeParms = vrapi_DefaultModeParms(&java);
modeParms.Flags |= VRAPI_MODE_FLAG_NATIVE_WINDOW;
modeParms.Flags |= VRAPI_MODE_FLAG_FRONT_BUFFER_SRGB;
if (noErrorContext) {
modeParms.Flags |= VRAPI_MODE_FLAG_CREATE_CONTEXT_NO_ERROR;
}
@ -178,38 +335,51 @@ struct VrSurface : public TaskQueue {
void presentFrame(uint32_t sourceTexture, const glm::uvec2 &sourceSize, const ovrTracking2& tracking) {
ovrLayerProjection2 layer = vrapi_DefaultLayerProjection2();
layer.HeadPose = tracking.HeadPose;
eyesFbo.bind();
if (sourceTexture) {
eyesFbo.invalidate();
#if USE_BLIT_PRESENT
glBindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
glFramebufferTexture(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, sourceTexture, 0);
GLenum framebufferStatus = glCheckFramebufferStatus(GL_READ_FRAMEBUFFER);
if (GL_FRAMEBUFFER_COMPLETE != framebufferStatus) {
__android_log_print(ANDROID_LOG_WARN, "QQQ_OVR", "incomplete framebuffer");
}
}
GLenum invalidateAttachment = GL_COLOR_ATTACHMENT0;
ovr::for_each_eye([&](ovrEye eye) {
const auto &eyeTracking = tracking.Eye[eye];
auto &eyeFbo = eyeFbos[eye];
const auto &destSize = eyeFbo._size;
eyeFbo.bind();
glInvalidateFramebuffer(GL_DRAW_FRAMEBUFFER, 1, &invalidateAttachment);
if (sourceTexture) {
const auto &destSize = eyesFbo.size();
ovr::for_each_eye([&](ovrEye eye) {
auto sourceWidth = sourceSize.x / 2;
auto sourceX = (eye == VRAPI_EYE_LEFT) ? 0 : sourceWidth;
// Each eye blit uses a different draw buffer
eyesFbo.drawBuffers(eye);
glBlitFramebuffer(
sourceX, 0, sourceX + sourceWidth, sourceSize.y,
0, 0, destSize.x, destSize.y,
GL_COLOR_BUFFER_BIT, GL_NEAREST);
}
eyeFbo.updateLayer(eye, layer, &eyeTracking.ProjectionMatrix);
eyeFbo.advance();
});
if (sourceTexture) {
glInvalidateFramebuffer(GL_READ_FRAMEBUFFER, 1, &invalidateAttachment);
});
static const std::array<GLenum, 1> READ_INVALIDATE_ATTACHMENTS {{ GL_COLOR_ATTACHMENT0 }};
glInvalidateFramebuffer(GL_READ_FRAMEBUFFER, (GLuint)READ_INVALIDATE_ATTACHMENTS.size(), READ_INVALIDATE_ATTACHMENTS.data());
glFramebufferTexture(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, 0, 0);
#else
eyesFbo.drawBuffers(VRAPI_EYE_COUNT);
const auto &destSize = eyesFbo.size();
glViewport(0, 0, destSize.x, destSize.y);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, sourceTexture);
glBindVertexArray(renderVao);
glUseProgram(renderProgram);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glUseProgram(0);
glBindVertexArray(0);
#endif
} else {
eyesFbo.drawBuffers(VRAPI_EYE_COUNT);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
glFlush();
ovr::for_each_eye([&](ovrEye eye) {
const auto &eyeTracking = tracking.Eye[eye];
eyesFbo.updateLayer(eye, layer, &eyeTracking.ProjectionMatrix);
});
eyesFbo.advance();
ovrLayerHeader2 *layerHeader = &layer.Header;
ovrSubmitFrameDescription2 frameDesc = {};
@ -321,8 +491,9 @@ JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *, void *) {
return JNI_VERSION_1_6;
}
JNIEXPORT void JNICALL Java_io_highfidelity_oculus_OculusMobileActivity_nativeOnCreate(JNIEnv* env, jobject obj) {
JNIEXPORT void JNICALL Java_io_highfidelity_oculus_OculusMobileActivity_nativeOnCreate(JNIEnv* env, jobject obj, jobject assetManager) {
__android_log_write(ANDROID_LOG_WARN, "QQQ_JNI", __FUNCTION__);
ASSET_MANAGER = AAssetManager_fromJava(env, assetManager);
SURFACE.onCreate(env, obj);
}

View file

@ -245,7 +245,7 @@ void OculusMobileDisplayPlugin::updatePresentPose() {
});
}
void OculusMobileDisplayPlugin::internalPresent() {
void OculusMobileDisplayPlugin::internalPresent(const gpu::FramebufferPointer& compositeFramebuffer) {
VrHandler::pollTask();
if (!vrActive()) {
@ -253,8 +253,12 @@ void OculusMobileDisplayPlugin::internalPresent() {
return;
}
auto sourceTexture = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0));
glm::uvec2 sourceSize{ _compositeFramebuffer->getWidth(), _compositeFramebuffer->getHeight() };
GLuint sourceTexture = 0;
glm::uvec2 sourceSize;
if (compositeFramebuffer) {
sourceTexture = getGLBackend()->getTextureID(compositeFramebuffer->getRenderBuffer(0));
sourceSize = { compositeFramebuffer->getWidth(), compositeFramebuffer->getHeight() };
}
VrHandler::presentFrame(sourceTexture, sourceSize, presentTracking);
_presentRate.increment();
}

View file

@ -54,8 +54,8 @@ protected:
void uncustomizeContext() override;
void updatePresentPose() override;
void internalPresent() override;
void hmdPresent() override { throw std::runtime_error("Unused"); }
void internalPresent(const gpu::FramebufferPointer&) override;
void hmdPresent(const gpu::FramebufferPointer&) override { throw std::runtime_error("Unused"); }
bool isHmdMounted() const override;
bool alwaysPresent() const override { return true; }

View file

@ -2,6 +2,12 @@
#include <NumericalConstants.h>
const DisplayPlugin::HUDOperator DisplayPlugin::DEFAULT_HUD_OPERATOR{ std::function<void(gpu::Batch&, const gpu::TexturePointer&, const gpu::FramebufferPointer&, bool mirror)>() };
DisplayPlugin::DisplayPlugin() : _hudOperator{ DEFAULT_HUD_OPERATOR } {
}
int64_t DisplayPlugin::getPaintDelayUsecs() const {
std::lock_guard<std::mutex> lock(_paintDelayMutex);
return _paintDelayTimer.isValid() ? _paintDelayTimer.nsecsElapsed() / NSECS_PER_USEC : 0;
@ -35,8 +41,8 @@ void DisplayPlugin::waitForPresent() {
}
}
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> DisplayPlugin::getHUDOperator() {
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> hudOperator;
std::function<void(gpu::Batch&, const gpu::TexturePointer&, const gpu::FramebufferPointer& compositeFramebuffer, bool mirror)> DisplayPlugin::getHUDOperator() {
HUDOperator hudOperator;
{
QMutexLocker locker(&_presentMutex);
hudOperator = _hudOperator;
@ -48,3 +54,5 @@ glm::mat4 HmdDisplay::getEyeToHeadTransform(Eye eye) const {
static const glm::mat4 xform;
return xform;
}

View file

@ -121,6 +121,8 @@ class DisplayPlugin : public Plugin, public HmdDisplay {
Q_OBJECT
using Parent = Plugin;
public:
DisplayPlugin();
virtual int getRequiredThreadCount() const { return 0; }
virtual bool isHmd() const { return false; }
virtual int getHmdScreen() const { return -1; }
@ -214,7 +216,8 @@ public:
void waitForPresent();
float getAveragePresentTime() { return _movingAveragePresent.average / (float)USECS_PER_MSEC; } // in msec
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> getHUDOperator();
using HUDOperator = std::function<void(gpu::Batch&, const gpu::TexturePointer&, const gpu::FramebufferPointer&, bool mirror)>;
virtual HUDOperator getHUDOperator() final;
static const QString& MENU_PATH();
@ -231,7 +234,8 @@ protected:
gpu::ContextPointer _gpuContext;
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> _hudOperator { std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)>() };
static const HUDOperator DEFAULT_HUD_OPERATOR;
HUDOperator _hudOperator;
MovingAverage<float, 10> _movingAveragePresent;
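
Note: the new HUDOperator type bundles the composite framebuffer into the callback, so render code no longer reaches for a plugin-owned framebuffer. A hypothetical minimal operator (placeholder body, not part of this change) has the shape:

    DisplayPlugin::HUDOperator op = [](gpu::Batch& batch, const gpu::TexturePointer& hudTexture,
                                       const gpu::FramebufferPointer& compositeFramebuffer, bool mirror) {
        if (!hudTexture || !compositeFramebuffer) {
            return;  // nothing to composite; CompositeHUD also guards these before invoking
        }
        // set a pipeline, bind hudTexture, and draw into compositeFramebuffer,
        // as the 2D hud operator and HmdDisplayPlugin::HUDRenderer do elsewhere in this change
    };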

View file

@ -122,8 +122,8 @@ void CompositeHUD::run(const RenderContextPointer& renderContext, const gpu::Fra
if (inputs) {
batch.setFramebuffer(inputs);
}
if (renderContext->args->_hudOperator) {
renderContext->args->_hudOperator(batch, renderContext->args->_hudTexture, renderContext->args->_renderMode == RenderArgs::RenderMode::MIRROR_RENDER_MODE);
if (renderContext->args->_hudOperator && renderContext->args->_blitFramebuffer) {
renderContext->args->_hudOperator(batch, renderContext->args->_hudTexture, renderContext->args->_blitFramebuffer, renderContext->args->_renderMode == RenderArgs::RenderMode::MIRROR_RENDER_MODE);
}
});
#endif

View file

@ -131,7 +131,7 @@ namespace render {
render::ScenePointer _scene;
int8_t _cameraMode { -1 };
std::function<void(gpu::Batch&, const gpu::TexturePointer&, bool mirror)> _hudOperator;
std::function<void(gpu::Batch&, const gpu::TexturePointer&, const gpu::FramebufferPointer&, bool mirror)> _hudOperator;
gpu::TexturePointer _hudTexture;
};

View file

@ -16,7 +16,7 @@ public:
bool isSupported() const override;
protected:
void hmdPresent() override {}
void hmdPresent(const gpu::FramebufferPointer&) override {}
bool isHmdMounted() const override { return true; }
private:

View file

@ -108,13 +108,16 @@ void OculusDisplayPlugin::customizeContext() {
}
void OculusDisplayPlugin::uncustomizeContext() {
#if 0
// Present a final black frame to the HMD
_compositeFramebuffer->Bound(FramebufferTarget::Draw, [] {
Context::ClearColor(0, 0, 0, 1);
Context::Clear().ColorBuffer();
});
hmdPresent();
if (_currentFrame && _currentFrame->framebuffer) {
// Present a final black frame to the HMD
_currentFrame->framebuffer->Bound(FramebufferTarget::Draw, [] {
Context::ClearColor(0, 0, 0, 1);
Context::Clear().ColorBuffer();
});
hmdPresent();
}
#endif
ovr_DestroyTextureSwapChain(_session, _textureSwapChain);
@ -127,7 +130,7 @@ void OculusDisplayPlugin::uncustomizeContext() {
static const uint64_t FRAME_BUDGET = (11 * USECS_PER_MSEC);
static const uint64_t FRAME_OVER_BUDGET = (15 * USECS_PER_MSEC);
void OculusDisplayPlugin::hmdPresent() {
void OculusDisplayPlugin::hmdPresent(const gpu::FramebufferPointer& compositeFramebuffer) {
static uint64_t lastSubmitEnd = 0;
if (!_customized) {
@ -157,15 +160,8 @@ void OculusDisplayPlugin::hmdPresent() {
auto fbo = getGLBackend()->getFramebufferID(_outputFramebuffer);
glNamedFramebufferTexture(fbo, GL_COLOR_ATTACHMENT0, curTexId, 0);
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setFramebuffer(_outputFramebuffer);
batch.setViewportTransform(ivec4(uvec2(), _outputFramebuffer->getSize()));
batch.setStateScissorRect(ivec4(uvec2(), _outputFramebuffer->getSize()));
batch.resetViewTransform();
batch.setProjectionTransform(mat4());
batch.setPipeline(_presentPipeline);
batch.setResourceTexture(0, _compositeFramebuffer->getRenderBuffer(0));
batch.draw(gpu::TRIANGLE_STRIP, 4);
auto viewport = ivec4(uvec2(), _outputFramebuffer->getSize());
renderFromTexture(batch, compositeFramebuffer->getRenderBuffer(0), viewport, viewport, _outputFramebuffer);
});
glNamedFramebufferTexture(fbo, GL_COLOR_ATTACHMENT0, 0, 0);
}

View file

@ -28,7 +28,7 @@ protected:
QThread::Priority getPresentPriority() override { return QThread::TimeCriticalPriority; }
bool internalActivate() override;
void hmdPresent() override;
void hmdPresent(const gpu::FramebufferPointer&) override;
bool isHmdMounted() const override;
void customizeContext() override;
void uncustomizeContext() override;

View file

@ -237,7 +237,7 @@ void OculusLegacyDisplayPlugin::uncustomizeContext() {
Parent::uncustomizeContext();
}
void OculusLegacyDisplayPlugin::hmdPresent() {
void OculusLegacyDisplayPlugin::hmdPresent(const gpu::FramebufferPointer& compositeFramebuffer) {
if (!_hswDismissed) {
ovrHSWDisplayState hswState;
ovrHmd_GetHSWDisplayState(_hmd, &hswState);
@ -252,7 +252,7 @@ void OculusLegacyDisplayPlugin::hmdPresent() {
memset(eyePoses, 0, sizeof(ovrPosef) * 2);
eyePoses[0].Orientation = eyePoses[1].Orientation = ovrRotation;
GLint texture = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0));
GLint texture = getGLBackend()->getTextureID(compositeFramebuffer->getRenderBuffer(0));
auto sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
if (_hmdWindow->makeCurrent()) {

View file

@ -39,7 +39,7 @@ protected:
void customizeContext() override;
void uncustomizeContext() override;
void hmdPresent() override;
void hmdPresent(const gpu::FramebufferPointer&) override;
bool isHmdMounted() const override { return true; }
private:

View file

@ -511,13 +511,13 @@ void OpenVrDisplayPlugin::customizeContext() {
Parent::customizeContext();
if (_threadedSubmit) {
_compositeInfos[0].texture = _compositeFramebuffer->getRenderBuffer(0);
// _compositeInfos[0].texture = _compositeFramebuffer->getRenderBuffer(0);
for (size_t i = 0; i < COMPOSITING_BUFFER_SIZE; ++i) {
if (0 != i) {
// if (0 != i) {
_compositeInfos[i].texture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, _renderTargetSize.x,
_renderTargetSize.y, gpu::Texture::SINGLE_MIP,
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT));
}
// }
_compositeInfos[i].textureID = getGLBackend()->getTextureID(_compositeInfos[i].texture);
}
_submitThread->_canvas = _submitCanvas;
@ -613,17 +613,17 @@ bool OpenVrDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
return Parent::beginFrameRender(frameIndex);
}
void OpenVrDisplayPlugin::compositeLayers() {
void OpenVrDisplayPlugin::compositeLayers(const gpu::FramebufferPointer& compositeFramebuffer) {
if (_threadedSubmit) {
++_renderingIndex;
_renderingIndex %= COMPOSITING_BUFFER_SIZE;
auto& newComposite = _compositeInfos[_renderingIndex];
newComposite.pose = _currentPresentFrameInfo.presentPose;
_compositeFramebuffer->setRenderBuffer(0, newComposite.texture);
compositeFramebuffer->setRenderBuffer(0, newComposite.texture);
}
Parent::compositeLayers();
Parent::compositeLayers(compositeFramebuffer);
if (_threadedSubmit) {
auto& newComposite = _compositeInfos[_renderingIndex];
@ -645,13 +645,13 @@ void OpenVrDisplayPlugin::compositeLayers() {
}
}
void OpenVrDisplayPlugin::hmdPresent() {
void OpenVrDisplayPlugin::hmdPresent(const gpu::FramebufferPointer& compositeFramebuffer) {
PROFILE_RANGE_EX(render, __FUNCTION__, 0xff00ff00, (uint64_t)_currentFrame->frameIndex)
if (_threadedSubmit) {
_submitThread->waitForPresent();
} else {
GLuint glTexId = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0));
GLuint glTexId = getGLBackend()->getTextureID(compositeFramebuffer->getRenderBuffer(0));
vr::Texture_t vrTexture{ (void*)(uintptr_t)glTexId, vr::TextureType_OpenGL, vr::ColorSpace_Auto };
vr::VRCompositor()->Submit(vr::Eye_Left, &vrTexture, &OPENVR_TEXTURE_BOUNDS_LEFT);
vr::VRCompositor()->Submit(vr::Eye_Right, &vrTexture, &OPENVR_TEXTURE_BOUNDS_RIGHT);

View file

@ -72,8 +72,8 @@ protected:
void internalDeactivate() override;
void updatePresentPose() override;
void compositeLayers() override;
void hmdPresent() override;
void compositeLayers(const gpu::FramebufferPointer&) override;
void hmdPresent(const gpu::FramebufferPointer&) override;
bool isHmdMounted() const override;
void postPreview() override;