Merge pull request #15920 from annabrewer/combine-tone-map-resample

BUGZ-965: Standardize and shorten deferred/forward and desktop/android pipelines
Shannon Romano 2019-08-01 17:39:38 -07:00 committed by GitHub
commit 2f6d79fb45
14 changed files with 243 additions and 201 deletions
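
The combined job this PR introduces does two things at once: it tone maps the lighting buffer and, by defaulting its destination to the blit framebuffer when the task passes in a null framebuffer, it lands the result at final resolution, so both the deferred and forward tasks can drop the separate UpsampleToBlitFramebuffer step. Below is a minimal standalone sketch of that control flow, using plain stand-in types rather than the engine's gpu::/render:: classes; everything in it is illustrative and not part of the commit.

// Standalone sketch (stand-in types, not hifi's classes) of the two ideas the
// diff below combines: fall back to the blit framebuffer when no destination
// framebuffer is supplied, and cache 2^exposure next to the exposure value.
#include <cmath>
#include <cstdio>
#include <memory>

struct Framebuffer { int width = 0; int height = 0; };
using FramebufferPointer = std::shared_ptr<Framebuffer>;

enum class ToneCurve { None = 0, Gamma22, Reinhard, Filmic };

struct ToneMapParams {
    float exposure = 0.0f;
    float twoPowExposure = 1.0f;               // cached 2^exposure, as in the Parameters buffer
    int toneCurve = (int)ToneCurve::Gamma22;   // default curve, as in the Parameters buffer
};

// Mirrors the fallback in ToneMapAndResample::run(): a null destination means
// "tone map straight into the blit framebuffer", which is what lets the render
// tasks drop the separate upscale-to-blit job.
FramebufferPointer resolveDestination(const FramebufferPointer& requested,
                                      const FramebufferPointer& blitFramebuffer) {
    return requested ? requested : blitFramebuffer;
}

// Same update rule as ToneMapAndResample::setExposure().
void setExposure(ToneMapParams& params, float exposure) {
    if (params.exposure != exposure) {
        params.exposure = exposure;
        params.twoPowExposure = std::pow(2.0f, exposure);
    }
}

int main() {
    ToneMapParams params;
    setExposure(params, 1.5f);

    auto blit = std::make_shared<Framebuffer>();
    blit->width = 1920;
    blit->height = 1080;

    // Both RenderDeferredTask and RenderForwardTask now pass a null destination.
    FramebufferPointer dest = resolveDestination(nullptr, blit);

    std::printf("dest=%dx%d exposure=%g 2^exposure=%g\n",
                dest->width, dest->height, params.exposure, params.twoPowExposure);
    return 0;
}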


@@ -41,7 +41,6 @@
#include "MainWindow.h"
#include "Snapshot.h"
#include "SnapshotUploader.h"
#include "ToneMappingEffect.h"
// filename format: hifi-snap-by-%username%-on-%date%_%time%_@-%location%.jpg
// %1 <= username, %2 <= date and time, %3 <= current location


@@ -106,33 +106,33 @@ void Instance::enumerateNics() {
}
json Instance::getCPU(int index) {
assert(index <(int) _cpus.size());
assert(index < (int)_cpus.size());
if (index < 0 || (int) _cpus.size() <= index)
if (index < 0 || (int)_cpus.size() <= index)
return json();
return _cpus.at(index);
}
json Instance::getGPU(int index) {
assert(index <(int) _gpus.size());
assert(index < (int)_gpus.size());
if (index < 0 || (int) _gpus.size() <= index)
if (index < 0 || (int)_gpus.size() <= index)
return json();
return _gpus.at(index);
}
json Instance::getDisplay(int index) {
assert(index <(int) _displays.size());
if (index < 0 || (int) _displays.size() <= index)
assert(index < (int)_displays.size());
if (index < 0 || (int)_displays.size() <= index)
return json();
return _displays.at(index);
}
Instance::~Instance() {
if (_cpus.size() > 0) {
_cpus.clear();
@@ -147,7 +147,6 @@ Instance::~Instance() {
}
}
json Instance::listAllKeys() {
json allKeys;
allKeys.array({{


@@ -15,7 +15,6 @@
#include <shaders/Shaders.h>
#include <render/BlurTask.h>
#include <render/ResampleTask.h>
#include "render-utils/ShaderConstants.h"
#define BLOOM_BLUR_LEVEL_COUNT 3


@@ -148,6 +148,30 @@ void Blit::run(const RenderContextPointer& renderContext, const gpu::Framebuffer
});
}
NewFramebuffer::NewFramebuffer(gpu::Element pixelFormat) {
_pixelFormat = pixelFormat;
}
void NewFramebuffer::run(const render::RenderContextPointer& renderContext, Output& output) {
RenderArgs* args = renderContext->args;
glm::uvec2 frameSize(args->_viewport.z, args->_viewport.w);
output.reset();
if (_outputFramebuffer && _outputFramebuffer->getSize() != frameSize) {
_outputFramebuffer.reset();
}
if (!_outputFramebuffer) {
_outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("newFramebuffer.out"));
auto colorFormat = _pixelFormat;
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
auto colorTexture = gpu::Texture::createRenderBuffer(colorFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler);
_outputFramebuffer->setRenderBuffer(0, colorTexture);
}
output = _outputFramebuffer;
}
void NewOrDefaultFramebuffer::run(const render::RenderContextPointer& renderContext, const Input& input, Output& output) {
RenderArgs* args = renderContext->args;
// auto frameSize = input;
@@ -167,7 +191,7 @@ void NewOrDefaultFramebuffer::run(const render::RenderContextPointer& renderCont
}
if (!_outputFramebuffer) {
_outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("newFramebuffer.out"));
_outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("newOrDefaultFramebuffer.out"));
auto colorFormat = gpu::Element::COLOR_SRGBA_32;
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
auto colorTexture = gpu::Texture::createRenderBuffer(colorFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler);


@@ -83,6 +83,20 @@ public:
void run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& srcFramebuffer);
};
class NewFramebuffer {
public:
using Output = gpu::FramebufferPointer;
using JobModel = render::Job::ModelO<NewFramebuffer, Output>;
NewFramebuffer(gpu::Element pixelFormat = gpu::Element::COLOR_SRGBA_32);
void run(const render::RenderContextPointer& renderContext, Output& output);
protected:
gpu::Element _pixelFormat;
private:
gpu::FramebufferPointer _outputFramebuffer;
};
class NewOrDefaultFramebuffer {
public:
using Input = glm::uvec2;


@@ -29,7 +29,6 @@
#include <render/DrawStatus.h>
#include <render/DrawSceneOctree.h>
#include <render/BlurTask.h>
#include <render/ResampleTask.h>
#include "RenderHifi.h"
#include "render-utils/ShaderConstants.h"
@@ -51,7 +50,7 @@
#include "AmbientOcclusionEffect.h"
#include "AntialiasingEffect.h"
#include "ToneMappingEffect.h"
#include "ToneMapAndResampleTask.h"
#include "SubsurfaceScattering.h"
#include "DrawHaze.h"
#include "BloomEffect.h"
@@ -96,7 +95,7 @@ RenderDeferredTask::RenderDeferredTask()
void RenderDeferredTask::configure(const Config& config) {
// Propagate resolution scale to sub jobs who need it
auto preparePrimaryBufferConfig = config.getConfig<PreparePrimaryFramebuffer>("PreparePrimaryBuffer");
auto preparePrimaryBufferConfig = config.getConfig<PreparePrimaryFramebuffer>("PreparePrimaryBufferDeferred");
assert(preparePrimaryBufferConfig);
preparePrimaryBufferConfig->setResolutionScale(config.resolutionScale);
}
@@ -146,7 +145,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
const auto jitter = task.addJob<JitterSample>("JitterCam");
// GPU jobs: Start preparing the primary, deferred and lighting buffer
const auto scaledPrimaryFramebuffer = task.addJob<PreparePrimaryFramebuffer>("PreparePrimaryBuffer");
const auto scaledPrimaryFramebuffer = task.addJob<PreparePrimaryFramebuffer>("PreparePrimaryBufferDeferred");
// Prepare deferred, generate the shared Deferred Frame Transform. Only valid with the scaled frame buffer
const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform", jitter);
@@ -238,23 +237,22 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
const auto bloomInputs = BloomEffect::Inputs(deferredFrameTransform, lightingFramebuffer, bloomFrame).asVarying();
task.addJob<BloomEffect>("Bloom", bloomInputs);
const auto destFramebuffer = static_cast<gpu::FramebufferPointer>(nullptr);
// Lighting Buffer ready for tone mapping
const auto toneMappingInputs = ToneMappingDeferred::Input(lightingFramebuffer, scaledPrimaryFramebuffer).asVarying();
const auto toneMappedBuffer = task.addJob<ToneMappingDeferred>("ToneMapping", toneMappingInputs);
const auto toneMappingInputs = ToneMapAndResample::Input(lightingFramebuffer, destFramebuffer).asVarying();
const auto toneMappedBuffer = task.addJob<ToneMapAndResample>("ToneMapping", toneMappingInputs);
// Debugging task is happening in the "over" layer after tone mapping and just before HUD
{ // Debug the bounds of the rendered items, still look at the zbuffer
const auto extraDebugBuffers = RenderDeferredTaskDebug::ExtraBuffers(linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, ambientOcclusionUniforms, scatteringResource, velocityBuffer);
const auto debugInputs = RenderDeferredTaskDebug::Input(fetchedItems, shadowTaskOutputs, lightingStageInputs, lightClusters, prepareDeferredOutputs, extraDebugBuffers,
deferredFrameTransform, jitter, lightingModel).asVarying();
deferredFrameTransform, jitter, lightingModel).asVarying();
task.addJob<RenderDeferredTaskDebug>("DebugRenderDeferredTask", debugInputs);
}
// Upscale to finale resolution
const auto primaryFramebuffer = task.addJob<render::UpsampleToBlitFramebuffer>("PrimaryBufferUpscale", toneMappedBuffer);
// HUD Layer
const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(primaryFramebuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying();
const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying();
task.addJob<RenderHUDLayerTask>("RenderHUDLayer", renderHUDLayerInputs);
}
@@ -415,7 +413,6 @@ void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input
const auto debugZoneInputs = DebugZoneLighting::Inputs(deferredFrameTransform, lightFrame, backgroundFrame).asVarying();
task.addJob<DebugZoneLighting>("DrawZoneStack", debugZoneInputs);
}


@@ -19,7 +19,6 @@
#include <gpu/Texture.h>
#include <graphics/ShaderConstants.h>
#include <render/ShapePipeline.h>
#include <render/ResampleTask.h>
#include <render/FilterTask.h>
@@ -28,7 +27,7 @@
#include "StencilMaskPass.h"
#include "ZoneRenderer.h"
#include "FadeEffect.h"
#include "ToneMappingEffect.h"
#include "ToneMapAndResampleTask.h"
#include "BackgroundStage.h"
#include "FramebufferCache.h"
#include "TextureCache.h"
@@ -51,7 +50,7 @@ extern void initForwardPipelines(ShapePlumber& plumber);
void RenderForwardTask::configure(const Config& config) {
// Propagate resolution scale to sub jobs who need it
auto preparePrimaryBufferConfig = config.getConfig<PreparePrimaryFramebufferMSAA>("PreparePrimaryBuffer");
auto preparePrimaryBufferConfig = config.getConfig<PreparePrimaryFramebufferMSAA>("PreparePrimaryBufferForward");
assert(preparePrimaryBufferConfig);
preparePrimaryBufferConfig->setResolutionScale(config.resolutionScale);
}
@@ -99,7 +98,7 @@ void RenderForwardTask::build(JobModel& task, const render::Varying& input, rend
// GPU jobs: Start preparing the main framebuffer
const auto scaledPrimaryFramebuffer = task.addJob<PreparePrimaryFramebufferMSAA>("PreparePrimaryBuffer");
const auto scaledPrimaryFramebuffer = task.addJob<PreparePrimaryFramebufferMSAA>("PreparePrimaryBufferForward");
// Prepare deferred, generate the shared Deferred Frame Transform. Only valid with the scaled frame buffer
const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform");
@@ -141,34 +140,17 @@ void RenderForwardTask::build(JobModel& task, const render::Varying& input, rend
task.addJob<DebugZoneLighting>("DrawZoneStack", debugZoneInputs);
}
#if defined(Q_OS_ANDROID)
const auto newResolvedFramebuffer = task.addJob<NewFramebuffer>("MakeResolvingFramebuffer", gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10));
// Just resolve the msaa
const auto resolveInputs = ResolveFramebuffer::Inputs(scaledPrimaryFramebuffer, static_cast<gpu::FramebufferPointer>(nullptr)).asVarying();
const auto resolvedFramebuffer = task.addJob<ResolveFramebuffer>("Resolve", resolveInputs);
const auto toneMappedBuffer = resolvedFramebuffer;
#else
const auto newResolvedFramebuffer = task.addJob<NewOrDefaultFramebuffer>("MakeResolvingFramebuffer");
// Just resolve the msaa
const auto resolveInputs = ResolveFramebuffer::Inputs(scaledPrimaryFramebuffer, newResolvedFramebuffer).asVarying();
const auto resolvedFramebuffer = task.addJob<ResolveFramebuffer>("Resolve", resolveInputs);
// Lighting Buffer ready for tone mapping
// Forward rendering on GLES doesn't support tonemapping to and from the same FBO, so we specify
// the output FBO as null, which causes the tonemapping to target the blit framebuffer
const auto toneMappingInputs = ToneMappingDeferred::Input(resolvedFramebuffer, resolvedFramebuffer).asVarying();
const auto toneMappedBuffer = task.addJob<ToneMappingDeferred>("ToneMapping", toneMappingInputs);
#endif
// Upscale to finale resolution
const auto primaryFramebuffer = task.addJob<render::UpsampleToBlitFramebuffer>("PrimaryBufferUpscale", toneMappedBuffer);
const auto destFramebuffer = static_cast<gpu::FramebufferPointer>(nullptr);
const auto toneMappingInputs = ToneMapAndResample::Input(resolvedFramebuffer, destFramebuffer).asVarying();
const auto toneMappedBuffer = task.addJob<ToneMapAndResample>("ToneMapping", toneMappingInputs);
// HUD Layer
const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(primaryFramebuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying();
const auto renderHUDLayerInputs = RenderHUDLayerTask::Input(toneMappedBuffer, lightingModel, hudOpaque, hudTransparent, hazeFrame).asVarying();
task.addJob<RenderHUDLayerTask>("RenderHUDLayer", renderHUDLayerInputs);
}
@@ -176,8 +158,8 @@ gpu::FramebufferPointer PreparePrimaryFramebufferMSAA::createFramebuffer(const c
gpu::FramebufferPointer framebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create(name));
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
auto colorFormat = gpu::Element::COLOR_SRGBA_32;
auto colorFormat = gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10);
auto colorTexture =
gpu::Texture::createRenderBufferMultisample(colorFormat, frameSize.x, frameSize.y, numSamples, defaultSampler);
framebuffer->setRenderBuffer(0, colorTexture);


@@ -50,11 +50,13 @@ public:
const float SCALE_RANGE_MIN = 0.1f;
const float SCALE_RANGE_MAX = 2.0f;
resolutionScale = std::max(SCALE_RANGE_MIN, std::min(SCALE_RANGE_MAX, scale));
//emit dirty();
}
int getNumSamples() const { return numSamples; }
void setNumSamples(int num) {
numSamples = std::max(1, std::min(32, num));
emit dirty();
}
signals:


@@ -0,0 +1,110 @@
//
// ToneMapAndResampleTask.cpp
// libraries/render-utils/src
//
// Created by Anna Brewer on 7/3/19.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "ToneMapAndResampleTask.h"
#include <gpu/Context.h>
#include <shaders/Shaders.h>
#include "render-utils/ShaderConstants.h"
#include "StencilMaskPass.h"
#include "FramebufferCache.h"
using namespace render;
using namespace shader::gpu::program;
using namespace shader::render_utils::program;
gpu::PipelinePointer ToneMapAndResample::_pipeline;
gpu::PipelinePointer ToneMapAndResample::_mirrorPipeline;
ToneMapAndResample::ToneMapAndResample() {
Parameters parameters;
_parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) &parameters));
}
void ToneMapAndResample::init() {
// shared_ptr to gpu::State
gpu::StatePointer blitState = gpu::StatePointer(new gpu::State());
blitState->setDepthTest(gpu::State::DepthTest(false, false));
blitState->setColorWriteMask(true, true, true, true);
_pipeline = gpu::PipelinePointer(gpu::Pipeline::create(gpu::Shader::createProgram(toneMapping), blitState));
_mirrorPipeline = gpu::PipelinePointer(gpu::Pipeline::create(gpu::Shader::createProgram(toneMapping_mirrored), blitState));
}
void ToneMapAndResample::setExposure(float exposure) {
auto& params = _parametersBuffer.get<Parameters>();
if (params._exposure != exposure) {
_parametersBuffer.edit<Parameters>()._exposure = exposure;
_parametersBuffer.edit<Parameters>()._twoPowExposure = pow(2.0, exposure);
}
}
void ToneMapAndResample::setToneCurve(ToneCurve curve) {
auto& params = _parametersBuffer.get<Parameters>();
if (params._toneCurve != (int)curve) {
_parametersBuffer.edit<Parameters>()._toneCurve = (int)curve;
}
}
void ToneMapAndResample::configure(const Config& config) {
setExposure(config.exposure);
setToneCurve((ToneCurve)config.curve);
}
void ToneMapAndResample::run(const RenderContextPointer& renderContext, const Input& input, Output& output) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
RenderArgs* args = renderContext->args;
auto lightingBuffer = input.get0()->getRenderBuffer(0);
auto destinationFramebuffer = input.get1();
if (!destinationFramebuffer) {
destinationFramebuffer = args->_blitFramebuffer;
}
if (!lightingBuffer || !destinationFramebuffer) {
return;
}
if (!_pipeline) {
init();
}
const auto bufferSize = destinationFramebuffer->getSize();
auto srcBufferSize = glm::ivec2(lightingBuffer->getDimensions());
glm::ivec4 destViewport{ 0, 0, bufferSize.x, bufferSize.y };
gpu::doInBatch("Resample::run", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setFramebuffer(destinationFramebuffer);
batch.setViewportTransform(destViewport);
batch.setProjectionTransform(glm::mat4());
batch.resetViewTransform();
batch.setPipeline(args->_renderMode == RenderArgs::MIRROR_RENDER_MODE ? _mirrorPipeline : _pipeline);
batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(srcBufferSize, args->_viewport));
batch.setUniformBuffer(render_utils::slot::buffer::ToneMappingParams, _parametersBuffer);
batch.setResourceTexture(render_utils::slot::texture::ToneMappingColor, lightingBuffer);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
// Set full final viewport
args->_viewport = destViewport;
output = destinationFramebuffer;
}


@@ -1,16 +1,16 @@
//
// ToneMappingEffect.h
// ToneMapAndResample.h
// libraries/render-utils/src
//
// Created by Sam Gateau on 12/7/2015.
// Copyright 2015 High Fidelity, Inc.
// Created by Anna Brewer on 7/3/19.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_ToneMappingEffect_h
#define hifi_ToneMappingEffect_h
#ifndef hifi_ToneMapAndResample_h
#define hifi_ToneMapAndResample_h
#include <DependencyManager.h>
#include <NumericalConstants.h>
@@ -20,29 +20,66 @@
#include <render/Forward.h>
#include <render/DrawTask.h>
enum class ToneCurve {
// Different tone curve available
None,
Gamma22,
Reinhard,
Filmic,
};
class ToneMappingConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(float exposure MEMBER exposure WRITE setExposure);
Q_PROPERTY(int curve MEMBER curve WRITE setCurve);
class ToneMappingEffect {
public:
ToneMappingEffect();
virtual ~ToneMappingEffect() {}
ToneMappingConfig() : render::Job::Config(true) {}
void render(RenderArgs* args, const gpu::TexturePointer& lightingBuffer, const gpu::FramebufferPointer& destinationBuffer);
void setExposure(float newExposure) { exposure = newExposure; emit dirty(); }
void setCurve(int newCurve) { curve = std::max((int)ToneCurve::None, std::min((int)ToneCurve::Filmic, newCurve)); emit dirty(); }
float exposure{ 0.0f };
int curve{ (int)ToneCurve::Gamma22 };
signals:
void dirty();
};
class ToneMapAndResample {
public:
ToneMapAndResample();
virtual ~ToneMapAndResample() {}
void render(RenderArgs* args, const gpu::TexturePointer& lightingBuffer, gpu::FramebufferPointer& destinationBuffer);
void setExposure(float exposure);
float getExposure() const { return _parametersBuffer.get<Parameters>()._exposure; }
// Different tone curve available
enum ToneCurve {
None = 0,
Gamma22,
Reinhard,
Filmic,
};
void setToneCurve(ToneCurve curve);
ToneCurve getToneCurve() const { return (ToneCurve)_parametersBuffer.get<Parameters>()._toneCurve; }
private:
// Inputs: lightingFramebuffer, destinationFramebuffer
using Input = render::VaryingSet2<gpu::FramebufferPointer, gpu::FramebufferPointer>;
using Output = gpu::FramebufferPointer;
using Config = ToneMappingConfig;
using JobModel = render::Job::ModelIO<ToneMapAndResample, Input, Output, Config>;
void configure(const Config& config);
void run(const render::RenderContextPointer& renderContext, const Input& input, Output& output);
protected:
static gpu::PipelinePointer _pipeline;
static gpu::PipelinePointer _mirrorPipeline;
gpu::FramebufferPointer _destinationFrameBuffer;
float _factor{ 2.0f };
gpu::FramebufferPointer getResampledFrameBuffer(const gpu::FramebufferPointer& sourceFramebuffer);
private:
gpu::PipelinePointer _blitLightBuffer;
// Class describing the uniform buffer with all the parameters common to the tone mapping shaders
@@ -51,46 +88,16 @@ private:
float _exposure = 0.0f;
float _twoPowExposure = 1.0f;
glm::vec2 spareA;
int _toneCurve = Gamma22;
int _toneCurve = (int)ToneCurve::Gamma22;
glm::vec3 spareB;
Parameters() {}
};
typedef gpu::BufferView UniformBufferView;
gpu::BufferView _parametersBuffer;
void init(RenderArgs* args);
void init();
};
class ToneMappingConfig : public render::Job::Config {
Q_OBJECT
Q_PROPERTY(float exposure MEMBER exposure WRITE setExposure);
Q_PROPERTY(int curve MEMBER curve WRITE setCurve);
public:
ToneMappingConfig() : render::Job::Config(true) {}
void setExposure(float newExposure) { exposure = newExposure; emit dirty(); }
void setCurve(int newCurve) { curve = std::max((int)ToneMappingEffect::None, std::min((int)ToneMappingEffect::Filmic, newCurve)); emit dirty(); }
float exposure{ 0.0f };
int curve{ ToneMappingEffect::Gamma22 };
signals:
void dirty();
};
class ToneMappingDeferred {
public:
// Inputs: lightingFramebuffer, destinationFramebuffer
using Input = render::VaryingSet2<gpu::FramebufferPointer, gpu::FramebufferPointer>;
using Output = gpu::FramebufferPointer;
using Config = ToneMappingConfig;
using JobModel = render::Job::ModelIO<ToneMappingDeferred, Input, Output, Config>;
void configure(const Config& config);
void run(const render::RenderContextPointer& renderContext, const Input& input, Output& output);
ToneMappingEffect _toneMappingEffect;
};
#endif // hifi_ToneMappingEffect_h
#endif // hifi_ToneMapAndResample_h
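
For reference, the ToneCurve values that the config clamps between (None through Filmic) select curves implemented in the tone mapping shader, which this diff only touches for the mirrored-sampling branch further down. A rough CPU-side sketch of the textbook forms of those operators follows; it is illustrative only and not taken from hifi's shader code.

// Illustrative versions of the usual operators behind the ToneCurve enum.
// These are standard textbook forms, not hifi's exact shader implementation.
#include <algorithm>
#include <cmath>
#include <cstdio>

enum class ToneCurve { None = 0, Gamma22, Reinhard, Filmic };

float applyToneCurve(float c, ToneCurve curve) {
    switch (curve) {
    case ToneCurve::None:
        return c;                                     // linear passthrough
    case ToneCurve::Gamma22:
        return std::pow(c, 1.0f / 2.2f);              // plain display gamma
    case ToneCurve::Reinhard:
        return std::pow(c / (1.0f + c), 1.0f / 2.2f); // Reinhard, then gamma
    case ToneCurve::Filmic: {
        float x = std::max(0.0f, c - 0.004f);         // Hejl/Burgess-Dawson fit (gamma built in)
        return (x * (6.2f * x + 0.5f)) / (x * (6.2f * x + 1.7f) + 0.06f);
    }
    }
    return c;
}

int main() {
    const float sample = 0.75f;   // an arbitrary exposed linear value
    for (int i = (int)ToneCurve::None; i <= (int)ToneCurve::Filmic; ++i) {
        std::printf("curve %d -> %f\n", i, applyToneCurve(sample, (ToneCurve)i));
    }
    return 0;
}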


@@ -1,96 +0,0 @@
//
// ToneMappingEffect.cpp
// libraries/render-utils/src
//
// Created by Sam Gateau on 12/7/2015.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "ToneMappingEffect.h"
#include <gpu/Context.h>
#include <shaders/Shaders.h>
#include "render-utils/ShaderConstants.h"
#include "StencilMaskPass.h"
#include "FramebufferCache.h"
ToneMappingEffect::ToneMappingEffect() {
Parameters parameters;
_parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) &parameters));
}
void ToneMappingEffect::init(RenderArgs* args) {
auto blitProgram = gpu::Shader::createProgram(shader::render_utils::program::toneMapping);
auto blitState = std::make_shared<gpu::State>();
blitState->setColorWriteMask(true, true, true, true);
_blitLightBuffer = gpu::PipelinePointer(gpu::Pipeline::create(blitProgram, blitState));
}
void ToneMappingEffect::setExposure(float exposure) {
auto& params = _parametersBuffer.get<Parameters>();
if (params._exposure != exposure) {
_parametersBuffer.edit<Parameters>()._exposure = exposure;
_parametersBuffer.edit<Parameters>()._twoPowExposure = pow(2.0, exposure);
}
}
void ToneMappingEffect::setToneCurve(ToneCurve curve) {
auto& params = _parametersBuffer.get<Parameters>();
if (params._toneCurve != curve) {
_parametersBuffer.edit<Parameters>()._toneCurve = curve;
}
}
void ToneMappingEffect::render(RenderArgs* args, const gpu::TexturePointer& lightingBuffer, const gpu::FramebufferPointer& destinationFramebuffer) {
if (!_blitLightBuffer) {
init(args);
}
if (!lightingBuffer || !destinationFramebuffer) {
return;
}
auto framebufferSize = glm::ivec2(lightingBuffer->getDimensions());
gpu::doInBatch("ToneMappingEffect::render", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setFramebuffer(destinationFramebuffer);
// FIXME: Generate the Luminosity map
//batch.generateTextureMips(lightingBuffer);
batch.setViewportTransform(args->_viewport);
batch.setProjectionTransform(glm::mat4());
batch.resetViewTransform();
batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(framebufferSize, args->_viewport));
batch.setPipeline(_blitLightBuffer);
batch.setUniformBuffer(render_utils::slot::buffer::ToneMappingParams, _parametersBuffer);
batch.setResourceTexture(render_utils::slot::texture::ToneMappingColor, lightingBuffer);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
}
void ToneMappingDeferred::configure(const Config& config) {
_toneMappingEffect.setExposure(config.exposure);
_toneMappingEffect.setToneCurve((ToneMappingEffect::ToneCurve)config.curve);
}
void ToneMappingDeferred::run(const render::RenderContextPointer& renderContext, const Input& input, Output& output) {
auto lightingBuffer = input.get0()->getRenderBuffer(0);
auto destFbo = input.get1();
if (!destFbo) {
destFbo = renderContext->args->_blitFramebuffer;
}
_toneMappingEffect.render(renderContext->args, lightingBuffer, destFbo);
output = destFbo;
}


@@ -1 +1,2 @@
VERTEX gpu::vertex::DrawViewportQuadTransformTexcoord
DEFINES mirrored:f


@@ -43,7 +43,11 @@ layout(location=0) in vec2 varTexCoord0;
layout(location=0) out vec4 outFragColor;
void main(void) {
<@if HIFI_USE_MIRRORED@>
vec4 fragColorRaw = texture(colorMap, vec2(1.0 - varTexCoord0.x, varTexCoord0.y));
<@else@>
vec4 fragColorRaw = texture(colorMap, varTexCoord0);
<@endif@>
vec3 fragColor = fragColorRaw.xyz;
vec3 srcColor = fragColor * getTwoPowExposure();


@@ -45,7 +45,7 @@ Rectangle {
anchors.right: parent.right
spacing: 5
Repeater {
model: [ "MSAA:PrepareFramebuffer:numSamples:4:1"
model: [ "MSAA:PreparePrimaryBufferForward:numSamples:4:1"
]
ConfigSlider {
label: qsTr(modelData.split(":")[0])