Mirror of https://github.com/AleziaKurdis/overte.git (synced 2025-04-07 12:12:39 +02:00)
Commit e1c369dc10
45 changed files with 1482 additions and 800 deletions
@@ -12,7 +12,7 @@
Script.include("cookies.js");

var MENU = "Developer>Render>Debug Deferred Buffer";
var ACTIONS = ["Off", "Diffuse", "Alpha", "Specular", "Roughness", "Normal", "Depth", "Lighting", "Custom"];
var ACTIONS = ["Off", "Diffuse", "Metallic", "Roughness", "Normal", "Depth", "Lighting", "Shadow", "PyramidDepth", "AmbientOcclusion", "OcclusionBlurred", "Custom"];
var SETTINGS_KEY = "EngineDebugScript.DebugMode";

Number.prototype.clamp = function(min, max) {

@@ -52,6 +52,7 @@ var overlaysCounter = new CounterWidget(panel, "Overlays", Render.overlay3D);

var resizing = false;
var previousMode = Settings.getValue(SETTINGS_KEY, -1);
previousMode = 8;
Menu.addActionGroup(MENU, ACTIONS, ACTIONS[previousMode + 1]);
Render.deferredDebugMode = previousMode;
Render.deferredDebugSize = { x: 0.0, y: -1.0, z: 1.0, w: 1.0 }; // Reset to default size

@@ -98,12 +99,70 @@ panel.newSlider("Tone Mapping Exposure", -10, 10,
    function() { return Render.tone.exposure; },
    function (value) { return (value); });

panel.newSlider("Ambient Occlusion Resolution Level", 0.0, 4.0,
    function (value) { Render.ambientOcclusion.resolutionLevel = value; },
    function() { return Render.ambientOcclusion.resolutionLevel; },
    function (value) { return (value); });

panel.newSlider("Ambient Occlusion Radius", 0.0, 2.0,
    function (value) { Render.ambientOcclusion.radius = value; },
    function() { return Render.ambientOcclusion.radius; },
    function (value) { return (value.toFixed(2)); });

panel.newSlider("Ambient Occlusion Level", 0.0, 1.0,
    function (value) { Render.ambientOcclusion.level = value; },
    function() { return Render.ambientOcclusion.level; },
    function (value) { return (value.toFixed(2)); });

panel.newSlider("Ambient Occlusion Num Samples", 1, 32,
    function (value) { Render.ambientOcclusion.numSamples = value; },
    function() { return Render.ambientOcclusion.numSamples; },
    function (value) { return (value); });

panel.newSlider("Ambient Occlusion Num Spiral Turns", 0.0, 30.0,
    function (value) { Render.ambientOcclusion.numSpiralTurns = value; },
    function() { return Render.ambientOcclusion.numSpiralTurns; },
    function (value) { return (value.toFixed(2)); });

panel.newCheckbox("Ambient Occlusion Dithering",
    function (value) { Render.ambientOcclusion.ditheringEnabled = value; },
    function() { return Render.ambientOcclusion.ditheringEnabled; },
    function (value) { return (value); });

panel.newSlider("Ambient Occlusion Falloff Bias", 0.0, 0.2,
    function (value) { Render.ambientOcclusion.falloffBias = value; },
    function() { return Render.ambientOcclusion.falloffBias; },
    function (value) { return (value.toFixed(2)); });

panel.newSlider("Ambient Occlusion Edge Sharpness", 0.0, 1.0,
    function (value) { Render.ambientOcclusion.edgeSharpness = value; },
    function() { return Render.ambientOcclusion.edgeSharpness; },
    function (value) { return (value.toFixed(2)); });

panel.newSlider("Ambient Occlusion Blur Radius", 0.0, 6.0,
    function (value) { Render.ambientOcclusion.blurRadius = value; },
    function() { return Render.ambientOcclusion.blurRadius; },
    function (value) { return (value); });

panel.newSlider("Ambient Occlusion Blur Deviation", 0.0, 3.0,
    function (value) { Render.ambientOcclusion.blurDeviation = value; },
    function() { return Render.ambientOcclusion.blurDeviation; },
    function (value) { return (value.toFixed(2)); });

panel.newSlider("Ambient Occlusion GPU time", 0.0, 10.0,
    function (value) {},
    function() { return Render.ambientOcclusion.gpuTime; },
    function (value) { return (value.toFixed(2) + " ms"); });

var tickTackPeriod = 500;

function updateCounters() {
    opaquesCounter.update();
    transparentsCounter.update();
    overlaysCounter.update();
    panel.update("Ambient Occlusion GPU time");
}
Script.setInterval(updateCounters, tickTackPeriod);
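A note on the indexing in this script: the setting stored under SETTINGS_KEY defaults to -1, which stands for "Off", so the checked menu action is looked up as ACTIONS[previousMode + 1] (mode -1 selects ACTIONS[0] = "Off", mode 0 selects "Diffuse", and so on). The hard-coded previousMode = 8 therefore sets Render.deferredDebugMode to 8 and pre-selects ACTIONS[9] ("AmbientOcclusion" in the new list) at startup.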
@@ -3771,6 +3771,8 @@ void Application::displaySide(RenderArgs* renderArgs, Camera& theCamera, bool se
        auto engineContext = _renderEngine->getRenderContext();
        renderInterface->setItemCounts(engineContext->getItemsConfig());
        renderInterface->setJobGPUTimes(engineContext->getAmbientOcclusion().gpuTime);
    }

    activeRenderingThread = nullptr;
@@ -186,7 +186,7 @@ namespace MenuOption {
    const QString CopyPath = "Copy Path to Clipboard";
    const QString CoupleEyelids = "Couple Eyelids";
    const QString CrashInterface = "Crash Interface";
    const QString DebugAmbientOcclusion = "Debug Ambient Occlusion";
    const QString DebugAmbientOcclusion = "Ambient Occlusion";
    const QString DecreaseAvatarSize = "Decrease Avatar Size";
    const QString DeleteBookmark = "Delete Bookmark...";
    const QString DisableActivityLogger = "Disable Activity Logger";
@@ -98,7 +98,11 @@ void GLBackend::do_getQuery(Batch& batch, size_t paramOffset) {
        glGetQueryObjectui64vEXT(glquery->_qo, GL_QUERY_RESULT, &glquery->_result);
    #endif
    #else
        glGetQueryObjectui64v(glquery->_qo, GL_QUERY_RESULT, &glquery->_result);
        glGetQueryObjectui64v(glquery->_qo, GL_QUERY_RESULT_AVAILABLE, &glquery->_result);
        if (glquery->_result == GL_TRUE) {
            glGetQueryObjectui64v(glquery->_qo, GL_QUERY_RESULT, &glquery->_result);
            query->triggerReturnHandler(glquery->_result);
        }
    #endif
        (void)CHECK_GL_ERROR();
}
@@ -146,6 +146,68 @@ public:
    case gpu::RGB:
    case gpu::RGBA:
        texel.internalFormat = GL_RED;
        switch (dstFormat.getType()) {
        case gpu::UINT32: {
            texel.internalFormat = GL_R32UI;
            break;
        }
        case gpu::INT32: {
            texel.internalFormat = GL_R32I;
            break;
        }
        case gpu::NUINT32: {
            texel.internalFormat = GL_RED;
            break;
        }
        case gpu::NINT32: {
            texel.internalFormat = GL_RED_SNORM;
            break;
        }
        case gpu::FLOAT: {
            texel.internalFormat = GL_R32F;
            break;
        }
        case gpu::UINT16: {
            texel.internalFormat = GL_R16UI;
            break;
        }
        case gpu::INT16: {
            texel.internalFormat = GL_R16I;
            break;
        }
        case gpu::NUINT16: {
            texel.internalFormat = GL_R16;
            break;
        }
        case gpu::NINT16: {
            texel.internalFormat = GL_R16_SNORM;
            break;
        }
        case gpu::HALF: {
            texel.internalFormat = GL_R16F;
            break;
        }
        case gpu::UINT8: {
            texel.internalFormat = GL_R8UI;
            break;
        }
        case gpu::INT8: {
            texel.internalFormat = GL_R8I;
            break;
        }
        case gpu::NUINT8: {
            texel.internalFormat = GL_R8;
            break;
        }
        case gpu::NINT8: {
            texel.internalFormat = GL_R8_SNORM;
            break;
        }
        case gpu::NUM_TYPES: { // quiet compiler
            Q_UNREACHABLE();
        }
        }
        break;
    case gpu::DEPTH:
        texel.format = GL_DEPTH_COMPONENT; // It's depth component to load it
@@ -10,11 +10,13 @@
//
#include "Query.h"

#include <QDebug>
#include "GPULogging.h"
#include "Batch.h"

using namespace gpu;

Query::Query()
Query::Query(const Handler& returnHandler) :
    _returnHandler(returnHandler)
{
}

@@ -22,6 +24,48 @@ Query::~Query()
{
}

double Query::getElapsedTime() {
    return 0.0;
double Query::getElapsedTime() const {
    return ((double) _queryResult) * 0.000001;
}

void Query::triggerReturnHandler(uint64_t queryResult) {
    _queryResult = queryResult;
    if (_returnHandler) {
        _returnHandler(*this);
    }
}

RangeTimer::RangeTimer() {
    for (int i = 0; i < QUERY_QUEUE_SIZE; i++) {
        _timerQueries.push_back(std::make_shared<gpu::Query>([&, i] (const Query& query) {
            _tailIndex ++;
            auto elapsedTime = query.getElapsedTime();
            _movingAverage.addSample(elapsedTime);
        }));
    }
}

void RangeTimer::begin(gpu::Batch& batch) {
    _headIndex++;
    batch.beginQuery(_timerQueries[rangeIndex(_headIndex)]);
}
void RangeTimer::end(gpu::Batch& batch) {
    if (_headIndex < 0) {
        return;
    }
    batch.endQuery(_timerQueries[rangeIndex(_headIndex)]);

    if (_tailIndex < 0) {
        _tailIndex = _headIndex;
    }

    // Pull the previous tail query, hoping to see it return
    if (_tailIndex != _headIndex) {
        batch.getQuery(_timerQueries[rangeIndex(_tailIndex)]);
    }
}

double RangeTimer::getAverage() const {
    return _movingAverage.average;
}
@@ -13,26 +13,59 @@

#include <assert.h>
#include <memory>
#include <functional>
#include <vector>
#include <SimpleMovingAverage.h>

#include "Format.h"

namespace gpu {

class Batch;

class Query {
public:
    Query();
    using Handler = std::function<void(const Query&)>;

    Query(const Handler& returnHandler);
    ~Query();

    uint32 queryResult;

    double getElapsedTime();
    double getElapsedTime() const;

    const GPUObjectPointer gpuObject {};
    void triggerReturnHandler(uint64_t queryResult);
protected:
    Handler _returnHandler;

    uint64_t _queryResult = 0;
};

typedef std::shared_ptr<Query> QueryPointer;
typedef std::vector< QueryPointer > Queries;


// The gpu RangeTimer returns an estimate of the time taken by a chunk of work delimited by the
// begin and end calls, repeated over several frames.
// The result is always a late average of the time spent on that same task a few cycles ago.
class RangeTimer {
public:
    RangeTimer();
    void begin(gpu::Batch& batch);
    void end(gpu::Batch& batch);

    double getAverage() const;

protected:

    static const int QUERY_QUEUE_SIZE { 4 };

    gpu::Queries _timerQueries;
    int _headIndex = -1;
    int _tailIndex = -1;
    MovingAverage<double, QUERY_QUEUE_SIZE * 2> _movingAverage;

    int rangeIndex(int index) const { return (index % QUERY_QUEUE_SIZE); }
};
};

#endif
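A minimal usage sketch of the new RangeTimer (the surrounding function and the batch name are assumptions; this mirrors how AmbientOcclusionEffect drives its _gpuTimer further down in this commit). Note that getElapsedTime() multiplies the raw query result by 0.000001, that is, it converts the GL timer value from nanoseconds to milliseconds, so getAverage() reports milliseconds and lags a few frames behind.

// Sketch only: time a stretch of GPU work and read back a smoothed duration.
gpu::RangeTimer gpuTimer;

void recordFrame(gpu::Batch& batch) {
    gpuTimer.begin(batch);   // begins the next timer query in the 4-entry ring
    // ... record the batch work to be measured ...
    gpuTimer.end(batch);     // ends it and polls the oldest outstanding query
}

// A few frames later, once queries have returned:
double gpuTimeMs = gpuTimer.getAverage();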
@ -12,69 +12,135 @@
|
|||
|
||||
#include <glm/gtc/random.hpp>
|
||||
|
||||
#include <algorithm> //min max and more
|
||||
|
||||
|
||||
#include <PathUtils.h>
|
||||
#include <SharedUtil.h>
|
||||
#include <gpu/Context.h>
|
||||
#include <gpu/StandardShaderLib.h>
|
||||
#include "RenderUtilsLogging.h"
|
||||
|
||||
#include "AmbientOcclusionEffect.h"
|
||||
#include "TextureCache.h"
|
||||
#include "FramebufferCache.h"
|
||||
#include "DependencyManager.h"
|
||||
#include "ViewFrustum.h"
|
||||
#include "GeometryCache.h"
|
||||
|
||||
#include "ambient_occlusion_vert.h"
|
||||
#include "ambient_occlusion_frag.h"
|
||||
#include "gaussian_blur_vertical_vert.h"
|
||||
#include "gaussian_blur_horizontal_vert.h"
|
||||
#include "gaussian_blur_frag.h"
|
||||
#include "occlusion_blend_frag.h"
|
||||
#include "ssao_makePyramid_frag.h"
|
||||
#include "ssao_makeOcclusion_frag.h"
|
||||
#include "ssao_makeHorizontalBlur_frag.h"
|
||||
#include "ssao_makeVerticalBlur_frag.h"
|
||||
|
||||
class GaussianDistribution {
|
||||
public:
|
||||
|
||||
static double integral(float x, float deviation) {
|
||||
return 0.5 * erf((double)x / ((double)deviation * sqrt(2.0)));
|
||||
}
|
||||
|
||||
static double rangeIntegral(float x0, float x1, float deviation) {
|
||||
return integral(x1, deviation) - integral(x0, deviation);
|
||||
}
|
||||
|
||||
static std::vector<float> evalSampling(int samplingRadius, float deviation) {
|
||||
std::vector<float> coefs(samplingRadius + 1, 0.0f);
|
||||
|
||||
// corner case when radius is 0 or under
|
||||
if (samplingRadius <= 0) {
|
||||
coefs[0] = 1.0f;
|
||||
return coefs;
|
||||
}
|
||||
|
||||
// Evaluate the range integral of width 1 for each sample from the center up to the penultimate one
|
||||
float halfWidth = 0.5f;
|
||||
double sum = 0.0;
|
||||
for (int i = 0; i < samplingRadius; i++) {
|
||||
float x = (float) i;
|
||||
double sample = rangeIntegral(x - halfWidth, x + halfWidth, deviation);
|
||||
coefs[i] = sample;
|
||||
sum += sample;
|
||||
}
|
||||
|
||||
// last sample goes to infinity
|
||||
float lastSampleX0 = (float) samplingRadius - halfWidth;
|
||||
float largeEnough = lastSampleX0 + 1000.0f * deviation;
|
||||
double sample = rangeIntegral(lastSampleX0, largeEnough, deviation);
|
||||
coefs[samplingRadius] = sample;
|
||||
sum += sample;
|
||||
|
||||
return coefs;
|
||||
}
|
||||
|
||||
static void evalSampling(float* coefs, unsigned int coefsLength, int samplingRadius, float deviation) {
|
||||
auto coefsVector = evalSampling(samplingRadius, deviation);
|
||||
if (coefsLength> coefsVector.size() + 1) {
|
||||
unsigned int coefsNum = 0;
|
||||
for (auto s : coefsVector) {
|
||||
coefs[coefsNum] = s;
|
||||
coefsNum++;
|
||||
}
|
||||
for (;coefsNum < coefsLength; coefsNum++) {
|
||||
coefs[coefsNum] = 0.0f;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
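Restating the math of evalSampling above (a summary of what the code already does, not new behavior): integral(x, deviation) is the cumulative Gaussian from 0 to x, so each blur tap i below the radius r receives the probability mass of a width-1 bin, and the last tap absorbs the remaining tail:

$$ c_i = \int_{i-\frac{1}{2}}^{\,i+\frac{1}{2}} \frac{1}{\sigma\sqrt{2\pi}}\, e^{-t^2/(2\sigma^2)}\, dt
      = \tfrac{1}{2}\left[\operatorname{erf}\!\Big(\tfrac{i+\frac{1}{2}}{\sigma\sqrt{2}}\Big) - \operatorname{erf}\!\Big(\tfrac{i-\frac{1}{2}}{\sigma\sqrt{2}}\Big)\right], \quad 0 \le i < r,
\qquad c_r \approx \int_{r-\frac{1}{2}}^{\infty} \frac{1}{\sigma\sqrt{2\pi}}\, e^{-t^2/(2\sigma^2)}\, dt $$

where sigma is the blur deviation and r the sampling radius; the accumulated sum is computed but the coefficients are not normalized by it.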
|
||||
|
||||
AmbientOcclusion::AmbientOcclusion() {
|
||||
const int AmbientOcclusionEffect_FrameTransformSlot = 0;
|
||||
const int AmbientOcclusionEffect_ParamsSlot = 1;
|
||||
const int AmbientOcclusionEffect_DepthMapSlot = 0;
|
||||
const int AmbientOcclusionEffect_PyramidMapSlot = 0;
|
||||
const int AmbientOcclusionEffect_OcclusionMapSlot = 0;
|
||||
|
||||
AmbientOcclusionEffect::AmbientOcclusionEffect() {
|
||||
FrameTransform frameTransform;
|
||||
_frameTransformBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(FrameTransform), (const gpu::Byte*) &frameTransform));
|
||||
Parameters parameters;
|
||||
_parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) ¶meters));
|
||||
}
|
||||
|
||||
const gpu::PipelinePointer& AmbientOcclusion::getOcclusionPipeline() {
|
||||
if (!_occlusionPipeline) {
|
||||
auto vs = gpu::Shader::createVertex(std::string(ambient_occlusion_vert));
|
||||
auto ps = gpu::Shader::createPixel(std::string(ambient_occlusion_frag));
|
||||
const gpu::PipelinePointer& AmbientOcclusionEffect::getPyramidPipeline() {
|
||||
if (!_pyramidPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
|
||||
auto ps = gpu::Shader::createPixel(std::string(ssao_makePyramid_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("depthTexture"), 0));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("normalTexture"), 1));
|
||||
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionFrameTransformBuffer"), AmbientOcclusionEffect_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionParamsBuffer"), AmbientOcclusionEffect_ParamsSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), AmbientOcclusionEffect_DepthMapSlot));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
_gScaleLoc = program->getUniforms().findLocation("g_scale");
|
||||
_gBiasLoc = program->getUniforms().findLocation("g_bias");
|
||||
_gSampleRadiusLoc = program->getUniforms().findLocation("g_sample_rad");
|
||||
_gIntensityLoc = program->getUniforms().findLocation("g_intensity");
|
||||
|
||||
_nearLoc = program->getUniforms().findLocation("near");
|
||||
_depthScaleLoc = program->getUniforms().findLocation("depthScale");
|
||||
_depthTexCoordOffsetLoc = program->getUniforms().findLocation("depthTexCoordOffset");
|
||||
_depthTexCoordScaleLoc = program->getUniforms().findLocation("depthTexCoordScale");
|
||||
_renderTargetResLoc = program->getUniforms().findLocation("renderTargetRes");
|
||||
_renderTargetResInvLoc = program->getUniforms().findLocation("renderTargetResInv");
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
state->setDepthTest(false, false, gpu::LESS_EQUAL);
|
||||
// Stencil-test the pyramid pass for object pixels only, not the background
|
||||
state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
|
||||
|
||||
// Blend on transparent
|
||||
state->setBlendFunction(false,
|
||||
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
|
||||
gpu::State::DEST_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ZERO);
|
||||
state->setColorWriteMask(true, false, false, false);
|
||||
|
||||
// Link the occlusion FBO to texture
|
||||
_occlusionBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create(gpu::Element::COLOR_RGBA_32,
|
||||
DependencyManager::get<FramebufferCache>()->getFrameBufferSize().width(), DependencyManager::get<FramebufferCache>()->getFrameBufferSize().height()));
|
||||
auto format = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA);
|
||||
auto width = _occlusionBuffer->getWidth();
|
||||
auto height = _occlusionBuffer->getHeight();
|
||||
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
|
||||
_occlusionTexture = gpu::TexturePointer(gpu::Texture::create2D(format, width, height, defaultSampler));
|
||||
// Good to go add the brand new pipeline
|
||||
_pyramidPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
return _pyramidPipeline;
|
||||
}
|
||||
|
||||
const gpu::PipelinePointer& AmbientOcclusionEffect::getOcclusionPipeline() {
|
||||
if (!_occlusionPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
|
||||
auto ps = gpu::Shader::createPixel(std::string(ssao_makeOcclusion_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionFrameTransformBuffer"), AmbientOcclusionEffect_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionParamsBuffer"), AmbientOcclusionEffect_ParamsSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("pyramidMap"), AmbientOcclusionEffect_PyramidMapSlot));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
state->setColorWriteMask(true, true, true, false);
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_occlusionPipeline = gpu::Pipeline::create(program, state);
|
||||
|
@ -82,32 +148,46 @@ const gpu::PipelinePointer& AmbientOcclusion::getOcclusionPipeline() {
|
|||
return _occlusionPipeline;
|
||||
}
|
||||
|
||||
const gpu::PipelinePointer& AmbientOcclusion::getVBlurPipeline() {
|
||||
if (!_vBlurPipeline) {
|
||||
auto vs = gpu::Shader::createVertex(std::string(gaussian_blur_vertical_vert));
|
||||
auto ps = gpu::Shader::createPixel(std::string(gaussian_blur_frag));
|
||||
|
||||
const gpu::PipelinePointer& AmbientOcclusionEffect::getHBlurPipeline() {
|
||||
if (!_hBlurPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
|
||||
auto ps = gpu::Shader::createPixel(std::string(ssao_makeHorizontalBlur_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionFrameTransformBuffer"), AmbientOcclusionEffect_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionParamsBuffer"), AmbientOcclusionEffect_ParamsSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("occlusionMap"), AmbientOcclusionEffect_OcclusionMapSlot));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
state->setDepthTest(false, false, gpu::LESS_EQUAL);
|
||||
state->setColorWriteMask(true, true, true, false);
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_hBlurPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
return _hBlurPipeline;
|
||||
}
|
||||
|
||||
// Blend on transparent
|
||||
state->setBlendFunction(false,
|
||||
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
|
||||
gpu::State::DEST_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ZERO);
|
||||
|
||||
// Link the horizontal blur FBO to texture
|
||||
_vBlurBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create(gpu::Element::COLOR_RGBA_32,
|
||||
DependencyManager::get<FramebufferCache>()->getFrameBufferSize().width(), DependencyManager::get<FramebufferCache>()->getFrameBufferSize().height()));
|
||||
auto format = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA);
|
||||
auto width = _vBlurBuffer->getWidth();
|
||||
auto height = _vBlurBuffer->getHeight();
|
||||
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
|
||||
_vBlurTexture = gpu::TexturePointer(gpu::Texture::create2D(format, width, height, defaultSampler));
|
||||
const gpu::PipelinePointer& AmbientOcclusionEffect::getVBlurPipeline() {
|
||||
if (!_vBlurPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
|
||||
auto ps = gpu::Shader::createPixel(std::string(ssao_makeVerticalBlur_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionFrameTransformBuffer"), AmbientOcclusionEffect_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("ambientOcclusionParamsBuffer"), AmbientOcclusionEffect_ParamsSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("occlusionMap"), AmbientOcclusionEffect_OcclusionMapSlot));
|
||||
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
// Vertical blur writes just the final occlusion value in the alpha channel
|
||||
state->setColorWriteMask(true, true, true, false);
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_vBlurPipeline = gpu::Pipeline::create(program, state);
|
||||
|
@ -115,171 +195,228 @@ const gpu::PipelinePointer& AmbientOcclusion::getVBlurPipeline() {
|
|||
return _vBlurPipeline;
|
||||
}
|
||||
|
||||
const gpu::PipelinePointer& AmbientOcclusion::getHBlurPipeline() {
|
||||
if (!_hBlurPipeline) {
|
||||
auto vs = gpu::Shader::createVertex(std::string(gaussian_blur_horizontal_vert));
|
||||
auto ps = gpu::Shader::createPixel(std::string(gaussian_blur_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
state->setDepthTest(false, false, gpu::LESS_EQUAL);
|
||||
|
||||
// Blend on transparent
|
||||
state->setBlendFunction(false,
|
||||
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
|
||||
gpu::State::DEST_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ZERO);
|
||||
|
||||
// Link the horizontal blur FBO to texture
|
||||
_hBlurBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create(gpu::Element::COLOR_RGBA_32,
|
||||
DependencyManager::get<FramebufferCache>()->getFrameBufferSize().width(), DependencyManager::get<FramebufferCache>()->getFrameBufferSize().height()));
|
||||
auto format = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA);
|
||||
auto width = _hBlurBuffer->getWidth();
|
||||
auto height = _hBlurBuffer->getHeight();
|
||||
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
|
||||
_hBlurTexture = gpu::TexturePointer(gpu::Texture::create2D(format, width, height, defaultSampler));
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_hBlurPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
return _hBlurPipeline;
|
||||
void AmbientOcclusionEffect::setDepthInfo(float nearZ, float farZ) {
|
||||
_frameTransformBuffer.edit<FrameTransform>().depthInfo = glm::vec4(nearZ*farZ, farZ -nearZ, -farZ, 0.0f);
|
||||
}
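A hedged note on the packing above: depthInfo = { n*f, f - n, -f } is exactly the set of constants needed to turn a [0, 1] depth-buffer sample d back into (negative) eye-space Z, presumably consumed that way by the ssao_* shaders, which are not part of this excerpt:

$$ Z_{eye}(d) = \frac{n\,f}{(f-n)\,d - f}, \qquad Z_{eye}(0) = -n, \qquad Z_{eye}(1) = -f. $$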
|
||||
|
||||
const gpu::PipelinePointer& AmbientOcclusion::getBlendPipeline() {
|
||||
if (!_blendPipeline) {
|
||||
auto vs = gpu::Shader::createVertex(std::string(ambient_occlusion_vert));
|
||||
auto ps = gpu::Shader::createPixel(std::string(occlusion_blend_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
void AmbientOcclusionEffect::setResolutionLevel(int level) {
|
||||
const int MAX_RESOLUTION_LEVEL = 4;
|
||||
level = std::max(0, std::min(level, MAX_RESOLUTION_LEVEL));
|
||||
if (level != getResolutionLevel()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().resolutionInfo;
|
||||
current.x = (float)level;
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("blurredOcclusionTexture"), 0));
|
||||
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
state->setDepthTest(false, false, gpu::LESS_EQUAL);
|
||||
|
||||
// Blend on transparent
|
||||
state->setBlendFunction(true,
|
||||
gpu::State::INV_SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::SRC_ALPHA);
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_blendPipeline = gpu::Pipeline::create(program, state);
|
||||
// Communicate the change to the Framebuffer cache
|
||||
DependencyManager::get<FramebufferCache>()->setAmbientOcclusionResolutionLevel(level);
|
||||
}
|
||||
return _blendPipeline;
|
||||
}
|
||||
|
||||
void AmbientOcclusion::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
|
||||
void AmbientOcclusionEffect::setRadius(float radius) {
|
||||
const double RADIUS_POWER = 6.0;
|
||||
radius = std::max(0.01f, radius);
|
||||
if (radius != getRadius()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().radiusInfo;
|
||||
current.x = radius;
|
||||
current.y = radius * radius;
|
||||
current.z = (float)(1.0 / pow((double)radius, RADIUS_POWER));
|
||||
}
|
||||
}
|
||||
|
||||
void AmbientOcclusionEffect::setLevel(float level) {
|
||||
level = std::max(0.01f, level);
|
||||
if (level != getLevel()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().radiusInfo;
|
||||
current.w = level;
|
||||
}
|
||||
}
|
||||
|
||||
void AmbientOcclusionEffect::setDithering(bool enabled) {
|
||||
if (enabled != isDitheringEnabled()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().ditheringInfo;
|
||||
current.x = (float)enabled;
|
||||
}
|
||||
}
|
||||
|
||||
void AmbientOcclusionEffect::setBordering(bool enabled) {
|
||||
if (enabled != isBorderingEnabled()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().ditheringInfo;
|
||||
current.w = (float)enabled;
|
||||
}
|
||||
}
|
||||
|
||||
void AmbientOcclusionEffect::setFalloffBias(float bias) {
|
||||
bias = std::max(0.0f, std::min(bias, 0.2f));
|
||||
if (bias != getFalloffBias()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().ditheringInfo;
|
||||
current.z = (float)bias;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void AmbientOcclusionEffect::setNumSamples(int numSamples) {
|
||||
numSamples = std::max(1.0f, (float) numSamples);
|
||||
if (numSamples != getNumSamples()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().sampleInfo;
|
||||
current.x = numSamples;
|
||||
current.y = 1.0f / numSamples;
|
||||
}
|
||||
}
|
||||
|
||||
void AmbientOcclusionEffect::setNumSpiralTurns(float numTurns) {
|
||||
numTurns = std::max(0.0f, (float)numTurns);
|
||||
if (numTurns != getNumSpiralTurns()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().sampleInfo;
|
||||
current.z = numTurns;
|
||||
}
|
||||
}
|
||||
|
||||
void AmbientOcclusionEffect::setEdgeSharpness(float sharpness) {
|
||||
sharpness = std::max(0.0f, (float)sharpness);
|
||||
if (sharpness != getEdgeSharpness()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().blurInfo;
|
||||
current.x = sharpness;
|
||||
}
|
||||
}
|
||||
|
||||
void AmbientOcclusionEffect::setBlurRadius(int radius) {
|
||||
const int MAX_BLUR_RADIUS = 6;
|
||||
radius = std::max(0, std::min(MAX_BLUR_RADIUS, radius));
|
||||
if (radius != getBlurRadius()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().blurInfo;
|
||||
current.y = (float)radius;
|
||||
updateGaussianDistribution();
|
||||
}
|
||||
}
|
||||
|
||||
void AmbientOcclusionEffect::setBlurDeviation(float deviation) {
|
||||
deviation = std::max(0.0f, deviation);
|
||||
if (deviation != getBlurDeviation()) {
|
||||
auto& current = _parametersBuffer.edit<Parameters>().blurInfo;
|
||||
current.z = deviation;
|
||||
updateGaussianDistribution();
|
||||
}
|
||||
}
|
||||
void AmbientOcclusionEffect::updateGaussianDistribution() {
|
||||
auto coefs = _parametersBuffer.edit<Parameters>()._gaussianCoefs;
|
||||
GaussianDistribution::evalSampling(coefs, Parameters::GAUSSIAN_COEFS_LENGTH, getBlurRadius(), getBlurDeviation());
|
||||
}
|
||||
|
||||
void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
|
||||
assert(renderContext->getArgs());
|
||||
assert(renderContext->getArgs()->_viewFrustum);
|
||||
|
||||
RenderArgs* args = renderContext->getArgs();
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
auto framebufferCache = DependencyManager::get<FramebufferCache>();
|
||||
QSize framebufferSize = framebufferCache->getFrameBufferSize();
|
||||
float fbWidth = framebufferSize.width();
|
||||
float fbHeight = framebufferSize.height();
|
||||
float sMin = args->_viewport.x / fbWidth;
|
||||
float sWidth = args->_viewport.z / fbWidth;
|
||||
float tMin = args->_viewport.y / fbHeight;
|
||||
float tHeight = args->_viewport.w / fbHeight;
|
||||
|
||||
auto framebufferCache = DependencyManager::get<FramebufferCache>();
|
||||
auto depthBuffer = framebufferCache->getPrimaryDepthTexture();
|
||||
auto normalBuffer = framebufferCache->getDeferredNormalTexture();
|
||||
auto pyramidFBO = framebufferCache->getDepthPyramidFramebuffer();
|
||||
auto occlusionFBO = framebufferCache->getOcclusionFramebuffer();
|
||||
auto occlusionBlurredFBO = framebufferCache->getOcclusionBlurredFramebuffer();
|
||||
|
||||
QSize framebufferSize = framebufferCache->getFrameBufferSize();
|
||||
float sMin = args->_viewport.x / (float)framebufferSize.width();
|
||||
float sWidth = args->_viewport.z / (float)framebufferSize.width();
|
||||
float tMin = args->_viewport.y / (float)framebufferSize.height();
|
||||
float tHeight = args->_viewport.w / (float)framebufferSize.height();
|
||||
|
||||
auto resolutionLevel = getResolutionLevel();
|
||||
|
||||
// Update the depth info with near and far (same for stereo)
|
||||
setDepthInfo(args->_viewFrustum->getNearClip(), args->_viewFrustum->getFarClip());
|
||||
|
||||
_frameTransformBuffer.edit<FrameTransform>().pixelInfo = args->_viewport;
|
||||
//_parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;
|
||||
|
||||
// Running in stereo?
|
||||
bool isStereo = args->_context->isStereo();
|
||||
if (!isStereo) {
|
||||
// Eval the mono projection
|
||||
mat4 monoProjMat;
|
||||
args->_viewFrustum->evalProjectionMatrix(monoProjMat);
|
||||
_frameTransformBuffer.edit<FrameTransform>().projection[0] = monoProjMat;
|
||||
_frameTransformBuffer.edit<FrameTransform>().stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
|
||||
|
||||
} else {
|
||||
|
||||
mat4 projMats[2];
|
||||
mat4 eyeViews[2];
|
||||
args->_context->getStereoProjections(projMats);
|
||||
args->_context->getStereoViews(eyeViews);
|
||||
|
||||
for (int i = 0; i < 2; i++) {
|
||||
// Compose the mono Eye space to Stereo clip space Projection Matrix
|
||||
auto sideViewMat = projMats[i] * eyeViews[i];
|
||||
_frameTransformBuffer.edit<FrameTransform>().projection[i] = sideViewMat;
|
||||
}
|
||||
|
||||
_frameTransformBuffer.edit<FrameTransform>().stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
|
||||
|
||||
}
|
||||
|
||||
auto pyramidPipeline = getPyramidPipeline();
|
||||
auto occlusionPipeline = getOcclusionPipeline();
|
||||
auto firstHBlurPipeline = getHBlurPipeline();
|
||||
auto lastVBlurPipeline = getVBlurPipeline();
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
_gpuTimer.begin(batch);
|
||||
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
batch.setProjectionTransform(glm::mat4());
|
||||
batch.setViewTransform(Transform());
|
||||
|
||||
Transform model;
|
||||
model.setTranslation(glm::vec3(sMin, tMin, 0.0f));
|
||||
model.setScale(glm::vec3(sWidth, tHeight, 1.0f));
|
||||
batch.setModelTransform(model);
|
||||
|
||||
batch.setUniformBuffer(AmbientOcclusionEffect_FrameTransformSlot, _frameTransformBuffer);
|
||||
batch.setUniformBuffer(AmbientOcclusionEffect_ParamsSlot, _parametersBuffer);
|
||||
|
||||
|
||||
glm::mat4 projMat;
|
||||
Transform viewMat;
|
||||
args->_viewFrustum->evalProjectionMatrix(projMat);
|
||||
args->_viewFrustum->evalViewTransform(viewMat);
|
||||
batch.setProjectionTransform(projMat);
|
||||
batch.setViewTransform(viewMat);
|
||||
batch.setModelTransform(Transform());
|
||||
// Pyramid pass
|
||||
batch.setFramebuffer(pyramidFBO);
|
||||
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(args->_viewFrustum->getFarClip(), 0.0f, 0.0f, 0.0f));
|
||||
batch.setPipeline(pyramidPipeline);
|
||||
batch.setResourceTexture(AmbientOcclusionEffect_DepthMapSlot, depthBuffer);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
// Occlusion step
|
||||
getOcclusionPipeline();
|
||||
batch.setResourceTexture(0, framebufferCache->getPrimaryDepthTexture());
|
||||
batch.setResourceTexture(1, framebufferCache->getDeferredNormalTexture());
|
||||
_occlusionBuffer->setRenderBuffer(0, _occlusionTexture);
|
||||
batch.setFramebuffer(_occlusionBuffer);
|
||||
// Make pyramid mips
|
||||
batch.generateTextureMips(pyramidFBO->getRenderBuffer(0));
|
||||
|
||||
// Occlusion uniforms
|
||||
g_scale = 1.0f;
|
||||
g_bias = 1.0f;
|
||||
g_sample_rad = 1.0f;
|
||||
g_intensity = 1.0f;
|
||||
// Adjust Viewport for rendering resolution
|
||||
if (resolutionLevel > 0) {
|
||||
glm::ivec4 viewport(args->_viewport.x, args->_viewport.y, args->_viewport.z >> resolutionLevel, args->_viewport.w >> resolutionLevel);
|
||||
batch.setViewportTransform(viewport);
|
||||
}
|
||||
|
||||
// Bind the first gpu::Pipeline we need - for calculating occlusion buffer
|
||||
batch.setPipeline(getOcclusionPipeline());
|
||||
batch._glUniform1f(_gScaleLoc, g_scale);
|
||||
batch._glUniform1f(_gBiasLoc, g_bias);
|
||||
batch._glUniform1f(_gSampleRadiusLoc, g_sample_rad);
|
||||
batch._glUniform1f(_gIntensityLoc, g_intensity);
|
||||
// Occlusion pass
|
||||
batch.setFramebuffer(occlusionFBO);
|
||||
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(1.0f));
|
||||
batch.setPipeline(occlusionPipeline);
|
||||
batch.setResourceTexture(AmbientOcclusionEffect_PyramidMapSlot, pyramidFBO->getRenderBuffer(0));
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
// setup uniforms for unpacking a view-space position from the depth buffer
|
||||
// This is code taken from DeferredLightEffect.render() method in DeferredLightingEffect.cpp.
|
||||
// DeferredBuffer.slh shows how the unpacking is done and what variables are needed.
|
||||
|
||||
if (getBlurRadius() > 0) {
|
||||
// Blur 1st pass
|
||||
batch.setFramebuffer(occlusionBlurredFBO);
|
||||
batch.setPipeline(firstHBlurPipeline);
|
||||
batch.setResourceTexture(AmbientOcclusionEffect_OcclusionMapSlot, occlusionFBO->getRenderBuffer(0));
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
// initialize the view-space unpacking uniforms using frustum data
|
||||
float left, right, bottom, top, nearVal, farVal;
|
||||
glm::vec4 nearClipPlane, farClipPlane;
|
||||
// Blur 2nd pass
|
||||
batch.setFramebuffer(occlusionFBO);
|
||||
batch.setPipeline(lastVBlurPipeline);
|
||||
batch.setResourceTexture(AmbientOcclusionEffect_OcclusionMapSlot, occlusionBlurredFBO->getRenderBuffer(0));
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
}
|
||||
|
||||
_gpuTimer.end(batch);
|
||||
|
||||
args->_viewFrustum->computeOffAxisFrustum(left, right, bottom, top, nearVal, farVal, nearClipPlane, farClipPlane);
|
||||
|
||||
float depthScale = (farVal - nearVal) / farVal;
|
||||
float nearScale = -1.0f / nearVal;
|
||||
float depthTexCoordScaleS = (right - left) * nearScale / sWidth;
|
||||
float depthTexCoordScaleT = (top - bottom) * nearScale / tHeight;
|
||||
float depthTexCoordOffsetS = left * nearScale - sMin * depthTexCoordScaleS;
|
||||
float depthTexCoordOffsetT = bottom * nearScale - tMin * depthTexCoordScaleT;
|
||||
|
||||
// now set the position-unpacking uniforms
|
||||
batch._glUniform1f(_nearLoc, nearVal);
|
||||
batch._glUniform1f(_depthScaleLoc, depthScale);
|
||||
batch._glUniform2f(_depthTexCoordOffsetLoc, depthTexCoordOffsetS, depthTexCoordOffsetT);
|
||||
batch._glUniform2f(_depthTexCoordScaleLoc, depthTexCoordScaleS, depthTexCoordScaleT);
|
||||
|
||||
batch._glUniform2f(_renderTargetResLoc, fbWidth, fbHeight);
|
||||
batch._glUniform2f(_renderTargetResInvLoc, 1.0f / fbWidth, 1.0f / fbHeight);
|
||||
|
||||
glm::vec4 color(0.0f, 0.0f, 0.0f, 1.0f);
|
||||
glm::vec2 bottomLeft(-1.0f, -1.0f);
|
||||
glm::vec2 topRight(1.0f, 1.0f);
|
||||
glm::vec2 texCoordTopLeft(0.0f, 0.0f);
|
||||
glm::vec2 texCoordBottomRight(1.0f, 1.0f);
|
||||
DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
|
||||
|
||||
// Vertical blur step
|
||||
getVBlurPipeline();
|
||||
batch.setResourceTexture(0, _occlusionTexture);
|
||||
_vBlurBuffer->setRenderBuffer(0, _vBlurTexture);
|
||||
batch.setFramebuffer(_vBlurBuffer);
|
||||
|
||||
// Bind the second gpu::Pipeline we need - for calculating blur buffer
|
||||
batch.setPipeline(getVBlurPipeline());
|
||||
|
||||
DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
|
||||
|
||||
// Horizontal blur step
|
||||
getHBlurPipeline();
|
||||
batch.setResourceTexture(0, _vBlurTexture);
|
||||
_hBlurBuffer->setRenderBuffer(0, _hBlurTexture);
|
||||
batch.setFramebuffer(_hBlurBuffer);
|
||||
|
||||
// Bind the third gpu::Pipeline we need - for calculating blur buffer
|
||||
batch.setPipeline(getHBlurPipeline());
|
||||
|
||||
DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
|
||||
|
||||
// Blend step
|
||||
getBlendPipeline();
|
||||
batch.setResourceTexture(0, _hBlurTexture);
|
||||
batch.setFramebuffer(framebufferCache->getDeferredFramebuffer());
|
||||
|
||||
// Bind the fourth gpu::Pipeline we need - for blending the primary color buffer with blurred occlusion texture
|
||||
batch.setPipeline(getBlendPipeline());
|
||||
|
||||
DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
|
||||
});
|
||||
}
|
||||
|
|
|
@@ -16,53 +16,118 @@

#include "render/DrawTask.h"

class AmbientOcclusion {

class AmbientOcclusionEffect {
public:

    AmbientOcclusion();
    AmbientOcclusionEffect();

    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);
    using JobModel = render::Task::Job::Model<AmbientOcclusion>;

    void setResolutionLevel(int level);
    int getResolutionLevel() const { return _parametersBuffer.get<Parameters>().resolutionInfo.x; }

    const gpu::PipelinePointer& getOcclusionPipeline();
    const gpu::PipelinePointer& getHBlurPipeline();
    const gpu::PipelinePointer& getVBlurPipeline();
    const gpu::PipelinePointer& getBlendPipeline();
    void setRadius(float radius);
    float getRadius() const { return _parametersBuffer.get<Parameters>().radiusInfo.x; }

    // Obscurance level, which intensifies or dims down the obscurance effect
    void setLevel(float level);
    float getLevel() const { return _parametersBuffer.get<Parameters>().radiusInfo.w; }

    // When on, randomizes the distribution of rays per pixel; should always be true
    void setDithering(bool enabled);
    bool isDitheringEnabled() const { return _parametersBuffer.get<Parameters>().ditheringInfo.x; }

    // When on, avoids evaluating information from non-existing pixels outside the frame; should always be true
    void setBordering(bool enabled);
    bool isBorderingEnabled() const { return _parametersBuffer.get<Parameters>().ditheringInfo.w; }

    // Falloff bias
    void setFalloffBias(float bias);
    int getFalloffBias() const { return (int)_parametersBuffer.get<Parameters>().ditheringInfo.z; }

    // Number of samples per pixel to evaluate the Obscurance
    void setNumSamples(int numSamples);
    int getNumSamples() const { return (int)_parametersBuffer.get<Parameters>().sampleInfo.x; }

    // Number of spiral turns defining an angle span to distribute the sample ray directions
    void setNumSpiralTurns(float numTurns);
    float getNumSpiralTurns() const { return _parametersBuffer.get<Parameters>().sampleInfo.z; }

    // Edge blurring setting
    void setEdgeSharpness(float sharpness);
    int getEdgeSharpness() const { return (int)_parametersBuffer.get<Parameters>().blurInfo.x; }

    // Blurring radius
    // 0 means no blurring
    const int MAX_BLUR_RADIUS = 6;
    void setBlurRadius(int radius);
    int getBlurRadius() const { return (int)_parametersBuffer.get<Parameters>().blurInfo.y; }

    void setBlurDeviation(float deviation);
    float getBlurDeviation() const { return _parametersBuffer.get<Parameters>().blurInfo.z; }


    double getGPUTime() const { return _gpuTimer.getAverage(); }

    using JobModel = render::Task::Job::Model<AmbientOcclusionEffect>;

private:

    // Uniforms for AO
    gpu::int32 _gScaleLoc;
    gpu::int32 _gBiasLoc;
    gpu::int32 _gSampleRadiusLoc;
    gpu::int32 _gIntensityLoc;
    void updateGaussianDistribution();
    void setDepthInfo(float nearZ, float farZ);

    typedef gpu::BufferView UniformBufferView;

    gpu::int32 _nearLoc;
    gpu::int32 _depthScaleLoc;
    gpu::int32 _depthTexCoordOffsetLoc;
    gpu::int32 _depthTexCoordScaleLoc;
    gpu::int32 _renderTargetResLoc;
    gpu::int32 _renderTargetResInvLoc;
    // Class describing the uniform buffer with the transform info common to the AO shaders
    // It is updated every frame
    class FrameTransform {
    public:
        // Pixel info is { viewport width, height, and stereo on/off }
        glm::vec4 pixelInfo;
        // Depth info is { n.f, f - n, -f }
        glm::vec4 depthInfo;
        // Stereo info
        glm::vec4 stereoInfo { 0.0 };
        // Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
        glm::mat4 projection[2];

        FrameTransform() {}
    };
    gpu::BufferView _frameTransformBuffer;

    // Class describing the uniform buffer with all the parameters common to the AO shaders
    class Parameters {
    public:
        // Resolution info
        glm::vec4 resolutionInfo { -1.0f, 0.0f, 0.0f, 0.0f };
        // radius info is { R, R^2, 1 / R^6, ObscuranceScale }
        glm::vec4 radiusInfo{ 0.5f, 0.5f * 0.5f, 1.0f / (0.25f * 0.25f * 0.25f), 1.0f };
        // Dithering info
        glm::vec4 ditheringInfo { 0.0f, 0.0f, 0.01f, 1.0f };
        // Sampling info
        glm::vec4 sampleInfo { 11.0f, 1.0/11.0f, 7.0f, 1.0f };
        // Blurring info
        glm::vec4 blurInfo { 1.0f, 3.0f, 2.0f, 0.0f };
        // Gaussian distribution coefficients, starting at the center tap (sampling radius max is 6)
        const static int GAUSSIAN_COEFS_LENGTH = 8;
        float _gaussianCoefs[GAUSSIAN_COEFS_LENGTH];

        Parameters() {}
    };
    gpu::BufferView _parametersBuffer;

    const gpu::PipelinePointer& getPyramidPipeline();
    const gpu::PipelinePointer& getOcclusionPipeline();
    const gpu::PipelinePointer& getHBlurPipeline(); // first
    const gpu::PipelinePointer& getVBlurPipeline(); // second

    float g_scale;
    float g_bias;
    float g_sample_rad;
    float g_intensity;

    gpu::PipelinePointer _pyramidPipeline;
    gpu::PipelinePointer _occlusionPipeline;
    gpu::PipelinePointer _hBlurPipeline;
    gpu::PipelinePointer _vBlurPipeline;
    gpu::PipelinePointer _blendPipeline;

    gpu::FramebufferPointer _occlusionBuffer;
    gpu::FramebufferPointer _hBlurBuffer;
    gpu::FramebufferPointer _vBlurBuffer;

    gpu::TexturePointer _occlusionTexture;
    gpu::TexturePointer _hBlurTexture;
    gpu::TexturePointer _vBlurTexture;

    gpu::RangeTimer _gpuTimer;
};

#endif // hifi_AmbientOcclusionEffect_h
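For quick reference, the setters shown earlier in AmbientOcclusionEffect.cpp imply the following packing of the Parameters vec4s (components marked "?" are not written by any setter in this diff):

// resolutionInfo = { resolution level (0..4), ?, ?, ? }
// radiusInfo     = { R, R^2, 1/R^6, obscurance level }
// ditheringInfo  = { dithering on/off, ?, falloff bias, bordering on/off }
// sampleInfo     = { numSamples, 1/numSamples, numSpiralTurns, ? }
// blurInfo       = { edge sharpness, blur radius, blur deviation, ? }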
@@ -33,7 +33,10 @@ enum Slots {
    Specular,
    Depth,
    Lighting,
    Shadow
    Shadow,
    Pyramid,
    AmbientOcclusion,
    AmbientOcclusionBlurred
};

static const std::string DEFAULT_DIFFUSE_SHADER {

@@ -41,11 +44,7 @@ static const std::string DEFAULT_DIFFUSE_SHADER {
    " return vec4(pow(texture(diffuseMap, uv).xyz, vec3(1.0 / 2.2)), 1.0);"
    " }"
};
static const std::string DEFAULT_ALPHA_SHADER {
    "vec4 getFragmentColor() {"
    " return vec4(vec3(texture(diffuseMap, uv).a), 1.0);"
    " }"
};

static const std::string DEFAULT_SPECULAR_SHADER {
    "vec4 getFragmentColor() {"
    " return vec4(texture(specularMap, uv).xyz, 1.0);"

@@ -58,7 +57,7 @@ static const std::string DEFAULT_ROUGHNESS_SHADER {
};
static const std::string DEFAULT_NORMAL_SHADER {
    "vec4 getFragmentColor() {"
    " return vec4(normalize(texture(normalMap, uv).xyz), 1.0);"
    " return vec4(normalize(texture(normalMap, uv).xyz * 2.0 - vec3(1.0)), 1.0);"
    " }"
};
static const std::string DEFAULT_DEPTH_SHADER {

@@ -71,13 +70,35 @@ static const std::string DEFAULT_LIGHTING_SHADER {
    " return vec4(pow(texture(lightingMap, uv).xyz, vec3(1.0 / 2.2)), 1.0);"
    " }"
};
static const std::string DEFAULT_SHADOW_SHADER {

static const std::string DEFAULT_SHADOW_SHADER{
    "uniform sampler2D shadowMapColor;"
    // The actual shadowMap is a sampler2DShadow, so we cannot normally sample it
    "vec4 getFragmentColor() {"
    " return vec4(texture(shadowMapColor, uv).xyz, 1.0);"
    " }"
};

static const std::string DEFAULT_PYRAMID_DEPTH_SHADER {
    "vec4 getFragmentColor() {"
    " return vec4(vec3(1.0 - texture(pyramidMap, uv).x * 0.01), 1.0);"
    //" return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
    " }"
};

static const std::string DEFAULT_AMBIENT_OCCLUSION_SHADER{
    "vec4 getFragmentColor() {"
    " return vec4(vec3(texture(occlusionMap, uv).x), 1.0);"
    // When drawing color " return vec4(vec3(texture(occlusionMap, uv).xyz), 1.0);"
    // when drawing normal " return vec4(normalize(texture(occlusionMap, uv).xyz * 2.0 - vec3(1.0)), 1.0);"
    " }"
};
static const std::string DEFAULT_AMBIENT_OCCLUSION_BLURRED_SHADER{
    "vec4 getFragmentColor() {"
    " return vec4(vec3(texture(occlusionBlurredMap, uv).x), 1.0);"
    " }"
};

static const std::string DEFAULT_CUSTOM_SHADER {
    "vec4 getFragmentColor() {"
    " return vec4(1.0, 0.0, 0.0, 1.0);"

@@ -108,8 +129,6 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Modes mode, std::string cus
    switch (mode) {
        case DiffuseMode:
            return DEFAULT_DIFFUSE_SHADER;
        case AlphaMode:
            return DEFAULT_ALPHA_SHADER;
        case SpecularMode:
            return DEFAULT_SPECULAR_SHADER;
        case RoughnessMode:

@@ -122,6 +141,12 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Modes mode, std::string cus
            return DEFAULT_LIGHTING_SHADER;
        case ShadowMode:
            return DEFAULT_SHADOW_SHADER;
        case PyramidDepthMode:
            return DEFAULT_PYRAMID_DEPTH_SHADER;
        case AmbientOcclusionMode:
            return DEFAULT_AMBIENT_OCCLUSION_SHADER;
        case AmbientOcclusionBlurredMode:
            return DEFAULT_AMBIENT_OCCLUSION_BLURRED_SHADER;
        case CustomMode:
            return getFileContent(customFile, DEFAULT_CUSTOM_SHADER);
    }

@@ -170,6 +195,9 @@ const gpu::PipelinePointer& DebugDeferredBuffer::getPipeline(Modes mode, std::st
    slotBindings.insert(gpu::Shader::Binding("depthMap", Depth));
    slotBindings.insert(gpu::Shader::Binding("lightingMap", Lighting));
    slotBindings.insert(gpu::Shader::Binding("shadowMapColor", Shadow));
    slotBindings.insert(gpu::Shader::Binding("pyramidMap", Pyramid));
    slotBindings.insert(gpu::Shader::Binding("occlusionMap", AmbientOcclusion));
    slotBindings.insert(gpu::Shader::Binding("occlusionBlurredMap", AmbientOcclusionBlurred));
    gpu::Shader::makeProgram(*program, slotBindings);

    auto pipeline = gpu::Pipeline::create(program, std::make_shared<gpu::State>());

@@ -226,7 +254,10 @@ void DebugDeferredBuffer::run(const SceneContextPointer& sceneContext, const Ren
    batch.setResourceTexture(Depth, framebufferCache->getPrimaryDepthTexture());
    batch.setResourceTexture(Lighting, framebufferCache->getLightingTexture());
    batch.setResourceTexture(Shadow, lightStage.lights[0]->shadow.framebuffer->getRenderBuffer(0));

    batch.setResourceTexture(Pyramid, framebufferCache->getDepthPyramidTexture());
    batch.setResourceTexture(AmbientOcclusion, framebufferCache->getOcclusionTexture());
    batch.setResourceTexture(AmbientOcclusionBlurred, framebufferCache->getOcclusionBlurredTexture());

    const glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
    const glm::vec2 bottomLeft(renderContext->_deferredDebugSize.x, renderContext->_deferredDebugSize.y);
    const glm::vec2 topRight(renderContext->_deferredDebugSize.z, renderContext->_deferredDebugSize.w);
@@ -27,14 +27,15 @@ public:
private:
    enum Modes : uint8_t {
        DiffuseMode = 0,
        AlphaMode,
        SpecularMode,
        RoughnessMode,
        NormalMode,
        DepthMode,
        LightingMode,
        ShadowMode,

        PyramidDepthMode,
        AmbientOcclusionMode,
        AmbientOcclusionBlurredMode,
        CustomMode // Needs to stay last
    };
    struct CustomPipeline {
@@ -24,6 +24,9 @@ uniform sampler2D specularMap;
// the depth texture
uniform sampler2D depthMap;

// the obscurance texture
uniform sampler2D obscuranceMap;

// the lighting texture
uniform sampler2D lightingMap;

@@ -68,17 +71,21 @@ struct DeferredFragment {
    vec4 position;
    vec3 normal;
    vec3 diffuse;
    float opacity;
    float obscurance;
    vec3 specular;
    float gloss;
    int mode;
};

const int LIGHT_MAPPED = 1;

DeferredFragment unpackDeferredFragment(DeferredTransform deferredTransform, vec2 texcoord) {
    DeferredFragment frag;
    frag.depthVal = texture(depthMap, texcoord).r;
    frag.normalVal = texture(normalMap, texcoord);
    frag.diffuseVal = texture(diffuseMap, texcoord);
    frag.specularVal = texture(specularMap, texcoord);
    frag.obscurance = texture(obscuranceMap, texcoord).x;

    if (getStereoMode(deferredTransform)) {
        if (texcoord.x > 0.5) {

@@ -88,14 +95,19 @@ DeferredFragment unpackDeferredFragment(DeferredTransform deferredTransform, vec
    }
    frag.position = evalEyePositionFromZ(deferredTransform, frag.depthVal, texcoord);

    // Unpack the normal from the map
    // Unpack the normal from the map
    frag.normal = normalize(frag.normalVal.xyz * 2.0 - vec3(1.0));

    frag.mode = 0;
    if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
        frag.mode = LIGHT_MAPPED;
    }

    frag.diffuse = frag.diffuseVal.xyz;
    frag.opacity = frag.diffuseVal.w;
    frag.specular = frag.specularVal.xyz;
    frag.gloss = frag.specularVal.w;

    return frag;
}
@@ -70,7 +70,7 @@ uniform SphericalHarmonics ambientSphere;
<@include model/Light.slh@>

<@func declareEvalAmbientGlobalColor()@>
vec3 evalAmbientGlobalColor(mat4 invViewMat, float shadowAttenuation, vec3 position, vec3 normal, vec3 diffuse, vec3 specular, float gloss) {
vec3 evalAmbientGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 diffuse, vec3 specular, float gloss) {

    // Need the light now
    Light light = getLight();

@@ -79,11 +79,11 @@ vec3 evalAmbientGlobalColor(mat4 invViewMat, float shadowAttenuation, vec3 posit
    vec4 fragEyeVector = invViewMat * vec4(-position, 0.0);
    vec3 fragEyeDir = normalize(fragEyeVector.xyz);

    vec3 color = diffuse.rgb * getLightColor(light) * getLightAmbientIntensity(light);
    vec3 color = diffuse.rgb * getLightColor(light) * obscurance * getLightAmbientIntensity(light);

    vec4 shading = evalFragShading(fragNormal, -getLightDirection(light), fragEyeDir, specular, gloss);

    color += vec3(diffuse * shading.w + shading.rgb) * shadowAttenuation * getLightColor(light) * getLightIntensity(light);
    color += vec3(diffuse * shading.w + shading.rgb) * min(shadowAttenuation, obscurance) * getLightColor(light) * getLightIntensity(light);

    return color;
}
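Summarizing the changed lines above and the variants that follow: the new obscurance input O scales the ambient contribution directly, while the direct-light contribution is gated by min(S, O), with S the shadow attenuation:

$$ \text{color} = \text{diffuse}\cdot A\cdot O \;+\; (\text{diffuse}\cdot s_w + s_{rgb})\cdot \min(S, O)\cdot C_{light}\, I_{light} $$

where A is the ambient term of the particular variant (flat light color, spherical harmonics, or skybox) and s = evalFragShading(...).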
@ -93,7 +93,7 @@ vec3 evalAmbientGlobalColor(mat4 invViewMat, float shadowAttenuation, vec3 posit
|
|||
|
||||
<$declareSphericalHarmonics()$>
|
||||
|
||||
vec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, vec3 position, vec3 normal, vec3 diffuse, vec3 specular, float gloss) {
|
||||
vec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 diffuse, vec3 specular, float gloss) {
|
||||
// Need the light now
|
||||
Light light = getLight();
|
||||
|
||||
|
@ -102,11 +102,11 @@ vec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, vec3
|
|||
vec3 fragEyeDir = normalize(fragEyeVector.xyz);
|
||||
|
||||
vec3 ambientNormal = fragNormal.xyz;
|
||||
vec3 color = diffuse.rgb * evalSphericalLight(ambientSphere, ambientNormal).xyz * getLightAmbientIntensity(light);
|
||||
vec3 color = diffuse.rgb * evalSphericalLight(ambientSphere, ambientNormal).xyz * obscurance * getLightAmbientIntensity(light);
|
||||
|
||||
vec4 shading = evalFragShading(fragNormal, -getLightDirection(light), fragEyeDir, specular, gloss);
|
||||
|
||||
color += vec3(diffuse * shading.w + shading.rgb) * shadowAttenuation * getLightColor(light) * getLightIntensity(light);
|
||||
color += vec3(diffuse * shading.w + shading.rgb) * min(shadowAttenuation, obscurance) * getLightColor(light) * getLightIntensity(light);
|
||||
|
||||
return color;
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ vec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, vec3
|
|||
<$declareSkyboxMap()$>
|
||||
<$declareSphericalHarmonics()$>
|
||||
|
||||
vec3 evalSkyboxGlobalColor(mat4 invViewMat, float shadowAttenuation, vec3 position, vec3 normal, vec3 diffuse, vec3 specular, float gloss) {
|
||||
vec3 evalSkyboxGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 diffuse, vec3 specular, float gloss) {
|
||||
// Need the light now
|
||||
Light light = getLight();
|
||||
|
||||
|
@ -125,18 +125,18 @@ vec3 evalSkyboxGlobalColor(mat4 invViewMat, float shadowAttenuation, vec3 positi
|
|||
vec4 fragEyeVector = invViewMat * vec4(-position, 0.0);
|
||||
vec3 fragEyeDir = normalize(fragEyeVector.xyz);
|
||||
|
||||
vec3 color = diffuse.rgb * evalSphericalLight(ambientSphere, fragNormal).xyz * getLightAmbientIntensity(light);
|
||||
vec3 color = diffuse.rgb * evalSphericalLight(ambientSphere, fragNormal).xyz * obscurance * getLightAmbientIntensity(light);
|
||||
|
||||
vec4 shading = evalFragShading(fragNormal, -getLightDirection(light), fragEyeDir, specular, gloss);
|
||||
|
||||
color += vec3(diffuse * shading.w + shading.rgb) * shadowAttenuation * getLightColor(light) * getLightIntensity(light);
|
||||
color += vec3(diffuse * shading.w + shading.rgb) * min(shadowAttenuation, obscurance) * getLightColor(light) * getLightIntensity(light);
|
||||
|
||||
return color;
|
||||
}
|
||||
<@endfunc@>
|
||||
|
||||
<@func declareEvalLightmappedColor()@>
|
||||
vec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, vec3 normal, vec3 diffuse, vec3 lightmap) {
|
||||
vec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 normal, vec3 diffuse, vec3 lightmap) {
|
||||
|
||||
Light light = getLight();
|
||||
|
||||
|
@ -156,7 +156,7 @@ vec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, vec3 normal,
|
|||
// the ambient term is a small fraction of the lightmap, applied only where the surface is in shadow
|
||||
vec3 ambientLight = (1 - lightAttenuation) * lightmap * getLightAmbientIntensity(light);
|
||||
|
||||
return diffuse * (ambientLight + diffuseLight);
|
||||
return obscurance * diffuse * (ambientLight + diffuseLight);
|
||||
}
|
||||
<@endfunc@>
|
||||
|
||||
|
|
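The signature changes above thread a per-pixel obscurance (ambient occlusion) term through every global-light evaluator: the ambient contribution is scaled by obscurance directly, while the direct contribution is scaled by min(shadowAttenuation, obscurance), so a pixel is never brighter than either factor allows. A scalar sketch of that combination rule, with placeholder lighting terms:

#include <algorithm>
#include <cstdio>

// Sketch of the combination introduced above (scalar stand-in for the vec3
// color math); "ambient" and "direct" are placeholder lighting terms.
static float combine(float ambient, float direct, float shadow, float obscurance) {
    float color = ambient * obscurance;              // AO darkens the ambient light
    color += direct * std::min(shadow, obscurance);  // direct light takes the darker of the two
    return color;
}

int main() {
    // Fully open pixel vs. a corner pixel that is occluded but not shadowed.
    std::printf("open:   %.2f\n", combine(0.2f, 1.0f, 1.0f, 1.0f)); // 1.20
    std::printf("corner: %.2f\n", combine(0.2f, 1.0f, 1.0f, 0.4f)); // 0.48
    return 0;
}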
|
@ -48,6 +48,15 @@ struct LightLocations {
|
|||
int shadowTransformBuffer;
|
||||
};
|
||||
|
||||
enum {
|
||||
DEFERRED_BUFFER_COLOR_UNIT = 0,
|
||||
DEFERRED_BUFFER_NORMAL_UNIT = 1,
|
||||
DEFERRED_BUFFER_EMISSIVE_UNIT = 2,
|
||||
DEFERRED_BUFFER_DEPTH_UNIT = 3,
|
||||
DEFERRED_BUFFER_OBSCURANCE_UNIT = 4,
|
||||
SHADOW_MAP_UNIT = 5,
|
||||
SKYBOX_MAP_UNIT = 6,
|
||||
};
|
||||
static void loadLightProgram(const char* vertSource, const char* fragSource, bool lightVolume, gpu::PipelinePointer& program, LightLocationsPtr& locations);
|
||||
|
||||
void DeferredLightingEffect::init() {
|
||||
|
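The new enum gives every G-buffer input a named texture unit, so the per-frame batch binds and the shader slot bindings set up in loadLightProgram cannot silently drift apart. As a reduced sketch of the idea (the table and the loop are illustrative, not engine API), both sides could even be driven from one shared mapping:

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

enum TextureUnit {
    DEFERRED_BUFFER_COLOR_UNIT = 0,
    DEFERRED_BUFFER_NORMAL_UNIT = 1,
    DEFERRED_BUFFER_EMISSIVE_UNIT = 2,
    DEFERRED_BUFFER_DEPTH_UNIT = 3,
    DEFERRED_BUFFER_OBSCURANCE_UNIT = 4,
    SHADOW_MAP_UNIT = 5,
    SKYBOX_MAP_UNIT = 6,
};

int main() {
    // One table could feed both the slot bindings and the setResourceTexture calls.
    const std::vector<std::pair<std::string, int>> slots = {
        { "diffuseMap",    DEFERRED_BUFFER_COLOR_UNIT },
        { "normalMap",     DEFERRED_BUFFER_NORMAL_UNIT },
        { "specularMap",   DEFERRED_BUFFER_EMISSIVE_UNIT },
        { "depthMap",      DEFERRED_BUFFER_DEPTH_UNIT },
        { "obscuranceMap", DEFERRED_BUFFER_OBSCURANCE_UNIT },
        { "shadowMap",     SHADOW_MAP_UNIT },
        { "skyboxMap",     SKYBOX_MAP_UNIT },
    };
    for (const auto& slot : slots) {
        std::printf("%s -> unit %d\n", slot.first.c_str(), slot.second);
    }
    return 0;
}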
@ -145,7 +154,8 @@ void DeferredLightingEffect::prepare(RenderArgs* args) {
|
|||
});
|
||||
}
|
||||
|
||||
void DeferredLightingEffect::render(RenderArgs* args) {
|
||||
void DeferredLightingEffect::render(const render::RenderContextPointer& renderContext) {
|
||||
auto args = renderContext->getArgs();
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
|
||||
// Allocate the parameters buffer used by all the deferred shaders
|
||||
|
@ -160,6 +170,7 @@ void DeferredLightingEffect::render(RenderArgs* args) {
|
|||
|
||||
// perform deferred lighting, rendering to free fbo
|
||||
auto framebufferCache = DependencyManager::get<FramebufferCache>();
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
|
||||
QSize framebufferSize = framebufferCache->getFrameBufferSize();
|
||||
|
||||
|
@ -171,16 +182,23 @@ void DeferredLightingEffect::render(RenderArgs* args) {
|
|||
batch.setStateScissorRect(args->_viewport);
|
||||
|
||||
// Bind the G-Buffer surfaces
|
||||
batch.setResourceTexture(0, framebufferCache->getDeferredColorTexture());
|
||||
batch.setResourceTexture(1, framebufferCache->getDeferredNormalTexture());
|
||||
batch.setResourceTexture(2, framebufferCache->getDeferredSpecularTexture());
|
||||
batch.setResourceTexture(3, framebufferCache->getPrimaryDepthTexture());
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, framebufferCache->getDeferredColorTexture());
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, framebufferCache->getDeferredNormalTexture());
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, framebufferCache->getDeferredSpecularTexture());
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, framebufferCache->getPrimaryDepthTexture());
|
||||
|
||||
// If AO is off, bind the white texture instead so the obscurance term reads as 1.0
|
||||
if (renderContext->getOcclusionStatus()) {
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, framebufferCache->getOcclusionTexture());
|
||||
} else {
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, textureCache->getWhiteTexture());
|
||||
}
|
||||
|
||||
assert(_lightStage.lights.size() > 0);
|
||||
const auto& globalShadow = _lightStage.lights[0]->shadow;
|
||||
|
||||
// Bind the shadow buffer
|
||||
batch.setResourceTexture(4, globalShadow.map);
|
||||
batch.setResourceTexture(SHADOW_MAP_UNIT, globalShadow.map);
|
||||
|
||||
// The main viewport is assumed to be the mono viewport (or the two stereo faces side by side within that viewport)
|
||||
auto monoViewport = args->_viewport;
|
||||
|
@ -322,7 +340,7 @@ void DeferredLightingEffect::render(RenderArgs* args) {
|
|||
}
|
||||
|
||||
if (useSkyboxCubemap) {
|
||||
batch.setResourceTexture(5, _skybox->getCubemap());
|
||||
batch.setResourceTexture(SKYBOX_MAP_UNIT, _skybox->getCubemap());
|
||||
}
|
||||
|
||||
if (locations->lightBufferUnit >= 0) {
|
||||
|
@ -341,7 +359,7 @@ void DeferredLightingEffect::render(RenderArgs* args) {
|
|||
}
|
||||
|
||||
if (useSkyboxCubemap) {
|
||||
batch.setResourceTexture(5, nullptr);
|
||||
batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -457,10 +475,14 @@ void DeferredLightingEffect::render(RenderArgs* args) {
|
|||
}
|
||||
|
||||
// Probably not necessary in the long run because the gpu layer would unbind this texture if it is used as a render target
|
||||
batch.setResourceTexture(0, nullptr);
|
||||
batch.setResourceTexture(1, nullptr);
|
||||
batch.setResourceTexture(2, nullptr);
|
||||
batch.setResourceTexture(3, nullptr);
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, nullptr);
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, nullptr);
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, nullptr);
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, nullptr);
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, nullptr);
|
||||
batch.setResourceTexture(SHADOW_MAP_UNIT, nullptr);
|
||||
batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);
|
||||
|
||||
batch.setUniformBuffer(_directionalLightLocations->deferredTransformBuffer, nullptr);
|
||||
});
|
||||
|
||||
|
@ -485,12 +507,13 @@ static void loadLightProgram(const char* vertSource, const char* fragSource, boo
|
|||
gpu::ShaderPointer program = gpu::Shader::createProgram(VS, PS);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("diffuseMap"), 0));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), 1));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("specularMap"), 2));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), 3));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("shadowMap"), 4));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("skyboxMap"), 5));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("diffuseMap"), DEFERRED_BUFFER_COLOR_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), DEFERRED_BUFFER_NORMAL_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("specularMap"), DEFERRED_BUFFER_EMISSIVE_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), DEFERRED_BUFFER_DEPTH_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("obscuranceMap"), DEFERRED_BUFFER_OBSCURANCE_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("shadowMap"), SHADOW_MAP_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("skyboxMap"), SKYBOX_MAP_UNIT));
|
||||
|
||||
static const int LIGHT_GPU_SLOT = 3;
|
||||
static const int DEFERRED_TRANSFORM_BUFFER_SLOT = 2;
|
||||
|
|
|
@ -21,6 +21,8 @@
|
|||
#include "model/Stage.h"
|
||||
#include "model/Geometry.h"
|
||||
|
||||
#include "render/Context.h"
|
||||
|
||||
#include "LightStage.h"
|
||||
|
||||
class RenderArgs;
|
||||
|
@ -42,7 +44,7 @@ public:
|
|||
float intensity = 0.5f, const glm::quat& orientation = glm::quat(), float exponent = 0.0f, float cutoff = PI);
|
||||
|
||||
void prepare(RenderArgs* args);
|
||||
void render(RenderArgs* args);
|
||||
void render(const render::RenderContextPointer& renderContext);
|
||||
|
||||
void setupTransparent(RenderArgs* args, int lightBufferUnit);
|
||||
|
||||
|
|
|
@ -45,6 +45,12 @@ void FramebufferCache::setFrameBufferSize(QSize frameBufferSize) {
|
|||
_cachedFramebuffers.clear();
|
||||
_lightingTexture.reset();
|
||||
_lightingFramebuffer.reset();
|
||||
_depthPyramidFramebuffer.reset();
|
||||
_depthPyramidTexture.reset();
|
||||
_occlusionFramebuffer.reset();
|
||||
_occlusionTexture.reset();
|
||||
_occlusionBlurredFramebuffer.reset();
|
||||
_occlusionBlurredTexture.reset();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -96,6 +102,42 @@ void FramebufferCache::createPrimaryFramebuffer() {
|
|||
_lightingFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_lightingFramebuffer->setRenderBuffer(0, _lightingTexture);
|
||||
_lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
|
||||
// For AO:
|
||||
auto pointMipSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_POINT);
|
||||
_depthPyramidTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), width, height, pointMipSampler));
|
||||
_depthPyramidFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_depthPyramidFramebuffer->setRenderBuffer(0, _depthPyramidTexture);
|
||||
_depthPyramidFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
|
||||
resizeAmbientOcclusionBuffers();
|
||||
}
|
||||
|
||||
|
||||
void FramebufferCache::resizeAmbientOcclusionBuffers() {
|
||||
_occlusionFramebuffer.reset();
|
||||
_occlusionTexture.reset();
|
||||
_occlusionBlurredFramebuffer.reset();
|
||||
_occlusionBlurredTexture.reset();
|
||||
|
||||
|
||||
auto width = _frameBufferSize.width() >> _AOResolutionLevel;
|
||||
auto height = _frameBufferSize.height() >> _AOResolutionLevel;
|
||||
auto colorFormat = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGB);
|
||||
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
|
||||
auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
|
||||
|
||||
_occlusionTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
|
||||
_occlusionFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_occlusionFramebuffer->setRenderBuffer(0, _occlusionTexture);
|
||||
_occlusionFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
_occlusionBlurredTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
|
||||
_occlusionBlurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_occlusionBlurredFramebuffer->setRenderBuffer(0, _occlusionBlurredTexture);
|
||||
_occlusionBlurredFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getPrimaryFramebuffer() {
|
||||
|
@ -189,3 +231,54 @@ gpu::FramebufferPointer FramebufferCache::getSelfieFramebuffer() {
|
|||
}
|
||||
return _selfieFramebuffer;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getDepthPyramidFramebuffer() {
|
||||
if (!_depthPyramidFramebuffer) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _depthPyramidFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getDepthPyramidTexture() {
|
||||
if (!_depthPyramidTexture) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _depthPyramidTexture;
|
||||
}
|
||||
|
||||
void FramebufferCache::setAmbientOcclusionResolutionLevel(int level) {
|
||||
const int MAX_AO_RESOLUTION_LEVEL = 4;
|
||||
level = std::max(0, std::min(level, MAX_AO_RESOLUTION_LEVEL));
|
||||
if (level != _AOResolutionLevel) {
|
||||
_AOResolutionLevel = level;
|
||||
resizeAmbientOcclusionBuffers();
|
||||
}
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getOcclusionFramebuffer() {
|
||||
if (!_occlusionFramebuffer) {
|
||||
resizeAmbientOcclusionBuffers();
|
||||
}
|
||||
return _occlusionFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getOcclusionTexture() {
|
||||
if (!_occlusionTexture) {
|
||||
resizeAmbientOcclusionBuffers();
|
||||
}
|
||||
return _occlusionTexture;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getOcclusionBlurredFramebuffer() {
|
||||
if (!_occlusionBlurredFramebuffer) {
|
||||
resizeAmbientOcclusionBuffers();
|
||||
}
|
||||
return _occlusionBlurredFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getOcclusionBlurredTexture() {
|
||||
if (!_occlusionBlurredTexture) {
|
||||
resizeAmbientOcclusionBuffers();
|
||||
}
|
||||
return _occlusionBlurredTexture;
|
||||
}
|
|
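resizeAmbientOcclusionBuffers derives the occlusion buffer size by shifting the framebuffer size right by the resolution level, and setAmbientOcclusionResolutionLevel clamps the level to [0, 4] before triggering a resize. A small sketch of that sizing logic:

#include <algorithm>
#include <cstdio>

// Mirrors the sizing above: level 0 = full resolution, level 1 = half, and so on.
struct AOBufferSize { int width; int height; };

static AOBufferSize aoBufferSize(int fbWidth, int fbHeight, int level) {
    const int MAX_AO_RESOLUTION_LEVEL = 4;
    level = std::max(0, std::min(level, MAX_AO_RESOLUTION_LEVEL));
    return { fbWidth >> level, fbHeight >> level };
}

int main() {
    AOBufferSize half = aoBufferSize(1920, 1080, 1);    // default level: half resolution
    AOBufferSize clamped = aoBufferSize(1920, 1080, 9); // out-of-range level clamps to 4
    std::printf("%dx%d, %dx%d\n", half.width, half.height, clamped.width, clamped.height);
    return 0;
}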
@ -44,7 +44,16 @@ public:
|
|||
gpu::TexturePointer getDeferredNormalTexture();
|
||||
gpu::TexturePointer getDeferredSpecularTexture();
|
||||
|
||||
gpu::FramebufferPointer getDepthPyramidFramebuffer();
|
||||
gpu::TexturePointer getDepthPyramidTexture();
|
||||
|
||||
void setAmbientOcclusionResolutionLevel(int level);
|
||||
gpu::FramebufferPointer getOcclusionFramebuffer();
|
||||
gpu::TexturePointer getOcclusionTexture();
|
||||
gpu::FramebufferPointer getOcclusionBlurredFramebuffer();
|
||||
gpu::TexturePointer getOcclusionBlurredTexture();
|
||||
|
||||
|
||||
gpu::TexturePointer getLightingTexture();
|
||||
gpu::FramebufferPointer getLightingFramebuffer();
|
||||
|
||||
|
@ -83,7 +92,22 @@ private:
|
|||
|
||||
gpu::FramebufferPointer _selfieFramebuffer;
|
||||
|
||||
gpu::FramebufferPointer _depthPyramidFramebuffer;
|
||||
gpu::TexturePointer _depthPyramidTexture;
|
||||
|
||||
|
||||
gpu::FramebufferPointer _occlusionFramebuffer;
|
||||
gpu::TexturePointer _occlusionTexture;
|
||||
|
||||
gpu::FramebufferPointer _occlusionBlurredFramebuffer;
|
||||
gpu::TexturePointer _occlusionBlurredTexture;
|
||||
|
||||
QSize _frameBufferSize{ 100, 100 };
|
||||
int _AOResolutionLevel = 1; // AO is performed at half resolution by default
|
||||
|
||||
// Resize/reallocate the buffers used for AO
|
||||
// the size of the AO buffers is scaled down by the AO resolution level (halved per level)
|
||||
void resizeAmbientOcclusionBuffers();
|
||||
};
|
||||
|
||||
#endif // hifi_FramebufferCache_h
|
||||
|
|
|
@ -64,11 +64,10 @@ void PrepareDeferred::run(const SceneContextPointer& sceneContext, const RenderC
|
|||
}
|
||||
|
||||
void RenderDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {
|
||||
DependencyManager::get<DeferredLightingEffect>()->render(renderContext->getArgs());
|
||||
DependencyManager::get<DeferredLightingEffect>()->render(renderContext);
|
||||
}
|
||||
|
||||
void ToneMappingDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {
|
||||
PerformanceTimer perfTimer("ToneMappingDeferred");
|
||||
_toneMappingEffect.render(renderContext->getArgs());
|
||||
}
|
||||
|
||||
|
@ -108,17 +107,17 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) : Task() {
|
|||
// Use Stencil and start drawing background in Lighting buffer
|
||||
addJob<DrawBackgroundDeferred>("DrawBackgroundDeferred");
|
||||
|
||||
// AO job
|
||||
addJob<AmbientOcclusionEffect>("AmbientOcclusion");
|
||||
_jobs.back().setEnabled(false);
|
||||
_occlusionJobIndex = (int)_jobs.size() - 1;
|
||||
|
||||
// Draw Lights just adds the lights to the current list of lights to deal with. Not really a gpu job for now.
|
||||
addJob<DrawLight>("DrawLight", cullFunctor);
|
||||
|
||||
// DeferredBuffer is complete, now let's shade it into the LightingBuffer
|
||||
addJob<RenderDeferred>("RenderDeferred");
|
||||
|
||||
// AO job, to be revisited
|
||||
addJob<AmbientOcclusion>("AmbientOcclusion");
|
||||
_occlusionJobIndex = (int)_jobs.size() - 1;
|
||||
enableJob(_occlusionJobIndex, false);
|
||||
|
||||
// AA job to be revisited
|
||||
addJob<Antialiasing>("Antialiasing");
|
||||
_antialiasingJobIndex = (int)_jobs.size() - 1;
|
||||
|
@ -173,15 +172,36 @@ void RenderDeferredTask::run(const SceneContextPointer& sceneContext, const Rend
|
|||
setDrawHitEffect(renderContext->getDrawHitEffect());
|
||||
// TODO: turn on/off AO through menu item
|
||||
setOcclusionStatus(renderContext->getOcclusionStatus());
|
||||
|
||||
if (_occlusionJobIndex >= 0) {
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setResolutionLevel(renderContext->getAmbientOcclusion().resolutionLevel);
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setRadius(renderContext->getAmbientOcclusion().radius);
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setLevel(renderContext->getAmbientOcclusion().level);
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setNumSamples(renderContext->getAmbientOcclusion().numSamples);
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setNumSpiralTurns(renderContext->getAmbientOcclusion().numSpiralTurns);
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setDithering(renderContext->getAmbientOcclusion().ditheringEnabled);
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setFalloffBias(renderContext->getAmbientOcclusion().falloffBias);
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setEdgeSharpness(renderContext->getAmbientOcclusion().edgeSharpness);
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setBlurRadius(renderContext->getAmbientOcclusion().blurRadius);
|
||||
_jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().setBlurDeviation(renderContext->getAmbientOcclusion().blurDeviation);
|
||||
}
|
||||
|
||||
setAntialiasingStatus(renderContext->getFxaaStatus());
|
||||
setToneMappingExposure(renderContext->getTone().exposure);
|
||||
setToneMappingToneCurve(renderContext->getTone().toneCurve);
|
||||
// TODO: Allow runtime manipulation of culling ShouldRenderFunctor
|
||||
|
||||
renderContext->getArgs()->_context->syncCache();
|
||||
|
||||
for (auto job : _jobs) {
|
||||
job.run(sceneContext, renderContext);
|
||||
}
|
||||
|
||||
if (_occlusionJobIndex >= 0 && renderContext->getOcclusionStatus()) {
|
||||
renderContext->getAmbientOcclusion().gpuTime = _jobs[_occlusionJobIndex].edit<AmbientOcclusionEffect>().getGPUTime();
|
||||
} else {
|
||||
renderContext->getAmbientOcclusion().gpuTime = 0.0;
|
||||
}
|
||||
};
|
||||
|
||||
void DrawOpaqueDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const ItemIDsBounds& inItems) {
|
||||
|
|
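The task run above copies the scripted AO settings from the render context into the AmbientOcclusionEffect job every frame, then reads the job's measured GPU time back into the context after the jobs have run (or reports 0.0 when occlusion is disabled). A generic sketch of that push/pull pattern, with hypothetical stand-in types rather than the engine's job classes:

#include <cstdio>

// Hypothetical, reduced stand-ins for the render-context AO block and the job.
struct AOSettings { float radius = 0.5f; int numSamples = 16; bool enabled = true; double gpuTime = 0.0; };

struct AmbientOcclusionJob {
    float radius = 0.5f;
    int numSamples = 16;
    double lastGPUTime = 0.0;
    void run() { lastGPUTime = 1.3; /* pretend the GPU timer reported 1.3 ms */ }
};

int main() {
    AOSettings context;      // what scripts see and edit
    AmbientOcclusionJob job; // what the render task executes

    // Push the per-frame settings into the job before running it...
    job.radius = context.radius;
    job.numSamples = context.numSamples;
    if (context.enabled) {
        job.run();
    }
    // ...and pull the measured GPU time back for display in the debug UI.
    context.gpuTime = context.enabled ? job.lastGPUTime : 0.0;
    std::printf("AO gpu time: %.1f ms\n", context.gpuTime);
    return 0;
}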
|
@ -42,11 +42,15 @@ QString RenderScripting::Tone::getCurve() const {
|
|||
|
||||
render::RenderContext RenderScriptingInterface::getRenderContext() {
|
||||
render::RenderContext::ItemsConfig items{ *_opaque, *_transparent, *_overlay3D };
|
||||
return render::RenderContext{ items, *_tone, _drawStatus, _drawHitEffect, _deferredDebugSize, _deferredDebugMode };
|
||||
return render::RenderContext{ items, *_tone, *_ambientOcclusion, _drawStatus, _drawHitEffect, _deferredDebugSize, _deferredDebugMode };
|
||||
}
|
||||
|
||||
void RenderScriptingInterface::setItemCounts(const render::RenderContext::ItemsConfig& items) {
|
||||
_opaque->setCounts(items.opaque);
|
||||
_transparent->setCounts(items.transparent);
|
||||
_overlay3D->setCounts(items.overlay3D);
|
||||
}
|
||||
|
||||
void RenderScriptingInterface::setJobGPUTimes(double aoTime) {
|
||||
_ambientOcclusion->gpuTime = aoTime;
|
||||
}
|
|
@ -65,6 +65,24 @@ namespace RenderScripting {
|
|||
void setCurve(const QString& curve);
|
||||
};
|
||||
using TonePointer = std::unique_ptr<Tone>;
|
||||
|
||||
class AmbientOcclusion : public QObject, public render::RenderContext::AmbientOcclusion {
|
||||
Q_OBJECT
|
||||
|
||||
public:
|
||||
Q_PROPERTY(int resolutionLevel MEMBER resolutionLevel)
|
||||
Q_PROPERTY(float radius MEMBER radius)
|
||||
Q_PROPERTY(float level MEMBER level)
|
||||
Q_PROPERTY(int numSamples MEMBER numSamples)
|
||||
Q_PROPERTY(float numSpiralTurns MEMBER numSpiralTurns)
|
||||
Q_PROPERTY(bool ditheringEnabled MEMBER ditheringEnabled)
|
||||
Q_PROPERTY(float falloffBias MEMBER falloffBias)
|
||||
Q_PROPERTY(float edgeSharpness MEMBER edgeSharpness)
|
||||
Q_PROPERTY(int blurRadius MEMBER blurRadius)
|
||||
Q_PROPERTY(float blurDeviation MEMBER blurDeviation)
|
||||
Q_PROPERTY(double gpuTime MEMBER gpuTime)
|
||||
};
|
||||
using AmbientOcclusionPointer = std::unique_ptr<AmbientOcclusion>;
|
||||
};
|
||||
|
||||
class RenderScriptingInterface : public QObject, public Dependency {
|
||||
|
@ -77,7 +95,8 @@ class RenderScriptingInterface : public QObject, public Dependency {
|
|||
Q_PROPERTY(RenderScripting::ItemCounter* overlay3D READ getOverlay3D)
|
||||
|
||||
Q_PROPERTY(RenderScripting::Tone* tone READ getTone)
|
||||
|
||||
Q_PROPERTY(RenderScripting::AmbientOcclusion* ambientOcclusion READ getAmbientOcclusion)
|
||||
|
||||
Q_PROPERTY(int displayItemStatus MEMBER _drawStatus)
|
||||
Q_PROPERTY(bool displayHitEffect MEMBER _drawHitEffect)
|
||||
|
||||
|
@ -87,6 +106,9 @@ class RenderScriptingInterface : public QObject, public Dependency {
|
|||
render::RenderContext getRenderContext();
|
||||
void setItemCounts(const render::RenderContext::ItemsConfig& items);
|
||||
|
||||
// FIXME: It is ugly, we need a cleaner solution
|
||||
void setJobGPUTimes(double aoTime);
|
||||
|
||||
protected:
|
||||
RenderScriptingInterface();
|
||||
~RenderScriptingInterface() {};
|
||||
|
@ -96,12 +118,15 @@ protected:
|
|||
RenderScripting::ItemCounter* getOverlay3D() const { return _overlay3D.get(); }
|
||||
|
||||
RenderScripting::Tone* getTone() const { return _tone.get(); }
|
||||
RenderScripting::AmbientOcclusion* getAmbientOcclusion() const { return _ambientOcclusion.get(); }
|
||||
|
||||
RenderScripting::ItemStatePointer _opaque = RenderScripting::ItemStatePointer{new RenderScripting::ItemState{}};
|
||||
RenderScripting::ItemStatePointer _transparent = RenderScripting::ItemStatePointer{new RenderScripting::ItemState{}};
|
||||
RenderScripting::ItemCounterPointer _overlay3D = RenderScripting::ItemCounterPointer{new RenderScripting::ItemCounter{}};
|
||||
|
||||
RenderScripting::TonePointer _tone = RenderScripting::TonePointer{ new RenderScripting::Tone{} };
|
||||
|
||||
RenderScripting::AmbientOcclusionPointer _ambientOcclusion = RenderScripting::AmbientOcclusionPointer{ new RenderScripting::AmbientOcclusion{} };
|
||||
|
||||
// Options
|
||||
int _drawStatus = 0;
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
|
||||
#include <ViewFrustum.h>
|
||||
|
||||
#include "render/Context.h"
|
||||
#include "DeferredLightingEffect.h"
|
||||
#include "FramebufferCache.h"
|
||||
|
||||
|
@ -118,7 +119,7 @@ RenderShadowTask::RenderShadowTask(CullFunctor cullFunctor) : Task() {
|
|||
addJob<RenderShadowMap>("RenderShadowMap", shadowShapes, shapePlumber);
|
||||
}
|
||||
|
||||
void RenderShadowTask::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {
|
||||
void RenderShadowTask::run(const SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
|
||||
assert(sceneContext);
|
||||
RenderArgs* args = renderContext->getArgs();
|
||||
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
|
||||
#include "FramebufferCache.h"
|
||||
|
||||
const int ToneMappingEffect_ParamsSlot = 0;
|
||||
const int ToneMappingEffect_LightingMapSlot = 0;
|
||||
|
||||
ToneMappingEffect::ToneMappingEffect() {
|
||||
Parameters parameters;
|
||||
|
@ -91,7 +93,8 @@ void ToneMappingEffect::init() {
|
|||
auto blitProgram = gpu::ShaderPointer(gpu::Shader::createProgram(blitVS, blitPS));
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("toneMappingParamsBuffer"), 3));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("toneMappingParamsBuffer"), ToneMappingEffect_ParamsSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("colorMap"), ToneMappingEffect_LightingMapSlot));
|
||||
gpu::Shader::makeProgram(*blitProgram, slotBindings);
|
||||
auto blitState = std::make_shared<gpu::State>();
|
||||
blitState->setColorWriteMask(true, true, true, true);
|
||||
|
@ -138,8 +141,8 @@ void ToneMappingEffect::render(RenderArgs* args) {
|
|||
batch.setModelTransform(model);
|
||||
}
|
||||
|
||||
batch.setUniformBuffer(3, _parametersBuffer);
|
||||
batch.setResourceTexture(0, lightingBuffer);
|
||||
batch.setUniformBuffer(ToneMappingEffect_ParamsSlot, _parametersBuffer);
|
||||
batch.setResourceTexture(ToneMappingEffect_LightingMapSlot, lightingBuffer);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
});
|
||||
}
|
|
@ -1,279 +0,0 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// ambient_occlusion.frag
|
||||
// fragment shader
|
||||
//
|
||||
// Created by Niraj Venkat on 7/15/15.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include DeferredBufferWrite.slh@>
|
||||
|
||||
<@include gpu/Transform.slh@>
|
||||
|
||||
<$declareStandardTransform()$>
|
||||
|
||||
// Based on NVidia HBAO implementation in D3D11
|
||||
// http://www.nvidia.co.uk/object/siggraph-2008-HBAO.html
|
||||
|
||||
in vec2 varTexcoord;
|
||||
|
||||
uniform sampler2D depthTexture;
|
||||
uniform sampler2D normalTexture;
|
||||
|
||||
uniform float g_scale;
|
||||
uniform float g_bias;
|
||||
uniform float g_sample_rad;
|
||||
uniform float g_intensity;
|
||||
|
||||
// the distance to the near clip plane
|
||||
uniform float near;
|
||||
|
||||
// scale factor for depth: (far - near) / far
|
||||
uniform float depthScale;
|
||||
|
||||
// offset for depth texture coordinates
|
||||
uniform vec2 depthTexCoordOffset;
|
||||
|
||||
// scale for depth texture coordinates
|
||||
uniform vec2 depthTexCoordScale;
|
||||
|
||||
// the resolution of the occlusion buffer
|
||||
// and its inverse
|
||||
uniform vec2 renderTargetRes;
|
||||
uniform vec2 renderTargetResInv;
|
||||
|
||||
|
||||
|
||||
const float PI = 3.14159265;
|
||||
|
||||
const float AOStrength = 1.9;
|
||||
|
||||
|
||||
// TODO: R (radius) should be exposed as a uniform parameter
|
||||
const float R = 0.01;
|
||||
const float R2 = 0.01*0.01;
|
||||
const float NegInvR2 = - 1.0 / (0.01*0.01);
|
||||
|
||||
|
||||
|
||||
// can't use tan to initialize a const value
|
||||
const float TanBias = 0.57735027; // tan(30.0 * PI / 180.0);
|
||||
const float MaxRadiusPixels = 50.0;
|
||||
|
||||
const int NumDirections = 6;
|
||||
const int NumSamples = 4;
|
||||
|
||||
out vec4 outFragColor;
|
||||
|
||||
/**
|
||||
* Gets the normal in view space from a normal texture.
|
||||
* uv: the uv texture coordinates to look up in the texture at.
|
||||
*/
|
||||
vec3 GetViewNormalFromTexture(vec2 uv) {
|
||||
// convert [0,1] -> [-1,1], note: since we're normalizing
|
||||
// we don't need to do v*2 - 1.0, we can just do a v-0.5
|
||||
return normalize(texture(normalTexture, uv).xyz - 0.5);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the linearized depth in view space.
|
||||
* d: the depth value [0-1], usually from a depth texture to convert.
|
||||
*/
|
||||
float ViewSpaceZFromDepth(float d){
|
||||
return near / (d * depthScale - 1.0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a uv coordinate and depth value into a 3D view space coordinate.
|
||||
* uv: the uv coordinates to convert
|
||||
* z: the view space depth of the uv coordinate.
|
||||
*/
|
||||
vec3 UVToViewSpace(vec2 uv, float z){
|
||||
return vec3((depthTexCoordOffset + varTexcoord * depthTexCoordScale) * z, z);
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a uv coordinate into a 3D view space coordinate.
|
||||
* The depth of the uv coord is determined from the depth texture.
|
||||
* uv: the uv coordinates to convert
|
||||
*/
|
||||
vec3 GetViewPos(vec2 uv) {
|
||||
float z = ViewSpaceZFromDepth(texture(depthTexture, uv).r);
|
||||
return UVToViewSpace(uv, z);
|
||||
}
|
||||
|
||||
|
||||
float TanToSin(float x) {
|
||||
return x * inversesqrt(x*x + 1.0);
|
||||
}
|
||||
|
||||
float InvLength(vec2 V) {
|
||||
return inversesqrt(dot(V, V));
|
||||
}
|
||||
|
||||
float Tangent(vec3 V) {
|
||||
return V.z * InvLength(V.xy);
|
||||
}
|
||||
|
||||
float BiasedTangent(vec3 V) {
|
||||
return V.z * InvLength(V.xy) + TanBias;
|
||||
}
|
||||
|
||||
float Tangent(vec3 P, vec3 S) {
|
||||
return -(P.z - S.z) * InvLength(S.xy - P.xy);
|
||||
}
|
||||
|
||||
float Length2(vec3 V) {
|
||||
return dot(V, V);
|
||||
}
|
||||
|
||||
vec3 MinDiff(vec3 P, vec3 Pr, vec3 Pl) {
|
||||
vec3 V1 = Pr - P;
|
||||
vec3 V2 = P - Pl;
|
||||
return (Length2(V1) < Length2(V2)) ? V1 : V2;
|
||||
}
|
||||
|
||||
vec2 SnapUVOffset(vec2 uv) {
|
||||
return round(uv * renderTargetRes) * renderTargetResInv;
|
||||
}
|
||||
|
||||
float Falloff(float d2) {
|
||||
return d2 * NegInvR2 + 1.0f;
|
||||
}
|
||||
|
||||
float HorizonOcclusion(vec2 deltaUV, vec3 P, vec3 dPdu, vec3 dPdv, float randstep, float numSamples) {
|
||||
float ao = 0;
|
||||
|
||||
// Offset the first coord with some noise
|
||||
vec2 uv = varTexcoord + SnapUVOffset(randstep*deltaUV);
|
||||
deltaUV = SnapUVOffset(deltaUV);
|
||||
|
||||
// Calculate the tangent vector
|
||||
vec3 T = deltaUV.x * dPdu + deltaUV.y * dPdv;
|
||||
|
||||
// Get the angle of the tangent vector from the viewspace axis
|
||||
float tanH = BiasedTangent(T);
|
||||
float sinH = TanToSin(tanH);
|
||||
|
||||
float tanS;
|
||||
float d2;
|
||||
vec3 S;
|
||||
|
||||
// Sample to find the maximum angle
|
||||
for (float s = 1; s <= numSamples; ++s) {
|
||||
uv += deltaUV;
|
||||
S = GetViewPos(uv);
|
||||
tanS = Tangent(P, S);
|
||||
d2 = Length2(S - P);
|
||||
|
||||
// Is the sample within the radius and the angle greater?
|
||||
if (d2 < R2 && tanS > tanH) {
|
||||
float sinS = TanToSin(tanS);
|
||||
// Apply falloff based on the distance
|
||||
ao += Falloff(d2) * (sinS - sinH);
|
||||
|
||||
tanH = tanS;
|
||||
sinH = sinS;
|
||||
}
|
||||
}
|
||||
return ao;
|
||||
}
|
||||
|
||||
vec2 RotateDirections(vec2 Dir, vec2 CosSin) {
|
||||
return vec2(Dir.x*CosSin.x - Dir.y*CosSin.y,
|
||||
Dir.x*CosSin.y + Dir.y*CosSin.x);
|
||||
}
|
||||
|
||||
void ComputeSteps(inout vec2 stepSizeUv, inout float numSteps, float rayRadiusPix, float rand) {
|
||||
// Avoid oversampling if numSteps is greater than the kernel radius in pixels
|
||||
numSteps = min(NumSamples, rayRadiusPix);
|
||||
|
||||
// Divide by Ns+1 so that the farthest samples are not fully attenuated
|
||||
float stepSizePix = rayRadiusPix / (numSteps + 1);
|
||||
|
||||
// Clamp numSteps if it is greater than the max kernel footprint
|
||||
float maxNumSteps = MaxRadiusPixels / stepSizePix;
|
||||
if (maxNumSteps < numSteps) {
|
||||
// Use dithering to avoid AO discontinuities
|
||||
numSteps = floor(maxNumSteps + rand);
|
||||
numSteps = max(numSteps, 1);
|
||||
stepSizePix = MaxRadiusPixels / numSteps;
|
||||
}
|
||||
|
||||
// Step size in uv space
|
||||
stepSizeUv = stepSizePix * renderTargetResInv;
|
||||
}
|
||||
|
||||
float getRandom(vec2 uv) {
|
||||
return fract(sin(dot(uv.xy ,vec2(12.9898,78.233))) * 43758.5453);
|
||||
}
|
||||
|
||||
void main(void) {
|
||||
mat4 projMatrix = getTransformCamera()._projection;
|
||||
|
||||
float numDirections = NumDirections;
|
||||
|
||||
vec3 P, Pr, Pl, Pt, Pb;
|
||||
P = GetViewPos(varTexcoord);
|
||||
|
||||
// Sample neighboring pixels
|
||||
Pr = GetViewPos(varTexcoord + vec2( renderTargetResInv.x, 0));
|
||||
Pl = GetViewPos(varTexcoord + vec2(-renderTargetResInv.x, 0));
|
||||
Pt = GetViewPos(varTexcoord + vec2( 0, renderTargetResInv.y));
|
||||
Pb = GetViewPos(varTexcoord + vec2( 0,-renderTargetResInv.y));
|
||||
|
||||
// Calculate tangent basis vectors using the minimum difference
|
||||
vec3 dPdu = MinDiff(P, Pr, Pl);
|
||||
vec3 dPdv = MinDiff(P, Pt, Pb) * (renderTargetRes.y * renderTargetResInv.x);
|
||||
|
||||
// Get the random samples from the noise function
|
||||
vec3 random = vec3(getRandom(varTexcoord.xy), getRandom(varTexcoord.yx), getRandom(varTexcoord.xx));
|
||||
|
||||
// Calculate the projected size of the hemisphere
|
||||
float w = P.z * projMatrix[2][3] + projMatrix[3][3];
|
||||
vec2 rayRadiusUV = (0.5 * R * vec2(projMatrix[0][0], projMatrix[1][1]) / w); // [-1,1] -> [0,1] uv
|
||||
float rayRadiusPix = rayRadiusUV.x * renderTargetRes.x;
|
||||
|
||||
float ao = 1.0;
|
||||
|
||||
// Make sure the radius of the evaluated hemisphere is more than a pixel
|
||||
if(rayRadiusPix > 1.0) {
|
||||
ao = 0.0;
|
||||
float numSteps;
|
||||
vec2 stepSizeUV;
|
||||
|
||||
// Compute the number of steps
|
||||
ComputeSteps(stepSizeUV, numSteps, rayRadiusPix, random.z);
|
||||
|
||||
float alpha = 2.0 * PI / numDirections;
|
||||
|
||||
// Calculate the horizon occlusion of each direction
|
||||
for(float d = 0; d < numDirections; ++d) {
|
||||
float theta = alpha * d;
|
||||
|
||||
// Apply noise to the direction
|
||||
vec2 dir = RotateDirections(vec2(cos(theta), sin(theta)), random.xy);
|
||||
vec2 deltaUV = dir * stepSizeUV;
|
||||
|
||||
// Sample the pixels along the direction
|
||||
ao += HorizonOcclusion( deltaUV,
|
||||
P,
|
||||
dPdu,
|
||||
dPdv,
|
||||
random.z,
|
||||
numSteps);
|
||||
}
|
||||
|
||||
// Average the results and produce the final AO
|
||||
ao = 1.0 - ao / numDirections * AOStrength;
|
||||
}
|
||||
|
||||
|
||||
outFragColor = vec4(vec3(ao), 1.0);
|
||||
}
|
|
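The deleted ambient_occlusion.frag implemented an NVIDIA-style HBAO pass: for a handful of screen-space directions it marched outward, tracked the highest horizon angle seen so far, and accumulated distance-attenuated occlusion whenever a sample raised that horizon. A compact CPU sketch of the per-direction march, assuming view-space sample positions are already available (the inputs here are placeholders):

#include <cmath>
#include <cstdio>
#include <vector>

// Per-direction horizon march, following the deleted shader's logic: start from
// the biased tangent angle, and each sample that is inside the radius and above
// the current horizon adds Falloff(d2) * (sin(new) - sin(old)).
struct Vec3 { float x, y, z; };

static float tanToSin(float x) { return x / std::sqrt(x * x + 1.0f); }
static float length2(Vec3 v) { return v.x * v.x + v.y * v.y + v.z * v.z; }

static float horizonOcclusion(Vec3 P, const std::vector<Vec3>& samples, float R2, float tanBias) {
    float ao = 0.0f;
    float tanH = tanBias;            // biased horizon to start with (TanBias in the shader)
    float sinH = tanToSin(tanH);
    for (const Vec3& S : samples) {
        Vec3 d{ S.x - P.x, S.y - P.y, S.z - P.z };
        float d2 = length2(d);
        float tanS = d.z / std::sqrt(d.x * d.x + d.y * d.y + 1e-12f); // Tangent(P, S)
        if (d2 < R2 && tanS > tanH) {
            float sinS = tanToSin(tanS);
            ao += (1.0f - d2 / R2) * (sinS - sinH); // Falloff(d2) * horizon increment
            tanH = tanS;
            sinH = sinS;
        }
    }
    return ao;
}

int main() {
    // One occluder slightly above and in front of P raises the horizon once.
    Vec3 P{ 0.0f, 0.0f, -1.0f };
    std::vector<Vec3> samples{ { 0.05f, 0.0f, -0.97f }, { 0.09f, 0.0f, -1.0f } };
    std::printf("ao contribution: %f\n", horizonOcclusion(P, samples, 0.01f, 0.577f));
    return 0;
}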
@ -1,26 +0,0 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// ambient_occlusion.vert
|
||||
// vertex shader
|
||||
//
|
||||
// Created by Niraj Venkat on 7/15/15.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include gpu/Inputs.slh@>
|
||||
|
||||
<@include gpu/Transform.slh@>
|
||||
|
||||
<$declareStandardTransform()$>
|
||||
|
||||
out vec2 varTexcoord;
|
||||
|
||||
void main(void) {
|
||||
varTexcoord = inTexCoord0.xy;
|
||||
gl_Position = inPosition;
|
||||
}
|
|
@ -14,6 +14,10 @@
|
|||
|
||||
<@include DeferredBuffer.slh@>
|
||||
|
||||
uniform sampler2D pyramidMap;
|
||||
uniform sampler2D occlusionMap;
|
||||
uniform sampler2D occlusionBlurredMap;
|
||||
|
||||
in vec2 uv;
|
||||
out vec4 outFragColor;
|
||||
|
||||
|
|
|
@ -25,12 +25,13 @@ void main(void) {
|
|||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
float shadowAttenuation = 1.0;
|
||||
float shadowAttenuation = 1.0;
|
||||
|
||||
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
|
||||
if (frag.mode == LIGHT_MAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
|
@ -39,6 +40,7 @@ void main(void) {
|
|||
vec3 color = evalAmbientSphereGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
|
|
|
@ -27,12 +27,13 @@ void main(void) {
|
|||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
vec4 worldPos = deferredTransform.viewInverse * vec4(frag.position.xyz, 1.0);
|
||||
float shadowAttenuation = evalShadowAttenuation(worldPos);
|
||||
float shadowAttenuation = evalShadowAttenuation(worldPos);
|
||||
|
||||
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
|
||||
if (frag.mode == LIGHT_MAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
|
@ -41,6 +42,7 @@ void main(void) {
|
|||
vec3 color = evalAmbientSphereGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
|
|
|
@ -25,13 +25,14 @@ void main(void) {
|
|||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
float shadowAttenuation = 1.0;
|
||||
float shadowAttenuation = 1.0;
|
||||
|
||||
// Light mapped or not ?
|
||||
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
|
||||
if (frag.mode == LIGHT_MAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
|
@ -39,7 +40,8 @@ void main(void) {
|
|||
} else {
|
||||
vec3 color = evalAmbientGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
|
|
|
@ -27,13 +27,14 @@ void main(void) {
|
|||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
vec4 worldPos = deferredTransform.viewInverse * vec4(frag.position.xyz, 1.0);
|
||||
float shadowAttenuation = evalShadowAttenuation(worldPos);
|
||||
float shadowAttenuation = evalShadowAttenuation(worldPos);
|
||||
|
||||
// Light mapped or not ?
|
||||
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
|
||||
if (frag.mode == LIGHT_MAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
|
@ -41,7 +42,8 @@ void main(void) {
|
|||
} else {
|
||||
vec3 color = evalAmbientGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
|
|
|
@ -25,13 +25,14 @@ void main(void) {
|
|||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
float shadowAttenuation = 1.0;
|
||||
float shadowAttenuation = 1.0;
|
||||
|
||||
// Light mapped or not ?
|
||||
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
|
||||
if (frag.mode == LIGHT_MAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
|
@ -40,6 +41,7 @@ void main(void) {
|
|||
vec3 color = evalSkyboxGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
|
|
|
@ -27,13 +27,14 @@ void main(void) {
|
|||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
vec4 worldPos = deferredTransform.viewInverse * vec4(frag.position.xyz, 1.0);
|
||||
float shadowAttenuation = evalShadowAttenuation(worldPos);
|
||||
float shadowAttenuation = evalShadowAttenuation(worldPos);
|
||||
|
||||
// Light mapped or not ?
|
||||
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
|
||||
if (frag.mode == LIGHT_MAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
|
@ -42,6 +43,7 @@ void main(void) {
|
|||
vec3 color = evalSkyboxGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
|
|
|
@ -1,43 +0,0 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// gaussian_blur.frag
|
||||
// fragment shader
|
||||
//
|
||||
// Created by Niraj Venkat on 7/17/15.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include DeferredBufferWrite.slh@>
|
||||
|
||||
// the interpolated normal
|
||||
//in vec4 interpolatedNormal;
|
||||
|
||||
in vec2 varTexcoord;
|
||||
in vec2 varBlurTexcoords[14];
|
||||
|
||||
uniform sampler2D occlusionTexture;
|
||||
|
||||
out vec4 outFragColor;
|
||||
void main(void) {
|
||||
outFragColor = vec4(0.0);
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[0])*0.0044299121055113265;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[1])*0.00895781211794;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[2])*0.0215963866053;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[3])*0.0443683338718;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[4])*0.0776744219933;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[5])*0.115876621105;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[6])*0.147308056121;
|
||||
outFragColor += texture(occlusionTexture, varTexcoord)*0.159576912161;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[7])*0.147308056121;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[8])*0.115876621105;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[9])*0.0776744219933;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[10])*0.0443683338718;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[11])*0.0215963866053;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[12])*0.00895781211794;
|
||||
outFragColor += texture(occlusionTexture, varBlurTexcoords[13])*0.0044299121055113265;
|
||||
}
|
|
@ -1,43 +0,0 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// guassian_blur_horizontal.vert
|
||||
// vertex shader
|
||||
//
|
||||
// Created by Niraj Venkat on 7/17/15.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include gpu/Inputs.slh@>
|
||||
|
||||
<@include gpu/Transform.slh@>
|
||||
|
||||
<$declareStandardTransform()$>
|
||||
|
||||
out vec2 varTexcoord;
|
||||
out vec2 varBlurTexcoords[14];
|
||||
|
||||
void main(void) {
|
||||
varTexcoord = inTexCoord0.xy;
|
||||
gl_Position = inPosition;
|
||||
|
||||
varBlurTexcoords[0] = varTexcoord + vec2(-0.028, 0.0);
|
||||
varBlurTexcoords[1] = varTexcoord + vec2(-0.024, 0.0);
|
||||
varBlurTexcoords[2] = varTexcoord + vec2(-0.020, 0.0);
|
||||
varBlurTexcoords[3] = varTexcoord + vec2(-0.016, 0.0);
|
||||
varBlurTexcoords[4] = varTexcoord + vec2(-0.012, 0.0);
|
||||
varBlurTexcoords[5] = varTexcoord + vec2(-0.008, 0.0);
|
||||
varBlurTexcoords[6] = varTexcoord + vec2(-0.004, 0.0);
|
||||
varBlurTexcoords[7] = varTexcoord + vec2(0.004, 0.0);
|
||||
varBlurTexcoords[8] = varTexcoord + vec2(0.008, 0.0);
|
||||
varBlurTexcoords[9] = varTexcoord + vec2(0.012, 0.0);
|
||||
varBlurTexcoords[10] = varTexcoord + vec2(0.016, 0.0);
|
||||
varBlurTexcoords[11] = varTexcoord + vec2(0.020, 0.0);
|
||||
varBlurTexcoords[12] = varTexcoord + vec2(0.024, 0.0);
|
||||
varBlurTexcoords[13] = varTexcoord + vec2(0.028, 0.0);
|
||||
}
|
||||
|
|
@ -1,43 +0,0 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// guassian_blur_vertical.vert
|
||||
// vertex shader
|
||||
//
|
||||
// Created by Niraj Venkat on 7/17/15.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include gpu/Inputs.slh@>
|
||||
|
||||
<@include gpu/Transform.slh@>
|
||||
|
||||
<$declareStandardTransform()$>
|
||||
|
||||
out vec2 varTexcoord;
|
||||
out vec2 varBlurTexcoords[14];
|
||||
|
||||
void main(void) {
|
||||
varTexcoord = inTexCoord0.xy;
|
||||
gl_Position = inPosition;
|
||||
|
||||
varBlurTexcoords[0] = varTexcoord + vec2(0.0, -0.028);
|
||||
varBlurTexcoords[1] = varTexcoord + vec2(0.0, -0.024);
|
||||
varBlurTexcoords[2] = varTexcoord + vec2(0.0, -0.020);
|
||||
varBlurTexcoords[3] = varTexcoord + vec2(0.0, -0.016);
|
||||
varBlurTexcoords[4] = varTexcoord + vec2(0.0, -0.012);
|
||||
varBlurTexcoords[5] = varTexcoord + vec2(0.0, -0.008);
|
||||
varBlurTexcoords[6] = varTexcoord + vec2(0.0, -0.004);
|
||||
varBlurTexcoords[7] = varTexcoord + vec2(0.0, 0.004);
|
||||
varBlurTexcoords[8] = varTexcoord + vec2(0.0, 0.008);
|
||||
varBlurTexcoords[9] = varTexcoord + vec2(0.0, 0.012);
|
||||
varBlurTexcoords[10] = varTexcoord + vec2(0.0, 0.016);
|
||||
varBlurTexcoords[11] = varTexcoord + vec2(0.0, 0.020);
|
||||
varBlurTexcoords[12] = varTexcoord + vec2(0.0, 0.024);
|
||||
varBlurTexcoords[13] = varTexcoord + vec2(0.0, 0.028);
|
||||
}
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// occlusion_blend.frag
|
||||
// fragment shader
|
||||
//
|
||||
// Created by Niraj Venkat on 7/20/15.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include DeferredBufferWrite.slh@>
|
||||
|
||||
in vec2 varTexcoord;
|
||||
out vec4 outFragColor;
|
||||
|
||||
uniform sampler2D blurredOcclusionTexture;
|
||||
|
||||
void main(void) {
|
||||
vec4 occlusionColor = texture(blurredOcclusionTexture, varTexcoord);
|
||||
|
||||
outFragColor = vec4(vec3(0.0), occlusionColor.r);
|
||||
|
||||
}
|
|
@ -67,7 +67,7 @@ void main(void) {
|
|||
|
||||
// Final Lighting color
|
||||
vec3 fragColor = (shading.w * frag.diffuse + shading.xyz);
|
||||
_fragColor = vec4(fragColor * radialAttenuation * getLightColor(light) * getLightIntensity(light), 0.0);
|
||||
_fragColor = vec4(fragColor * radialAttenuation * getLightColor(light) * getLightIntensity(light) * frag.obscurance, 0.0);
|
||||
|
||||
if (getLightShowContour(light) > 0.0) {
|
||||
// Show edge
|
||||
|
|
|
@ -74,7 +74,7 @@ void main(void) {
|
|||
|
||||
// Final Lighting color
|
||||
vec3 fragColor = (shading.w * frag.diffuse + shading.xyz);
|
||||
_fragColor = vec4(fragColor * angularAttenuation * radialAttenuation * getLightColor(light) * getLightIntensity(light), 0.0);
|
||||
_fragColor = vec4(fragColor * angularAttenuation * radialAttenuation * getLightColor(light) * getLightIntensity(light) * frag.obscurance, 0.0);
|
||||
|
||||
if (getLightShowContour(light) > 0.0) {
|
||||
// Show edges
|
||||
|
|
243
libraries/render-utils/src/ssao.slh
Normal file
|
@ -0,0 +1,243 @@
|
|||
<!
|
||||
// AmbientOcclusion.slh
|
||||
// libraries/render-utils/src
|
||||
//
|
||||
// Created by Sam Gateau on 1/1/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
!>
|
||||
<@if not SSAO_SLH@>
|
||||
<@def SSAO_SLH@>
|
||||
|
||||
<@func declarePackOcclusionDepth()@>
|
||||
|
||||
const float FAR_PLANE_Z = -300.0;
|
||||
|
||||
float CSZToDephtKey(float z) {
|
||||
return clamp(z * (1.0 / FAR_PLANE_Z), 0.0, 1.0);
|
||||
}
|
||||
vec3 packOcclusionDepth(float occlusion, float depth) {
|
||||
// Round to the nearest 1/256.0
|
||||
float temp = floor(depth * 256.0);
|
||||
return vec3(occlusion, temp * (1.0 / 256.0), depth * 256.0 - temp);
|
||||
}
|
||||
vec2 unpackOcclusionDepth(vec3 raw) {
|
||||
float z = raw.y * (256.0 / 257.0) + raw.z * (1.0 / 257.0);
|
||||
return vec2(raw.x, z);
|
||||
}
|
||||
<@endfunc@>
|
||||
|
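packOcclusionDepth stores the AO value in one channel and splits the normalized depth key across the two remaining 8-bit channels; unpackOcclusionDepth folds them back together with a 1/257 weighting. A quick C++ check of the round trip (floats only, no 8-bit quantization modeled) shows the key comes back scaled by a constant 256/257, which cancels out because both blur passes use the same encoding:

#include <cmath>
#include <cstdio>

// Float mirror of the scribe functions above.
struct Vec3 { float x, y, z; };

static Vec3 packOcclusionDepth(float occlusion, float depth) {
    float temp = std::floor(depth * 256.0f);
    return { occlusion, temp / 256.0f, depth * 256.0f - temp };
}

static void unpackOcclusionDepth(Vec3 raw, float& occlusion, float& depthKey) {
    occlusion = raw.x;
    depthKey = raw.y * (256.0f / 257.0f) + raw.z * (1.0f / 257.0f);
}

int main() {
    float occlusion, key;
    unpackOcclusionDepth(packOcclusionDepth(0.75f, 0.4f), occlusion, key);
    // The recovered key is depth * 256/257; the constant factor is the same for
    // every pixel, so the bilateral depth comparisons are unaffected.
    std::printf("occlusion %.2f, key %.5f (0.4 * 256/257 = %.5f)\n",
                occlusion, key, 0.4f * 256.0f / 257.0f);
    return 0;
}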
||||
<@func declareAmbientOcclusion()@>
|
||||
|
||||
struct AmbientOcclusionFrameTransform {
|
||||
vec4 _pixelInfo;
|
||||
vec4 _depthInfo;
|
||||
vec4 _stereoInfo;
|
||||
mat4 _projection[2];
|
||||
};
|
||||
|
||||
struct AmbientOcclusionParams {
|
||||
vec4 _resolutionInfo;
|
||||
vec4 _radiusInfo;
|
||||
vec4 _ditheringInfo;
|
||||
vec4 _sampleInfo;
|
||||
vec4 _blurInfo;
|
||||
float _gaussianCoefs[8];
|
||||
};
|
||||
|
||||
uniform ambientOcclusionFrameTransformBuffer {
|
||||
AmbientOcclusionFrameTransform frameTransform;
|
||||
};
|
||||
uniform ambientOcclusionParamsBuffer {
|
||||
AmbientOcclusionParams params;
|
||||
};
|
||||
|
||||
|
||||
int getResolutionLevel() {
|
||||
return int(params._resolutionInfo.x);
|
||||
}
|
||||
|
||||
vec2 getWidthHeight() {
|
||||
return vec2(ivec2(frameTransform._pixelInfo.zw) >> getResolutionLevel());
|
||||
}
|
||||
float getProjScale() {
|
||||
return getWidthHeight().y * frameTransform._projection[0][1][1] * 0.5;
|
||||
}
|
||||
mat4 getProjection(int side) {
|
||||
return frameTransform._projection[side];
|
||||
}
|
||||
|
||||
bool isStereo() {
|
||||
return frameTransform._stereoInfo.x > 0.0f;
|
||||
}
|
||||
|
||||
float getStereoSideWidth() {
|
||||
return float(int(frameTransform._stereoInfo.y) >> getResolutionLevel());
|
||||
}
|
||||
|
||||
ivec3 getStereoSideInfo(int xPos) {
|
||||
int sideWidth = int(getStereoSideWidth());
|
||||
return ivec3(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth);
|
||||
}
|
||||
|
||||
|
||||
float evalZeyeFromZdb(float depth) {
|
||||
return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);
|
||||
}
|
||||
|
||||
vec3 evalEyeNormal(vec3 C) {
|
||||
//return normalize(cross(dFdy(C), dFdx(C)));
|
||||
return normalize(cross(dFdx(C), dFdy(C)));
|
||||
}
|
||||
|
||||
|
||||
float getRadius() {
|
||||
return params._radiusInfo.x;
|
||||
}
|
||||
float getRadius2() {
|
||||
return params._radiusInfo.y;
|
||||
}
|
||||
float getInvRadius6() {
|
||||
return params._radiusInfo.z;
|
||||
}
|
||||
float getObscuranceScaling() {
|
||||
return params._radiusInfo.z * params._radiusInfo.w;
|
||||
}
|
||||
|
||||
float isDitheringEnabled() {
|
||||
return params._ditheringInfo.x;
|
||||
}
|
||||
float getFrameDithering() {
|
||||
return params._ditheringInfo.y;
|
||||
}
|
||||
float isBorderingEnabled() {
|
||||
return params._ditheringInfo.w;
|
||||
}
|
||||
|
||||
float getFalloffBias() {
|
||||
return params._ditheringInfo.z;
|
||||
}
|
||||
|
||||
float getNumSamples() {
|
||||
return params._sampleInfo.x;
|
||||
}
|
||||
float getInvNumSamples() {
|
||||
return params._sampleInfo.y;
|
||||
}
|
||||
float getNumSpiralTurns() {
|
||||
return params._sampleInfo.z;
|
||||
}
|
||||
|
||||
float getBlurEdgeSharpness() {
|
||||
return params._blurInfo.x;
|
||||
}
|
||||
|
||||
#ifdef CONSTANT_GAUSSIAN
|
||||
const int BLUR_RADIUS = 4;
|
||||
const float gaussian[BLUR_RADIUS + 1] =
|
||||
// KEEP this dead code for a potential performance improvement
|
||||
// float[](0.356642, 0.239400, 0.072410, 0.009869);
|
||||
// float[](0.398943, 0.241971, 0.053991, 0.004432, 0.000134); // stddev = 1.0
|
||||
float[](0.153170, 0.144893, 0.122649, 0.092902, 0.062970); // stddev = 2.0
|
||||
//float[](0.197413, 0.17467, 0.12098,0.065591,0.040059);
|
||||
// float[](0.111220, 0.107798, 0.098151, 0.083953, 0.067458, 0.050920, 0.036108); // stddev = 3.0
|
||||
|
||||
int getBlurRadius() {
|
||||
return BLUR_RADIUS;
|
||||
return int(params._blurInfo.y);
|
||||
}
|
||||
|
||||
float getBlurCoef(int c) {
|
||||
return gaussian[c];
|
||||
return params._gaussianCoefs[c];
|
||||
}
|
||||
#else
|
||||
int getBlurRadius() {
|
||||
return int(params._blurInfo.y);
|
||||
}
|
||||
|
||||
float getBlurCoef(int c) {
|
||||
return params._gaussianCoefs[c];
|
||||
}
|
||||
#endif
|
||||
|
||||
<@endfunc@>
|
||||
|
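The accessors above imply a packed layout for the uniform parameter block: _radiusInfo appears to carry the radius in .x, its square in .y and an inverse sixth power in .z, while _sampleInfo holds the sample count, its reciprocal and the spiral turn count. The actual packing is done on the C++ side of the effect; this is only a rough sketch with the field meanings inferred from the accessor names, so treat them as assumptions:

#include <cstdio>

struct Vec4 { float x, y, z, w; };

// Hypothetical CPU-side packing matching the GLSL accessors above:
// getRadius() -> .x, getRadius2() -> .y, getInvRadius6() -> .z,
// getObscuranceScaling() -> .z * .w (so .w would carry the AO level).
static Vec4 packRadiusInfo(float radius, float level) {
    float radius2 = radius * radius;
    float invRadius6 = 1.0f / (radius2 * radius2 * radius2);
    return { radius, radius2, invRadius6, level };
}

// getNumSamples() -> .x, getInvNumSamples() -> .y, getNumSpiralTurns() -> .z.
static Vec4 packSampleInfo(int numSamples, float numSpiralTurns) {
    return { float(numSamples), 1.0f / float(numSamples), numSpiralTurns, 0.0f };
}

int main() {
    Vec4 radiusInfo = packRadiusInfo(0.5f, 0.5f);
    Vec4 sampleInfo = packSampleInfo(16, 7.0f);
    std::printf("radiusInfo = %.3f %.3f %.3f %.3f\n", radiusInfo.x, radiusInfo.y, radiusInfo.z, radiusInfo.w);
    std::printf("sampleInfo = %.3f %.3f %.3f %.3f\n", sampleInfo.x, sampleInfo.y, sampleInfo.z, sampleInfo.w);
    return 0;
}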
||||
<@func declareBlurPass(axis)@>
|
||||
|
||||
<$declarePackOcclusionDepth()$>
|
||||
<$declareAmbientOcclusion()$>
|
||||
|
||||
// the source occlusion texture
|
||||
uniform sampler2D occlusionMap;
|
||||
|
||||
vec2 fetchOcclusionDepthRaw(ivec2 coords, out vec3 raw) {
|
||||
raw = texelFetch(occlusionMap, coords, 0).xyz;
|
||||
return unpackOcclusionDepth(raw);
|
||||
}
|
||||
|
||||
vec2 fetchOcclusionDepth(ivec2 coords) {
|
||||
return unpackOcclusionDepth(texelFetch(occlusionMap, coords, 0).xyz);
|
||||
}
|
||||
|
||||
const int RADIUS_SCALE = 2;
|
||||
const float BLUR_WEIGHT_OFFSET = 0.3;
|
||||
const float BLUR_EDGE_SCALE = 2000.0;
|
||||
|
||||
vec2 evalTapWeightedValue(ivec3 side, int r, ivec2 ssC, float key) {
|
||||
ivec2 tapOffset = <$axis$> * (r * RADIUS_SCALE);
|
||||
ivec2 ssP = (ssC + tapOffset);
|
||||
|
||||
if ((ssP.x < side.y || ssP.x >= side.z + side.y) || (ssP.y < 0 || ssP.y >= int(getWidthHeight().y))) {
|
||||
return vec2(0.0);
|
||||
}
|
||||
vec2 tapOZ = fetchOcclusionDepth(ssC + tapOffset);
|
||||
|
||||
// spatial domain: offset gaussian tap
|
||||
float weight = BLUR_WEIGHT_OFFSET + getBlurCoef(abs(r));
|
||||
|
||||
// range domain (the "bilateral" weight). As depth difference increases, decrease weight.
|
||||
weight *= max(0.0, 1.0 - (getBlurEdgeSharpness() * BLUR_EDGE_SCALE) * abs(tapOZ.y - key));
|
||||
|
||||
return vec2(tapOZ.x * weight, weight);
|
||||
}
|
||||
|
||||
vec3 getBlurredOcclusion(vec2 coord) {
|
||||
ivec2 ssC = ivec2(coord);
|
||||
|
||||
// Stereo side info
|
||||
ivec3 side = getStereoSideInfo(ssC.x);
|
||||
|
||||
vec3 rawSample;
|
||||
vec2 occlusionDepth = fetchOcclusionDepthRaw(ssC, rawSample);
|
||||
float key = occlusionDepth.y;
|
||||
|
||||
// Central pixel contribution
|
||||
float mainWeight = getBlurCoef(0);
|
||||
vec2 weightedSums = vec2(occlusionDepth.x * mainWeight, mainWeight);
|
||||
|
||||
// Accumulate weighted contributions along the blurring axis in the [-radius, radius] range
|
||||
int blurRadius = getBlurRadius();
|
||||
// negative side first
|
||||
for (int r = -blurRadius; r <= -1; ++r) {
|
||||
weightedSums += evalTapWeightedValue(side, r, ssC, key);
|
||||
}
|
||||
// then positive side
|
||||
for (int r = 1; r <= blurRadius; ++r) {
|
||||
weightedSums += evalTapWeightedValue(side, r, ssC, key);
|
||||
}
|
||||
|
||||
// Final normalization
|
||||
const float epsilon = 0.0001;
|
||||
float result = weightedSums.x / (weightedSums.y + epsilon);
|
||||
|
||||
rawSample.x = result;
|
||||
return rawSample;
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
|
||||
<@endif@>
|
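evalTapWeightedValue above combines a spatial Gaussian weight (plus a constant offset so distant taps never vanish entirely) with a depth-aware term that fades a tap out as its depth key diverges from the center pixel's; getBlurredOcclusion then normalizes by the accumulated weight. A scalar sketch of one tap's weight, using the constants from the shader:

#include <algorithm>
#include <cmath>
#include <cstdio>

// Weight of a single blur tap, mirroring the logic above. "gaussian" is the
// precomputed kernel coefficient for |r|; key/tapKey are packed depth keys.
static float tapWeight(float gaussian, float key, float tapKey, float edgeSharpness) {
    const float BLUR_WEIGHT_OFFSET = 0.3f;
    const float BLUR_EDGE_SCALE = 2000.0f;
    float weight = BLUR_WEIGHT_OFFSET + gaussian; // spatial term
    // range term: large depth differences kill the tap entirely
    weight *= std::max(0.0f, 1.0f - edgeSharpness * BLUR_EDGE_SCALE * std::fabs(tapKey - key));
    return weight;
}

int main() {
    float same = tapWeight(0.1449f, 0.4000f, 0.4000f, 1.0f); // neighbor at the same depth
    float edge = tapWeight(0.1449f, 0.4000f, 0.4010f, 1.0f); // neighbor across a depth edge
    std::printf("same-depth weight %.3f, across-edge weight %.3f\n", same, edge);
    return 0;
}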
23
libraries/render-utils/src/ssao_makeHorizontalBlur.slf
Normal file
|
@ -0,0 +1,23 @@
|
|||
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
//  Generated on <$_SCRIBE_DATE$>
//
//  Created by Sam Gateau on 1/1/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

<@include ssao.slh@>

const ivec2 horizontal = ivec2(1,0);
<$declareBlurPass(horizontal)$>

in vec2 varTexCoord0;
out vec4 outFragColor;

void main(void) {
    outFragColor = vec4(getBlurredOcclusion(gl_FragCoord.xy), 1.0);
}
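The horizontal and vertical passes instantiate the same <$declareBlurPass(axis)$> template with axis = (1,0) and (0,1), so the 2D blur is separable: with the default blurRadius of 4, the two 1D passes cost 2 * (2*4 + 1) = 18 occlusion fetches per pixel instead of the 9 * 9 = 81 a single full-kernel pass would need. The depth-aware range weight makes the separation an approximation rather than an exact factorization, which is the usual trade-off for bilateral blurs.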
161  libraries/render-utils/src/ssao_makeOcclusion.slf  Normal file
@@ -0,0 +1,161 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
//  Generated on <$_SCRIBE_DATE$>
//
//  Created by Sam Gateau on 1/1/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

<@include ssao.slh@>
<$declareAmbientOcclusion()$>
<$declarePackOcclusionDepth()$>

const int LOG_MAX_OFFSET = 3;
const int MAX_MIP_LEVEL = 5;

// the depth pyramid texture
uniform sampler2D pyramidMap;

float getZEye(ivec2 pixel) {
    return -texelFetch(pyramidMap, pixel, getResolutionLevel()).x;
}

vec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {
    // compute the view space position using the depth
    // basically manually pick the proj matrix components to do the inverse
    float Xe = (-Zeye * (texcoord.x * 2.0 - 1.0) - Zeye * frameTransform._projection[side][2][0] - frameTransform._projection[side][3][0]) / frameTransform._projection[side][0][0];
    float Ye = (-Zeye * (texcoord.y * 2.0 - 1.0) - Zeye * frameTransform._projection[side][2][1] - frameTransform._projection[side][3][1]) / frameTransform._projection[side][1][1];
    return vec3(Xe, Ye, Zeye);
}

out vec4 outFragColor;

uniform sampler2D normalMap;

float getAngleDithering(in ivec2 pixelPos) {
    // Hash function used in the AlchemyAO paper
    return isDitheringEnabled() * (3 * pixelPos.x ^ pixelPos.y + pixelPos.x * pixelPos.y) * 10 + getFrameDithering();
}

const float TWO_PI = 6.28;

vec2 tapLocation(int sampleNumber, float spinAngle, out float ssR){
    // Radius relative to ssR
    float alpha = float(sampleNumber + 0.5) * getInvNumSamples();
    float angle = alpha * (getNumSpiralTurns() * TWO_PI) + spinAngle;

    ssR = alpha;
    return vec2(cos(angle), sin(angle));
}

vec3 getOffsetPosition(ivec3 side, ivec2 ssC, vec2 unitOffset, float ssR) {
    // Derivation:
    //   mipLevel = floor(log(ssR / MAX_OFFSET));
    int mipLevel = clamp(findMSB(int(ssR)) - LOG_MAX_OFFSET, 0, MAX_MIP_LEVEL);

    ivec2 ssOffset = ivec2(ssR * unitOffset);
    ivec2 ssP = ssOffset + ssC;
    if (bool(isBorderingEnabled())) {
        ssP.x = ((ssP.x < 0 || ssP.x >= side.z) ? ssC.x - ssOffset.x : ssP.x);
        ssP.y = ((ssP.y < 0 || ssP.y >= int(getWidthHeight().y)) ? ssC.y - ssOffset.y : ssP.y);
    }

    ivec2 ssPFull = ivec2(ssP.x + side.y, ssP.y);

    vec3 P;

    // We need to divide by 2^mipLevel to read the appropriately scaled coordinate from a MIP-map.
    // Manually clamp to the texture size because texelFetch bypasses the texture unit
    ivec2 mipP = clamp(ssPFull >> mipLevel, ivec2(0), textureSize(pyramidMap, getResolutionLevel() + mipLevel) - ivec2(1));
    P.z = -texelFetch(pyramidMap, mipP, getResolutionLevel() + mipLevel).r;

    // Offset to pixel center
    vec2 tapUV = (vec2(ssP) + vec2(0.5)) / float(side.z);
    P = evalEyePositionFromZeye(side.x, P.z, tapUV);
    return P;
}

float sampleAO(in ivec3 side, in ivec2 ssC, in vec3 C, in vec3 n_C, in float ssDiskRadius, in int tapIndex, in float randomPatternRotationAngle) {
    // Offset on the unit disk, spun for this pixel
    float ssR;
    vec2 unitOffset = tapLocation(tapIndex, randomPatternRotationAngle, ssR);
    ssR *= ssDiskRadius;

    // The occluding point in camera space
    vec3 Q = getOffsetPosition(side, ssC, unitOffset, ssR);

    vec3 v = Q - C;
    float vv = dot(v, v);
    float vn = dot(v, n_C);

    // Fall off function as recommended in SAO paper
    const float epsilon = 0.01;
    float f = max(getRadius2() - vv, 0.0);
    return f * f * f * max((vn - getFalloffBias()) / (epsilon + vv), 0.0);
}

void main(void) {
    // Pixel being shaded
    ivec2 ssC = ivec2(gl_FragCoord.xy);

    // Fetch the z under the pixel (stereo or not)
    float Zeye = getZEye(ssC);

    // Stereo side info
    ivec3 side = getStereoSideInfo(ssC.x);

    // From now on, ssC is the pixel pos in the side
    ssC.x -= side.y;
    vec2 fragPos = (vec2(ssC) + 0.5) / getStereoSideWidth();

    // The position and normal of the pixel fragment in Eye space
    vec3 Cp = evalEyePositionFromZeye(side.x, Zeye, fragPos);
    vec3 Cn = evalEyeNormal(Cp);

    // Choose the screen-space sample radius
    // proportional to the projected area of the sphere
    float ssDiskRadius = -getProjScale() * getRadius() / Cp.z;

    // Let's make noise
    float randomPatternRotationAngle = getAngleDithering(ssC);

    // Accumulate the obscurance for each sample
    float sum = 0.0;
    for (int i = 0; i < getNumSamples(); ++i) {
        sum += sampleAO(side, ssC, Cp, Cn, ssDiskRadius, i, randomPatternRotationAngle);
    }

    float A = max(0.0, 1.0 - sum * getObscuranceScaling() * 5.0 * getInvNumSamples());

    <!  // KEEP IT for Debugging
        // Bilateral box-filter over a quad for free, respecting depth edges
        // (the difference that this makes is subtle)
        if (abs(dFdx(Cp.z)) < 0.02) {
            A -= dFdx(A) * ((ssC.x & 1) - 0.5);
        }
        if (abs(dFdy(Cp.z)) < 0.02) {
            A -= dFdy(A) * ((ssC.y & 1) - 0.5);
        }
    !>

    outFragColor = vec4(packOcclusionDepth(A, CSZToDephtKey(Cp.z)), 1.0);

    <!  // KEEP IT for Debugging
        // Debug Normal: outFragColor = vec4((Cn + vec3(1.0)) * 0.5, 1.0);
        // Debug Radius: outFragColor = vec4(vec3(ssDiskRadius / 100.0), 1.0);
        // Debug MaxMipLevel: outFragColor = vec4(1.0 - vec3(float(clamp(findMSB(int(ssDiskRadius)) - LOG_MAX_OFFSET, 0, MAX_MIP_LEVEL)) / float(MAX_MIP_LEVEL)), 1.0);
        // Debug OffsetPosition
        float ssR;
        vec2 unitOffset = tapLocation(int(getNumSamples() - 1), 0, ssR);
        vec3 Q = getOffsetPosition(side, ssC, unitOffset, ssR * ssDiskRadius);
        // outFragColor = vec4(vec3(Q.x / 10.0, Q.y / 2.0, -Q.z / 3.0), 1.0);
        vec3 v = normalize(Q - Cp);
        outFragColor = vec4((v + vec3(1.0)) * 0.5, 1.0);
    !>
}
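For reference, sampleAO() and the loop in main() implement the SAO-style obscurance estimator: each tap contributes f*f*f * max((dot(v, n) - falloffBias) / (epsilon + dot(v, v)), 0) with f = max(radius^2 - dot(v, v), 0), and the accumulated sum is remapped to A = max(0, 1 - 5 * obscuranceScaling * sum / numSamples). The sketch below restates that math on the CPU for one pixel; it is illustrative only (Vec3, evalObscurance, and the sample array are hypothetical, not engine types), but it mirrors the shader term for term:

#include <algorithm>
#include <cmath>

struct Vec3 { float x, y, z; };
static float dot(const Vec3& a, const Vec3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

// samples: eye-space positions of the occluding taps around the shaded point C
// n_C: eye-space normal at C; radius, falloffBias, obscuranceScaling: AO settings
float evalObscurance(const Vec3* samples, int numSamples,
                     const Vec3& C, const Vec3& n_C,
                     float radius, float falloffBias, float obscuranceScaling) {
    const float epsilon = 0.01f;
    const float radius2 = radius * radius;
    float sum = 0.0f;
    for (int i = 0; i < numSamples; ++i) {
        Vec3 v = { samples[i].x - C.x, samples[i].y - C.y, samples[i].z - C.z };
        float vv = dot(v, v);
        float vn = dot(v, n_C);
        float f = std::max(radius2 - vv, 0.0f);                              // radial falloff
        sum += f * f * f * std::max((vn - falloffBias) / (epsilon + vv), 0.0f);
    }
    // Same normalization as the shader: 1 - 5 * scaling * sum / N, clamped at 0
    return std::max(0.0f, 1.0f - sum * obscuranceScaling * 5.0f / float(numSamples));
}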
23  libraries/render-utils/src/ssao_makePyramid.slf  Normal file
@@ -0,0 +1,23 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
//  Generated on <$_SCRIBE_DATE$>
//
//  Created by Sam Gateau on 1/1/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

<@include ssao.slh@>
<$declareAmbientOcclusion()$>

uniform sampler2D depthMap;

out vec4 outFragColor;

void main(void) {
    float Zdb = texelFetch(depthMap, ivec2(gl_FragCoord.xy), 0).x;
    float Zeye = -evalZeyeFromZdb(Zdb);
    outFragColor = vec4(Zeye, 0.0, 0.0, 1.0);
}
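The pyramid pass converts the raw depth-buffer value into a linear eye-space Z that the occlusion pass can mip-sample. evalZeyeFromZdb() itself comes from the included ssao.slh and is not shown in this excerpt; assuming a standard OpenGL perspective projection with near and far clip distances n and f, the conversion is equivalent to the following sketch (zEyeFromZdb is a hypothetical stand-in; the real helper most likely reads the relevant terms from the projection matrix rather than taking n and f directly):

// Map a [0,1] depth-buffer sample back to (negative) eye-space Z.
float zEyeFromZdb(float Zdb, float n, float f) {
    float ndcZ = 2.0f * Zdb - 1.0f;                     // window depth -> NDC depth
    return 2.0f * f * n / (ndcZ * (f - n) - (f + n));   // -n at the near plane, -f at the far plane
}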
22  libraries/render-utils/src/ssao_makeVerticalBlur.slf  Normal file
@@ -0,0 +1,22 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
//  Generated on <$_SCRIBE_DATE$>
//
//  Created by Sam Gateau on 1/1/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include ssao.slh@>

const ivec2 vertical = ivec2(0,1);
<$declareBlurPass(vertical)$>

in vec2 varTexCoord0;
out vec4 outFragColor;

void main(void) {
    float occlusion = getBlurredOcclusion(gl_FragCoord.xy).x;
    outFragColor = vec4(occlusion, 0.0, 0.0, occlusion);
}
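Taken together, the four new fragment shaders form one SSAO chain per frame: ssao_makePyramid.slf linearizes the scene depth into pyramidMap, ssao_makeOcclusion.slf estimates the raw obscurance from that pyramid and packs it with a depth key, and the two blur shaders then filter the result through occlusionMap, horizontally and then vertically, with the vertical pass writing the final occlusion into both the red and alpha channels. This ordering is inferred from the shaders' inputs and outputs; the C++ job scheduling that wires the passes together is not part of this excerpt.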
@@ -13,13 +13,11 @@

 using namespace render;

-RenderContext::RenderContext(ItemsConfig items, Tone tone, int drawStatus, bool drawHitEffect, glm::vec4 deferredDebugSize, int deferredDebugMode)
+RenderContext::RenderContext(ItemsConfig items, Tone tone, AmbientOcclusion ao, int drawStatus, bool drawHitEffect, glm::vec4 deferredDebugSize, int deferredDebugMode)
     : _deferredDebugMode{ deferredDebugMode }, _deferredDebugSize{ deferredDebugSize },
     _args{ nullptr },
     _drawStatus{ drawStatus }, _drawHitEffect{ drawHitEffect },
-    _items{ items }, _tone{ tone }
-{
-}
+    _items{ items }, _tone{ tone }, _ambientOcclusion{ ao } {}

 void RenderContext::setOptions(bool occlusion, bool fxaa, bool showOwned) {
     _occlusionStatus = occlusion;
@@ -72,13 +72,30 @@
         float exposure = 0.0;
     };

-    RenderContext(ItemsConfig items, Tone tone, int drawStatus, bool drawHitEffect, glm::vec4 deferredDebugSize, int deferredDebugMode);
-    RenderContext() {}
+    class AmbientOcclusion {
+    public:
+        int resolutionLevel { 1 };
+        float radius { 0.5f };      // radius in meters of the AO effect
+        float level { 0.5f };       // level of the obscurance value
+        int numSamples { 11 };      // number of samples per pixel
+        float numSpiralTurns { 7.0f };
+        bool ditheringEnabled { true };
+        float falloffBias { 0.01f };
+        float edgeSharpness { 1.0f };
+        int blurRadius { 4 };
+        float blurDeviation { 2.5f };
+
+        double gpuTime { 0.0 };
+    };
+
+    RenderContext(ItemsConfig items, Tone tone, AmbientOcclusion ao, int drawStatus, bool drawHitEffect, glm::vec4 deferredDebugSize, int deferredDebugMode);
+    RenderContext() {};

     void setArgs(RenderArgs* args) { _args = args; }
     RenderArgs* getArgs() { return _args; }
     ItemsConfig& getItemsConfig() { return _items; }
     Tone& getTone() { return _tone; }
+    AmbientOcclusion& getAmbientOcclusion() { return _ambientOcclusion; }
     int getDrawStatus() { return _drawStatus; }
     bool getDrawHitEffect() { return _drawHitEffect; }
     bool getOcclusionStatus() { return _occlusionStatus; }

@@ -96,10 +113,11 @@ protected:
     int _drawStatus; // bitflag
     bool _drawHitEffect;
     bool _occlusionStatus { false };
-    bool _fxaaStatus = { false };
+    bool _fxaaStatus { false };

     ItemsConfig _items;
     Tone _tone;
+    AmbientOcclusion _ambientOcclusion;
 };
 typedef std::shared_ptr<RenderContext> RenderContextPointer;
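The new AmbientOcclusion block groups every tunable of the effect (resolution level, world-space radius, obscurance level, sample and spiral-turn counts, dithering, falloff bias, edge sharpness, and the blur radius and deviation used by the separable blur passes above) and travels into the engine through the extra constructor argument and getAmbientOcclusion(). A hypothetical usage sketch, assuming the nesting shown above inside render::RenderContext and placeholder values for the other constructor arguments:

// Sketch only: items, tone, drawStatus, drawHitEffect, debugSize and debugMode
// stand in for whatever the caller already passes today.
render::RenderContext::AmbientOcclusion ao;
ao.radius = 0.7f;          // meters of world-space reach for the effect
ao.numSamples = 16;        // taps per pixel in ssao_makeOcclusion.slf
ao.blurRadius = 3;         // taps on each side in the separable blur passes
ao.edgeSharpness = 0.8f;   // how strongly the bilateral blur respects depth edges

render::RenderContext renderContext(items, tone, ao, drawStatus, drawHitEffect,
                                    debugSize, debugMode);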
@@ -71,12 +71,6 @@
 #include "textured_particle_frag.h"
 #include "textured_particle_vert.h"

-#include "ambient_occlusion_vert.h"
-#include "ambient_occlusion_frag.h"
-#include "gaussian_blur_vertical_vert.h"
-#include "gaussian_blur_horizontal_vert.h"
-#include "gaussian_blur_frag.h"
-#include "occlusion_blend_frag.h"

 #include "hit_effect_vert.h"
 #include "hit_effect_frag.h"
@@ -205,12 +199,12 @@ void QTestWindow::draw() {
    testShaderBuild(model_shadow_vert, model_shadow_frag);
    testShaderBuild(untextured_particle_vert, untextured_particle_frag);
    testShaderBuild(textured_particle_vert, textured_particle_frag);

    /* FIXME: Bring back the ssao shader tests
    testShaderBuild(gaussian_blur_vertical_vert, gaussian_blur_frag);
    testShaderBuild(gaussian_blur_horizontal_vert, gaussian_blur_frag);
    testShaderBuild(ambient_occlusion_vert, ambient_occlusion_frag);
    testShaderBuild(ambient_occlusion_vert, occlusion_blend_frag);

    */
    testShaderBuild(hit_effect_vert, hit_effect_frag);

    testShaderBuild(overlay3D_vert, overlay3D_frag);