Mirror of https://github.com/Armored-Dragon/overte.git (synced 2025-03-11 16:13:16 +01:00)

Merge pull request #8211 from highfidelity/skin
Skin sub-surface lighting

Commit 592a50356b
134 changed files with 7200 additions and 1424 deletions
@@ -305,6 +305,8 @@ public:
// Don't actually crash in debug builds, in case this apparent deadlock is simply from
// the developer actively debugging code
#ifdef NDEBUG
deadlockDetectionCrash();
#endif
}

@@ -1690,7 +1692,6 @@ void Application::paintGL() {
auto inputs = AvatarInputs::getInstance();
if (inputs->mirrorVisible()) {
PerformanceTimer perfTimer("Mirror");
auto primaryFbo = DependencyManager::get<FramebufferCache>()->getPrimaryFramebuffer();

renderArgs._renderMode = RenderArgs::MIRROR_RENDER_MODE;
renderArgs._blitFramebuffer = DependencyManager::get<FramebufferCache>()->getSelfieFramebuffer();
@@ -41,5 +41,5 @@ void main(void) {
vec3 yzDiffuseScaled = yzDiffuse.rgb * abs(worldNormal.x);
vec4 diffuse = vec4(xyDiffuseScaled + xzDiffuseScaled + yzDiffuseScaled, 1.0);

packDeferredFragment(_normal, 1.0, vec3(diffuse), DEFAULT_ROUGHNESS, DEFAULT_METALLIC, DEFAULT_EMISSIVE, DEFAULT_OCCLUSION);
packDeferredFragment(_normal, 1.0, vec3(diffuse), DEFAULT_ROUGHNESS, DEFAULT_METALLIC, DEFAULT_EMISSIVE, DEFAULT_OCCLUSION, DEFAULT_SCATTERING);
}
@@ -462,7 +462,7 @@ FBXGeometry* FBXReader::extractFBXGeometry(const QVariantHash& mapping, const QS
QVector<ExtractedBlendshape> blendshapes;

QHash<QString, FBXModel> models;
QHash<QString, Cluster> clusters;
QHash<QString, Cluster> clusters;
QHash<QString, AnimationCurve> animationCurves;

QHash<QString, QString> typeFlags;

@@ -1366,7 +1366,7 @@ FBXGeometry* FBXReader::extractFBXGeometry(const QVariantHash& mapping, const QS
geometry.meshExtents.reset();

// Create the Material Library
consolidateFBXMaterials();
consolidateFBXMaterials(mapping);
geometry.materials = _fbxMaterials;

// see if any materials have texture children
@@ -167,6 +167,7 @@ public:
FBXTexture metallicTexture;
FBXTexture emissiveTexture;
FBXTexture occlusionTexture;
FBXTexture scatteringTexture;
FBXTexture lightmapTexture;
glm::vec2 lightmapParams{ 0.0f, 1.0f };

@@ -443,7 +444,7 @@ public:
QHash<QString, FBXMaterial> _fbxMaterials;

void consolidateFBXMaterials();
void consolidateFBXMaterials(const QVariantHash& mapping);

bool _loadLightmaps = true;
float _lightmapOffset = 0.0f;
@@ -18,6 +18,8 @@
#include <QtDebug>
#include <QtEndian>
#include <QFileInfo>
#include <QJsonDocument>
#include <QJsonObject>
#include "FBXReader.h"

#include <memory>

@@ -67,9 +69,13 @@ FBXTexture FBXReader::getTexture(const QString& textureID) {
return texture;
}

void FBXReader::consolidateFBXMaterials() {

// foreach (const QString& materialID, materials) {
void FBXReader::consolidateFBXMaterials(const QVariantHash& mapping) {

QString materialMapString = mapping.value("materialMap").toString();
QJsonDocument materialMapDocument = QJsonDocument::fromJson(materialMapString.toUtf8());
QJsonObject materialMap = materialMapDocument.object();

// foreach (const QString& materialID, materials) {
for (QHash<QString, FBXMaterial>::iterator it = _fbxMaterials.begin(); it != _fbxMaterials.end(); it++) {
FBXMaterial& material = (*it);

@@ -252,6 +258,24 @@ void FBXReader::consolidateFBXMaterials() {
}
}
}
qDebug() << " fbx material Name:" << material.name;

if (materialMap.contains(material.name)) {
QJsonObject materialOptions = materialMap.value(material.name).toObject();
qDebug() << "Mapping fbx material:" << material.name << " with HifiMaterial: " << materialOptions;

if (materialOptions.contains("scattering")) {
float scattering = (float) materialOptions.value("scattering").toDouble();
material._material->setScattering(scattering);
}

if (materialOptions.contains("scatteringMap")) {
QByteArray scatteringMap = materialOptions.value("scatteringMap").toVariant().toByteArray();
material.scatteringTexture = FBXTexture();
material.scatteringTexture.name = material.name + ".scatteringMap";
material.scatteringTexture.filename = scatteringMap;
}
}

if (material.opacity <= 0.0f) {
material._material->setOpacity(1.0f);
@@ -14,11 +14,16 @@
#include <QObject>
#include <QOpenGLDebugLogger>

void OpenGLDebug::log(const QOpenGLDebugMessage & debugMessage) {
qDebug() << debugMessage;
}

void setupDebugLogger(QObject* window) {
QOpenGLDebugLogger* logger = new QOpenGLDebugLogger(window);
logger->initialize(); // initializes in the current context, i.e. ctx
logger->enableMessages();
QObject::connect(logger, &QOpenGLDebugLogger::messageLogged, window, [&](const QOpenGLDebugMessage & debugMessage) {
qDebug() << debugMessage;
OpenGLDebug::log(debugMessage);
});
}

@@ -13,7 +13,13 @@
#define hifi_QOpenGLDebugLoggerWrapper_h

class QObject;
class QOpenGLDebugMessage;

void setupDebugLogger(QObject* window);

class OpenGLDebug {
public:
static void log(const QOpenGLDebugMessage & debugMessage);
};

#endif // hifi_QOpenGLDebugLoggerWrapper_h
@@ -14,11 +14,22 @@
using namespace gpu;
using namespace gpu::gl;

// Eventually, we want to test with TIME_ELAPSED instead of TIMESTAMP
#ifdef Q_OS_MAC
static bool timeElapsed = true;
#else
static bool timeElapsed = false;
#endif

void GLBackend::do_beginQuery(Batch& batch, size_t paramOffset) {
auto query = batch._queries.get(batch._params[paramOffset]._uint);
GLQuery* glquery = syncGPUObject(*query);
if (glquery) {
glBeginQuery(GL_TIME_ELAPSED, glquery->_qo);
if (timeElapsed) {
glBeginQuery(GL_TIME_ELAPSED, glquery->_endqo);
} else {
glQueryCounter(glquery->_beginqo, GL_TIMESTAMP);
}
(void)CHECK_GL_ERROR();
}
}

@@ -27,7 +38,11 @@ void GLBackend::do_endQuery(Batch& batch, size_t paramOffset) {
auto query = batch._queries.get(batch._params[paramOffset]._uint);
GLQuery* glquery = syncGPUObject(*query);
if (glquery) {
glEndQuery(GL_TIME_ELAPSED);
if (timeElapsed) {
glEndQuery(GL_TIME_ELAPSED);
} else {
glQueryCounter(glquery->_endqo, GL_TIMESTAMP);
}
(void)CHECK_GL_ERROR();
}
}

@@ -36,9 +51,16 @@ void GLBackend::do_getQuery(Batch& batch, size_t paramOffset) {
auto query = batch._queries.get(batch._params[paramOffset]._uint);
GLQuery* glquery = syncGPUObject(*query);
if (glquery) {
glGetQueryObjectui64v(glquery->_qo, GL_QUERY_RESULT_AVAILABLE, &glquery->_result);
glGetQueryObjectui64v(glquery->_endqo, GL_QUERY_RESULT_AVAILABLE, &glquery->_result);
if (glquery->_result == GL_TRUE) {
glGetQueryObjectui64v(glquery->_qo, GL_QUERY_RESULT, &glquery->_result);
if (timeElapsed) {
glGetQueryObjectui64v(glquery->_endqo, GL_QUERY_RESULT, &glquery->_result);
} else {
GLuint64 start, end;
glGetQueryObjectui64v(glquery->_beginqo, GL_QUERY_RESULT, &start);
glGetQueryObjectui64v(glquery->_endqo, GL_QUERY_RESULT, &end);
glquery->_result = end - start;
}
query->triggerReturnHandler(glquery->_result);
}
(void)CHECK_GL_ERROR();

@@ -41,15 +41,21 @@ public:
return 0;
}

return object->_qo;
return object->_endqo;
}

const GLuint& _qo { _id };
const GLuint& _endqo = { _id };
const GLuint _beginqo = { 0 };
GLuint64 _result { (GLuint64)-1 };

protected:
GLQuery(const Query& query, GLuint id) : Parent(query, id) {}
~GLQuery() { if (_id) { glDeleteQueries(1, &_id); } }
GLQuery(const Query& query, GLuint endId, GLuint beginId) : Parent(query, endId), _beginqo(beginId){}
~GLQuery() {
if (_id) {
GLuint ids[2] = { _endqo, _beginqo };
glDeleteQueries(2, ids);
}
}
};

} }
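Aside: the query hunks above replace a single GL_TIME_ELAPSED query with a begin/end pair of GL_TIMESTAMP queries (kept behind the timeElapsed switch on macOS) and derive the elapsed GPU time as end minus start. A minimal standalone C++ sketch of that same pattern, using plain OpenGL calls rather than the engine's Batch/GLQuery wrappers (the TimestampTimer type and its names are made up for illustration; this is not code from the commit):

// Illustrative only; assumes a current OpenGL 3.3+ context and a loaded function loader.
#include <GL/glew.h>
#include <cstdint>

struct TimestampTimer {
    GLuint beginQuery = 0;
    GLuint endQuery = 0;

    void create() {
        GLuint ids[2];
        glGenQueries(2, ids);              // one query object per timestamp
        beginQuery = ids[0];
        endQuery = ids[1];
    }
    void begin() { glQueryCounter(beginQuery, GL_TIMESTAMP); }
    void end()   { glQueryCounter(endQuery, GL_TIMESTAMP); }

    // Returns elapsed GPU nanoseconds, or 0 if the result is not available yet.
    uint64_t tryGetElapsedNs() const {
        GLuint64 available = 0;
        glGetQueryObjectui64v(endQuery, GL_QUERY_RESULT_AVAILABLE, &available);
        if (!available) {
            return 0;
        }
        GLuint64 start = 0, end = 0;
        glGetQueryObjectui64v(beginQuery, GL_QUERY_RESULT, &start);
        glGetQueryObjectui64v(endQuery, GL_QUERY_RESULT, &end);
        return end - start;                // same subtraction as glquery->_result = end - start above
    }
    void destroy() {
        GLuint ids[2] = { beginQuery, endQuery };
        glDeleteQueries(2, ids);
    }
};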
@@ -11,6 +11,7 @@

#include <GPUIdent.h>
#include <NumericalConstants.h>
#include <fstream>

Q_LOGGING_CATEGORY(gpugllogging, "hifi.gpu.gl")

@@ -670,11 +671,14 @@ bool compileShader(GLenum shaderDomain, const std::string& shaderSource, const s

// if compilation fails
if (!compiled) {

// save the source code to a temp file so we can debug easily
/* std::ofstream filestream;
/*
std::ofstream filestream;
filestream.open("debugshader.glsl");
if (filestream.is_open()) {
filestream << shaderSource->source;
filestream << srcstr[0];
filestream << srcstr[1];
filestream.close();
}
*/
@@ -25,7 +25,7 @@ public:
}

GL41Query(const Query& query)
: Parent(query, allocateQuery()) { }
: Parent(query, allocateQuery(), allocateQuery()) { }
};

gl::GLQuery* GL41Backend::syncGPUObject(const Query& query) {

@@ -19,12 +19,12 @@ class GL45Query : public gpu::gl::GLQuery {
public:
static GLuint allocateQuery() {
GLuint result;
glCreateQueries(GL_TIME_ELAPSED, 1, &result);
glCreateQueries(GL_TIMESTAMP, 1, &result);
return result;
}

GL45Query(const Query& query)
: Parent(query, allocateQuery()) { }
: Parent(query, allocateQuery(), allocateQuery()){}
};

gl::GLQuery* GL45Backend::syncGPUObject(const Query& query) {
@@ -91,6 +91,9 @@ const Backend::TransformCamera& Backend::TransformCamera::recomputeDerived(const
Mat4 viewUntranslated = _view;
viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f);
_projectionViewUntranslated = _projection * viewUntranslated;

_stereoInfo = Vec4(0.0f);

return *this;
}

@@ -104,7 +107,9 @@ Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const S
}
result._projection = _stereo._eyeProjections[eye];
result.recomputeDerived(offsetTransform);

result._stereoInfo = Vec4(1.0f, (float) eye, 0.0f, 0.0f);

return result;
}

@@ -104,6 +104,7 @@ public:
Mat4 _projection;
mutable Mat4 _projectionInverse;
Vec4 _viewport; // Public value is int but float in the shader to stay in floats for all the transform computations.
mutable Vec4 _stereoInfo;

const Backend::TransformCamera& recomputeDerived(const Transform& xformView) const;
TransformCamera getEyeCamera(int eye, const StereoState& stereo, const Transform& xformView) const;
@@ -12,6 +12,7 @@ using namespace gpu;

const Element Element::COLOR_RGBA_32{ VEC4, NUINT8, RGBA };
const Element Element::COLOR_SRGBA_32{ VEC4, NUINT8, SRGBA };
const Element Element::COLOR_R11G11B10{ SCALAR, FLOAT, R11G11B10 };
const Element Element::VEC4F_COLOR_RGBA{ VEC4, FLOAT, RGBA };
const Element Element::VEC2F_UV{ VEC2, FLOAT, UV };
const Element Element::VEC2F_XY{ VEC2, FLOAT, XY };

@@ -246,6 +246,7 @@ public:

static const Element COLOR_RGBA_32;
static const Element COLOR_SRGBA_32;
static const Element COLOR_R11G11B10;
static const Element VEC4F_COLOR_RGBA;
static const Element VEC2F_UV;
static const Element VEC2F_XY;
@@ -13,6 +13,8 @@
#include <math.h>
#include <QDebug>

#include <Transform.h>

using namespace gpu;

Framebuffer::~Framebuffer() {

@@ -290,4 +292,29 @@ Format Framebuffer::getDepthStencilBufferFormat() const {
} else {
return _depthStencilBuffer._element;
}
}
}
glm::vec4 Framebuffer::evalSubregionTexcoordTransformCoefficients(const glm::ivec2& sourceSurface, const glm::ivec2& destRegionSize, const glm::ivec2& destRegionOffset) {
float sMin = destRegionOffset.x / (float)sourceSurface.x;
float sWidth = destRegionSize.x / (float)sourceSurface.x;
float tMin = destRegionOffset.y / (float)sourceSurface.y;
float tHeight = destRegionSize.y / (float)sourceSurface.y;
return glm::vec4(sMin, tMin, sWidth, tHeight);
}

glm::vec4 Framebuffer::evalSubregionTexcoordTransformCoefficients(const glm::ivec2& sourceSurface, const glm::ivec4& destViewport) {
return evalSubregionTexcoordTransformCoefficients(sourceSurface, glm::ivec2(destViewport.z, destViewport.w), glm::ivec2(destViewport.x, destViewport.y));
}

Transform Framebuffer::evalSubregionTexcoordTransform(const glm::ivec2& sourceSurface, const glm::ivec2& destRegionSize, const glm::ivec2& destRegionOffset) {
float sMin = destRegionOffset.x / (float)sourceSurface.x;
float sWidth = destRegionSize.x / (float)sourceSurface.x;
float tMin = destRegionOffset.y / (float)sourceSurface.y;
float tHeight = destRegionSize.y / (float)sourceSurface.y;
Transform model;
model.setTranslation(glm::vec3(sMin, tMin, 0.0));
model.setScale(glm::vec3(sWidth, tHeight, 1.0));
return model;
}
Transform Framebuffer::evalSubregionTexcoordTransform(const glm::ivec2& sourceSurface, const glm::ivec4& destViewport) {
return evalSubregionTexcoordTransform(sourceSurface, glm::ivec2(destViewport.z, destViewport.w), glm::ivec2(destViewport.x, destViewport.y));
}

@@ -14,6 +14,8 @@
#include "Texture.h"
#include <memory>

class Transform; // Texcood transform util

namespace gpu {

typedef Element Format;

@@ -139,6 +141,12 @@ public:
Stamp getDepthStamp() const { return _depthStamp; }
const std::vector<Stamp>& getColorStamps() const { return _colorStamps; }

static glm::vec4 evalSubregionTexcoordTransformCoefficients(const glm::ivec2& sourceSurface, const glm::ivec2& destRegionSize, const glm::ivec2& destRegionOffset = glm::ivec2(0));
static glm::vec4 evalSubregionTexcoordTransformCoefficients(const glm::ivec2& sourceSurface, const glm::ivec4& destViewport);

static Transform evalSubregionTexcoordTransform(const glm::ivec2& sourceSurface, const glm::ivec2& destRegionSize, const glm::ivec2& destRegionOffset = glm::ivec2(0));
static Transform evalSubregionTexcoordTransform(const glm::ivec2& sourceSurface, const glm::ivec4& destViewport);

protected:
SwapchainPointer _swapchain;
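Aside: the new evalSubregionTexcoordTransform helpers above reduce a destination region inside a surface to a texcoord offset and scale, per axis simply offset/surfaceSize and size/surfaceSize. A hedged restatement of that arithmetic outside the gpu::Framebuffer class, with a worked example (the free function name is made up; not code from the commit):

// Illustrative re-statement of the coefficient math above, using plain glm.
#include <glm/glm.hpp>

// Returns (sMin, tMin, sWidth, tHeight): the texcoord window of a sub-region inside a surface.
glm::vec4 subregionTexcoordCoefficients(const glm::ivec2& sourceSurface,
                                        const glm::ivec2& regionSize,
                                        const glm::ivec2& regionOffset = glm::ivec2(0)) {
    return glm::vec4(regionOffset.x / (float)sourceSurface.x,
                     regionOffset.y / (float)sourceSurface.y,
                     regionSize.x / (float)sourceSurface.x,
                     regionSize.y / (float)sourceSurface.y);
}

// Example: a 960x540 viewport anchored at (960, 0) inside a 1920x1080 surface maps to
// texcoords starting at (0.5, 0.0) and spanning (0.5, 0.5):
// subregionTexcoordCoefficients({1920, 1080}, {960, 540}, {960, 0}) == vec4(0.5, 0.0, 0.5, 0.5)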
libraries/gpu/src/gpu/PackedNormal.slh (new file, 61 lines)

@@ -0,0 +1,61 @@
<!
//  PackedNormal.slh
//  libraries/gpu/src
//
//  Created by Sam Gateau on 7/19/16.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
!>
<@if not PACKED_NORMAL_SLH@>
<@def PACKED_NORMAL_SLH@>

vec2 signNotZero(vec2 v) {
return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);
}

vec2 float32x3_to_oct(in vec3 v) {
vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));
return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);
}

vec3 oct_to_float32x3(in vec2 e) {
vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));
if (v.z < 0) {
v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);
}
return normalize(v);
}

vec3 snorm12x2_to_unorm8x3(vec2 f) {
vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));
float t = floor(u.y / 256.0);

return floor(vec3(
u.x / 16.0,
fract(u.x / 16.0) * 256.0 + t,
u.y - t * 256.0
)) / 255.0;
}

vec2 unorm8x3_to_snorm12x2(vec3 u) {
u *= 255.0;
u.y *= (1.0 / 16.0);
vec2 s = vec2( u.x * 16.0 + floor(u.y),
fract(u.y) * (16.0 * 256.0) + u.z);
return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));
}

// Recommended function to pack/unpack vec3<float> normals to vec3<uint8> rgb with best efficiency
vec3 packNormal(in vec3 n) {
return snorm12x2_to_unorm8x3(float32x3_to_oct(n));
}

vec3 unpackNormal(in vec3 p) {
return oct_to_float32x3(unorm8x3_to_snorm12x2(p));
}

<@endif@>
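Aside: PackedNormal.slh stores a unit normal as an octahedral projection (float32x3_to_oct), requantized from two 12-bit snorm values into three 8-bit channels so it fits the RGB8 g-buffer normal target. A hedged CPU-side sketch of just the octahedral step, useful for sanity-checking the round trip (glm-based, function names chosen for illustration; the 12-bit/8-bit repacking is omitted and this is not code from the commit):

// Illustrative CPU port of the octahedral encode/decode above, for round-trip testing only.
#include <cmath>
#include <glm/glm.hpp>

static glm::vec2 signNotZero(glm::vec2 v) {
    return glm::vec2(v.x >= 0.0f ? 1.0f : -1.0f, v.y >= 0.0f ? 1.0f : -1.0f);
}

glm::vec2 octEncode(glm::vec3 v) {              // v is expected to be normalized
    glm::vec2 p = glm::vec2(v) * (1.0f / (std::fabs(v.x) + std::fabs(v.y) + std::fabs(v.z)));
    return v.z <= 0.0f ? (1.0f - glm::abs(glm::vec2(p.y, p.x))) * signNotZero(p) : p;
}

glm::vec3 octDecode(glm::vec2 e) {
    glm::vec3 v(e.x, e.y, 1.0f - std::fabs(e.x) - std::fabs(e.y));
    if (v.z < 0.0f) {
        glm::vec2 xy = (1.0f - glm::abs(glm::vec2(v.y, v.x))) * signNotZero(glm::vec2(v));
        v.x = xy.x;
        v.y = xy.y;
    }
    return glm::normalize(v);
}
// octDecode(octEncode(n)) reproduces any unit normal n up to quantization error.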
@@ -25,7 +25,7 @@ Query::~Query()
}

double Query::getElapsedTime() const {
return ((double) _queryResult) * 0.000001;
return ((double)_queryResult) / 1000000.0;
}

void Query::triggerReturnHandler(uint64_t queryResult) {

@@ -66,6 +66,9 @@ namespace gpu {

int rangeIndex(int index) const { return (index % QUERY_QUEUE_SIZE); }
};

using RangeTimerPointer = std::shared_ptr<RangeTimer>;

};

#endif
@@ -18,6 +18,7 @@ struct TransformCamera {
mat4 _projection;
mat4 _projectionInverse;
vec4 _viewport;
vec4 _stereoInfo;
};

layout(std140) uniform transformCameraBuffer {

@@ -31,6 +32,16 @@ TransformCamera getTransformCamera() {
vec3 getEyeWorldPos() {
return _camera._viewInverse[3].xyz;
}

bool cam_isStereo() {
return _camera._stereoInfo.x > 0.0;
}

float cam_getStereoSide() {
return _camera._stereoInfo.y;
}

<@endfunc@>
@@ -499,6 +499,11 @@ NetworkMaterial::NetworkMaterial(const FBXMaterial& material, const QUrl& textur
setTextureMap(MapChannel::EMISSIVE_MAP, map);
}

if (!material.scatteringTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.scatteringTexture, NetworkTexture::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
setTextureMap(MapChannel::SCATTERING_MAP, map);
}

if (!material.lightmapTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.lightmapTexture, NetworkTexture::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
_lightmapTransform = material.lightmapTexture.transform;

@@ -519,6 +524,7 @@ void NetworkMaterial::setTextures(const QVariantMap& textureMap) {
const auto& occlusionName = getTextureName(MapChannel::OCCLUSION_MAP);
const auto& emissiveName = getTextureName(MapChannel::EMISSIVE_MAP);
const auto& lightmapName = getTextureName(MapChannel::LIGHTMAP_MAP);
const auto& scatteringName = getTextureName(MapChannel::SCATTERING_MAP);

if (!albedoName.isEmpty()) {
auto url = textureMap.contains(albedoName) ? textureMap[albedoName].toUrl() : QUrl();

@@ -561,6 +567,12 @@ void NetworkMaterial::setTextures(const QVariantMap& textureMap) {
setTextureMap(MapChannel::EMISSIVE_MAP, map);
}

if (!scatteringName.isEmpty()) {
auto url = textureMap.contains(scatteringName) ? textureMap[scatteringName].toUrl() : QUrl();
auto map = fetchTextureMap(url, NetworkTexture::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
setTextureMap(MapChannel::SCATTERING_MAP, map);
}

if (!lightmapName.isEmpty()) {
auto url = textureMap.contains(lightmapName) ? textureMap[lightmapName].toUrl() : QUrl();
auto map = fetchTextureMap(url, NetworkTexture::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
@@ -51,6 +51,7 @@ public:
EMISSIVE_TEXTURE,
CUBE_TEXTURE,
OCCLUSION_TEXTURE,
SCATTERING_TEXTURE = OCCLUSION_TEXTURE,
LIGHTMAP_TEXTURE,
CUSTOM_TEXTURE
};
@@ -126,28 +126,48 @@ float getLightAmbientMapNumMips(Light l) {
}

<@if GPU_FEATURE_PROFILE == GPU_CORE @>
uniform lightBuffer {
Light light;
};
Light getLight() {
return light;
}
<@else@>
uniform vec4 lightBuffer[7];
Light getLight() {
Light light;
light._position = lightBuffer[0];
light._direction = lightBuffer[1];
light._color = lightBuffer[2];
light._attenuation = lightBuffer[3];
light._spot = lightBuffer[4];
light._shadow = lightBuffer[5];
light._control = lightBuffer[6];

return light;

bool clipFragToLightVolumePoint(Light light, vec3 fragPos, out vec4 fragLightVecLen2) {
fragLightVecLen2.xyz = getLightPosition(light) - fragPos.xyz;
fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);

// Kill if too far from the light center
if (fragLightVecLen2.w > getLightCutoffSquareRadius(light)) {
return false;
}
return true;
}

bool clipFragToLightVolumeSpot(Light light, vec3 fragPos, out vec4 fragLightVecLen2, out vec4 fragLightDirLen, out float cosSpotAngle) {
fragLightVecLen2.xyz = getLightPosition(light) - fragPos.xyz;
fragLightVecLen2.w = dot(fragLightVecLen2.xyz, fragLightVecLen2.xyz);

// Kill if too far from the light center
if (fragLightVecLen2.w > getLightCutoffSquareRadius(light)) {
return false;
}

// Allright we re valid in the volume
fragLightDirLen.w = length(fragLightVecLen2.xyz);
fragLightDirLen.xyz = fragLightVecLen2.xyz / fragLightDirLen.w;

// Kill if not in the spot light (ah ah !)
cosSpotAngle = max(-dot(fragLightDirLen.xyz, getLightDirection(light)), 0.0);
if (cosSpotAngle < getLightSpotAngleCos(light)) {
return false;
}

return true;
}
<@endif@>
@@ -104,6 +104,13 @@ void Material::setMetallic(float metallic) {
_schemaBuffer.edit<Schema>()._metallic = metallic;
}

void Material::setScattering(float scattering) {
scattering = glm::clamp(scattering, 0.0f, 1.0f);
_key.setMetallic(scattering > 0.0f);
_schemaBuffer.edit<Schema>()._key = (uint32)_key._flags.to_ulong();
_schemaBuffer.edit<Schema>()._scattering = scattering;
}

void Material::setTextureMap(MapChannel channel, const TextureMapPointer& textureMap) {
if (textureMap) {
_key.setMapChannel(channel, (true));
@@ -35,6 +35,7 @@ public:
OPACITY_VAL_BIT,
OPACITY_MASK_MAP_BIT, // Opacity Map and Opacity MASK map are mutually exclusive
OPACITY_TRANSLUCENT_MAP_BIT,
SCATTERING_VAL_BIT,

// THe map bits must be in the same sequence as the enum names for the map channels
EMISSIVE_MAP_BIT,

@@ -44,6 +45,7 @@ public:
NORMAL_MAP_BIT,
OCCLUSION_MAP_BIT,
LIGHTMAP_MAP_BIT,
SCATTERING_MAP_BIT,

NUM_FLAGS,
};

@@ -57,6 +59,7 @@ public:
NORMAL_MAP,
OCCLUSION_MAP,
LIGHTMAP_MAP,
SCATTERING_MAP,

NUM_MAP_CHANNELS,
};

@@ -83,6 +86,8 @@ public:

Builder& withTranslucentFactor() { _flags.set(OPACITY_VAL_BIT); return (*this); }

Builder& withScattering() { _flags.set(SCATTERING_VAL_BIT); return (*this); }

Builder& withEmissiveMap() { _flags.set(EMISSIVE_MAP_BIT); return (*this); }
Builder& withAlbedoMap() { _flags.set(ALBEDO_MAP_BIT); return (*this); }
Builder& withMetallicMap() { _flags.set(METALLIC_MAP_BIT); return (*this); }

@@ -94,6 +99,7 @@ public:
Builder& withNormalMap() { _flags.set(NORMAL_MAP_BIT); return (*this); }
Builder& withOcclusionMap() { _flags.set(OCCLUSION_MAP_BIT); return (*this); }
Builder& withLightmapMap() { _flags.set(LIGHTMAP_MAP_BIT); return (*this); }
Builder& withScatteringMap() { _flags.set(SCATTERING_MAP_BIT); return (*this); }

// Convenient standard keys that we will keep on using all over the place
static MaterialKey opaqueAlbedo() { return Builder().withAlbedo().build(); }

@@ -135,7 +141,7 @@ public:

void setOpacityMaskMap(bool value) { _flags.set(OPACITY_MASK_MAP_BIT, value); }
bool isOpacityMaskMap() const { return _flags[OPACITY_MASK_MAP_BIT]; }

void setNormalMap(bool value) { _flags.set(NORMAL_MAP_BIT, value); }
bool isNormalMap() const { return _flags[NORMAL_MAP_BIT]; }

@@ -145,6 +151,12 @@ public:
void setLightmapMap(bool value) { _flags.set(LIGHTMAP_MAP_BIT, value); }
bool isLightmapMap() const { return _flags[LIGHTMAP_MAP_BIT]; }

void setScattering(bool value) { _flags.set(SCATTERING_VAL_BIT, value); }
bool isScattering() const { return _flags[SCATTERING_VAL_BIT]; }

void setScatteringMap(bool value) { _flags.set(SCATTERING_MAP_BIT, value); }
bool isScatteringMap() const { return _flags[SCATTERING_MAP_BIT]; }

void setMapChannel(MapChannel channel, bool value) { _flags.set(EMISSIVE_MAP_BIT + channel, value); }
bool isMapChannel(MapChannel channel) const { return _flags[EMISSIVE_MAP_BIT + channel]; }

@@ -218,6 +230,13 @@ public:
Builder& withoutLightmapMap() { _value.reset(MaterialKey::LIGHTMAP_MAP_BIT); _mask.set(MaterialKey::LIGHTMAP_MAP_BIT); return (*this); }
Builder& withLightmapMap() { _value.set(MaterialKey::LIGHTMAP_MAP_BIT); _mask.set(MaterialKey::LIGHTMAP_MAP_BIT); return (*this); }

Builder& withoutScattering() { _value.reset(MaterialKey::SCATTERING_VAL_BIT); _mask.set(MaterialKey::SCATTERING_VAL_BIT); return (*this); }
Builder& withScattering() { _value.set(MaterialKey::SCATTERING_VAL_BIT); _mask.set(MaterialKey::SCATTERING_VAL_BIT); return (*this); }

Builder& withoutScatteringMap() { _value.reset(MaterialKey::SCATTERING_MAP_BIT); _mask.set(MaterialKey::SCATTERING_MAP_BIT); return (*this); }
Builder& withScatteringMap() { _value.set(MaterialKey::SCATTERING_MAP_BIT); _mask.set(MaterialKey::SCATTERING_MAP_BIT); return (*this); }

// Convenient standard keys that we will keep on using all over the place
static MaterialFilter opaqueAlbedo() { return Builder().withAlbedo().withoutTranslucentFactor().build(); }
};

@@ -275,6 +294,8 @@ public:
void setRoughness(float roughness);
float getRoughness() const { return _schemaBuffer.get<Schema>()._roughness; }

void setScattering(float scattering);
float getScattering() const { return _schemaBuffer.get<Schema>()._scattering; }

// Schema to access the attribute values of the material
class Schema {

@@ -288,7 +309,9 @@ public:
glm::vec3 _fresnel{ 0.03f }; // Fresnel value for a default non metallic
float _metallic{ 0.0f }; // Not Metallic

glm::vec3 _spare{ 0.0f };
float _scattering{ 0.0f }; // Scattering info

glm::vec2 _spare{ 0.0f };

uint32_t _key{ 0 }; // a copy of the materialKey
@@ -15,7 +15,7 @@ struct Material {
vec4 _emissiveOpacity;
vec4 _albedoRoughness;
vec4 _fresnelMetallic;
vec4 _spareKey;
vec4 _scatteringSpare2Key;
};

uniform materialBuffer {

@@ -37,7 +37,9 @@ float getMaterialMetallic(Material m) { return m._fresnelMetallic.a; }

float getMaterialShininess(Material m) { return 1.0 - getMaterialRoughness(m); }

int getMaterialKey(Material m) { return floatBitsToInt(m._spareKey.w); }
float getMaterialScattering(Material m) { return m._scatteringSpare2Key.x; }

int getMaterialKey(Material m) { return floatBitsToInt(m._scatteringSpare2Key.w); }

const int EMISSIVE_VAL_BIT = 0x00000001;
const int UNLIT_VAL_BIT = 0x00000002;

@@ -47,14 +49,17 @@ const int GLOSSY_VAL_BIT = 0x00000010;
const int OPACITY_VAL_BIT = 0x00000020;
const int OPACITY_MASK_MAP_BIT = 0x00000040;
const int OPACITY_TRANSLUCENT_MAP_BIT = 0x00000080;
const int SCATTERING_VAL_BIT = 0x00000100;

const int EMISSIVE_MAP_BIT = 0x00000100;
const int ALBEDO_MAP_BIT = 0x00000200;
const int METALLIC_MAP_BIT = 0x00000400;
const int ROUGHNESS_MAP_BIT = 0x00000800;
const int NORMAL_MAP_BIT = 0x00001000;
const int OCCLUSION_MAP_BIT = 0x00002000;
const int LIGHTMAP_MAP_BIT = 0x00004000;

const int EMISSIVE_MAP_BIT = 0x00000200;
const int ALBEDO_MAP_BIT = 0x00000400;
const int METALLIC_MAP_BIT = 0x00000800;
const int ROUGHNESS_MAP_BIT = 0x00001000;
const int NORMAL_MAP_BIT = 0x00002000;
const int OCCLUSION_MAP_BIT = 0x00004000;
const int LIGHTMAP_MAP_BIT = 0x00008000;
const int SCATTERING_MAP_BIT = 0x00010000;

<@endif@>
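Aside: the shader constants above mirror the C++ MaterialKey flag ordering, where each GLSL bit value is 1 shifted by the flag's index, so inserting SCATTERING_VAL_BIT before the map bits pushes every *_MAP_BIT one position left. A minimal illustrative check of that relationship (abbreviated enum and values copied from the hunks above; the rest is assumption, not commit code):

// Illustrative only: verifies the "bit value == 1 << flag index" convention used above.
#include <cassert>

enum FlagBit {                       // abbreviated copy of the ordering in the hunks above
    OPACITY_TRANSLUCENT_MAP_BIT = 7,
    SCATTERING_VAL_BIT,              // 8  -> 0x00000100
    EMISSIVE_MAP_BIT,                // 9  -> 0x00000200
    ALBEDO_MAP_BIT,                  // 10 -> 0x00000400
};

int main() {
    assert((1 << SCATTERING_VAL_BIT) == 0x00000100);
    assert((1 << EMISSIVE_MAP_BIT) == 0x00000200);   // was 0x00000100 before the change
    assert((1 << ALBEDO_MAP_BIT) == 0x00000400);     // was 0x00000200 before the change
    return 0;
}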
@@ -283,6 +283,7 @@ void AmbientOcclusionEffect::updateGaussianDistribution() {
}

void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
#ifdef FIX_THE_FRAMEBUFFER_CACHE
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());

@@ -406,4 +407,5 @@ void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext

// Update the timer
std::static_pointer_cast<Config>(renderContext->jobConfig)->gpuTime = _gpuTimer.getAverage();
#endif
}
@@ -32,6 +32,9 @@ Antialiasing::Antialiasing() {
}

const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
int width = DependencyManager::get<FramebufferCache>()->getFrameBufferSize().width();
int height = DependencyManager::get<FramebufferCache>()->getFrameBufferSize().height();

if (!_antialiasingPipeline) {
auto vs = gpu::Shader::createVertex(std::string(fxaa_vert));
auto ps = gpu::Shader::createPixel(std::string(fxaa_frag));

@@ -49,11 +52,8 @@ const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
state->setDepthTest(false, false, gpu::LESS_EQUAL);

// Link the antialiasing FBO to texture
_antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create(gpu::Element::COLOR_RGBA_32,
DependencyManager::get<FramebufferCache>()->getFrameBufferSize().width(), DependencyManager::get<FramebufferCache>()->getFrameBufferSize().height()));
auto format = DependencyManager::get<FramebufferCache>()->getLightingTexture()->getTexelFormat();
auto width = _antialiasingBuffer->getWidth();
auto height = _antialiasingBuffer->getHeight();
_antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
auto format = gpu::Element::COLOR_SRGBA_32; // DependencyManager::get<FramebufferCache>()->getLightingTexture()->getTexelFormat();
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
_antialiasingTexture = gpu::TexturePointer(gpu::Texture::create2D(format, width, height, defaultSampler));
_antialiasingBuffer->setRenderBuffer(0, _antialiasingTexture);

@@ -62,10 +62,8 @@ const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
_antialiasingPipeline = gpu::Pipeline::create(program, state);
}

int w = DependencyManager::get<FramebufferCache>()->getFrameBufferSize().width();
int h = DependencyManager::get<FramebufferCache>()->getFrameBufferSize().height();
if (w != _antialiasingBuffer->getWidth() || h != _antialiasingBuffer->getHeight()) {
_antialiasingBuffer->resize(w, h);
if (width != _antialiasingBuffer->getWidth() || height != _antialiasingBuffer->getHeight()) {
_antialiasingBuffer->resize(width, height);
}

return _antialiasingPipeline;

@@ -92,7 +90,7 @@ const gpu::PipelinePointer& Antialiasing::getBlendPipeline() {
return _blendPipeline;
}

void Antialiasing::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
void Antialiasing::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());

@@ -126,7 +124,7 @@ void Antialiasing::run(const render::SceneContextPointer& sceneContext, const re

// FXAA step
getAntialiasingPipeline();
batch.setResourceTexture(0, framebufferCache->getLightingTexture());
batch.setResourceTexture(0, sourceBuffer->getRenderBuffer(0));
batch.setFramebuffer(_antialiasingBuffer);
batch.setPipeline(getAntialiasingPipeline());

@@ -152,10 +150,11 @@ void Antialiasing::run(const render::SceneContextPointer& sceneContext, const re
glm::vec2 texCoordBottomRight(1.0f, 1.0f);
DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

// Blend step
getBlendPipeline();
batch.setResourceTexture(0, _antialiasingTexture);
batch.setFramebuffer(framebufferCache->getLightingFramebuffer());
batch.setFramebuffer(sourceBuffer);
batch.setPipeline(getBlendPipeline());

DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
@@ -26,11 +26,11 @@ public:
class Antialiasing {
public:
using Config = AntiAliasingConfig;
using JobModel = render::Job::Model<Antialiasing, Config>;
using JobModel = render::Job::ModelI<Antialiasing, gpu::FramebufferPointer, Config>;

Antialiasing();
void configure(const Config& config) {}
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer);

const gpu::PipelinePointer& getAntialiasingPipeline();
const gpu::PipelinePointer& getBlendPipeline();
@@ -32,7 +32,7 @@ void DebugDeferredBufferConfig::setMode(int newMode) {
if (newMode == mode) {
return;
} else if (newMode > DebugDeferredBuffer::CustomMode || newMode < 0) {
mode = DebugDeferredBuffer::CustomMode;
mode = 0;
} else {
mode = newMode;
}

@@ -46,7 +46,12 @@ enum Slot {
Depth,
Lighting,
Shadow,
Pyramid,
LinearDepth,
HalfLinearDepth,
HalfNormal,
Curvature,
DiffusedCurvature,
Scattering,
AmbientOcclusion,
AmbientOcclusionBlurred
};

@@ -56,7 +61,7 @@ enum Slot {
static const std::string DEFAULT_ALBEDO_SHADER {
"vec4 getFragmentColor() {"
" DeferredFragment frag = unpackDeferredFragmentNoPosition(uv);"
" return vec4(pow(frag.diffuse, vec3(1.0 / 2.2)), 1.0);"
" return vec4(pow(frag.albedo, vec3(1.0 / 2.2)), 1.0);"
" }"
};

@@ -90,21 +95,28 @@ static const std::string DEFAULT_OCCLUSION_SHADER{
static const std::string DEFAULT_EMISSIVE_SHADER{
"vec4 getFragmentColor() {"
" DeferredFragment frag = unpackDeferredFragmentNoPosition(uv);"
" return (frag.mode == FRAG_MODE_SHADED ? vec4(pow(frag.emissive, vec3(1.0 / 2.2)), 1.0) : vec4(vec3(0.0), 1.0));"
" return (frag.mode == FRAG_MODE_SHADED ? vec4(pow(texture(specularMap, uv).rgb, vec3(1.0 / 2.2)), 1.0) : vec4(vec3(0.0), 1.0));"
" }"
};

static const std::string DEFAULT_UNLIT_SHADER{
"vec4 getFragmentColor() {"
" DeferredFragment frag = unpackDeferredFragmentNoPosition(uv);"
" return (frag.mode == FRAG_MODE_UNLIT ? vec4(pow(frag.diffuse, vec3(1.0 / 2.2)), 1.0) : vec4(vec3(0.0), 1.0));"
" return (frag.mode == FRAG_MODE_UNLIT ? vec4(pow(frag.albedo, vec3(1.0 / 2.2)), 1.0) : vec4(vec3(0.0), 1.0));"
" }"
};

static const std::string DEFAULT_LIGHTMAP_SHADER{
"vec4 getFragmentColor() {"
" DeferredFragment frag = unpackDeferredFragmentNoPosition(uv);"
" return (frag.mode == FRAG_MODE_LIGHTMAPPED ? vec4(pow(frag.emissive, vec3(1.0 / 2.2)), 1.0) : vec4(vec3(0.0), 1.0));"
" return (frag.mode == FRAG_MODE_LIGHTMAPPED ? vec4(pow(texture(specularMap, uv).rgb, vec3(1.0 / 2.2)), 1.0) : vec4(vec3(0.0), 1.0));"
" }"
};

static const std::string DEFAULT_SCATTERING_SHADER{
"vec4 getFragmentColor() {"
" DeferredFragment frag = unpackDeferredFragmentNoPosition(uv);"
" return (frag.mode == FRAG_MODE_SCATTERING ? vec4(vec3(pow(frag.scattering, 1.0 / 2.2)), 1.0) : vec4(vec3(0.0), 1.0));"
" }"
};
@@ -131,13 +143,63 @@ static const std::string DEFAULT_SHADOW_SHADER {
" }"
};

static const std::string DEFAULT_PYRAMID_DEPTH_SHADER {
static const std::string DEFAULT_LINEAR_DEPTH_SHADER {
"vec4 getFragmentColor() {"
" return vec4(vec3(1.0 - texture(pyramidMap, uv).x * 0.01), 1.0);"
" return vec4(vec3(1.0 - texture(linearDepthMap, uv).x * 0.01), 1.0);"
" }"
};

static const std::string DEFAULT_HALF_LINEAR_DEPTH_SHADER{
"vec4 getFragmentColor() {"
" return vec4(vec3(1.0 - texture(halfLinearDepthMap, uv).x * 0.01), 1.0);"
" }"
};

static const std::string DEFAULT_HALF_NORMAL_SHADER{
"vec4 getFragmentColor() {"
" return vec4(vec3(texture(halfNormalMap, uv).xyz), 1.0);"
" }"
};

static const std::string DEFAULT_CURVATURE_SHADER{
"vec4 getFragmentColor() {"
" return vec4(pow(vec3(texture(curvatureMap, uv).a), vec3(1.0 / 2.2)), 1.0);"
// " return vec4(pow(vec3(texture(curvatureMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
//" return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
" }"
};

static const std::string DEFAULT_NORMAL_CURVATURE_SHADER{
"vec4 getFragmentColor() {"
//" return vec4(pow(vec3(texture(curvatureMap, uv).a), vec3(1.0 / 2.2)), 1.0);"
" return vec4(vec3(texture(curvatureMap, uv).xyz), 1.0);"
//" return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
" }"
};

static const std::string DEFAULT_DIFFUSED_CURVATURE_SHADER{
"vec4 getFragmentColor() {"
" return vec4(pow(vec3(texture(diffusedCurvatureMap, uv).a), vec3(1.0 / 2.2)), 1.0);"
// " return vec4(pow(vec3(texture(curvatureMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
//" return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
" }"
};

static const std::string DEFAULT_DIFFUSED_NORMAL_CURVATURE_SHADER{
"vec4 getFragmentColor() {"
//" return vec4(pow(vec3(texture(curvatureMap, uv).a), vec3(1.0 / 2.2)), 1.0);"
" return vec4(vec3(texture(diffusedCurvatureMap, uv).xyz), 1.0);"
//" return vec4(vec3(1.0 - textureLod(pyramidMap, uv, 3).x * 0.01), 1.0);"
" }"
};

static const std::string DEFAULT_DEBUG_SCATTERING_SHADER{
"vec4 getFragmentColor() {"
" return vec4(pow(vec3(texture(scatteringMap, uv).xyz), vec3(1.0 / 2.2)), 1.0);"
// " return vec4(vec3(texture(scatteringMap, uv).xyz), 1.0);"
" }"
};

static const std::string DEFAULT_AMBIENT_OCCLUSION_SHADER{
"vec4 getFragmentColor() {"
" return vec4(vec3(texture(obscuranceMap, uv).x), 1.0);"
@@ -197,18 +259,36 @@ std::string DebugDeferredBuffer::getShaderSourceCode(Mode mode, std::string cust
return DEFAULT_OCCLUSION_SHADER;
case LightmapMode:
return DEFAULT_LIGHTMAP_SHADER;
case ScatteringMode:
return DEFAULT_SCATTERING_SHADER;
case LightingMode:
return DEFAULT_LIGHTING_SHADER;
case ShadowMode:
return DEFAULT_SHADOW_SHADER;
case PyramidDepthMode:
return DEFAULT_PYRAMID_DEPTH_SHADER;
case LinearDepthMode:
return DEFAULT_LINEAR_DEPTH_SHADER;
case HalfLinearDepthMode:
return DEFAULT_HALF_LINEAR_DEPTH_SHADER;
case HalfNormalMode:
return DEFAULT_HALF_NORMAL_SHADER;
case CurvatureMode:
return DEFAULT_CURVATURE_SHADER;
case NormalCurvatureMode:
return DEFAULT_NORMAL_CURVATURE_SHADER;
case DiffusedCurvatureMode:
return DEFAULT_DIFFUSED_CURVATURE_SHADER;
case DiffusedNormalCurvatureMode:
return DEFAULT_DIFFUSED_NORMAL_CURVATURE_SHADER;
case ScatteringDebugMode:
return DEFAULT_DEBUG_SCATTERING_SHADER;
case AmbientOcclusionMode:
return DEFAULT_AMBIENT_OCCLUSION_SHADER;
case AmbientOcclusionBlurredMode:
return DEFAULT_AMBIENT_OCCLUSION_BLURRED_SHADER;
case CustomMode:
return getFileContent(customFile, DEFAULT_CUSTOM_SHADER);
default:
return DEFAULT_ALBEDO_SHADER;
}
Q_UNREACHABLE();
return std::string();
@@ -256,7 +336,12 @@ const gpu::PipelinePointer& DebugDeferredBuffer::getPipeline(Mode mode, std::str
slotBindings.insert(gpu::Shader::Binding("obscuranceMap", AmbientOcclusion));
slotBindings.insert(gpu::Shader::Binding("lightingMap", Lighting));
slotBindings.insert(gpu::Shader::Binding("shadowMap", Shadow));
slotBindings.insert(gpu::Shader::Binding("pyramidMap", Pyramid));
slotBindings.insert(gpu::Shader::Binding("linearDepthMap", LinearDepth));
slotBindings.insert(gpu::Shader::Binding("halfLinearDepthMap", HalfLinearDepth));
slotBindings.insert(gpu::Shader::Binding("halfNormalMap", HalfNormal));
slotBindings.insert(gpu::Shader::Binding("curvatureMap", Curvature));
slotBindings.insert(gpu::Shader::Binding("diffusedCurvatureMap", DiffusedCurvature));
slotBindings.insert(gpu::Shader::Binding("scatteringMap", Scattering));
slotBindings.insert(gpu::Shader::Binding("occlusionBlurredMap", AmbientOcclusionBlurred));
gpu::Shader::makeProgram(*program, slotBindings);

@@ -282,12 +367,23 @@ void DebugDeferredBuffer::configure(const Config& config) {
_size = config.size;
}

void DebugDeferredBuffer::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {
void DebugDeferredBuffer::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
if (_mode == Off) {
return;
}

assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
RenderArgs* args = renderContext->args;

auto& deferredFramebuffer = inputs.get0();
auto& linearDepthTarget = inputs.get1();
auto& surfaceGeometryFramebuffer = inputs.get2();

gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.setViewportTransform(args->_viewport);

const auto geometryBuffer = DependencyManager::get<GeometryCache>();
const auto framebufferCache = DependencyManager::get<FramebufferCache>();
const auto textureCache = DependencyManager::get<TextureCache>();

@@ -306,13 +402,18 @@ void DebugDeferredBuffer::run(const SceneContextPointer& sceneContext, const Ren

batch.setPipeline(getPipeline(_mode, first));

batch.setResourceTexture(Albedo, framebufferCache->getDeferredColorTexture());
batch.setResourceTexture(Normal, framebufferCache->getDeferredNormalTexture());
batch.setResourceTexture(Specular, framebufferCache->getDeferredSpecularTexture());
batch.setResourceTexture(Depth, framebufferCache->getPrimaryDepthTexture());
batch.setResourceTexture(Lighting, framebufferCache->getLightingTexture());
batch.setResourceTexture(Albedo, deferredFramebuffer->getDeferredColorTexture());
batch.setResourceTexture(Normal, deferredFramebuffer->getDeferredNormalTexture());
batch.setResourceTexture(Specular, deferredFramebuffer->getDeferredSpecularTexture());
batch.setResourceTexture(Depth, deferredFramebuffer->getPrimaryDepthTexture());
batch.setResourceTexture(Lighting, deferredFramebuffer->getLightingTexture());
batch.setResourceTexture(Shadow, lightStage.lights[0]->shadow.framebuffer->getDepthStencilBuffer());
batch.setResourceTexture(Pyramid, framebufferCache->getDepthPyramidTexture());
batch.setResourceTexture(LinearDepth, linearDepthTarget->getLinearDepthTexture());
batch.setResourceTexture(HalfLinearDepth, linearDepthTarget->getHalfLinearDepthTexture());
batch.setResourceTexture(HalfNormal, linearDepthTarget->getHalfNormalTexture());

batch.setResourceTexture(Curvature, surfaceGeometryFramebuffer->getCurvatureTexture());
batch.setResourceTexture(DiffusedCurvature, surfaceGeometryFramebuffer->getLowCurvatureTexture());
if (DependencyManager::get<DeferredLightingEffect>()->isAmbientOcclusionEnabled()) {
batch.setResourceTexture(AmbientOcclusion, framebufferCache->getOcclusionTexture());
} else {
@@ -15,6 +15,8 @@
#include <QFileInfo>

#include <render/DrawTask.h>
#include "DeferredFramebuffer.h"
#include "SurfaceGeometryPass.h"

class DebugDeferredBufferConfig : public render::Job::Config {
Q_OBJECT

@@ -34,20 +36,22 @@ signals:

class DebugDeferredBuffer {
public:
using Inputs = render::VaryingSet4<DeferredFramebufferPointer, LinearDepthFramebufferPointer, SurfaceGeometryFramebufferPointer, gpu::FramebufferPointer>;
using Config = DebugDeferredBufferConfig;
using JobModel = render::Job::Model<DebugDeferredBuffer, Config>;
using JobModel = render::Job::ModelI<DebugDeferredBuffer, Inputs, Config>;

DebugDeferredBuffer();

void configure(const Config& config);
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs);

protected:
friend class DebugDeferredBufferConfig;

enum Mode : uint8_t {
// Use Mode suffix to avoid collisions
DepthMode = 0,
Off = 0,
DepthMode,
AlbedoMode,
NormalMode,
RoughnessMode,

@@ -56,23 +60,33 @@ protected:
UnlitMode,
OcclusionMode,
LightmapMode,
ScatteringMode,
LightingMode,
ShadowMode,
PyramidDepthMode,
LinearDepthMode,
HalfLinearDepthMode,
HalfNormalMode,
CurvatureMode,
NormalCurvatureMode,
DiffusedCurvatureMode,
DiffusedNormalCurvatureMode,
ScatteringDebugMode,
AmbientOcclusionMode,
AmbientOcclusionBlurredMode,
CustomMode // Needs to stay last
CustomMode, // Needs to stay last

NumModes,
};

private:
Mode _mode;
Mode _mode{ Off };
glm::vec4 _size;

struct CustomPipeline {
gpu::PipelinePointer pipeline;
mutable QFileInfo info;
};
using StandardPipelines = std::array<gpu::PipelinePointer, CustomMode>;
using StandardPipelines = std::array<gpu::PipelinePointer, NumModes>;
using CustomPipelines = std::unordered_map<std::string, CustomPipeline>;

bool pipelineNeedsUpdate(Mode mode, std::string customFile = std::string()) const;
@@ -11,6 +11,8 @@
<@if not DEFERRED_BUFFER_SLH@>
<@def DEFERRED_BUFFER_SLH@>

<@include gpu/PackedNormal.slh@>

// Unpack the metallic-mode value
const float FRAG_PACK_SHADED_NON_METALLIC = 0.0;
const float FRAG_PACK_SHADED_METALLIC = 0.1;

@@ -20,11 +22,16 @@ const float FRAG_PACK_LIGHTMAPPED_NON_METALLIC = 0.2;
const float FRAG_PACK_LIGHTMAPPED_METALLIC = 0.3;
const float FRAG_PACK_LIGHTMAPPED_RANGE_INV = 1.0 / (FRAG_PACK_LIGHTMAPPED_METALLIC - FRAG_PACK_LIGHTMAPPED_NON_METALLIC);

const float FRAG_PACK_UNLIT = 0.5;
const float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4;
const float FRAG_PACK_SCATTERING_METALLIC = 0.5;
const float FRAG_PACK_SCATTERING_RANGE_INV = 1.0 / (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);

const float FRAG_PACK_UNLIT = 0.6;

const int FRAG_MODE_UNLIT = 0;
const int FRAG_MODE_SHADED = 1;
const int FRAG_MODE_LIGHTMAPPED = 2;
const int FRAG_MODE_SCATTERING = 3;

void unpackModeMetallic(float rawValue, out int mode, out float metallic) {
if (rawValue <= FRAG_PACK_SHADED_METALLIC) {

@@ -32,7 +39,10 @@ void unpackModeMetallic(float rawValue, out int mode, out float metallic) {
metallic = clamp((rawValue - FRAG_PACK_SHADED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);
} else if (rawValue <= FRAG_PACK_LIGHTMAPPED_METALLIC) {
mode = FRAG_MODE_LIGHTMAPPED;
metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_SHADED_RANGE_INV, 0.0, 1.0);
metallic = clamp((rawValue - FRAG_PACK_LIGHTMAPPED_NON_METALLIC) * FRAG_PACK_LIGHTMAPPED_RANGE_INV, 0.0, 1.0);
} else if (rawValue <= FRAG_PACK_SCATTERING_METALLIC) {
mode = FRAG_MODE_SCATTERING;
metallic = clamp((rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) * FRAG_PACK_SCATTERING_RANGE_INV, 0.0, 1.0);
} else if (rawValue >= FRAG_PACK_UNLIT) {
mode = FRAG_MODE_UNLIT;
metallic = 0.0;
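Aside: the constants above pack both the shading mode and a metallic flag into one g-buffer channel: shaded fragments land in [0.0, 0.1], lightmapped in [0.2, 0.3], the new scattering mode in [0.4, 0.5], and unlit moves from 0.5 to 0.6. A hedged scalar restatement of the encode/decode for the scattering band only (values copied from the hunks above; the C++ wrapper itself is illustrative, not commit code):

// Illustrative only; the engine does this in GLSL via mix() and the *_RANGE_INV constants.
#include <algorithm>

constexpr float FRAG_PACK_SCATTERING_NON_METALLIC = 0.4f;
constexpr float FRAG_PACK_SCATTERING_METALLIC = 0.5f;

float packScatteringMetallic(float metallic) {
    // mix(a, b, t) == a + (b - a) * t
    return FRAG_PACK_SCATTERING_NON_METALLIC +
           (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC) * metallic;
}

float unpackScatteringMetallic(float rawValue) {
    float t = (rawValue - FRAG_PACK_SCATTERING_NON_METALLIC) /
              (FRAG_PACK_SCATTERING_METALLIC - FRAG_PACK_SCATTERING_NON_METALLIC);
    return std::clamp(t, 0.0f, 1.0f);
}
// packScatteringMetallic(0.0) == 0.4 and packScatteringMetallic(1.0) == 0.5;
// unpackScatteringMetallic inverts it, mirroring FRAG_PACK_SCATTERING_RANGE_INV above.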
@@ -47,48 +57,15 @@ float packLightmappedMetallic(float metallic) {
return mix(FRAG_PACK_LIGHTMAPPED_NON_METALLIC, FRAG_PACK_LIGHTMAPPED_METALLIC, metallic);
}

float packScatteringMetallic(float metallic) {
return mix(FRAG_PACK_SCATTERING_NON_METALLIC, FRAG_PACK_SCATTERING_METALLIC, metallic);
}

float packUnlit() {
return FRAG_PACK_UNLIT;
}

vec2 signNotZero(vec2 v) {
return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);
}

vec2 float32x3_to_oct(in vec3 v) {
vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));
return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);
}

vec3 oct_to_float32x3(in vec2 e) {
vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));
if (v.z < 0) {
v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);
}
return normalize(v);
}

vec3 snorm12x2_to_unorm8x3(vec2 f) {
vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));
float t = floor(u.y / 256.0);

return floor(vec3(
u.x / 16.0,
fract(u.x / 16.0) * 256.0 + t,
u.y - t * 256.0
)) / 255.0;
}

vec2 unorm8x3_to_snorm12x2(vec3 u) {
u *= 255.0;
u.y *= (1.0 / 16.0);
vec2 s = vec2( u.x * 16.0 + floor(u.y),
fract(u.y) * (16.0 * 256.0) + u.z);
return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));
}

<!
uniform sampler2D normalFittingMap;

vec3 bestFitNormal(vec3 normal) {

@@ -107,14 +84,6 @@ vec3 bestFitNormal(vec3 normal) {

return (cN * 0.5 + 0.5);
}

vec3 packNormal(in vec3 n) {
return snorm12x2_to_unorm8x3(float32x3_to_oct(n));
}

vec3 unpackNormal(in vec3 p) {
return oct_to_float32x3(unorm8x3_to_snorm12x2(p));
}

!>

<@endif@>
|
|
@ -32,97 +32,78 @@ uniform sampler2D obscuranceMap;
|
|||
uniform sampler2D lightingMap;
|
||||
|
||||
|
||||
struct DeferredTransform {
|
||||
mat4 projection;
|
||||
mat4 viewInverse;
|
||||
float stereoSide;
|
||||
vec3 _spareABC;
|
||||
};
|
||||
|
||||
layout(std140) uniform deferredTransformBuffer {
|
||||
DeferredTransform _deferredTransform;
|
||||
};
|
||||
DeferredTransform getDeferredTransform() {
|
||||
return _deferredTransform;
|
||||
}
|
||||
|
||||
bool getStereoMode(DeferredTransform deferredTransform) {
|
||||
return (deferredTransform.stereoSide != 0.0);
|
||||
}
|
||||
float getStereoSide(DeferredTransform deferredTransform) {
|
||||
return (deferredTransform.stereoSide);
|
||||
}
|
||||
|
||||
vec4 evalEyePositionFromZ(DeferredTransform deferredTransform, float depthVal, vec2 texcoord) {
|
||||
vec3 nPos = vec3(texcoord.xy * 2.0f - 1.0f, depthVal * 2.0f - 1.0f);
|
||||
|
||||
// compute the view space position using the depth
|
||||
// basically manually pick the proj matrix components to do the inverse
|
||||
float Ze = -deferredTransform.projection[3][2] / (nPos.z + deferredTransform.projection[2][2]);
|
||||
float Xe = (-Ze * nPos.x - Ze * deferredTransform.projection[2][0] - deferredTransform.projection[3][0]) / deferredTransform.projection[0][0];
|
||||
float Ye = (-Ze * nPos.y - Ze * deferredTransform.projection[2][1] - deferredTransform.projection[3][1]) / deferredTransform.projection[1][1];
|
||||
return vec4(Xe, Ye, Ze, 1.0f);
|
||||
}
|
||||
|
||||
struct DeferredFragment {
|
||||
vec4 normalVal;
|
||||
vec4 diffuseVal;
|
||||
vec4 specularVal;
|
||||
vec4 position;
|
||||
vec3 normal;
|
||||
float metallic;
|
||||
vec3 diffuse;
|
||||
vec3 albedo;
|
||||
float obscurance;
|
||||
vec3 specular;
|
||||
vec3 fresnel;
|
||||
float roughness;
|
||||
vec3 emissive;
|
||||
int mode;
|
||||
float scattering;
|
||||
float depthVal;
|
||||
};
|
||||
|
||||
vec4 unpackDeferredPosition(DeferredTransform deferredTransform, float depthValue, vec2 texcoord) {
|
||||
if (getStereoMode(deferredTransform)) {
|
||||
if (texcoord.x > 0.5) {
|
||||
texcoord.x -= 0.5;
|
||||
}
|
||||
texcoord.x *= 2.0;
|
||||
}
|
||||
return evalEyePositionFromZ(deferredTransform, depthValue, texcoord);
|
||||
}
|
||||
|
||||
DeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {
|
||||
|
||||
vec4 normalVal;
|
||||
vec4 diffuseVal;
|
||||
vec4 specularVal;
|
||||
|
||||
DeferredFragment frag;
|
||||
frag.depthVal = -1;
|
||||
frag.normalVal = texture(normalMap, texcoord);
|
||||
frag.diffuseVal = texture(albedoMap, texcoord);
|
||||
frag.specularVal = texture(specularMap, texcoord);
|
||||
normalVal = texture(normalMap, texcoord);
|
||||
diffuseVal = texture(albedoMap, texcoord);
|
||||
specularVal = texture(specularMap, texcoord);
|
||||
frag.obscurance = texture(obscuranceMap, texcoord).x;
|
||||
|
||||
// Unpack the normal from the map
|
||||
frag.normal = unpackNormal(frag.normalVal.xyz);
|
||||
frag.roughness = frag.normalVal.a;
|
||||
frag.normal = unpackNormal(normalVal.xyz);
|
||||
frag.roughness = normalVal.a;
|
||||
|
||||
// Diffuse color and unpack the mode and the metallicness
|
||||
frag.diffuse = frag.diffuseVal.xyz;
|
||||
unpackModeMetallic(frag.diffuseVal.w, frag.mode, frag.metallic);
|
||||
frag.albedo = diffuseVal.xyz;
|
||||
frag.scattering = 0.0;
|
||||
unpackModeMetallic(diffuseVal.w, frag.mode, frag.metallic);
|
||||
|
||||
//frag.emissive = specularVal.xyz;
|
||||
frag.obscurance = min(specularVal.w, frag.obscurance);
|
||||
|
||||
|
||||
if (frag.mode == FRAG_MODE_SCATTERING) {
|
||||
frag.scattering = specularVal.x;
|
||||
}
|
||||
|
||||
if (frag.metallic <= 0.5) {
|
||||
frag.metallic = 0.0;
|
||||
frag.specular = vec3(0.03); // Default dielectric fresnel value
|
||||
frag.fresnel = vec3(0.03); // Default dielectric fresnel value
|
||||
} else {
|
||||
frag.specular = vec3(frag.diffuseVal.xyz);
|
||||
frag.fresnel = vec3(diffuseVal.xyz);
|
||||
frag.metallic = 1.0;
|
||||
}
|
||||
|
||||
frag.emissive = frag.specularVal.xyz;
|
||||
frag.obscurance = min(frag.specularVal.w, frag.obscurance);
|
||||
|
||||
return frag;
|
||||
}
|
||||
|
||||
DeferredFragment unpackDeferredFragment(DeferredTransform deferredTransform, vec2 texcoord) {
|
||||
|
||||
<@include DeferredTransform.slh@>
|
||||
<$declareDeferredFrameTransform()$>
|
||||
|
||||
vec4 unpackDeferredPosition(DeferredFrameTransform deferredTransform, float depthValue, vec2 texcoord) {
|
||||
int side = 0;
|
||||
if (isStereo()) {
|
||||
if (texcoord.x > 0.5) {
|
||||
texcoord.x -= 0.5;
|
||||
side = 1;
|
||||
}
|
||||
texcoord.x *= 2.0;
|
||||
}
|
||||
float Zeye = evalZeyeFromZdb(depthValue);
|
||||
|
||||
return vec4(evalEyePositionFromZeye(side, Zeye, texcoord), 1.0);
|
||||
}
|
||||
|
||||
DeferredFragment unpackDeferredFragment(DeferredFrameTransform deferredTransform, vec2 texcoord) {
|
||||
|
||||
float depthValue = texture(depthMap, texcoord).r;
@@ -136,5 +117,30 @@ DeferredFragment unpackDeferredFragment(DeferredTransform deferredTransform, vec
<@func declareDeferredCurvature()@>

// the curvature texture
uniform sampler2D curvatureMap;

vec4 fetchCurvature(vec2 texcoord) {
    return texture(curvatureMap, texcoord);
}

// the diffused curvature texture
uniform sampler2D diffusedCurvatureMap;

vec4 fetchDiffusedCurvature(vec2 texcoord) {
    return texture(diffusedCurvatureMap, texcoord);
}

void unpackMidLowNormalCurvature(vec2 texcoord, out vec4 midNormalCurvature, out vec4 lowNormalCurvature) {
    midNormalCurvature = fetchCurvature(texcoord);
    lowNormalCurvature = fetchDiffusedCurvature(texcoord);
    midNormalCurvature.xyz = normalize((midNormalCurvature.xyz - 0.5f) * 2.0f);
    lowNormalCurvature.xyz = normalize((lowNormalCurvature.xyz - 0.5f) * 2.0f);
    midNormalCurvature.w = (midNormalCurvature.w * 2.0f - 1.0f);
    lowNormalCurvature.w = (lowNormalCurvature.w * 2.0f - 1.0f);
}
<@endfunc@>

<@endif@>
|
||||
|
|
|
@@ -13,10 +13,12 @@
|
|||
|
||||
<@include DeferredBuffer.slh@>
|
||||
|
||||
|
||||
layout(location = 0) out vec4 _fragColor0;
|
||||
layout(location = 1) out vec4 _fragColor1;
|
||||
layout(location = 2) out vec4 _fragColor2;
|
||||
|
||||
layout(location = 3) out vec4 _fragColor3;
|
||||
|
||||
// the alpha threshold
|
||||
const float alphaThreshold = 0.5;
|
||||
|
@@ -30,25 +32,30 @@ const float DEFAULT_METALLIC = 0;
|
|||
const vec3 DEFAULT_SPECULAR = vec3(0.1);
|
||||
const vec3 DEFAULT_EMISSIVE = vec3(0.0);
|
||||
const float DEFAULT_OCCLUSION = 1.0;
|
||||
const float DEFAULT_SCATTERING = 0.0;
|
||||
const vec3 DEFAULT_FRESNEL = DEFAULT_EMISSIVE;
|
||||
|
||||
void packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion) {
|
||||
void packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 emissive, float occlusion, float scattering) {
|
||||
if (alpha != 1.0) {
|
||||
discard;
|
||||
}
|
||||
_fragColor0 = vec4(albedo, packShadedMetallic(metallic));
|
||||
_fragColor0 = vec4(albedo, ((scattering > 0.0) ? packScatteringMetallic(metallic) : packShadedMetallic(metallic)));
|
||||
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
|
||||
_fragColor2 = vec4(emissive, occlusion);
|
||||
_fragColor2 = vec4(((scattering > 0.0) ? vec3(scattering) : emissive), occlusion);
|
||||
|
||||
_fragColor3 = vec4(emissive, 1.0);
|
||||
}
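The packShadedMetallic / packScatteringMetallic / unpackModeMetallic helpers used above live elsewhere in the shader library and are not shown in this diff. As a rough illustration of the idea they rely on, here is a hedged C++ sketch of one possible way to fold a shading mode and a metallic value into a single 8-bit G-buffer channel; the mode constants and bit layout are assumptions for illustration, not the repo's actual encoding.

#include <cstdint>
#include <cstdio>

// Assumed mode values, for illustration only.
constexpr int FRAG_MODE_SHADED = 0;
constexpr int FRAG_MODE_SCATTERING = 1;

// Put the mode in the two high bits and quantize metallic into the six low bits.
uint8_t packModeMetallic(int mode, float metallic) {
    uint8_t quantized = static_cast<uint8_t>(metallic * 63.0f + 0.5f);
    return static_cast<uint8_t>((mode << 6) | (quantized & 0x3F));
}

void unpackModeMetallic(uint8_t packed, int& mode, float& metallic) {
    mode = packed >> 6;
    metallic = (packed & 0x3F) / 63.0f;
}

int main() {
    int mode = 0;
    float metallic = 0.0f;
    unpackModeMetallic(packModeMetallic(FRAG_MODE_SCATTERING, 0.25f), mode, metallic);
    printf("mode=%d metallic=%.2f\n", mode, metallic); // mode=1 metallic=0.25
    return 0;
}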
|
||||
|
||||
|
||||
void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 emissive) {
|
||||
void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float roughness, float metallic, vec3 fresnel, vec3 lightmap) {
|
||||
if (alpha != 1.0) {
|
||||
discard;
|
||||
}
|
||||
|
||||
_fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));
|
||||
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
|
||||
_fragColor2 = vec4(emissive, 1.0);
|
||||
_fragColor2 = vec4(lightmap, 1.0);
|
||||
|
||||
_fragColor3 = vec4(lightmap * albedo, 1.0);
|
||||
}
|
||||
|
||||
void packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {
|
||||
|
@@ -57,7 +64,8 @@ void packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {
|
|||
}
|
||||
_fragColor0 = vec4(color, packUnlit());
|
||||
_fragColor1 = vec4(packNormal(normal), 1.0);
|
||||
//_fragColor2 = vec4(vec3(0.0), 1.0); // If unlit, do not worry about the emissive color target
|
||||
// _fragColor2 = vec4(vec3(0.0), 1.0);
|
||||
_fragColor3 = vec4(color, 1.0);
|
||||
}
|
||||
|
||||
void packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 albedo, vec3 fresnel, float roughness) {
|
||||
|
|
libraries/render-utils/src/DeferredFrameTransform.cpp (new file, 71 lines)
|
@@ -0,0 +1,71 @@
|
|||
//
|
||||
// DeferredFrameTransform.cpp
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 6/3/2016.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "DeferredFrameTransform.h"
|
||||
|
||||
#include "gpu/Context.h"
|
||||
#include "render/Engine.h"
|
||||
|
||||
DeferredFrameTransform::DeferredFrameTransform() {
|
||||
FrameTransform frameTransform;
|
||||
_frameTransformBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(FrameTransform), (const gpu::Byte*) &frameTransform));
|
||||
}
|
||||
|
||||
void DeferredFrameTransform::update(RenderArgs* args) {
|
||||
|
||||
// Update the depth info with near and far (same for stereo)
|
||||
auto nearZ = args->getViewFrustum().getNearClip();
|
||||
auto farZ = args->getViewFrustum().getFarClip();
|
||||
|
||||
auto& frameTransformBuffer = _frameTransformBuffer.edit<FrameTransform>();
|
||||
frameTransformBuffer.depthInfo = glm::vec4(nearZ*farZ, farZ - nearZ, -farZ, 0.0f);
|
||||
|
||||
frameTransformBuffer.pixelInfo = args->_viewport;
|
||||
|
||||
//_parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;
|
||||
|
||||
Transform cameraTransform;
|
||||
args->getViewFrustum().evalViewTransform(cameraTransform);
|
||||
cameraTransform.getMatrix(frameTransformBuffer.invView);
|
||||
cameraTransform.getInverseMatrix(frameTransformBuffer.view);
|
||||
|
||||
args->getViewFrustum().evalProjectionMatrix(frameTransformBuffer.projectionMono);
|
||||
|
||||
// Running in stereo?
|
||||
bool isStereo = args->_context->isStereo();
|
||||
if (!isStereo) {
|
||||
frameTransformBuffer.projection[0] = frameTransformBuffer.projectionMono;
|
||||
frameTransformBuffer.stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);
|
||||
frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / args->_viewport.z, 1.0f / args->_viewport.w, 0.0f, 0.0f);
|
||||
} else {
|
||||
|
||||
mat4 projMats[2];
|
||||
mat4 eyeViews[2];
|
||||
args->_context->getStereoProjections(projMats);
|
||||
args->_context->getStereoViews(eyeViews);
|
||||
|
||||
for (int i = 0; i < 2; i++) {
|
||||
// Compose the mono Eye space to Stereo clip space Projection Matrix
|
||||
auto sideViewMat = projMats[i] * eyeViews[i];
|
||||
frameTransformBuffer.projection[i] = sideViewMat;
|
||||
}
|
||||
|
||||
frameTransformBuffer.stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
|
||||
frameTransformBuffer.invpixelInfo = glm::vec4(1.0f / (float)(args->_viewport.z >> 1), 1.0f / args->_viewport.w, 0.0f, 0.0f);
|
||||
|
||||
}
|
||||
}
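The depthInfo vector packed above as { n*f, f - n, -f } is what the shader-side helpers (e.g. evalZeyeFromZdb in DeferredTransform.slh) presumably use to turn a [0,1] depth-buffer value back into an eye-space Z. Below is a minimal standalone C++ sketch of that reconstruction, assuming a standard OpenGL perspective projection; the round-trip values are illustrative only.

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <cstdio>

// Eye-space Z (negative in front of the camera) from a [0,1] depth-buffer value,
// using depthInfo = { n*f, f - n, -f }:  Zeye = n*f / (Zdb*(f - n) - f)
float zEyeFromZdb(const glm::vec4& depthInfo, float Zdb) {
    return depthInfo.x / (Zdb * depthInfo.y + depthInfo.z);
}

int main() {
    float nearZ = 0.1f, farZ = 100.0f;
    glm::vec4 depthInfo(nearZ * farZ, farZ - nearZ, -farZ, 0.0f);

    // Push a known eye-space point through the projection to get the stored depth value.
    glm::mat4 proj = glm::perspective(glm::radians(45.0f), 1.0f, nearZ, farZ);
    glm::vec4 clip = proj * glm::vec4(0.0f, 0.0f, -7.5f, 1.0f);
    float Zdb = (clip.z / clip.w) * 0.5f + 0.5f;

    printf("Zeye = %f\n", zEyeFromZdb(depthInfo, Zdb)); // ~ -7.5
    return 0;
}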
|
||||
|
||||
void GenerateDeferredFrameTransform::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, DeferredFrameTransformPointer& frameTransform) {
|
||||
if (!frameTransform) {
|
||||
frameTransform = std::make_shared<DeferredFrameTransform>();
|
||||
}
|
||||
frameTransform->update(renderContext->args);
|
||||
}
|
libraries/render-utils/src/DeferredFrameTransform.h (new file, 78 lines)
|
@@ -0,0 +1,78 @@
|
|||
//
|
||||
// DeferredFrameTransform.h
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 6/3/2016.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_DeferredFrameTransform_h
|
||||
#define hifi_DeferredFrameTransform_h
|
||||
|
||||
#include "gpu/Resource.h"
|
||||
#include "render/DrawTask.h"
|
||||
|
||||
class RenderArgs;
|
||||
|
||||
// DeferredFrameTransform is a helper class gathering in one place the camera transform
// and frame resolution needed by all the deferred rendering passes that consume the deferred buffers
|
||||
class DeferredFrameTransform {
|
||||
public:
|
||||
using UniformBufferView = gpu::BufferView;
|
||||
|
||||
DeferredFrameTransform();
|
||||
|
||||
void update(RenderArgs* args);
|
||||
|
||||
UniformBufferView getFrameTransformBuffer() const { return _frameTransformBuffer; }
|
||||
|
||||
protected:
|
||||
|
||||
|
||||
// Class describing the uniform buffer with the transform info common to the AO shaders
|
||||
// It changes every frame
|
||||
class FrameTransform {
|
||||
public:
|
||||
// Pixel info is { viewport width, height }
|
||||
glm::vec4 pixelInfo;
|
||||
glm::vec4 invpixelInfo;
|
||||
// Depth info is { n*f, f - n, -f }
|
||||
glm::vec4 depthInfo;
|
||||
// Stereo info is { isStereoFrame, halfWidth }
|
||||
glm::vec4 stereoInfo{ 0.0 };
|
||||
// Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
|
||||
glm::mat4 projection[2];
|
||||
// The mono projection matrix
|
||||
glm::mat4 projectionMono;
|
||||
// Inv View matrix from eye space (mono) to world space
|
||||
glm::mat4 invView;
|
||||
// View matrix from world space to eye space (mono)
|
||||
glm::mat4 view;
|
||||
|
||||
FrameTransform() {}
|
||||
};
|
||||
UniformBufferView _frameTransformBuffer;
|
||||
|
||||
|
||||
};
|
||||
|
||||
using DeferredFrameTransformPointer = std::shared_ptr<DeferredFrameTransform>;
|
||||
|
||||
|
||||
|
||||
|
||||
class GenerateDeferredFrameTransform {
|
||||
public:
|
||||
using JobModel = render::Job::ModelO<GenerateDeferredFrameTransform, DeferredFrameTransformPointer>;
|
||||
|
||||
GenerateDeferredFrameTransform() {}
|
||||
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, DeferredFrameTransformPointer& frameTransform);
|
||||
|
||||
private:
|
||||
};
|
||||
|
||||
#endif // hifi_DeferredFrameTransform_h
|
libraries/render-utils/src/DeferredFramebuffer.cpp (new file, 143 lines)
|
@@ -0,0 +1,143 @@
|
|||
//
|
||||
// DeferredFramebuffer.cpp
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 7/11/2016.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "DeferredFramebuffer.h"
|
||||
|
||||
|
||||
DeferredFramebuffer::DeferredFramebuffer() {
|
||||
}
|
||||
|
||||
|
||||
void DeferredFramebuffer::updatePrimaryDepth(const gpu::TexturePointer& depthBuffer) {
|
||||
// If the depth buffer or its size changed, we need to reset our FBOs
|
||||
bool reset = false;
|
||||
if ((_primaryDepthTexture != depthBuffer)) {
|
||||
_primaryDepthTexture = depthBuffer;
|
||||
reset = true;
|
||||
}
|
||||
if (_primaryDepthTexture) {
|
||||
auto newFrameSize = glm::ivec2(_primaryDepthTexture->getDimensions());
|
||||
if (_frameSize != newFrameSize) {
|
||||
_frameSize = newFrameSize;
|
||||
reset = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (reset) {
|
||||
_deferredFramebuffer.reset();
|
||||
_deferredFramebufferDepthColor.reset();
|
||||
_deferredColorTexture.reset();
|
||||
_deferredNormalTexture.reset();
|
||||
_deferredSpecularTexture.reset();
|
||||
_lightingTexture.reset();
|
||||
_lightingFramebuffer.reset();
|
||||
}
|
||||
}
|
||||
|
||||
void DeferredFramebuffer::allocate() {
|
||||
|
||||
_deferredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_deferredFramebufferDepthColor = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
|
||||
auto colorFormat = gpu::Element::COLOR_SRGBA_32;
|
||||
auto linearFormat = gpu::Element::COLOR_RGBA_32;
|
||||
auto width = _frameSize.x;
|
||||
auto height = _frameSize.y;
|
||||
|
||||
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
|
||||
|
||||
_deferredColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
|
||||
|
||||
_deferredNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(linearFormat, width, height, defaultSampler));
|
||||
_deferredSpecularTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
|
||||
|
||||
_deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture);
|
||||
_deferredFramebuffer->setRenderBuffer(1, _deferredNormalTexture);
|
||||
_deferredFramebuffer->setRenderBuffer(2, _deferredSpecularTexture);
|
||||
|
||||
_deferredFramebufferDepthColor->setRenderBuffer(0, _deferredColorTexture);
|
||||
|
||||
auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
|
||||
if (!_primaryDepthTexture) {
|
||||
_primaryDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, width, height, defaultSampler));
|
||||
}
|
||||
|
||||
_deferredFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
_deferredFramebufferDepthColor->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
|
||||
auto smoothSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR);
|
||||
|
||||
_lightingTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10), width, height, defaultSampler));
|
||||
_lightingFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_lightingFramebuffer->setRenderBuffer(0, _lightingTexture);
|
||||
_lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
_deferredFramebuffer->setRenderBuffer(3, _lightingTexture);
|
||||
|
||||
}
|
||||
|
||||
|
||||
gpu::TexturePointer DeferredFramebuffer::getPrimaryDepthTexture() {
|
||||
if (!_primaryDepthTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _primaryDepthTexture;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer DeferredFramebuffer::getDeferredFramebuffer() {
|
||||
if (!_deferredFramebuffer) {
|
||||
allocate();
|
||||
}
|
||||
return _deferredFramebuffer;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer DeferredFramebuffer::getDeferredFramebufferDepthColor() {
|
||||
if (!_deferredFramebufferDepthColor) {
|
||||
allocate();
|
||||
}
|
||||
return _deferredFramebufferDepthColor;
|
||||
}
|
||||
|
||||
gpu::TexturePointer DeferredFramebuffer::getDeferredColorTexture() {
|
||||
if (!_deferredColorTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _deferredColorTexture;
|
||||
}
|
||||
|
||||
gpu::TexturePointer DeferredFramebuffer::getDeferredNormalTexture() {
|
||||
if (!_deferredNormalTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _deferredNormalTexture;
|
||||
}
|
||||
|
||||
gpu::TexturePointer DeferredFramebuffer::getDeferredSpecularTexture() {
|
||||
if (!_deferredSpecularTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _deferredSpecularTexture;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer DeferredFramebuffer::getLightingFramebuffer() {
|
||||
if (!_lightingFramebuffer) {
|
||||
allocate();
|
||||
}
|
||||
return _lightingFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer DeferredFramebuffer::getLightingTexture() {
|
||||
if (!_lightingTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _lightingTexture;
|
||||
}
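A usage sketch for the new class (mirroring how PrepareDeferred::run drives it later in this diff): hand it the primary depth texture once per frame, then fetch its targets, which allocate lazily. The surrounding setup and the exampleSetup function are illustrative, not code from the repository.

#include <memory>
#include "DeferredFramebuffer.h"

void exampleSetup(const gpu::TexturePointer& primaryDepth) {
    auto deferredFramebuffer = std::make_shared<DeferredFramebuffer>();
    deferredFramebuffer->updatePrimaryDepth(primaryDepth);        // drives (re)allocation on depth/size change
    auto gbuffer = deferredFramebuffer->getDeferredFramebuffer(); // color + normal + specular + lighting RT3
    auto lighting = deferredFramebuffer->getLightingFramebuffer();// shares the same depth/stencil buffer
    (void)gbuffer;
    (void)lighting;
}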
|
libraries/render-utils/src/DeferredFramebuffer.h (new file, 60 lines)
|
@@ -0,0 +1,60 @@
|
|||
//
|
||||
// DeferredFramebuffer.h
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 7/11/2016.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_DeferredFramebuffer_h
|
||||
#define hifi_DeferredFramebuffer_h
|
||||
|
||||
#include "gpu/Resource.h"
|
||||
#include "gpu/Framebuffer.h"
|
||||
|
||||
class RenderArgs;
|
||||
|
||||
// DeferredFramebuffer is a helper class gathering in one place the GBuffer (Framebuffer) and lighting framebuffer
|
||||
class DeferredFramebuffer {
|
||||
public:
|
||||
DeferredFramebuffer();
|
||||
|
||||
gpu::FramebufferPointer getDeferredFramebuffer();
|
||||
gpu::FramebufferPointer getDeferredFramebufferDepthColor();
|
||||
|
||||
gpu::TexturePointer getDeferredColorTexture();
|
||||
gpu::TexturePointer getDeferredNormalTexture();
|
||||
gpu::TexturePointer getDeferredSpecularTexture();
|
||||
|
||||
gpu::FramebufferPointer getLightingFramebuffer();
|
||||
gpu::TexturePointer getLightingTexture();
|
||||
|
||||
// Update the depth buffer which will drive the allocation of all the other resources according to its size.
|
||||
void updatePrimaryDepth(const gpu::TexturePointer& depthBuffer);
|
||||
gpu::TexturePointer getPrimaryDepthTexture();
|
||||
const glm::ivec2& getFrameSize() const { return _frameSize; }
|
||||
|
||||
protected:
|
||||
void allocate();
|
||||
|
||||
gpu::TexturePointer _primaryDepthTexture;
|
||||
|
||||
gpu::FramebufferPointer _deferredFramebuffer;
|
||||
gpu::FramebufferPointer _deferredFramebufferDepthColor;
|
||||
|
||||
gpu::TexturePointer _deferredColorTexture;
|
||||
gpu::TexturePointer _deferredNormalTexture;
|
||||
gpu::TexturePointer _deferredSpecularTexture;
|
||||
|
||||
gpu::TexturePointer _lightingTexture;
|
||||
gpu::FramebufferPointer _lightingFramebuffer;
|
||||
|
||||
glm::ivec2 _frameSize;
|
||||
};
|
||||
|
||||
using DeferredFramebufferPointer = std::shared_ptr<DeferredFramebuffer>;
|
||||
|
||||
#endif // hifi_DeferredFramebuffer_h
|
|
@@ -12,119 +12,123 @@
|
|||
<@def DEFERRED_GLOBAL_LIGHT_SLH@>
|
||||
|
||||
<@include model/Light.slh@>
|
||||
<@include DeferredLighting.slh@>
|
||||
|
||||
<@func declareSkyboxMap()@>
|
||||
// declareSkyboxMap
|
||||
uniform samplerCube skyboxMap;
|
||||
<@include LightingModel.slh@>
|
||||
|
||||
vec4 evalSkyboxLight(vec3 direction, float lod) {
|
||||
// textureQueryLevels is not available until #430, so we require explicit lod
|
||||
// float mipmapLevel = lod * textureQueryLevels(skyboxMap);
|
||||
return textureLod(skyboxMap, direction, lod);
|
||||
}
|
||||
<@endfunc@>
|
||||
<@include LightAmbient.slh@>
|
||||
<@include LightDirectional.slh@>
|
||||
|
||||
<@func declareEvalGlobalSpecularIrradiance(supportAmbientSphere, supportAmbientMap, supportIfAmbientMapElseAmbientSphere)@>
|
||||
|
||||
vec3 fresnelSchlickAmbient(vec3 fresnelColor, vec3 lightDir, vec3 halfDir, float gloss) {
|
||||
return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * pow(1.0 - clamp(dot(lightDir, halfDir), 0.0, 1.0), 5);
|
||||
}
|
||||
|
||||
<@if supportAmbientMap@>
|
||||
<$declareSkyboxMap()$>
|
||||
<@endif@>
|
||||
|
||||
vec3 evalGlobalSpecularIrradiance(Light light, vec3 fragEyeDir, vec3 fragNormal, float roughness, vec3 fresnel, float obscurance) {
|
||||
vec3 direction = -reflect(fragEyeDir, fragNormal);
|
||||
vec3 ambientFresnel = fresnelSchlickAmbient(fresnel, fragEyeDir, fragNormal, 1 - roughness);
|
||||
vec3 specularLight;
|
||||
<@if supportIfAmbientMapElseAmbientSphere@>
|
||||
if (getLightHasAmbientMap(light))
|
||||
<@endif@>
|
||||
<@if supportAmbientMap@>
|
||||
{
|
||||
float levels = getLightAmbientMapNumMips(light);
|
||||
float lod = min(floor((roughness) * levels), levels);
|
||||
specularLight = evalSkyboxLight(direction, lod).xyz;
|
||||
}
|
||||
<@endif@>
|
||||
<@if supportIfAmbientMapElseAmbientSphere@>
|
||||
else
|
||||
<@endif@>
|
||||
<@if supportAmbientSphere@>
|
||||
{
|
||||
specularLight = evalSphericalLight(getLightAmbientSphere(light), direction).xyz;
|
||||
}
|
||||
<@endif@>
|
||||
|
||||
return specularLight * ambientFresnel * getLightAmbientIntensity(light);
|
||||
}
|
||||
<@endfunc@>
|
||||
|
||||
<@func prepareGlobalLight()@>
|
||||
<@func prepareGlobalLight(isScattering)@>
|
||||
// prepareGlobalLight
|
||||
|
||||
// Transform directions to worldspace
|
||||
// vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0));
|
||||
vec3 fragNormal = vec3((normal));
|
||||
vec3 fragEyeVector = vec3(invViewMat * vec4(-position, 0.0));
|
||||
vec3 fragEyeDir = normalize(fragEyeVector);
|
||||
|
||||
// Get light
|
||||
Light light = getLight();
|
||||
vec3 fresnel = vec3(0.03); // Default dielectric fresnel value
|
||||
if (metallic > 0.5) {
|
||||
fresnel = albedo;
|
||||
metallic = 1.0;
|
||||
}
|
||||
vec4 shading = evalFragShading(fragNormal, -getLightDirection(light), fragEyeDir, metallic, fresnel, roughness);
|
||||
vec3 color = vec3(albedo * shading.w + shading.rgb) * min(shadowAttenuation, obscurance) * getLightColor(light) * getLightIntensity(light);
|
||||
color += emissive;
|
||||
|
||||
vec3 color = vec3(0.0);
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
|
||||
<@func declareEvalAmbientGlobalColor()@>
|
||||
vec3 evalAmbientGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 emissive, float roughness) {
|
||||
vec3 evalAmbientGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 albedo, vec3 fresnel, float metallic, float roughness) {
|
||||
<$prepareGlobalLight()$>
|
||||
color += albedo * getLightColor(light) * obscurance * getLightAmbientIntensity(light);
|
||||
return color;
|
||||
}
|
||||
<@endfunc@>
|
||||
|
||||
<@func declareEvalAmbientSphereGlobalColor()@>
|
||||
<$declareEvalGlobalSpecularIrradiance(1, 0, 0)$>
|
||||
<@func declareEvalAmbientSphereGlobalColor(supportScattering)@>
|
||||
|
||||
<$declareLightingAmbient(1, _SCRIBE_NULL, _SCRIBE_NULL, $supportScattering$)$>
|
||||
<$declareLightingDirectional($supportScattering$)$>
|
||||
|
||||
<@if supportScattering@>
|
||||
<$declareDeferredCurvature()$>
|
||||
<@endif@>
|
||||
|
||||
vec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,
|
||||
vec3 albedo, vec3 fresnel, float metallic, float roughness
|
||||
<@if supportScattering@>
|
||||
, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature
|
||||
<@endif@> ) {
|
||||
|
||||
<$prepareGlobalLight($supportScattering$)$>
|
||||
|
||||
// Ambient
|
||||
vec3 ambientDiffuse;
|
||||
vec3 ambientSpecular;
|
||||
evalLightingAmbient(ambientDiffuse, ambientSpecular, light, fragEyeDir, fragNormal, roughness, metallic, fresnel, albedo, obscurance
|
||||
<@if supportScattering@>
|
||||
,scattering, midNormalCurvature, lowNormalCurvature
|
||||
<@endif@> );
|
||||
color += ambientDiffuse;
|
||||
color += ambientSpecular;
|
||||
|
||||
|
||||
vec3 evalAmbientSphereGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 emissive, float roughness) {
|
||||
<$prepareGlobalLight()$>
|
||||
|
||||
// Diffuse from ambient
|
||||
color += (1 - metallic) * albedo * evalSphericalLight(getLightAmbientSphere(light), fragNormal).xyz * obscurance * getLightAmbientIntensity(light);
|
||||
|
||||
// Specular highlight from ambient
|
||||
vec3 specularLighting = evalGlobalSpecularIrradiance(light, fragEyeDir, fragNormal, roughness, fresnel, obscurance);
|
||||
color += specularLighting;
|
||||
// Directional
|
||||
vec3 directionalDiffuse;
|
||||
vec3 directionalSpecular;
|
||||
evalLightingDirectional(directionalDiffuse, directionalSpecular, light, fragEyeDir, fragNormal, roughness, metallic, fresnel, albedo, shadowAttenuation
|
||||
<@if supportScattering@>
|
||||
,scattering, midNormalCurvature, lowNormalCurvature
|
||||
<@endif@> );
|
||||
color += directionalDiffuse;
|
||||
color += directionalSpecular;
|
||||
|
||||
return color;
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
<@func declareEvalSkyboxGlobalColor()@>
|
||||
<$declareEvalGlobalSpecularIrradiance(0, 1, 0)$>
|
||||
|
||||
vec3 evalSkyboxGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 emissive, float roughness) {
|
||||
<$prepareGlobalLight()$>
|
||||
<@func declareEvalSkyboxGlobalColor(supportScattering)@>
|
||||
|
||||
// Diffuse from ambient
|
||||
color += (1 - metallic) * albedo * evalSphericalLight(getLightAmbientSphere(light), fragNormal).xyz * obscurance * getLightAmbientIntensity(light);
|
||||
<$declareLightingAmbient(_SCRIBE_NULL, 1, _SCRIBE_NULL, $supportScattering$)$>
|
||||
<$declareLightingDirectional($supportScattering$)$>
|
||||
|
||||
// Specular highlight from ambient
|
||||
vec3 specularLighting = evalGlobalSpecularIrradiance(light, fragEyeDir, fragNormal, roughness, fresnel, obscurance);
|
||||
color += specularLighting;
|
||||
<@if supportScattering@>
|
||||
<$declareDeferredCurvature()$>
|
||||
<@endif@>
|
||||
|
||||
vec3 evalSkyboxGlobalColor(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal,
|
||||
vec3 albedo, vec3 fresnel, float metallic, float roughness
|
||||
<@if supportScattering@>
|
||||
, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature
|
||||
<@endif@>
|
||||
) {
|
||||
<$prepareGlobalLight($supportScattering$)$>
|
||||
|
||||
// Ambient
|
||||
vec3 ambientDiffuse;
|
||||
vec3 ambientSpecular;
|
||||
evalLightingAmbient(ambientDiffuse, ambientSpecular, light, fragEyeDir, fragNormal, roughness, metallic, fresnel, albedo, obscurance
|
||||
<@if supportScattering@>
|
||||
,scattering, midNormalCurvature, lowNormalCurvature
|
||||
<@endif@>
|
||||
);
|
||||
color += ambientDiffuse;
|
||||
color += ambientSpecular;
|
||||
|
||||
|
||||
// Directional
|
||||
vec3 directionalDiffuse;
|
||||
vec3 directionalSpecular;
|
||||
evalLightingDirectional(directionalDiffuse, directionalSpecular, light, fragEyeDir, fragNormal, roughness, metallic, fresnel, albedo, shadowAttenuation
|
||||
<@if supportScattering@>
|
||||
,scattering, midNormalCurvature, lowNormalCurvature
|
||||
<@endif@>
|
||||
);
|
||||
color += directionalDiffuse;
|
||||
color += directionalSpecular;
|
||||
|
||||
return color;
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
<@func declareEvalLightmappedColor()@>
|
||||
|
@@ -147,25 +151,37 @@ vec3 evalLightmappedColor(mat4 invViewMat, float shadowAttenuation, float obscur
|
|||
// Ambient light is the lightmap when in shadow
|
||||
vec3 ambientLight = (1 - lightAttenuation) * lightmap * getLightAmbientIntensity(light);
|
||||
|
||||
return obscurance * albedo * (diffuseLight + ambientLight);
|
||||
return isLightmapEnabled() * obscurance * albedo * (diffuseLight + ambientLight);
|
||||
}
|
||||
<@endfunc@>
|
||||
|
||||
|
||||
|
||||
|
||||
<@func declareEvalGlobalLightingAlphaBlended()@>
|
||||
|
||||
<$declareEvalGlobalSpecularIrradiance(1, 1, 1)$>
|
||||
<$declareLightingAmbient(1, 1, 1)$>
|
||||
<$declareLightingDirectional()$>
|
||||
|
||||
vec3 evalGlobalLightingAlphaBlended(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 emissive, float roughness, float opacity) {
|
||||
vec3 evalGlobalLightingAlphaBlended(mat4 invViewMat, float shadowAttenuation, float obscurance, vec3 position, vec3 normal, vec3 albedo, vec3 fresnel, float metallic, vec3 emissive, float roughness, float opacity) {
|
||||
<$prepareGlobalLight()$>
|
||||
|
||||
color += emissive * isEmissiveEnabled();
|
||||
|
||||
// Diffuse from ambient
|
||||
color += (1 - metallic) * albedo * evalSphericalLight(getLightAmbientSphere(light), fragNormal).xyz * obscurance * getLightAmbientIntensity(light);
|
||||
// Ambient
|
||||
vec3 ambientDiffuse;
|
||||
vec3 ambientSpecular;
|
||||
evalLightingAmbient(ambientDiffuse, ambientSpecular, light, fragEyeDir, fragNormal, roughness, metallic, fresnel, albedo, obscurance);
|
||||
color += ambientDiffuse;
|
||||
color += ambientSpecular / opacity;
|
||||
|
||||
// Specular highlight from ambient
|
||||
vec3 specularLighting = evalGlobalSpecularIrradiance(light, fragEyeDir, fragNormal, roughness, fresnel, obscurance);
|
||||
color += specularLighting / opacity;
|
||||
|
||||
// Directional
|
||||
vec3 directionalDiffuse;
|
||||
vec3 directionalSpecular;
|
||||
evalLightingDirectional(directionalDiffuse, directionalSpecular, light, fragEyeDir, fragNormal, roughness, metallic, fresnel, albedo, shadowAttenuation);
|
||||
color += directionalDiffuse;
|
||||
color += directionalSpecular / opacity;
|
||||
|
||||
return color;
|
||||
}
|
||||
|
|
|
@@ -7,74 +7,4 @@
|
|||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
!>
|
||||
<@if not DEFERRED_LIGHTING_SLH@>
|
||||
<@def DEFERRED_LIGHTING_SLH@>
|
||||
|
||||
|
||||
<@func declareEvalPBRShading()@>
|
||||
|
||||
vec3 fresnelSchlick(vec3 fresnelColor, vec3 lightDir, vec3 halfDir) {
|
||||
return fresnelColor + (1.0 - fresnelColor) * pow(1.0 - clamp(dot(lightDir, halfDir), 0.0, 1.0), 5);
|
||||
}
|
||||
|
||||
float specularDistribution(float roughness, vec3 normal, vec3 halfDir) {
|
||||
float ndoth = clamp(dot(halfDir, normal), 0.0, 1.0);
|
||||
float gloss2 = pow(0.001 + roughness, 4);
|
||||
float denom = (ndoth * ndoth*(gloss2 - 1) + 1);
|
||||
float power = gloss2 / (3.14159 * denom * denom);
|
||||
return power;
|
||||
}
|
||||
<! //NOTE: Another implementation of specularDistribution
|
||||
float specularDistribution(float roughness, vec3 normal, vec3 halfDir) {
|
||||
float gloss = exp2(10 * (1.0 - roughness) + 1);
|
||||
float power = pow(clamp(dot(halfDir, normal), 0.0, 1.0), gloss);
|
||||
power *= (gloss * 0.125 + 0.25);
|
||||
return power;
|
||||
}
|
||||
!>
|
||||
// Frag Shading returns the diffuse amount as W and the specular rgb as xyz
|
||||
vec4 evalPBRShading(vec3 fragNormal, vec3 fragLightDir, vec3 fragEyeDir, float metallic, vec3 fresnel, float roughness) {
|
||||
// Diffuse Lighting
|
||||
float diffuse = clamp(dot(fragNormal, fragLightDir), 0.0, 1.0);
|
||||
|
||||
// Specular Lighting
|
||||
vec3 halfDir = normalize(fragEyeDir + fragLightDir);
|
||||
vec3 fresnelColor = fresnelSchlick(fresnel, fragLightDir,halfDir);
|
||||
float power = specularDistribution(roughness, fragNormal, halfDir);
|
||||
vec3 specular = power * fresnelColor * diffuse;
|
||||
|
||||
return vec4(specular, (1.0 - metallic) * diffuse * (1 - fresnelColor.x));
|
||||
}
|
||||
<@endfunc@>
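A quick numeric check of the Schlick approximation used by fresnelSchlick above, written as a standalone C++/glm mirror (illustrative only, outside the shader): the term returns the base reflectance F0 at normal incidence and tends toward 1.0 at grazing angles.

#include <glm/glm.hpp>
#include <cmath>
#include <cstdio>

glm::vec3 fresnelSchlick(glm::vec3 fresnelColor, glm::vec3 lightDir, glm::vec3 halfDir) {
    float cosTheta = glm::clamp(glm::dot(lightDir, halfDir), 0.0f, 1.0f);
    return fresnelColor + (1.0f - fresnelColor) * std::pow(1.0f - cosTheta, 5.0f);
}

int main() {
    glm::vec3 f0(0.03f);            // default dielectric reflectance, as in the shader
    glm::vec3 l(0.0f, 0.0f, 1.0f);
    printf("normal incidence: %f\n", fresnelSchlick(f0, l, glm::vec3(0.0f, 0.0f, 1.0f)).x); // ~0.03
    printf("grazing:          %f\n", fresnelSchlick(f0, l, glm::vec3(1.0f, 0.0f, 0.0f)).x); // ~1.0
    return 0;
}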
|
||||
|
||||
<@func declareEvalBlinnRShading()@>
|
||||
|
||||
vec4 evalBlinnShading(vec3 fragNormal, vec3 fragLightDir, vec3 fragEyeDir, vec3 specular, float roughness) {
|
||||
// Diffuse Lighting
|
||||
float diffuseDot = dot(fragNormal, fragLightDir);
|
||||
float facingLight = step(0.0, diffuseDot);
|
||||
float diffuse = diffuseDot * facingLight;
|
||||
|
||||
// Specular Lighting depends on the half vector and the roughness
|
||||
vec3 halfDir = normalize(fragEyeDir + fragLightDir);
|
||||
|
||||
float gloss = (1.0 - roughness) * 128.0;
|
||||
gloss *= gloss;
|
||||
float specularPower = pow(facingLight * max(0.0, dot(halfDir, fragNormal)), gloss);
|
||||
vec3 reflect = specularPower * specular * diffuse;
|
||||
|
||||
return vec4(reflect, diffuse);
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
|
||||
<$declareEvalPBRShading()$>
|
||||
|
||||
// Return xyz the specular/reflection component and w the diffuse component
|
||||
vec4 evalFragShading(vec3 fragNormal, vec3 fragLightDir, vec3 fragEyeDir, float metallic, vec3 specular, float roughness) {
|
||||
return evalPBRShading(fragNormal, fragLightDir, fragEyeDir, metallic, specular, roughness);
|
||||
}
|
||||
|
||||
<@endif@>
|
||||
!>
|
|
@@ -38,17 +38,21 @@
|
|||
#include "point_light_frag.h"
|
||||
#include "spot_light_frag.h"
|
||||
|
||||
using namespace render;
|
||||
|
||||
struct LightLocations {
|
||||
int radius;
|
||||
int ambientSphere;
|
||||
int lightBufferUnit;
|
||||
int texcoordMat;
|
||||
int coneParam;
|
||||
int deferredTransformBuffer;
|
||||
int shadowTransformBuffer;
|
||||
int radius{ -1 };
|
||||
int ambientSphere{ -1 };
|
||||
int lightBufferUnit{ -1 };
|
||||
int texcoordFrameTransform{ -1 };
|
||||
int sphereParam{ -1 };
|
||||
int coneParam{ -1 };
|
||||
int deferredFrameTransformBuffer{ -1 };
|
||||
int subsurfaceScatteringParametersBuffer{ -1 };
|
||||
int shadowTransformBuffer{ -1 };
|
||||
};
|
||||
|
||||
enum {
|
||||
enum DeferredShader_MapSlot {
|
||||
DEFERRED_BUFFER_COLOR_UNIT = 0,
|
||||
DEFERRED_BUFFER_NORMAL_UNIT = 1,
|
||||
DEFERRED_BUFFER_EMISSIVE_UNIT = 2,
|
||||
|
@@ -56,7 +60,18 @@ enum {
|
|||
DEFERRED_BUFFER_OBSCURANCE_UNIT = 4,
|
||||
SHADOW_MAP_UNIT = 5,
|
||||
SKYBOX_MAP_UNIT = 6,
|
||||
DEFERRED_BUFFER_CURVATURE_UNIT,
|
||||
DEFERRED_BUFFER_DIFFUSED_CURVATURE_UNIT,
|
||||
SCATTERING_LUT_UNIT,
|
||||
SCATTERING_SPECULAR_UNIT,
|
||||
};
|
||||
enum DeferredShader_BufferSlot {
|
||||
DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT = 0,
|
||||
SCATTERING_PARAMETERS_BUFFER_SLOT,
|
||||
LIGHTING_MODEL_BUFFER_SLOT = render::ShapePipeline::Slot::LIGHTING_MODEL,
|
||||
LIGHT_GPU_SLOT = render::ShapePipeline::Slot::LIGHT,
|
||||
};
|
||||
|
||||
static void loadLightProgram(const char* vertSource, const char* fragSource, bool lightVolume, gpu::PipelinePointer& program, LightLocationsPtr& locations);
|
||||
|
||||
void DeferredLightingEffect::init() {
|
||||
|
@@ -132,368 +147,6 @@ void DeferredLightingEffect::addSpotLight(const glm::vec3& position, float radiu
|
|||
}
|
||||
}
|
||||
|
||||
void DeferredLightingEffect::prepare(RenderArgs* args) {
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
batch.setStateScissorRect(args->_viewport);
|
||||
|
||||
// Clear Lighting buffer
|
||||
auto lightingFbo = DependencyManager::get<FramebufferCache>()->getLightingFramebuffer();
|
||||
|
||||
batch.setFramebuffer(lightingFbo);
|
||||
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(vec3(0), 0), true);
|
||||
|
||||
// Clear deferred
|
||||
auto deferredFbo = DependencyManager::get<FramebufferCache>()->getDeferredFramebuffer();
|
||||
|
||||
batch.setFramebuffer(deferredFbo);
|
||||
|
||||
// Clear Color, Depth and Stencil for deferred buffer
|
||||
batch.clearFramebuffer(
|
||||
gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_COLOR1 | gpu::Framebuffer::BUFFER_COLOR2 |
|
||||
gpu::Framebuffer::BUFFER_DEPTH |
|
||||
gpu::Framebuffer::BUFFER_STENCIL,
|
||||
vec4(vec3(0), 0), 1.0, 0.0, true);
|
||||
});
|
||||
}
|
||||
|
||||
void DeferredLightingEffect::render(const render::RenderContextPointer& renderContext) {
|
||||
auto args = renderContext->args;
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
|
||||
// Allocate the parameters buffer used by all the deferred shaders
|
||||
if (!_deferredTransformBuffer[0]._buffer) {
|
||||
DeferredTransform parameters;
|
||||
_deferredTransformBuffer[0] = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(DeferredTransform), (const gpu::Byte*) ¶meters));
|
||||
_deferredTransformBuffer[1] = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(DeferredTransform), (const gpu::Byte*) ¶meters));
|
||||
}
|
||||
|
||||
// Framebuffer copy operations cannot function as multipass stereo operations.
|
||||
batch.enableStereo(false);
|
||||
|
||||
// perform deferred lighting, rendering to free fbo
|
||||
auto framebufferCache = DependencyManager::get<FramebufferCache>();
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
|
||||
QSize framebufferSize = framebufferCache->getFrameBufferSize();
|
||||
|
||||
// binding the first framebuffer
|
||||
auto lightingFBO = framebufferCache->getLightingFramebuffer();
|
||||
batch.setFramebuffer(lightingFBO);
|
||||
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
batch.setStateScissorRect(args->_viewport);
|
||||
|
||||
// Bind the G-Buffer surfaces
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, framebufferCache->getDeferredColorTexture());
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, framebufferCache->getDeferredNormalTexture());
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, framebufferCache->getDeferredSpecularTexture());
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, framebufferCache->getPrimaryDepthTexture());
|
||||
|
||||
// FIXME: Different render modes should have different tasks
|
||||
if (args->_renderMode == RenderArgs::DEFAULT_RENDER_MODE && _ambientOcclusionEnabled) {
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, framebufferCache->getOcclusionTexture());
|
||||
} else {
|
||||
// need to assign the white texture if ao is off
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, textureCache->getWhiteTexture());
|
||||
}
|
||||
|
||||
assert(_lightStage.lights.size() > 0);
|
||||
const auto& globalShadow = _lightStage.lights[0]->shadow;
|
||||
|
||||
// Bind the shadow buffer
|
||||
batch.setResourceTexture(SHADOW_MAP_UNIT, globalShadow.map);
|
||||
|
||||
// The main viewport is assumed to be the mono viewport (or the two stereo faces side by side within that viewport)
|
||||
auto monoViewport = args->_viewport;
|
||||
float sMin = args->_viewport.x / (float)framebufferSize.width();
|
||||
float sWidth = args->_viewport.z / (float)framebufferSize.width();
|
||||
float tMin = args->_viewport.y / (float)framebufferSize.height();
|
||||
float tHeight = args->_viewport.w / (float)framebufferSize.height();
|
||||
|
||||
// The view frustum is the mono frustum base
|
||||
auto viewFrustum = args->getViewFrustum();
|
||||
|
||||
// Eval the mono projection
|
||||
mat4 monoProjMat;
|
||||
viewFrustum.evalProjectionMatrix(monoProjMat);
|
||||
|
||||
// The mono view transform
|
||||
Transform monoViewTransform;
|
||||
viewFrustum.evalViewTransform(monoViewTransform);
|
||||
|
||||
// The mono view matrix coming from the mono view transform
|
||||
glm::mat4 monoViewMat;
|
||||
monoViewTransform.getMatrix(monoViewMat);
|
||||
|
||||
// Running in stereo?
|
||||
bool isStereo = args->_context->isStereo();
|
||||
int numPasses = 1;
|
||||
|
||||
mat4 projMats[2];
|
||||
Transform viewTransforms[2];
|
||||
ivec4 viewports[2];
|
||||
vec4 clipQuad[2];
|
||||
vec2 screenBottomLeftCorners[2];
|
||||
vec2 screenTopRightCorners[2];
|
||||
vec4 fetchTexcoordRects[2];
|
||||
|
||||
DeferredTransform deferredTransforms[2];
|
||||
auto geometryCache = DependencyManager::get<GeometryCache>();
|
||||
|
||||
if (isStereo) {
|
||||
numPasses = 2;
|
||||
|
||||
mat4 eyeViews[2];
|
||||
args->_context->getStereoProjections(projMats);
|
||||
args->_context->getStereoViews(eyeViews);
|
||||
|
||||
float halfWidth = 0.5f * sWidth;
|
||||
|
||||
for (int i = 0; i < numPasses; i++) {
|
||||
// In stereo, the two sides are laid out side by side in the mono viewport, each at half width
|
||||
int sideWidth = monoViewport.z >> 1;
|
||||
viewports[i] = ivec4(monoViewport.x + (i * sideWidth), monoViewport.y, sideWidth, monoViewport.w);
|
||||
|
||||
deferredTransforms[i].projection = projMats[i];
|
||||
|
||||
auto sideViewMat = monoViewMat * glm::inverse(eyeViews[i]);
|
||||
// viewTransforms[i].evalFromRawMatrix(sideViewMat);
|
||||
viewTransforms[i] = monoViewTransform;
|
||||
viewTransforms[i].postTranslate(-glm::vec3((eyeViews[i][3])));// evalFromRawMatrix(sideViewMat);
|
||||
deferredTransforms[i].viewInverse = sideViewMat;
|
||||
|
||||
deferredTransforms[i].stereoSide = (i == 0 ? -1.0f : 1.0f);
|
||||
|
||||
clipQuad[i] = glm::vec4(sMin + i * halfWidth, tMin, halfWidth, tHeight);
|
||||
screenBottomLeftCorners[i] = glm::vec2(-1.0f + i * 1.0f, -1.0f);
|
||||
screenTopRightCorners[i] = glm::vec2(i * 1.0f, 1.0f);
|
||||
|
||||
fetchTexcoordRects[i] = glm::vec4(sMin + i * halfWidth, tMin, halfWidth, tHeight);
|
||||
}
|
||||
} else {
|
||||
|
||||
viewports[0] = monoViewport;
|
||||
projMats[0] = monoProjMat;
|
||||
|
||||
deferredTransforms[0].projection = monoProjMat;
|
||||
|
||||
deferredTransforms[0].viewInverse = monoViewMat;
|
||||
viewTransforms[0] = monoViewTransform;
|
||||
|
||||
deferredTransforms[0].stereoSide = 0.0f;
|
||||
|
||||
clipQuad[0] = glm::vec4(sMin, tMin, sWidth, tHeight);
|
||||
screenBottomLeftCorners[0] = glm::vec2(-1.0f, -1.0f);
|
||||
screenTopRightCorners[0] = glm::vec2(1.0f, 1.0f);
|
||||
|
||||
fetchTexcoordRects[0] = glm::vec4(sMin, tMin, sWidth, tHeight);
|
||||
}
|
||||
|
||||
auto eyePoint = viewFrustum.getPosition();
|
||||
float nearRadius = glm::distance(eyePoint, viewFrustum.getNearTopLeft());
|
||||
|
||||
|
||||
for (int side = 0; side < numPasses; side++) {
|
||||
// Render in this side's viewport
|
||||
batch.setViewportTransform(viewports[side]);
|
||||
batch.setStateScissorRect(viewports[side]);
|
||||
|
||||
// Sync and Bind the correct DeferredTransform ubo
|
||||
_deferredTransformBuffer[side]._buffer->setSubData(0, sizeof(DeferredTransform), (const gpu::Byte*) &deferredTransforms[side]);
|
||||
batch.setUniformBuffer(_directionalLightLocations->deferredTransformBuffer, _deferredTransformBuffer[side]);
|
||||
|
||||
glm::vec2 topLeft(-1.0f, -1.0f);
|
||||
glm::vec2 bottomRight(1.0f, 1.0f);
|
||||
glm::vec2 texCoordTopLeft(clipQuad[side].x, clipQuad[side].y);
|
||||
glm::vec2 texCoordBottomRight(clipQuad[side].x + clipQuad[side].z, clipQuad[side].y + clipQuad[side].w);
|
||||
|
||||
// First Global directional light and ambient pass
|
||||
{
|
||||
auto& program = _shadowMapEnabled ? _directionalLightShadow : _directionalLight;
|
||||
LightLocationsPtr locations = _shadowMapEnabled ? _directionalLightShadowLocations : _directionalLightLocations;
|
||||
const auto& keyLight = _allocatedLights[_globalLights.front()];
|
||||
|
||||
// Setup the global directional pass pipeline
|
||||
{
|
||||
if (_shadowMapEnabled) {
|
||||
if (keyLight->getAmbientMap()) {
|
||||
program = _directionalSkyboxLightShadow;
|
||||
locations = _directionalSkyboxLightShadowLocations;
|
||||
} else {
|
||||
program = _directionalAmbientSphereLightShadow;
|
||||
locations = _directionalAmbientSphereLightShadowLocations;
|
||||
}
|
||||
} else {
|
||||
if (keyLight->getAmbientMap()) {
|
||||
program = _directionalSkyboxLight;
|
||||
locations = _directionalSkyboxLightLocations;
|
||||
} else {
|
||||
program = _directionalAmbientSphereLight;
|
||||
locations = _directionalAmbientSphereLightLocations;
|
||||
}
|
||||
}
|
||||
|
||||
if (locations->shadowTransformBuffer >= 0) {
|
||||
batch.setUniformBuffer(locations->shadowTransformBuffer, globalShadow.getBuffer());
|
||||
}
|
||||
batch.setPipeline(program);
|
||||
}
|
||||
|
||||
{ // Setup the global lighting
|
||||
setupKeyLightBatch(batch, locations->lightBufferUnit, SKYBOX_MAP_UNIT);
|
||||
}
|
||||
|
||||
{
|
||||
batch.setModelTransform(Transform());
|
||||
batch.setProjectionTransform(glm::mat4());
|
||||
batch.setViewTransform(Transform());
|
||||
|
||||
glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
|
||||
geometryCache->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
|
||||
}
|
||||
|
||||
if (keyLight->getAmbientMap()) {
|
||||
batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
auto texcoordMat = glm::mat4();
|
||||
/* texcoordMat[0] = glm::vec4(sWidth / 2.0f, 0.0f, 0.0f, sMin + sWidth / 2.0f);
|
||||
texcoordMat[1] = glm::vec4(0.0f, tHeight / 2.0f, 0.0f, tMin + tHeight / 2.0f);
|
||||
*/ texcoordMat[0] = glm::vec4(fetchTexcoordRects[side].z / 2.0f, 0.0f, 0.0f, fetchTexcoordRects[side].x + fetchTexcoordRects[side].z / 2.0f);
|
||||
texcoordMat[1] = glm::vec4(0.0f, fetchTexcoordRects[side].w / 2.0f, 0.0f, fetchTexcoordRects[side].y + fetchTexcoordRects[side].w / 2.0f);
|
||||
texcoordMat[2] = glm::vec4(0.0f, 0.0f, 1.0f, 0.0f);
|
||||
texcoordMat[3] = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);
|
||||
|
||||
// enlarge the scales slightly to account for tessellation
|
||||
const float SCALE_EXPANSION = 0.05f;
|
||||
|
||||
|
||||
batch.setProjectionTransform(projMats[side]);
|
||||
batch.setViewTransform(viewTransforms[side]);
|
||||
|
||||
// Splat Point lights
|
||||
if (!_pointLights.empty()) {
|
||||
batch.setPipeline(_pointLight);
|
||||
|
||||
batch._glUniformMatrix4fv(_pointLightLocations->texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));
|
||||
|
||||
for (auto lightID : _pointLights) {
|
||||
auto& light = _allocatedLights[lightID];
|
||||
// IN DEBUG: light->setShowContour(true);
|
||||
batch.setUniformBuffer(_pointLightLocations->lightBufferUnit, light->getSchemaBuffer());
|
||||
|
||||
float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
|
||||
// TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume,
|
||||
// we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
|
||||
if (glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius) {
|
||||
Transform model;
|
||||
model.setTranslation(glm::vec3(0.0f, 0.0f, -1.0f));
|
||||
batch.setModelTransform(model);
|
||||
batch.setViewTransform(Transform());
|
||||
batch.setProjectionTransform(glm::mat4());
|
||||
|
||||
glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
|
||||
DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
|
||||
|
||||
batch.setProjectionTransform(projMats[side]);
|
||||
batch.setViewTransform(viewTransforms[side]);
|
||||
} else {
|
||||
Transform model;
|
||||
model.setTranslation(glm::vec3(light->getPosition().x, light->getPosition().y, light->getPosition().z));
|
||||
batch.setModelTransform(model.postScale(expandedRadius));
|
||||
batch._glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
|
||||
geometryCache->renderSphere(batch);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Splat spot lights
|
||||
if (!_spotLights.empty()) {
|
||||
batch.setPipeline(_spotLight);
|
||||
|
||||
batch._glUniformMatrix4fv(_spotLightLocations->texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));
|
||||
|
||||
for (auto lightID : _spotLights) {
|
||||
auto light = _allocatedLights[lightID];
|
||||
// IN DEBUG: light->setShowContour(true);
|
||||
batch.setUniformBuffer(_spotLightLocations->lightBufferUnit, light->getSchemaBuffer());
|
||||
|
||||
auto eyeLightPos = eyePoint - light->getPosition();
|
||||
auto eyeHalfPlaneDistance = glm::dot(eyeLightPos, light->getDirection());
|
||||
|
||||
const float TANGENT_LENGTH_SCALE = 0.666f;
|
||||
glm::vec4 coneParam(light->getSpotAngleCosSin(), TANGENT_LENGTH_SCALE * tanf(0.5f * light->getSpotAngle()), 1.0f);
|
||||
|
||||
float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
|
||||
// TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume,
|
||||
// we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
|
||||
const float OVER_CONSERVATIVE_SCALE = 1.1f;
|
||||
if ((eyeHalfPlaneDistance > -nearRadius) &&
|
||||
(glm::distance(eyePoint, glm::vec3(light->getPosition())) < (expandedRadius * OVER_CONSERVATIVE_SCALE) + nearRadius)) {
|
||||
coneParam.w = 0.0f;
|
||||
batch._glUniform4fv(_spotLightLocations->coneParam, 1, reinterpret_cast< const float* >(&coneParam));
|
||||
|
||||
Transform model;
|
||||
model.setTranslation(glm::vec3(0.0f, 0.0f, -1.0f));
|
||||
batch.setModelTransform(model);
|
||||
batch.setViewTransform(Transform());
|
||||
batch.setProjectionTransform(glm::mat4());
|
||||
|
||||
glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
|
||||
DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
|
||||
|
||||
batch.setProjectionTransform( projMats[side]);
|
||||
batch.setViewTransform(viewTransforms[side]);
|
||||
} else {
|
||||
light->setShowContour(false);
|
||||
coneParam.w = 1.0f;
|
||||
batch._glUniform4fv(_spotLightLocations->coneParam, 1, reinterpret_cast< const float* >(&coneParam));
|
||||
|
||||
Transform model;
|
||||
model.setTranslation(light->getPosition());
|
||||
model.postRotate(light->getOrientation());
|
||||
model.postScale(glm::vec3(expandedRadius, expandedRadius, expandedRadius));
|
||||
|
||||
batch.setModelTransform(model);
|
||||
auto mesh = getSpotLightMesh();
|
||||
|
||||
batch.setIndexBuffer(mesh->getIndexBuffer());
|
||||
batch.setInputBuffer(0, mesh->getVertexBuffer());
|
||||
batch.setInputFormat(mesh->getVertexFormat());
|
||||
|
||||
{
|
||||
auto& part = mesh->getPartBuffer().get<model::Mesh::Part>(0);
|
||||
batch.drawIndexed(model::Mesh::topologyToPrimitive(part._topology), part._numIndices, part._startIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Probably not necessary in the long run because the gpu layer would unbind this texture if used as a render target
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, nullptr);
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, nullptr);
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, nullptr);
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, nullptr);
|
||||
batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, nullptr);
|
||||
batch.setResourceTexture(SHADOW_MAP_UNIT, nullptr);
|
||||
batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);
|
||||
|
||||
batch.setUniformBuffer(_directionalLightLocations->deferredTransformBuffer, nullptr);
|
||||
});
|
||||
|
||||
// End of the Lighting pass
|
||||
if (!_pointLights.empty()) {
|
||||
_pointLights.clear();
|
||||
}
|
||||
if (!_spotLights.empty()) {
|
||||
_spotLights.clear();
|
||||
}
|
||||
}
|
||||
|
||||
void DeferredLightingEffect::setupKeyLightBatch(gpu::Batch& batch, int lightBufferUnit, int skyboxCubemapUnit) {
|
||||
PerformanceTimer perfTimer("DLE->setupBatch()");
|
||||
auto keyLight = _allocatedLights[_globalLights.front()];
|
||||
|
@@ -522,21 +175,29 @@ static void loadLightProgram(const char* vertSource, const char* fragSource, boo
|
|||
slotBindings.insert(gpu::Shader::Binding(std::string("shadowMap"), SHADOW_MAP_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("skyboxMap"), SKYBOX_MAP_UNIT));
|
||||
|
||||
static const int LIGHT_GPU_SLOT = 3;
|
||||
static const int DEFERRED_TRANSFORM_BUFFER_SLOT = 2;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("curvatureMap"), DEFERRED_BUFFER_CURVATURE_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("diffusedCurvatureMap"), DEFERRED_BUFFER_DIFFUSED_CURVATURE_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("scatteringLUT"), SCATTERING_LUT_UNIT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("scatteringSpecularBeckmann"), SCATTERING_SPECULAR_UNIT));
|
||||
|
||||
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("lightingModelBuffer"), LIGHTING_MODEL_BUFFER_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("subsurfaceScatteringParametersBuffer"), SCATTERING_PARAMETERS_BUFFER_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("lightBuffer"), LIGHT_GPU_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("deferredTransformBuffer"), DEFERRED_TRANSFORM_BUFFER_SLOT));
|
||||
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
locations->radius = program->getUniforms().findLocation("radius");
|
||||
locations->ambientSphere = program->getUniforms().findLocation("ambientSphere.L00");
|
||||
|
||||
locations->texcoordMat = program->getUniforms().findLocation("texcoordMat");
|
||||
locations->texcoordFrameTransform = program->getUniforms().findLocation("texcoordFrameTransform");
|
||||
locations->sphereParam = program->getUniforms().findLocation("sphereParam");
|
||||
locations->coneParam = program->getUniforms().findLocation("coneParam");
|
||||
|
||||
locations->lightBufferUnit = program->getBuffers().findLocation("lightBuffer");
|
||||
locations->deferredTransformBuffer = program->getBuffers().findLocation("deferredTransformBuffer");
|
||||
locations->deferredFrameTransformBuffer = program->getBuffers().findLocation("deferredFrameTransformBuffer");
|
||||
locations->subsurfaceScatteringParametersBuffer = program->getBuffers().findLocation("subsurfaceScatteringParametersBuffer");
|
||||
locations->shadowTransformBuffer = program->getBuffers().findLocation("shadowTransformBuffer");
|
||||
|
||||
auto state = std::make_shared<gpu::State>();
|
||||
|
@@ -550,11 +211,13 @@ static void loadLightProgram(const char* vertSource, const char* fragSource, boo
|
|||
state->setDepthTest(true, false, gpu::LESS_EQUAL);
|
||||
|
||||
// TODO: We should use DepthClamp and avoid changing geometry for inside /outside cases
|
||||
|
||||
// additive blending
|
||||
state->setBlendFunction(true, gpu::State::ONE, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
|
||||
|
||||
} else {
|
||||
state->setCullMode(gpu::State::CULL_BACK);
|
||||
// additive blending
|
||||
state->setBlendFunction(true, gpu::State::ONE, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
|
||||
}
|
||||
pipeline = gpu::Pipeline::create(program, state);
|
||||
|
||||
|
@@ -667,3 +330,387 @@ model::MeshPointer DeferredLightingEffect::getSpotLightMesh() {
|
|||
return _spotLightMesh;
|
||||
}
|
||||
|
||||
void PreparePrimaryFramebuffer::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, gpu::FramebufferPointer& primaryFramebuffer) {
|
||||
|
||||
auto framebufferCache = DependencyManager::get<FramebufferCache>();
|
||||
auto framebufferSize = framebufferCache->getFrameBufferSize();
|
||||
glm::ivec2 frameSize(framebufferSize.width(), framebufferSize.height());
|
||||
|
||||
if (!_primaryFramebuffer) {
|
||||
_primaryFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
auto colorFormat = gpu::Element::COLOR_SRGBA_32;
|
||||
|
||||
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
|
||||
auto primaryColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, frameSize.x, frameSize.y, defaultSampler));
|
||||
|
||||
|
||||
_primaryFramebuffer->setRenderBuffer(0, primaryColorTexture);
|
||||
|
||||
|
||||
auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
|
||||
auto primaryDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, frameSize.x, frameSize.y, defaultSampler));
|
||||
|
||||
_primaryFramebuffer->setDepthStencilBuffer(primaryDepthTexture, depthFormat);
|
||||
|
||||
}
|
||||
_primaryFramebuffer->resize(frameSize.x, frameSize.y);
|
||||
|
||||
primaryFramebuffer = _primaryFramebuffer;
|
||||
}
|
||||
|
||||
void PrepareDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
    auto args = renderContext->args;

    auto primaryFramebuffer = inputs.get0();
    auto lightingModel = inputs.get1();

    if (!_deferredFramebuffer) {
        _deferredFramebuffer = std::make_shared<DeferredFramebuffer>();
    }
    _deferredFramebuffer->updatePrimaryDepth(primaryFramebuffer->getDepthStencilBuffer());

    outputs.edit0() = _deferredFramebuffer;
    outputs.edit1() = _deferredFramebuffer->getLightingFramebuffer();

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);

        // Clear deferred
        auto deferredFbo = _deferredFramebuffer->getDeferredFramebuffer();
        batch.setFramebuffer(deferredFbo);

        // Clear Color, Depth and Stencil for deferred buffer
        batch.clearFramebuffer(
            gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_COLOR1 | gpu::Framebuffer::BUFFER_COLOR2 | gpu::Framebuffer::BUFFER_COLOR3 |
            gpu::Framebuffer::BUFFER_DEPTH |
            gpu::Framebuffer::BUFFER_STENCIL,
            vec4(vec3(0), 0), 1.0, 0.0, true);

        // For the rest of the rendering, bind the lighting model
        batch.setUniformBuffer(LIGHTING_MODEL_BUFFER_SLOT, lightingModel->getParametersBuffer());
    });
}
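For reference, my reading of the clearFramebuffer call above, inferred from how gpu::Batch is used elsewhere in this diff (the argument names below are mine):

// batch.clearFramebuffer(targets, color, depth, stencil, enableScissor)
//   targets       : all four color attachments plus depth and stencil
//   color         : vec4(vec3(0), 0) -> transparent black
//   depth         : 1.0              -> reset to the far plane
//   stencil       : 0                -> cleared stencil
//   enableScissor : true             -> honors the scissor rect set just above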
void RenderDeferredSetup::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext,
    const DeferredFrameTransformPointer& frameTransform,
    const DeferredFramebufferPointer& deferredFramebuffer,
    const LightingModelPointer& lightingModel,
    const SurfaceGeometryFramebufferPointer& surfaceGeometryFramebuffer,
    const gpu::FramebufferPointer& lowCurvatureNormalFramebuffer,
    const SubsurfaceScatteringResourcePointer& subsurfaceScatteringResource) {

    auto args = renderContext->args;
    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {

        // Framebuffer copy operations cannot function as multipass stereo operations.
        batch.enableStereo(false);

        // perform deferred lighting, rendering to free fbo
        auto framebufferCache = DependencyManager::get<FramebufferCache>();

        auto textureCache = DependencyManager::get<TextureCache>();
        auto deferredLightingEffect = DependencyManager::get<DeferredLightingEffect>();

        // binding the first framebuffer
        auto lightingFBO = deferredFramebuffer->getLightingFramebuffer();
        batch.setFramebuffer(lightingFBO);

        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);

        // Bind the G-Buffer surfaces
        batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, deferredFramebuffer->getDeferredColorTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, deferredFramebuffer->getDeferredNormalTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, deferredFramebuffer->getDeferredSpecularTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, deferredFramebuffer->getPrimaryDepthTexture());

        // FIXME: Different render modes should have different tasks
        if (args->_renderMode == RenderArgs::DEFAULT_RENDER_MODE && deferredLightingEffect->isAmbientOcclusionEnabled()) {
            batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, framebufferCache->getOcclusionTexture());
        } else {
            // need to assign the white texture if ao is off
            batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, textureCache->getWhiteTexture());
        }

        // The Deferred Frame Transform buffer
        batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, frameTransform->getFrameTransformBuffer());

        // The lighting model
        batch.setUniformBuffer(LIGHTING_MODEL_BUFFER_SLOT, lightingModel->getParametersBuffer());

        // Subsurface scattering specific
        if (surfaceGeometryFramebuffer) {
            batch.setResourceTexture(DEFERRED_BUFFER_CURVATURE_UNIT, surfaceGeometryFramebuffer->getCurvatureTexture());
        }
        if (lowCurvatureNormalFramebuffer) {
            // batch.setResourceTexture(DEFERRED_BUFFER_DIFFUSED_CURVATURE_UNIT, lowCurvatureNormalFramebuffer->getRenderBuffer(0));
            batch.setResourceTexture(DEFERRED_BUFFER_DIFFUSED_CURVATURE_UNIT, surfaceGeometryFramebuffer->getLowCurvatureTexture());
        }
        if (subsurfaceScatteringResource) {
            batch.setUniformBuffer(SCATTERING_PARAMETERS_BUFFER_SLOT, subsurfaceScatteringResource->getParametersBuffer());
            batch.setResourceTexture(SCATTERING_LUT_UNIT, subsurfaceScatteringResource->getScatteringTable());
            batch.setResourceTexture(SCATTERING_SPECULAR_UNIT, subsurfaceScatteringResource->getScatteringSpecular());
        }

        // Global directional light and ambient pass

        assert(deferredLightingEffect->getLightStage().lights.size() > 0);
        const auto& globalShadow = deferredLightingEffect->getLightStage().lights[0]->shadow;

        // Bind the shadow buffer
        batch.setResourceTexture(SHADOW_MAP_UNIT, globalShadow.map);

        auto& program = deferredLightingEffect->_shadowMapEnabled ? deferredLightingEffect->_directionalLightShadow : deferredLightingEffect->_directionalLight;
        LightLocationsPtr locations = deferredLightingEffect->_shadowMapEnabled ? deferredLightingEffect->_directionalLightShadowLocations : deferredLightingEffect->_directionalLightLocations;
        const auto& keyLight = deferredLightingEffect->_allocatedLights[deferredLightingEffect->_globalLights.front()];

        // Setup the global directional pass pipeline
        {
            if (deferredLightingEffect->_shadowMapEnabled) {
                if (keyLight->getAmbientMap()) {
                    program = deferredLightingEffect->_directionalSkyboxLightShadow;
                    locations = deferredLightingEffect->_directionalSkyboxLightShadowLocations;
                } else {
                    program = deferredLightingEffect->_directionalAmbientSphereLightShadow;
                    locations = deferredLightingEffect->_directionalAmbientSphereLightShadowLocations;
                }
            } else {
                if (keyLight->getAmbientMap()) {
                    program = deferredLightingEffect->_directionalSkyboxLight;
                    locations = deferredLightingEffect->_directionalSkyboxLightLocations;
                } else {
                    program = deferredLightingEffect->_directionalAmbientSphereLight;
                    locations = deferredLightingEffect->_directionalAmbientSphereLightLocations;
                }
            }

            if (locations->shadowTransformBuffer >= 0) {
                batch.setUniformBuffer(locations->shadowTransformBuffer, globalShadow.getBuffer());
            }
            batch.setPipeline(program);
        }

        // Adjust the texcoordTransform in the case we are rendering a sub region (mini mirror)
        auto textureFrameTransform = gpu::Framebuffer::evalSubregionTexcoordTransformCoefficients(deferredFramebuffer->getFrameSize(), args->_viewport);
        batch._glUniform4fv(locations->texcoordFrameTransform, 1, reinterpret_cast<const float*>(&textureFrameTransform));

        { // Setup the global lighting
            deferredLightingEffect->setupKeyLightBatch(batch, locations->lightBufferUnit, SKYBOX_MAP_UNIT);
        }

        batch.draw(gpu::TRIANGLE_STRIP, 4);

        if (keyLight->getAmbientMap()) {
            batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);
        }
        batch.setResourceTexture(SHADOW_MAP_UNIT, nullptr);
    });

}
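The nested if/else above picks one of four directional-light pipelines depending on whether the shadow map is enabled and whether the key light carries an ambient (skybox) map. The same choice restated as a pure function; the pipeline arguments stand for the members read above, and the helper itself is an illustrative sketch rather than part of this change:

// Sketch only: the four-way pipeline choice expressed over the two flags.
static gpu::PipelinePointer pickDirectionalPipeline(bool shadowEnabled, bool hasAmbientMap,
        const gpu::PipelinePointer& skyboxShadow, const gpu::PipelinePointer& ambientSphereShadow,
        const gpu::PipelinePointer& skybox, const gpu::PipelinePointer& ambientSphere) {
    if (shadowEnabled) {
        return hasAmbientMap ? skyboxShadow : ambientSphereShadow;
    }
    return hasAmbientMap ? skybox : ambientSphere;
}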
void RenderDeferredLocals::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext,
    const DeferredFrameTransformPointer& frameTransform,
    const DeferredFramebufferPointer& deferredFramebuffer,
    const LightingModelPointer& lightingModel) {

    bool points = lightingModel->isPointLightEnabled();
    bool spots = lightingModel->isSpotLightEnabled();

    if (!points && !spots) {
        return;
    }
    auto args = renderContext->args;
    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {

        // The main viewport is assumed to be the mono viewport (or the 2 stereo faces side by side within that viewport)
        auto monoViewport = args->_viewport;

        // The view frustum is the mono frustum base
        auto viewFrustum = args->getViewFrustum();

        // Eval the mono projection
        mat4 monoProjMat;
        viewFrustum.evalProjectionMatrix(monoProjMat);

        // The mono view transform
        Transform monoViewTransform;
        viewFrustum.evalViewTransform(monoViewTransform);

        // The mono view matrix coming from the mono view transform
        glm::mat4 monoViewMat;
        monoViewTransform.getMatrix(monoViewMat);

        auto geometryCache = DependencyManager::get<GeometryCache>();

        auto eyePoint = viewFrustum.getPosition();
        float nearRadius = glm::distance(eyePoint, viewFrustum.getNearTopLeft());

        auto deferredLightingEffect = DependencyManager::get<DeferredLightingEffect>();

        // Render in this side's viewport
        batch.setViewportTransform(monoViewport);
        batch.setStateScissorRect(monoViewport);

        // enlarge the scales slightly to account for tessellation
        const float SCALE_EXPANSION = 0.05f;

        auto textureFrameTransform = gpu::Framebuffer::evalSubregionTexcoordTransformCoefficients(deferredFramebuffer->getFrameSize(), monoViewport);

        batch.setProjectionTransform(monoProjMat);
        batch.setViewTransform(monoViewTransform);

        // Splat Point lights
        if (points && !deferredLightingEffect->_pointLights.empty()) {
            // Point light pipeline
            batch.setPipeline(deferredLightingEffect->_pointLight);
            batch._glUniform4fv(deferredLightingEffect->_pointLightLocations->texcoordFrameTransform, 1, reinterpret_cast<const float*>(&textureFrameTransform));

            for (auto lightID : deferredLightingEffect->_pointLights) {
                auto& light = deferredLightingEffect->_allocatedLights[lightID];
                // IN DEBUG: light->setShowContour(true);
                batch.setUniformBuffer(deferredLightingEffect->_pointLightLocations->lightBufferUnit, light->getSchemaBuffer());

                float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
                glm::vec4 sphereParam(expandedRadius, 0.0f, 0.0f, 1.0f);

                // TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume,
                // we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
                if (glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius) {
                    sphereParam.w = 0.0f;
                    batch._glUniform4fv(deferredLightingEffect->_pointLightLocations->sphereParam, 1, reinterpret_cast<const float*>(&sphereParam));
                    batch.draw(gpu::TRIANGLE_STRIP, 4);
                } else {
                    sphereParam.w = 1.0f;
                    batch._glUniform4fv(deferredLightingEffect->_pointLightLocations->sphereParam, 1, reinterpret_cast<const float*>(&sphereParam));

                    Transform model;
                    model.setTranslation(glm::vec3(light->getPosition().x, light->getPosition().y, light->getPosition().z));
                    batch.setModelTransform(model.postScale(expandedRadius));
                    batch._glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
                    geometryCache->renderSphere(batch);
                }
            }
        }

        // Splat spot lights
        if (spots && !deferredLightingEffect->_spotLights.empty()) {
            // Spot light pipeline
            batch.setPipeline(deferredLightingEffect->_spotLight);
            batch._glUniform4fv(deferredLightingEffect->_spotLightLocations->texcoordFrameTransform, 1, reinterpret_cast<const float*>(&textureFrameTransform));

            // Spot mesh
            auto mesh = deferredLightingEffect->getSpotLightMesh();
            batch.setIndexBuffer(mesh->getIndexBuffer());
            batch.setInputBuffer(0, mesh->getVertexBuffer());
            batch.setInputFormat(mesh->getVertexFormat());
            auto& conePart = mesh->getPartBuffer().get<model::Mesh::Part>(0);

            for (auto lightID : deferredLightingEffect->_spotLights) {
                auto light = deferredLightingEffect->_allocatedLights[lightID];
                // IN DEBUG:
                // light->setShowContour(true);
                batch.setUniformBuffer(deferredLightingEffect->_spotLightLocations->lightBufferUnit, light->getSchemaBuffer());

                auto eyeLightPos = eyePoint - light->getPosition();
                auto eyeHalfPlaneDistance = glm::dot(eyeLightPos, light->getDirection());

                const float TANGENT_LENGTH_SCALE = 0.666f;
                glm::vec4 coneParam(light->getSpotAngleCosSin(), TANGENT_LENGTH_SCALE * tanf(0.5f * light->getSpotAngle()), 1.0f);

                float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
                // TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume,
                // we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
                const float OVER_CONSERVATIVE_SCALE = 1.1f;
                if ((eyeHalfPlaneDistance > -nearRadius) &&
                    (glm::distance(eyePoint, glm::vec3(light->getPosition())) < (expandedRadius * OVER_CONSERVATIVE_SCALE) + nearRadius)) {
                    coneParam.w = 0.0f;
                    batch._glUniform4fv(deferredLightingEffect->_spotLightLocations->coneParam, 1, reinterpret_cast<const float*>(&coneParam));
                    batch.draw(gpu::TRIANGLE_STRIP, 4);
                } else {
                    coneParam.w = 1.0f;
                    batch._glUniform4fv(deferredLightingEffect->_spotLightLocations->coneParam, 1, reinterpret_cast<const float*>(&coneParam));

                    Transform model;
                    model.setTranslation(light->getPosition());
                    model.postRotate(light->getOrientation());
                    model.postScale(glm::vec3(expandedRadius, expandedRadius, expandedRadius));

                    batch.setModelTransform(model);

                    batch.drawIndexed(model::Mesh::topologyToPrimitive(conePart._topology), conePart._numIndices, conePart._startIndex);
                }
            }
        }
    });
}
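Both the point and spot loops above use the same "is the camera inside (or nearly inside) the light volume" test to decide between rasterizing the volume geometry and falling back to a full-screen triangle strip. A minimal restatement of that test, factored out for clarity (sketch only, not part of the diff):

#include <glm/glm.hpp>

// When the eye can be inside the expanded volume, the pass falls back to a full-screen
// triangle strip instead of rasterizing the sphere/cone geometry. nearRadius pads the
// comparison by the eye-to-near-plane-corner distance so the near plane cannot clip the volume.
static bool eyeInsideLightVolume(const glm::vec3& eyePoint, const glm::vec3& lightPos,
                                 float expandedRadius, float nearRadius) {
    return glm::distance(eyePoint, lightPos) < expandedRadius + nearRadius;
}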
void RenderDeferredCleanup::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
    auto args = renderContext->args;
    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        // Probably not necessary in the long run because the gpu layer would unbind this texture if used as render target
        batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, nullptr);

        batch.setResourceTexture(DEFERRED_BUFFER_CURVATURE_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_DIFFUSED_CURVATURE_UNIT, nullptr);
        batch.setResourceTexture(SCATTERING_LUT_UNIT, nullptr);
        batch.setResourceTexture(SCATTERING_SPECULAR_UNIT, nullptr);

        batch.setUniformBuffer(SCATTERING_PARAMETERS_BUFFER_SLOT, nullptr);
        // batch.setUniformBuffer(LIGHTING_MODEL_BUFFER_SLOT, nullptr);
        batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, nullptr);
    });

    auto deferredLightingEffect = DependencyManager::get<DeferredLightingEffect>();

    // End of the Lighting pass
    if (!deferredLightingEffect->_pointLights.empty()) {
        deferredLightingEffect->_pointLights.clear();
    }
    if (!deferredLightingEffect->_spotLights.empty()) {
        deferredLightingEffect->_spotLights.clear();
    }
}

RenderDeferred::RenderDeferred() {
}

void RenderDeferred::configure(const Config& config) {
}

void RenderDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
    auto deferredTransform = inputs.get0();
    auto deferredFramebuffer = inputs.get1();
    auto lightingModel = inputs.get2();
    auto surfaceGeometryFramebuffer = inputs.get3();
    auto lowCurvatureNormalFramebuffer = inputs.get4();
    auto subsurfaceScatteringResource = inputs.get5();
    auto args = renderContext->args;

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        _gpuTimer.begin(batch);
    });

    setupJob.run(sceneContext, renderContext, deferredTransform, deferredFramebuffer, lightingModel, surfaceGeometryFramebuffer, lowCurvatureNormalFramebuffer, subsurfaceScatteringResource);

    lightsJob.run(sceneContext, renderContext, deferredTransform, deferredFramebuffer, lightingModel);

    cleanupJob.run(sceneContext, renderContext);

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        _gpuTimer.end(batch);
    });

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
    config->gpuTime = _gpuTimer.getAverage();
}
|
|
|
@ -21,13 +21,21 @@
|
|||
#include "model/Geometry.h"
|
||||
|
||||
#include "render/Context.h"
|
||||
#include <render/CullTask.h>
|
||||
|
||||
#include "DeferredFrameTransform.h"
|
||||
#include "DeferredFramebuffer.h"
|
||||
#include "LightingModel.h"
|
||||
|
||||
#include "LightStage.h"
|
||||
#include "SurfaceGeometryPass.h"
|
||||
#include "SubsurfaceScattering.h"
|
||||
|
||||
class RenderArgs;
|
||||
struct LightLocations;
|
||||
using LightLocationsPtr = std::shared_ptr<LightLocations>;
|
||||
/// Handles deferred lighting for the bits that require it (voxels...)
|
||||
|
||||
// THis is where we currently accumulate the local lights, let s change that sooner than later
|
||||
class DeferredLightingEffect : public Dependency {
|
||||
SINGLETON_DEPENDENCY
|
||||
|
||||
|
@ -42,9 +50,6 @@ public:
|
|||
void addSpotLight(const glm::vec3& position, float radius, const glm::vec3& color = glm::vec3(1.0f, 1.0f, 1.0f),
|
||||
float intensity = 0.5f, float falloffRadius = 0.01f,
|
||||
const glm::quat& orientation = glm::quat(), float exponent = 0.0f, float cutoff = PI);
|
||||
|
||||
void prepare(RenderArgs* args);
|
||||
void render(const render::RenderContextPointer& renderContext);
|
||||
|
||||
void setupKeyLightBatch(gpu::Batch& batch, int lightBufferUnit, int skyboxCubemapUnit);
|
||||
|
||||
|
@ -95,19 +100,100 @@ private:
|
|||
std::vector<int> _globalLights;
|
||||
std::vector<int> _pointLights;
|
||||
std::vector<int> _spotLights;
|
||||
|
||||
friend class RenderDeferredSetup;
|
||||
friend class RenderDeferredLocals;
|
||||
friend class RenderDeferredCleanup;
|
||||
};
|
||||
|
||||
// Class describing the uniform buffer with all the parameters common to the deferred shaders
|
||||
class DeferredTransform {
|
||||
public:
|
||||
glm::mat4 projection;
|
||||
glm::mat4 viewInverse;
|
||||
float stereoSide { 0.f };
|
||||
float spareA, spareB, spareC;
|
||||
class PreparePrimaryFramebuffer {
|
||||
public:
|
||||
using JobModel = render::Job::ModelO<PreparePrimaryFramebuffer, gpu::FramebufferPointer>;
|
||||
|
||||
DeferredTransform() {}
|
||||
};
|
||||
typedef gpu::BufferView UniformBufferView;
|
||||
UniformBufferView _deferredTransformBuffer[2];
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, gpu::FramebufferPointer& primaryFramebuffer);
|
||||
|
||||
gpu::FramebufferPointer _primaryFramebuffer;
|
||||
};
|
||||
|
||||
class PrepareDeferred {
|
||||
public:
|
||||
// Inputs: primaryFramebuffer and lightingModel
|
||||
using Inputs = render::VaryingSet2 <gpu::FramebufferPointer, LightingModelPointer>;
|
||||
// Output: DeferredFramebuffer, LightingFramebuffer
|
||||
using Outputs = render::VaryingSet2<DeferredFramebufferPointer, gpu::FramebufferPointer>;
|
||||
|
||||
using JobModel = render::Job::ModelIO<PrepareDeferred, Inputs, Outputs>;
|
||||
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs);
|
||||
|
||||
DeferredFramebufferPointer _deferredFramebuffer;
|
||||
};
|
||||
|
||||
class RenderDeferredSetup {
|
||||
public:
|
||||
// using JobModel = render::Job::ModelI<RenderDeferredSetup, DeferredFrameTransformPointer>;
|
||||
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext,
|
||||
const DeferredFrameTransformPointer& frameTransform,
|
||||
const DeferredFramebufferPointer& deferredFramebuffer,
|
||||
const LightingModelPointer& lightingModel,
|
||||
const SurfaceGeometryFramebufferPointer& surfaceGeometryFramebuffer,
|
||||
const gpu::FramebufferPointer& lowCurvatureNormalFramebuffer,
|
||||
const SubsurfaceScatteringResourcePointer& subsurfaceScatteringResource);
|
||||
};
|
||||
|
||||
class RenderDeferredLocals {
|
||||
public:
|
||||
using JobModel = render::Job::ModelI<RenderDeferredLocals, DeferredFrameTransformPointer>;
|
||||
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext,
|
||||
const DeferredFrameTransformPointer& frameTransform,
|
||||
const DeferredFramebufferPointer& deferredFramebuffer,
|
||||
const LightingModelPointer& lightingModel);
|
||||
};
|
||||
|
||||
|
||||
class RenderDeferredCleanup {
|
||||
public:
|
||||
using JobModel = render::Job::Model<RenderDeferredCleanup>;
|
||||
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);
|
||||
};
|
||||
|
||||
|
||||
class RenderDeferredConfig : public render::Job::Config {
|
||||
Q_OBJECT
|
||||
Q_PROPERTY(double gpuTime READ getGpuTime)
|
||||
public:
|
||||
RenderDeferredConfig() : render::Job::Config(true) {}
|
||||
|
||||
double getGpuTime() { return gpuTime; }
|
||||
|
||||
double gpuTime{ 0.0 };
|
||||
|
||||
signals:
|
||||
void dirty();
|
||||
};
|
||||
|
||||
|
||||
class RenderDeferred {
|
||||
public:
|
||||
using Inputs = render::VaryingSet6 < DeferredFrameTransformPointer, DeferredFramebufferPointer, LightingModelPointer, SurfaceGeometryFramebufferPointer, gpu::FramebufferPointer, SubsurfaceScatteringResourcePointer>;
|
||||
using Config = RenderDeferredConfig;
|
||||
using JobModel = render::Job::ModelI<RenderDeferred, Inputs, Config>;
|
||||
|
||||
RenderDeferred();
|
||||
|
||||
void configure(const Config& config);
|
||||
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs);
|
||||
|
||||
RenderDeferredSetup setupJob;
|
||||
RenderDeferredLocals lightsJob;
|
||||
RenderDeferredCleanup cleanupJob;
|
||||
|
||||
protected:
|
||||
gpu::RangeTimer _gpuTimer;
|
||||
};
|
||||
|
||||
#endif // hifi_DeferredLightingEffect_h
|
||||
|
|
123  libraries/render-utils/src/DeferredTransform.slh  Normal file
|
@@ -0,0 +1,123 @@
<!
//  DeferredTransform.slh
//  libraries/render-utils/src
//
//  Created by Sam Gateau on 6/2/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
!>
<@if not DEFERRED_TRANSFORM_SLH@>
<@def DEFERRED_TRANSFORM_SLH@>

<@func declareDeferredFrameTransform()@>

struct DeferredFrameTransform {
    vec4 _pixelInfo;
    vec4 _invPixelInfo;
    vec4 _depthInfo;
    vec4 _stereoInfo;
    mat4 _projection[2];
    mat4 _projectionMono;
    mat4 _viewInverse;
    mat4 _view;
};

uniform deferredFrameTransformBuffer {
    DeferredFrameTransform frameTransform;
};

DeferredFrameTransform getDeferredFrameTransform() {
    return frameTransform;
}

vec2 getWidthHeight(int resolutionLevel) {
    return vec2(ivec2(frameTransform._pixelInfo.zw) >> resolutionLevel);
}

vec2 getInvWidthHeight() {
    return frameTransform._invPixelInfo.xy;
}

float getProjScaleEye() {
    return frameTransform._projection[0][1][1];
}

float getProjScale(int resolutionLevel) {
    return getWidthHeight(resolutionLevel).y * frameTransform._projection[0][1][1] * 0.5;
}
mat4 getProjection(int side) {
    return frameTransform._projection[side];
}
mat4 getProjectionMono() {
    return frameTransform._projectionMono;
}

// positive near distance of the projection
float getProjectionNear() {
    float planeC = frameTransform._projection[0][2][3] + frameTransform._projection[0][2][2];
    float planeD = frameTransform._projection[0][3][2];
    return planeD / planeC;
}

// positive far distance of the projection
float getPosLinearDepthFar() {
    return -frameTransform._depthInfo.z;
}

mat4 getViewInverse() {
    return frameTransform._viewInverse;
}

mat4 getView() {
    return frameTransform._view;
}

bool isStereo() {
    return frameTransform._stereoInfo.x > 0.0f;
}

float getStereoSideWidth(int resolutionLevel) {
    return float(int(frameTransform._stereoInfo.y) >> resolutionLevel);
}

ivec4 getStereoSideInfo(int xPos, int resolutionLevel) {
    int sideWidth = int(getStereoSideWidth(resolutionLevel));
    return ivec4(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth, isStereo());
}

float evalZeyeFromZdb(float depth) {
    return frameTransform._depthInfo.x / (depth * frameTransform._depthInfo.y + frameTransform._depthInfo.z);
}

vec3 evalEyeNormal(vec3 C) {
    //return normalize(cross(dFdy(C), dFdx(C)));
    return normalize(cross(dFdx(C), dFdy(C)));
}

vec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {
    // compute the view space position using the depth
    // basically manually pick the proj matrix components to do the inverse
    float Xe = (-Zeye * (texcoord.x * 2.0 - 1.0) - Zeye * frameTransform._projection[side][2][0] - frameTransform._projection[side][3][0]) / frameTransform._projection[side][0][0];
    float Ye = (-Zeye * (texcoord.y * 2.0 - 1.0) - Zeye * frameTransform._projection[side][2][1] - frameTransform._projection[side][3][1]) / frameTransform._projection[side][1][1];
    return vec3(Xe, Ye, Zeye);
}

ivec2 getPixelPosTexcoordPosAndSide(in vec2 glFragCoord, out ivec2 pixelPos, out vec2 texcoordPos, out ivec4 stereoSide) {
    ivec2 fragPos = ivec2(glFragCoord.xy);

    stereoSide = getStereoSideInfo(fragPos.x, 0);

    pixelPos = fragPos;
    pixelPos.x -= stereoSide.y;

    texcoordPos = (vec2(pixelPos) + 0.5) * getInvWidthHeight();

    return fragPos;
}

<@endfunc@>

<@endif@>
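A quick check of getProjectionNear() above, assuming a standard OpenGL-style perspective matrix \(P\) with near \(n\) and far \(f\), indexed column-first as GLSL does (so \(P[2][2] = -\tfrac{f+n}{f-n}\), \(P[2][3] = -1\), \(P[3][2] = -\tfrac{2fn}{f-n}\)); a reversed-Z or infinite-far projection would need a different derivation:

\[
\mathrm{planeC} = P[2][3] + P[2][2] = -1 - \frac{f+n}{f-n} = \frac{-2f}{f-n}, \qquad
\mathrm{planeD} = P[3][2] = \frac{-2fn}{f-n}, \qquad
\frac{\mathrm{planeD}}{\mathrm{planeC}} = n,
\]

so the function does return the positive near distance, matching its comment.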
|
|
@ -33,20 +33,8 @@ void FramebufferCache::setFrameBufferSize(QSize frameBufferSize) {
|
|||
//If the size changed, we need to delete our FBOs
|
||||
if (_frameBufferSize != frameBufferSize) {
|
||||
_frameBufferSize = frameBufferSize;
|
||||
_primaryFramebuffer.reset();
|
||||
_primaryDepthTexture.reset();
|
||||
_primaryColorTexture.reset();
|
||||
_deferredFramebuffer.reset();
|
||||
_deferredFramebufferDepthColor.reset();
|
||||
_deferredColorTexture.reset();
|
||||
_deferredNormalTexture.reset();
|
||||
_deferredSpecularTexture.reset();
|
||||
_selfieFramebuffer.reset();
|
||||
_cachedFramebuffers.clear();
|
||||
_lightingTexture.reset();
|
||||
_lightingFramebuffer.reset();
|
||||
_depthPyramidFramebuffer.reset();
|
||||
_depthPyramidTexture.reset();
|
||||
_occlusionFramebuffer.reset();
|
||||
_occlusionTexture.reset();
|
||||
_occlusionBlurredFramebuffer.reset();
|
||||
|
@ -55,41 +43,11 @@ void FramebufferCache::setFrameBufferSize(QSize frameBufferSize) {
|
|||
}
|
||||
|
||||
void FramebufferCache::createPrimaryFramebuffer() {
|
||||
_primaryFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_deferredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_deferredFramebufferDepthColor = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
|
||||
auto colorFormat = gpu::Element::COLOR_SRGBA_32;
|
||||
auto linearFormat = gpu::Element::COLOR_RGBA_32;
|
||||
auto width = _frameBufferSize.width();
|
||||
auto height = _frameBufferSize.height();
|
||||
|
||||
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
|
||||
_primaryColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
|
||||
|
||||
_primaryFramebuffer->setRenderBuffer(0, _primaryColorTexture);
|
||||
|
||||
_deferredColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
|
||||
|
||||
_deferredNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(linearFormat, width, height, defaultSampler));
|
||||
_deferredSpecularTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
|
||||
|
||||
_deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture);
|
||||
_deferredFramebuffer->setRenderBuffer(1, _deferredNormalTexture);
|
||||
_deferredFramebuffer->setRenderBuffer(2, _deferredSpecularTexture);
|
||||
|
||||
_deferredFramebufferDepthColor->setRenderBuffer(0, _deferredColorTexture);
|
||||
|
||||
// auto depthFormat = gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::DEPTH);
|
||||
auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
|
||||
_primaryDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, width, height, defaultSampler));
|
||||
|
||||
_primaryFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
_deferredFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
_deferredFramebufferDepthColor->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
|
||||
_selfieFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
auto tex = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width * 0.5, height * 0.5, defaultSampler));
|
||||
|
@ -97,20 +55,8 @@ void FramebufferCache::createPrimaryFramebuffer() {
|
|||
|
||||
auto smoothSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR);
|
||||
|
||||
_lightingTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10), width, height, defaultSampler));
|
||||
_lightingFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_lightingFramebuffer->setRenderBuffer(0, _lightingTexture);
|
||||
_lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
|
||||
// For AO:
|
||||
auto pointMipSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_POINT);
|
||||
_depthPyramidTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), width, height, pointMipSampler));
|
||||
_depthPyramidFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_depthPyramidFramebuffer->setRenderBuffer(0, _depthPyramidTexture);
|
||||
_depthPyramidFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
|
||||
resizeAmbientOcclusionBuffers();
|
||||
}
|
||||
|
||||
|
@ -126,88 +72,19 @@ void FramebufferCache::resizeAmbientOcclusionBuffers() {
|
|||
auto height = _frameBufferSize.height() >> _AOResolutionLevel;
|
||||
auto colorFormat = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGB);
|
||||
auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
|
||||
auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
|
||||
// auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
|
||||
|
||||
_occlusionTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
|
||||
_occlusionFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_occlusionFramebuffer->setRenderBuffer(0, _occlusionTexture);
|
||||
_occlusionFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
// _occlusionFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
|
||||
_occlusionBlurredTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
|
||||
_occlusionBlurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_occlusionBlurredFramebuffer->setRenderBuffer(0, _occlusionBlurredTexture);
|
||||
_occlusionBlurredFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
// _occlusionBlurredFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getPrimaryFramebuffer() {
|
||||
if (!_primaryFramebuffer) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _primaryFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getPrimaryDepthTexture() {
|
||||
if (!_primaryDepthTexture) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _primaryDepthTexture;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getPrimaryColorTexture() {
|
||||
if (!_primaryColorTexture) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _primaryColorTexture;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getDeferredFramebuffer() {
|
||||
if (!_deferredFramebuffer) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _deferredFramebuffer;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getDeferredFramebufferDepthColor() {
|
||||
if (!_deferredFramebufferDepthColor) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _deferredFramebufferDepthColor;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getDeferredColorTexture() {
|
||||
if (!_deferredColorTexture) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _deferredColorTexture;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getDeferredNormalTexture() {
|
||||
if (!_deferredNormalTexture) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _deferredNormalTexture;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getDeferredSpecularTexture() {
|
||||
if (!_deferredSpecularTexture) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _deferredSpecularTexture;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getLightingFramebuffer() {
|
||||
if (!_lightingFramebuffer) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _lightingFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getLightingTexture() {
|
||||
if (!_lightingTexture) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _lightingTexture;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getFramebuffer() {
|
||||
if (_cachedFramebuffers.isEmpty()) {
|
||||
|
@ -231,20 +108,6 @@ gpu::FramebufferPointer FramebufferCache::getSelfieFramebuffer() {
|
|||
return _selfieFramebuffer;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer FramebufferCache::getDepthPyramidFramebuffer() {
|
||||
if (!_depthPyramidFramebuffer) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _depthPyramidFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer FramebufferCache::getDepthPyramidTexture() {
|
||||
if (!_depthPyramidTexture) {
|
||||
createPrimaryFramebuffer();
|
||||
}
|
||||
return _depthPyramidTexture;
|
||||
}
|
||||
|
||||
void FramebufferCache::setAmbientOcclusionResolutionLevel(int level) {
|
||||
const int MAX_AO_RESOLUTION_LEVEL = 4;
|
||||
level = std::max(0, std::min(level, MAX_AO_RESOLUTION_LEVEL));
|
||||
|
|
|
@ -30,32 +30,11 @@ public:
|
|||
void setFrameBufferSize(QSize frameBufferSize);
|
||||
const QSize& getFrameBufferSize() const { return _frameBufferSize; }
|
||||
|
||||
/// Returns a pointer to the primary framebuffer object. This render target includes a depth component, and is
|
||||
/// used for scene rendering.
|
||||
gpu::FramebufferPointer getPrimaryFramebuffer();
|
||||
|
||||
gpu::TexturePointer getPrimaryDepthTexture();
|
||||
gpu::TexturePointer getPrimaryColorTexture();
|
||||
|
||||
gpu::FramebufferPointer getDeferredFramebuffer();
|
||||
gpu::FramebufferPointer getDeferredFramebufferDepthColor();
|
||||
|
||||
gpu::TexturePointer getDeferredColorTexture();
|
||||
gpu::TexturePointer getDeferredNormalTexture();
|
||||
gpu::TexturePointer getDeferredSpecularTexture();
|
||||
|
||||
gpu::FramebufferPointer getDepthPyramidFramebuffer();
|
||||
gpu::TexturePointer getDepthPyramidTexture();
|
||||
|
||||
void setAmbientOcclusionResolutionLevel(int level);
|
||||
gpu::FramebufferPointer getOcclusionFramebuffer();
|
||||
gpu::TexturePointer getOcclusionTexture();
|
||||
gpu::FramebufferPointer getOcclusionBlurredFramebuffer();
|
||||
gpu::TexturePointer getOcclusionBlurredTexture();
|
||||
|
||||
|
||||
gpu::TexturePointer getLightingTexture();
|
||||
gpu::FramebufferPointer getLightingFramebuffer();
|
||||
|
||||
/// Returns the framebuffer object used to render selfie maps;
|
||||
gpu::FramebufferPointer getSelfieFramebuffer();
|
||||
|
@ -73,29 +52,10 @@ private:
|
|||
|
||||
void createPrimaryFramebuffer();
|
||||
|
||||
gpu::FramebufferPointer _primaryFramebuffer;
|
||||
|
||||
gpu::TexturePointer _primaryDepthTexture;
|
||||
gpu::TexturePointer _primaryColorTexture;
|
||||
|
||||
gpu::FramebufferPointer _deferredFramebuffer;
|
||||
gpu::FramebufferPointer _deferredFramebufferDepthColor;
|
||||
|
||||
gpu::TexturePointer _deferredColorTexture;
|
||||
gpu::TexturePointer _deferredNormalTexture;
|
||||
gpu::TexturePointer _deferredSpecularTexture;
|
||||
|
||||
gpu::TexturePointer _lightingTexture;
|
||||
gpu::FramebufferPointer _lightingFramebuffer;
|
||||
|
||||
gpu::FramebufferPointer _shadowFramebuffer;
|
||||
|
||||
gpu::FramebufferPointer _selfieFramebuffer;
|
||||
|
||||
gpu::FramebufferPointer _depthPyramidFramebuffer;
|
||||
gpu::TexturePointer _depthPyramidTexture;
|
||||
|
||||
|
||||
gpu::FramebufferPointer _occlusionFramebuffer;
|
||||
gpu::TexturePointer _occlusionTexture;
|
||||
|
||||
|
|
115  libraries/render-utils/src/LightAmbient.slh  Normal file
|
@ -0,0 +1,115 @@
|
|||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 7/5/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
|
||||
<@func declareSkyboxMap()@>
|
||||
// declareSkyboxMap
|
||||
uniform samplerCube skyboxMap;
|
||||
|
||||
vec4 evalSkyboxLight(vec3 direction, float lod) {
|
||||
// textureQueryLevels is not available until #430, so we require explicit lod
|
||||
// float mipmapLevel = lod * textureQueryLevels(skyboxMap);
|
||||
return textureLod(skyboxMap, direction, lod);
|
||||
}
|
||||
<@endfunc@>
|
||||
|
||||
<@func declareEvalAmbientSpecularIrradiance(supportAmbientSphere, supportAmbientMap, supportIfAmbientMapElseAmbientSphere)@>
|
||||
|
||||
vec3 fresnelSchlickAmbient(vec3 fresnelColor, vec3 lightDir, vec3 halfDir, float gloss) {
|
||||
return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * pow(1.0 - clamp(dot(lightDir, halfDir), 0.0, 1.0), 5);
|
||||
}
|
||||
|
||||
<@if supportAmbientMap@>
|
||||
<$declareSkyboxMap()$>
|
||||
<@endif@>
|
||||
|
||||
vec3 evalAmbientSpecularIrradiance(Light light, vec3 fragEyeDir, vec3 fragNormal, float roughness, vec3 fresnel) {
|
||||
vec3 direction = -reflect(fragEyeDir, fragNormal);
|
||||
vec3 ambientFresnel = fresnelSchlickAmbient(fresnel, fragEyeDir, fragNormal, 1 - roughness);
|
||||
vec3 specularLight;
|
||||
<@if supportIfAmbientMapElseAmbientSphere@>
|
||||
if (getLightHasAmbientMap(light))
|
||||
<@endif@>
|
||||
<@if supportAmbientMap@>
|
||||
{
|
||||
float levels = getLightAmbientMapNumMips(light);
|
||||
float lod = min(floor((roughness)* levels), levels);
|
||||
specularLight = evalSkyboxLight(direction, lod).xyz;
|
||||
}
|
||||
<@endif@>
|
||||
<@if supportIfAmbientMapElseAmbientSphere@>
|
||||
else
|
||||
<@endif@>
|
||||
<@if supportAmbientSphere@>
|
||||
{
|
||||
specularLight = evalSphericalLight(getLightAmbientSphere(light), direction).xyz;
|
||||
}
|
||||
<@endif@>
|
||||
|
||||
return specularLight * ambientFresnel;
|
||||
}
|
||||
<@endfunc@>
|
||||
|
||||
<@func declareLightingAmbient(supportAmbientSphere, supportAmbientMap, supportIfAmbientMapElseAmbientSphere, supportScattering)@>
|
||||
|
||||
<$declareEvalAmbientSpecularIrradiance($supportAmbientSphere$, $supportAmbientMap$, $supportIfAmbientMapElseAmbientSphere$)$>
|
||||
|
||||
<@if supportScattering@>
|
||||
float curvatureAO(in float k) {
|
||||
return 1.0f - (0.0022f * k * k) + (0.0776f * k) + 0.7369;
|
||||
}
|
||||
<@endif@>
|
||||
|
||||
void evalLightingAmbient(out vec3 diffuse, out vec3 specular, Light light, vec3 eyeDir, vec3 normal,
|
||||
float roughness, float metallic, vec3 fresnel, vec3 albedo, float obscurance
|
||||
<@if supportScattering@>
|
||||
, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature
|
||||
<@endif@>
|
||||
) {
|
||||
|
||||
|
||||
// Diffuse from ambient
|
||||
diffuse = (1 - metallic) * evalSphericalLight(getLightAmbientSphere(light), normal).xyz;
|
||||
|
||||
// Specular highlight from ambient
|
||||
specular = evalAmbientSpecularIrradiance(light, eyeDir, normal, roughness, fresnel) * obscurance * getLightAmbientIntensity(light);
|
||||
|
||||
|
||||
<@if supportScattering@>
|
||||
float ambientOcclusion = curvatureAO(lowNormalCurvature.w * 20.0f) * 0.5f;
|
||||
float ambientOcclusionHF = curvatureAO(midNormalCurvature.w * 8.0f) * 0.5f;
|
||||
ambientOcclusion = min(ambientOcclusion, ambientOcclusionHF);
|
||||
|
||||
obscurance = min(obscurance, ambientOcclusion);
|
||||
|
||||
if (scattering * isScatteringEnabled() > 0.0) {
|
||||
|
||||
// Diffuse from ambient
|
||||
diffuse = evalSphericalLight(getLightAmbientSphere(light), lowNormalCurvature.xyz).xyz;
|
||||
|
||||
specular = vec3(0.0);
|
||||
}
|
||||
<@endif@>
|
||||
|
||||
if (!(isObscuranceEnabled() > 0.0)) {
|
||||
obscurance = 1.0;
|
||||
}
|
||||
|
||||
float lightEnergy = obscurance * getLightAmbientIntensity(light);
|
||||
|
||||
if (isAlbedoEnabled() > 0.0) {
|
||||
diffuse *= albedo;
|
||||
}
|
||||
|
||||
diffuse *= lightEnergy * isDiffuseEnabled() * isAmbientEnabled();
|
||||
specular *= lightEnergy * isSpecularEnabled() * isAmbientEnabled();
|
||||
}
|
||||
|
||||
<@endfunc@>
|
36  libraries/render-utils/src/LightDirectional.slh  Normal file
|
@ -0,0 +1,36 @@
|
|||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 7/5/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
|
||||
<@func declareLightingDirectional(supportScattering)@>
|
||||
|
||||
void evalLightingDirectional(out vec3 diffuse, out vec3 specular, Light light,
|
||||
vec3 eyeDir, vec3 normal, float roughness,
|
||||
float metallic, vec3 fresnel, vec3 albedo, float shadow
|
||||
<@if supportScattering@>
|
||||
, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature
|
||||
<@endif@>
|
||||
) {
|
||||
|
||||
// Attenuation
|
||||
vec3 lightEnergy = shadow * getLightColor(light) * getLightIntensity(light);
|
||||
|
||||
evalFragShading(diffuse, specular, normal, -getLightDirection(light), eyeDir, metallic, fresnel, roughness, albedo
|
||||
<@if supportScattering@>
|
||||
,scattering, midNormalCurvature, lowNormalCurvature
|
||||
<@endif@>
|
||||
);
|
||||
|
||||
diffuse *= lightEnergy * isDiffuseEnabled() * isDirectionalEnabled();
|
||||
specular *= lightEnergy * isSpecularEnabled() * isDirectionalEnabled();
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
52  libraries/render-utils/src/LightPoint.slh  Normal file
|
@ -0,0 +1,52 @@
|
|||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 7/5/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
|
||||
<@func declareLightingPoint(supportScattering)@>
|
||||
|
||||
void evalLightingPoint(out vec3 diffuse, out vec3 specular, Light light,
|
||||
vec3 fragLightVec, vec3 fragEyeDir, vec3 normal, float roughness,
|
||||
float metallic, vec3 fresnel, vec3 albedo, float shadow
|
||||
<@if supportScattering@>
|
||||
, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature
|
||||
<@endif@>
|
||||
) {
|
||||
|
||||
// Allright we re valid in the volume
|
||||
float fragLightDistance = length(fragLightVec);
|
||||
vec3 fragLightDir = fragLightVec / fragLightDistance;
|
||||
|
||||
// Eval attenuation
|
||||
float radialAttenuation = evalLightAttenuation(light, fragLightDistance);
|
||||
vec3 lightEnergy = radialAttenuation * shadow * getLightColor(light) * getLightIntensity(light);
|
||||
|
||||
// Eval shading
|
||||
evalFragShading(diffuse, specular, normal, fragLightDir, fragEyeDir, metallic, fresnel, roughness, albedo
|
||||
<@if supportScattering@>
|
||||
,scattering, midNormalCurvature, lowNormalCurvature
|
||||
<@endif@>
|
||||
);
|
||||
|
||||
diffuse *= lightEnergy * isDiffuseEnabled() * isPointEnabled();
|
||||
specular *= lightEnergy * isSpecularEnabled() * isPointEnabled();
|
||||
|
||||
if (isShowLightContour() > 0.0) {
|
||||
// Show edge
|
||||
float edge = abs(2.0 * ((getLightRadius(light) - fragLightDistance) / (0.1)) - 1.0);
|
||||
if (edge < 1) {
|
||||
float edgeCoord = exp2(-8.0*edge*edge);
|
||||
diffuse = vec3(edgeCoord * edgeCoord * getLightShowContour(light) * getLightColor(light));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
|
56  libraries/render-utils/src/LightSpot.slh  Normal file
|
@ -0,0 +1,56 @@
|
|||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 7/5/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
|
||||
<@func declareLightingSpot(supportScattering)@>
|
||||
|
||||
void evalLightingSpot(out vec3 diffuse, out vec3 specular, Light light,
|
||||
vec4 fragLightDirLen, float cosSpotAngle, vec3 fragEyeDir, vec3 normal, float roughness,
|
||||
float metallic, vec3 fresnel, vec3 albedo, float shadow
|
||||
<@if supportScattering@>
|
||||
, float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature
|
||||
<@endif@>
|
||||
) {
|
||||
|
||||
// Allright we re valid in the volume
|
||||
float fragLightDistance = fragLightDirLen.w;
|
||||
vec3 fragLightDir = fragLightDirLen.xyz;
|
||||
|
||||
// Eval attenuation
|
||||
float radialAttenuation = evalLightAttenuation(light, fragLightDistance);
|
||||
float angularAttenuation = evalLightSpotAttenuation(light, cosSpotAngle);
|
||||
vec3 lightEnergy = angularAttenuation * radialAttenuation * shadow * getLightColor(light) * getLightIntensity(light);
|
||||
|
||||
// Eval shading
|
||||
evalFragShading(diffuse, specular, normal, fragLightDir, fragEyeDir, metallic, fresnel, roughness, albedo
|
||||
<@if supportScattering@>
|
||||
,scattering, midNormalCurvature, lowNormalCurvature
|
||||
<@endif@>
|
||||
);
|
||||
|
||||
diffuse *= lightEnergy * isDiffuseEnabled() * isSpotEnabled();
|
||||
specular *= lightEnergy * isSpecularEnabled() * isSpotEnabled();
|
||||
|
||||
if (isShowLightContour() > 0.0) {
|
||||
// Show edges
|
||||
float edgeDistR = (getLightRadius(light) - fragLightDistance);
|
||||
float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -getLightSpotOutsideNormal2(light));
|
||||
float edgeDist = min(edgeDistR, edgeDistS);
|
||||
float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);
|
||||
if (edge < 1) {
|
||||
float edgeCoord = exp2(-8.0*edge*edge);
|
||||
diffuse = vec3(edgeCoord * edgeCoord * getLightColor(light));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
|
|
@@ -88,8 +88,9 @@ const glm::mat4& LightStage::Shadow::getProjection() const {
}

const LightStage::LightPointer LightStage::addLight(model::LightPointer light) {
    Shadow stageShadow{light};
    LightPointer stageLight = std::make_shared<Light>(std::move(stageShadow));
    // Shadow stageShadow{light};
    LightPointer stageLight = std::make_shared<Light>(Shadow(light));
    stageLight->light = light;
    lights.push_back(stageLight);
    return stageLight;
}
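The addLight() hunk above replaces the named Shadow local that was std::move'd into make_shared with a Shadow temporary constructed directly in the call. A hypothetical reduction of the two forms with simplified stand-in types (not the real Light/Shadow classes):

#include <memory>
#include <utility>

struct Shadow { explicit Shadow(int lightId) : id(lightId) {} int id; };
struct Light  { explicit Light(Shadow&& s) : shadow(std::move(s)) {} Shadow shadow; };

// Before: a named, brace-initialized local, then moved into the shared_ptr's constructee.
std::shared_ptr<Light> addLightBefore(int lightId) {
    Shadow stageShadow{lightId};
    return std::make_shared<Light>(std::move(stageShadow));
}
// After: the temporary is constructed in place and forwarded straight through.
std::shared_ptr<Light> addLightAfter(int lightId) {
    return std::make_shared<Light>(Shadow(lightId));
}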
|
|
|
@@ -52,6 +52,8 @@ public:
        glm::float32 scale = 1 / MAP_SIZE;
    };
    UniformBufferView _schemaBuffer = nullptr;

    friend class Light;
};
using ShadowPointer = std::shared_ptr<Shadow>;
|
|
164  libraries/render-utils/src/LightingModel.cpp  Normal file
|
@ -0,0 +1,164 @@
|
|||
//
|
||||
// LightingModel.cpp
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 7/1/2016.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "LightingModel.h"
|
||||
|
||||
LightingModel::LightingModel() {
|
||||
Parameters parameters;
|
||||
_parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) ¶meters));
|
||||
}
|
||||
|
||||
void LightingModel::setUnlit(bool enable) {
|
||||
if (enable != isUnlitEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableUnlit = (float) enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isUnlitEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableUnlit;
|
||||
}
|
||||
|
||||
void LightingModel::setEmissive(bool enable) {
|
||||
if (enable != isEmissiveEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableEmissive = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isEmissiveEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableEmissive;
|
||||
}
|
||||
void LightingModel::setLightmap(bool enable) {
|
||||
if (enable != isLightmapEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableLightmap = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isLightmapEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableLightmap;
|
||||
}
|
||||
|
||||
void LightingModel::setBackground(bool enable) {
|
||||
if (enable != isBackgroundEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableBackground = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isBackgroundEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableBackground;
|
||||
}
|
||||
void LightingModel::setObscurance(bool enable) {
|
||||
if (enable != isObscuranceEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableObscurance = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isObscuranceEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableObscurance;
|
||||
}
|
||||
|
||||
void LightingModel::setScattering(bool enable) {
|
||||
if (enable != isScatteringEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableScattering = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isScatteringEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableScattering;
|
||||
}
|
||||
|
||||
void LightingModel::setDiffuse(bool enable) {
|
||||
if (enable != isDiffuseEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableDiffuse = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isDiffuseEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableDiffuse;
|
||||
}
|
||||
void LightingModel::setSpecular(bool enable) {
|
||||
if (enable != isSpecularEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableSpecular = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isSpecularEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableSpecular;
|
||||
}
|
||||
void LightingModel::setAlbedo(bool enable) {
|
||||
if (enable != isAlbedoEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableAlbedo = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isAlbedoEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableAlbedo;
|
||||
}
|
||||
|
||||
void LightingModel::setAmbientLight(bool enable) {
|
||||
if (enable != isAmbientLightEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableAmbientLight = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isAmbientLightEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableAmbientLight;
|
||||
}
|
||||
void LightingModel::setDirectionalLight(bool enable) {
|
||||
if (enable != isDirectionalLightEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableDirectionalLight = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isDirectionalLightEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableDirectionalLight;
|
||||
}
|
||||
void LightingModel::setPointLight(bool enable) {
|
||||
if (enable != isPointLightEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enablePointLight = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isPointLightEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enablePointLight;
|
||||
}
|
||||
void LightingModel::setSpotLight(bool enable) {
|
||||
if (enable != isSpotLightEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().enableSpotLight = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isSpotLightEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().enableSpotLight;
|
||||
}
|
||||
void LightingModel::setShowLightContour(bool enable) {
|
||||
if (enable != isShowLightContourEnabled()) {
|
||||
_parametersBuffer.edit<Parameters>().showLightContour = (float)enable;
|
||||
}
|
||||
}
|
||||
bool LightingModel::isShowLightContourEnabled() const {
|
||||
return (bool)_parametersBuffer.get<Parameters>().showLightContour;
|
||||
}
|
||||
|
||||
MakeLightingModel::MakeLightingModel() {
|
||||
_lightingModel = std::make_shared<LightingModel>();
|
||||
}
|
||||
|
||||
void MakeLightingModel::configure(const Config& config) {
|
||||
_lightingModel->setUnlit(config.enableUnlit);
|
||||
_lightingModel->setEmissive(config.enableEmissive);
|
||||
_lightingModel->setLightmap(config.enableLightmap);
|
||||
_lightingModel->setBackground(config.enableBackground);
|
||||
|
||||
_lightingModel->setObscurance(config.enableObscurance);
|
||||
|
||||
_lightingModel->setScattering(config.enableScattering);
|
||||
_lightingModel->setDiffuse(config.enableDiffuse);
|
||||
_lightingModel->setSpecular(config.enableSpecular);
|
||||
_lightingModel->setAlbedo(config.enableAlbedo);
|
||||
|
||||
_lightingModel->setAmbientLight(config.enableAmbientLight);
|
||||
_lightingModel->setDirectionalLight(config.enableDirectionalLight);
|
||||
_lightingModel->setPointLight(config.enablePointLight);
|
||||
_lightingModel->setSpotLight(config.enableSpotLight);
|
||||
|
||||
_lightingModel->setShowLightContour(config.showLightContour);
|
||||
}
|
||||
|
||||
void MakeLightingModel::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, LightingModelPointer& lightingModel) {
|
||||
|
||||
lightingModel = _lightingModel;
|
||||
}
|
166  libraries/render-utils/src/LightingModel.h  Normal file
|
@ -0,0 +1,166 @@
//
//  LightingModel.h
//  libraries/render-utils/src/
//
//  Created by Sam Gateau 7/1/2016.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_LightingModel_h
#define hifi_LightingModel_h

#include "gpu/Resource.h"
#include "render/DrawTask.h"

class RenderArgs;

// LightingModel is a helper class gathering in one place the flags to enable the lighting contributions
class LightingModel {
public:
    using UniformBufferView = gpu::BufferView;

    LightingModel();

    void setUnlit(bool enable);
    bool isUnlitEnabled() const;

    void setEmissive(bool enable);
    bool isEmissiveEnabled() const;
    void setLightmap(bool enable);
    bool isLightmapEnabled() const;

    void setBackground(bool enable);
    bool isBackgroundEnabled() const;

    void setObscurance(bool enable);
    bool isObscuranceEnabled() const;

    void setScattering(bool enable);
    bool isScatteringEnabled() const;
    void setDiffuse(bool enable);
    bool isDiffuseEnabled() const;
    void setSpecular(bool enable);
    bool isSpecularEnabled() const;

    void setAlbedo(bool enable);
    bool isAlbedoEnabled() const;

    void setAmbientLight(bool enable);
    bool isAmbientLightEnabled() const;
    void setDirectionalLight(bool enable);
    bool isDirectionalLightEnabled() const;
    void setPointLight(bool enable);
    bool isPointLightEnabled() const;
    void setSpotLight(bool enable);
    bool isSpotLightEnabled() const;

    void setShowLightContour(bool enable);
    bool isShowLightContourEnabled() const;

    UniformBufferView getParametersBuffer() const { return _parametersBuffer; }

protected:

    // Class describing the uniform buffer holding the lighting model flags;
    // it is updated every frame
    class Parameters {
    public:
        float enableUnlit{ 1.0f };
        float enableEmissive{ 1.0f };
        float enableLightmap{ 1.0f };
        float enableBackground{ 1.0f };

        float enableScattering{ 1.0f };
        float enableDiffuse{ 1.0f };
        float enableSpecular{ 1.0f };
        float enableAlbedo{ 1.0f };

        float enableAmbientLight{ 1.0f };
        float enableDirectionalLight{ 1.0f };
        float enablePointLight{ 1.0f };
        float enableSpotLight{ 1.0f };

        float showLightContour{ 0.0f }; // false by default
        float enableObscurance{ 1.0f };

        glm::vec2 spares{ 0.0f };

        Parameters() {}
    };
    UniformBufferView _parametersBuffer;
};
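// Note (added for clarity, inferred from the shader side): the Parameters floats above appear to
// map onto the four vec4s of the lightingModelBuffer declared in LightingModel.slh
// (_UnlitEmissiveLightmapBackground, _ScatteringDiffuseSpecularAlbedo, _AmbientDirectionalPointSpot,
// _ShowContourObscuranceSpare2), so the field order here should stay in sync with that packing.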
using LightingModelPointer = std::shared_ptr<LightingModel>;


class MakeLightingModelConfig : public render::Job::Config {
    Q_OBJECT

    Q_PROPERTY(bool enableUnlit MEMBER enableUnlit NOTIFY dirty)
    Q_PROPERTY(bool enableEmissive MEMBER enableEmissive NOTIFY dirty)
    Q_PROPERTY(bool enableLightmap MEMBER enableLightmap NOTIFY dirty)
    Q_PROPERTY(bool enableBackground MEMBER enableBackground NOTIFY dirty)

    Q_PROPERTY(bool enableObscurance MEMBER enableObscurance NOTIFY dirty)

    Q_PROPERTY(bool enableScattering MEMBER enableScattering NOTIFY dirty)
    Q_PROPERTY(bool enableDiffuse MEMBER enableDiffuse NOTIFY dirty)
    Q_PROPERTY(bool enableSpecular MEMBER enableSpecular NOTIFY dirty)
    Q_PROPERTY(bool enableAlbedo MEMBER enableAlbedo NOTIFY dirty)

    Q_PROPERTY(bool enableAmbientLight MEMBER enableAmbientLight NOTIFY dirty)
    Q_PROPERTY(bool enableDirectionalLight MEMBER enableDirectionalLight NOTIFY dirty)
    Q_PROPERTY(bool enablePointLight MEMBER enablePointLight NOTIFY dirty)
    Q_PROPERTY(bool enableSpotLight MEMBER enableSpotLight NOTIFY dirty)

    Q_PROPERTY(bool showLightContour MEMBER showLightContour NOTIFY dirty)

public:
    MakeLightingModelConfig() : render::Job::Config() {} // Make Lighting Model is always on

    bool enableUnlit{ true };
    bool enableEmissive{ true };
    bool enableLightmap{ true };
    bool enableBackground{ true };
    bool enableObscurance{ true };

    bool enableScattering{ true };
    bool enableDiffuse{ true };
    bool enableSpecular{ true };
    bool enableAlbedo{ true };

    bool enableAmbientLight{ true };
    bool enableDirectionalLight{ true };
    bool enablePointLight{ true };
    bool enableSpotLight{ true };

    bool showLightContour{ false }; // false by default

signals:
    void dirty();
};

class MakeLightingModel {
public:
    using Config = MakeLightingModelConfig;
    using JobModel = render::Job::ModelO<MakeLightingModel, LightingModelPointer, Config>;

    MakeLightingModel();

    void configure(const Config& config);
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, LightingModelPointer& lightingModel);

private:
    LightingModelPointer _lightingModel;
};

#endif // hifi_LightingModel_h
200
libraries/render-utils/src/LightingModel.slh
Normal file
@ -0,0 +1,200 @@
<!
//  LightingModel.slh
//  fragment shader
//
//  Created by Sam Gateau on 1/25/14.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
!>
<@if not LIGHTING_MODEL_SLH@>
<@def LIGHTING_MODEL_SLH@>

<@func declareLightingModel()@>

struct LightingModel {
    vec4 _UnlitEmissiveLightmapBackground;
    vec4 _ScatteringDiffuseSpecularAlbedo;
    vec4 _AmbientDirectionalPointSpot;
    vec4 _ShowContourObscuranceSpare2;
};

uniform lightingModelBuffer {
    LightingModel lightingModel;
};

float isUnlitEnabled() {
    return lightingModel._UnlitEmissiveLightmapBackground.x;
}
float isEmissiveEnabled() {
    return lightingModel._UnlitEmissiveLightmapBackground.y;
}
float isLightmapEnabled() {
    return lightingModel._UnlitEmissiveLightmapBackground.z;
}
float isBackgroundEnabled() {
    return lightingModel._UnlitEmissiveLightmapBackground.w;
}
float isObscuranceEnabled() {
    return lightingModel._ShowContourObscuranceSpare2.y;
}

float isScatteringEnabled() {
    return lightingModel._ScatteringDiffuseSpecularAlbedo.x;
}
float isDiffuseEnabled() {
    return lightingModel._ScatteringDiffuseSpecularAlbedo.y;
}
float isSpecularEnabled() {
    return lightingModel._ScatteringDiffuseSpecularAlbedo.z;
}
float isAlbedoEnabled() {
    return lightingModel._ScatteringDiffuseSpecularAlbedo.w;
}

float isAmbientEnabled() {
    return lightingModel._AmbientDirectionalPointSpot.x;
}
float isDirectionalEnabled() {
    return lightingModel._AmbientDirectionalPointSpot.y;
}
float isPointEnabled() {
    return lightingModel._AmbientDirectionalPointSpot.z;
}
float isSpotEnabled() {
    return lightingModel._AmbientDirectionalPointSpot.w;
}

float isShowLightContour() {
    return lightingModel._ShowContourObscuranceSpare2.x;
}

<@endfunc@>
<$declareLightingModel()$>

<@func declareBeckmannSpecular()@>

uniform sampler2D scatteringSpecularBeckmann;

float fetchSpecularBeckmann(float ndoth, float roughness) {
    return pow(2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);
}

float fresnelSchlickScalar(float fresnelColor, vec3 lightDir, vec3 halfDir) {
    float base = 1.0 - clamp(dot(lightDir, halfDir), 0.0, 1.0);
    float exponential = pow(base, 5.0);
    return (exponential) + fresnelColor * (1.0 - exponential);
}

vec2 skinSpecular(vec3 N, vec3 L, vec3 V, float roughness, float intensity) {
    vec2 result = vec2(0.0, 1.0);
    float ndotl = dot(N, L);
    if (ndotl > 0.0) {
        vec3 h = L + V;
        vec3 H = normalize(h);
        float ndoth = dot(N, H);
        float PH = fetchSpecularBeckmann(ndoth, roughness);
        float F = fresnelSchlickScalar(0.028, H, V);
        float frSpec = max(PH * F / dot(h, h), 0.0);
        result.x = ndotl * intensity * frSpec;
        result.y -= F;
    }

    return result;
}
<@endfunc@>
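<!  Illustrative usage sketch (not part of the original change): a caller with unit vectors N, L, V
    and a roughness value can combine the Beckmann-based skin specular with its diffuse term as
        vec2 s = skinSpecular(N, L, V, roughness, 1.0);
        vec3 color = diffuseColor * s.y + vec3(s.x);
    where s.x carries the specular contribution and s.y the energy left over for diffuse, which
    matches how the scattering branch of evalFragShading below uses it. !>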
<@func declareEvalPBRShading()@>

vec3 fresnelSchlickColor(vec3 fresnelColor, vec3 lightDir, vec3 halfDir) {
    float base = 1.0 - clamp(dot(lightDir, halfDir), 0.0, 1.0);
    float exponential = pow(base, 5.0);
    return vec3(exponential) + fresnelColor * (1.0 - exponential);
}


float specularDistribution(float roughness, vec3 normal, vec3 halfDir) {
    float ndoth = clamp(dot(halfDir, normal), 0.0, 1.0);
    float gloss2 = pow(0.001 + roughness, 4);
    float denom = (ndoth * ndoth * (gloss2 - 1) + 1);
    float power = gloss2 / (3.14159 * denom * denom);
    return power;
}
<! // NOTE: Another implementation for specularDistribution
float specularDistribution(float roughness, vec3 normal, vec3 halfDir) {
    float gloss = exp2(10 * (1.0 - roughness) + 1);
    float power = pow(clamp(dot(halfDir, normal), 0.0, 1.0), gloss);
    power *= (gloss * 0.125 + 0.25);
    return power;
}
!>
// Frag Shading returns the diffuse amount as W and the specular rgb as xyz
vec4 evalPBRShading(vec3 fragNormal, vec3 fragLightDir, vec3 fragEyeDir, float metallic, vec3 fresnel, float roughness) {
    // Diffuse Lighting
    float diffuse = clamp(dot(fragNormal, fragLightDir), 0.0, 1.0);

    // Specular Lighting
    vec3 halfDir = normalize(fragEyeDir + fragLightDir);
    vec3 fresnelColor = fresnelSchlickColor(fresnel, fragLightDir, halfDir);
    float power = specularDistribution(roughness, fragNormal, halfDir);
    vec3 specular = power * fresnelColor * diffuse;

    return vec4(specular, (1.0 - metallic) * diffuse * (1 - fresnelColor.x));
}
<@endfunc@>


<$declareEvalPBRShading()$>

// Return xyz the specular/reflection component and w the diffuse component
//vec4 evalFragShading(vec3 fragNormal, vec3 fragLightDir, vec3 fragEyeDir, float metallic, vec3 fresnel, float roughness) {
//    return evalPBRShading(fragNormal, fragLightDir, fragEyeDir, metallic, fresnel, roughness);
//}

void evalFragShading(out vec3 diffuse, out vec3 specular,
    vec3 fragNormal, vec3 fragLightDir, vec3 fragEyeDir,
    float metallic, vec3 fresnel, float roughness, vec3 albedo) {
    vec4 shading = evalPBRShading(fragNormal, fragLightDir, fragEyeDir, metallic, fresnel, roughness);
    diffuse = vec3(shading.w);
    if (isAlbedoEnabled() > 0.0) {
        diffuse *= albedo;
    }
    specular = shading.xyz;
}

<$declareBeckmannSpecular()$>
<@include SubsurfaceScattering.slh@>
<$declareSubsurfaceScatteringBRDF()$>


void evalFragShading(out vec3 diffuse, out vec3 specular,
    vec3 fragNormal, vec3 fragLightDir, vec3 fragEyeDir,
    float metallic, vec3 fresnel, float roughness, vec3 albedo,
    float scattering, vec4 midNormalCurvature, vec4 lowNormalCurvature) {
    if (scattering * isScatteringEnabled() > 0.0) {
        vec3 brdf = evalSkinBRDF(fragLightDir, fragNormal, midNormalCurvature.xyz, lowNormalCurvature.xyz, lowNormalCurvature.w);
        float NdotL = clamp(dot(fragNormal, fragLightDir), 0.0, 1.0);
        diffuse = mix(vec3(NdotL), brdf, scattering);

        // Specular Lighting
        vec3 halfDir = normalize(fragEyeDir + fragLightDir);
        vec2 specularBrdf = skinSpecular(fragNormal, fragLightDir, fragEyeDir, roughness, 1.0);

        diffuse *= specularBrdf.y;
        specular = vec3(specularBrdf.x);
    } else {
        vec4 shading = evalPBRShading(fragNormal, fragLightDir, fragEyeDir, metallic, fresnel, roughness);
        diffuse = vec3(shading.w);
        specular = shading.xyz;
    }
    if (isAlbedoEnabled() > 0.0) {
        diffuse *= albedo;
    }
}


<@endif@>
@ -44,7 +44,7 @@ TexMapArray getTexMapArray() {
<@endfunc@>


<@func declareMaterialTextures(withAlbedo, withRoughness, withNormal, withMetallic, withEmissive, withOcclusion)@>
<@func declareMaterialTextures(withAlbedo, withRoughness, withNormal, withMetallic, withEmissive, withOcclusion, withScattering)@>

<@if withAlbedo@>
uniform sampler2D albedoMap;

@ -87,10 +87,20 @@ float fetchOcclusionMap(vec2 uv) {
    return texture(occlusionMap, uv).r;
}
<@endif@>

<@if withScattering@>
uniform sampler2D scatteringMap;
float fetchScatteringMap(vec2 uv) {
    float scattering = texture(scatteringMap, uv).r; // boolean scattering for now
    return max(((scattering - 0.1) / 0.9), 0.0);
    return texture(scatteringMap, uv).r; // boolean scattering for now
}
<@endif@>
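<!  Note on the remap above: (scattering - 0.1) / 0.9 treats texel values at or below 0.1 as
    "no scattering" and rescales the remaining range to [0, 1]; for example a stored value of
    0.55 yields a scattering factor of 0.5. !>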

<@endfunc@>


<@func fetchMaterialTexturesCoord0(matKey, texcoord0, albedo, roughness, normal, metallic, emissive)@>
<@func fetchMaterialTexturesCoord0(matKey, texcoord0, albedo, roughness, normal, metallic, emissive, scattering)@>
<@if albedo@>
    vec4 <$albedo$> = (((<$matKey$> & (ALBEDO_MAP_BIT | OPACITY_MASK_MAP_BIT | OPACITY_TRANSLUCENT_MAP_BIT)) != 0) ? fetchAlbedoMap(<$texcoord0$>) : vec4(1.0));
<@endif@>

@ -106,6 +116,9 @@ float fetchOcclusionMap(vec2 uv) {
<@if emissive@>
    vec3 <$emissive$> = (((<$matKey$> & EMISSIVE_MAP_BIT) != 0) ? fetchEmissiveMap(<$texcoord0$>) : vec3(0.0));
<@endif@>
<@if scattering@>
    float <$scattering$> = (((<$matKey$> & SCATTERING_MAP_BIT) != 0) ? fetchScatteringMap(<$texcoord0$>) : 0.0);
<@endif@>
<@endfunc@>

<@func fetchMaterialTexturesCoord1(matKey, texcoord1, occlusion, lightmapVal)@>

@ -191,4 +204,10 @@ vec3 fetchLightmapMap(vec2 uv) {
}
<@endfunc@>

<@func evalMaterialScattering(fetchedScattering, materialScattering, matKey, scattering)@>
{
    <$scattering$> = (((<$matKey$> & SCATTERING_MAP_BIT) != 0) ? <$fetchedScattering$> : <$materialScattering$>);
}
<@endfunc@>

<@endif@>
@ -217,6 +217,20 @@ void MeshPartPayload::bindMaterial(gpu::Batch& batch, const ShapePipeline::Locat
        batch.setResourceTexture(ShapePipeline::Slot::MAP::OCCLUSION, nullptr);
    }

    // Scattering map
    if (materialKey.isScatteringMap()) {
        auto scatteringMap = textureMaps[model::MaterialKey::SCATTERING_MAP];
        if (scatteringMap && scatteringMap->isDefined()) {
            batch.setResourceTexture(ShapePipeline::Slot::MAP::SCATTERING, scatteringMap->getTextureView());

            // texcoords are assumed to be the same as albedo
        } else {
            batch.setResourceTexture(ShapePipeline::Slot::MAP::SCATTERING, textureCache->getWhiteTexture());
        }
    } else {
        batch.setResourceTexture(ShapePipeline::Slot::MAP::SCATTERING, nullptr);
    }

    // Emissive / Lightmap
    if (materialKey.isLightmapMap()) {
        auto lightmapMap = textureMaps[model::MaterialKey::LIGHTMAP_MAP];
@ -23,9 +23,13 @@
#include <render/DrawTask.h>
#include <render/DrawStatus.h>
#include <render/DrawSceneOctree.h>
#include <render/BlurTask.h>

#include "LightingModel.h"
#include "DebugDeferredBuffer.h"
#include "DeferredFramebuffer.h"
#include "DeferredLightingEffect.h"
#include "SurfaceGeometryPass.h"
#include "FramebufferCache.h"
#include "HitEffect.h"
#include "TextureCache.h"

@ -33,21 +37,17 @@
#include "AmbientOcclusionEffect.h"
#include "AntialiasingEffect.h"
#include "ToneMappingEffect.h"
#include "SubsurfaceScattering.h"

#include <gpu/StandardShaderLib.h>

#include "drawOpaqueStencil_frag.h"


using namespace render;

extern void initStencilPipeline(gpu::PipelinePointer& pipeline);
extern void initOverlay3DPipelines(render::ShapePlumber& plumber);
extern void initDeferredPipelines(render::ShapePlumber& plumber);

void PrepareDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {
    DependencyManager::get<DeferredLightingEffect>()->prepare(renderContext->args);
}

void RenderDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {
    DependencyManager::get<DeferredLightingEffect>()->render(renderContext);
}

RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
    cullFunctor = cullFunctor ? cullFunctor : [](const RenderArgs*, const AABox&){ return true; };


@ -92,17 +92,53 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
    const auto overlayTransparents = addJob<DepthSortItems>("DepthSortOverlayTransparent", filteredNonspatialBuckets[TRANSPARENT_SHAPE_BUCKET], DepthSortItems(false));
    const auto background = filteredNonspatialBuckets[BACKGROUND_BUCKET];

    // GPU jobs: Start preparing the deferred and lighting buffer
    addJob<PrepareDeferred>("PrepareDeferred");
    // Prepare deferred, generate the shared Deferred Frame Transform
    const auto deferredFrameTransform = addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform");
    const auto lightingModel = addJob<MakeLightingModel>("LightingModel");


    // GPU jobs: Start preparing the primary, deferred and lighting buffer
    const auto primaryFramebuffer = addJob<PreparePrimaryFramebuffer>("PreparePrimaryBuffer");

    // const auto fullFrameRangeTimer = addJob<BeginGPURangeTimer>("BeginRangeTimer");
    const auto opaqueRangeTimer = addJob<BeginGPURangeTimer>("BeginOpaqueRangeTimer");

    const auto prepareDeferredInputs = PrepareDeferred::Inputs(primaryFramebuffer, lightingModel).hasVarying();
    const auto prepareDeferredOutputs = addJob<PrepareDeferred>("PrepareDeferred", prepareDeferredInputs);
    const auto deferredFramebuffer = prepareDeferredOutputs.getN<PrepareDeferred::Outputs>(0);
    const auto lightingFramebuffer = prepareDeferredOutputs.getN<PrepareDeferred::Outputs>(1);

    // Render opaque objects in DeferredBuffer
    addJob<DrawStateSortDeferred>("DrawOpaqueDeferred", opaques, shapePlumber);
    const auto opaqueInputs = DrawStateSortDeferred::Inputs(opaques, lightingModel).hasVarying();
    addJob<DrawStateSortDeferred>("DrawOpaqueDeferred", opaqueInputs, shapePlumber);

    // Once opaque is all rendered create stencil background
    addJob<DrawStencilDeferred>("DrawOpaqueStencil");
    addJob<DrawStencilDeferred>("DrawOpaqueStencil", deferredFramebuffer);

    // Use Stencil and start drawing background in Lighting buffer
    addJob<DrawBackgroundDeferred>("DrawBackgroundDeferred", background);
    addJob<EndGPURangeTimer>("OpaqueRangeTimer", opaqueRangeTimer);


    // Opaque all rendered

    // Linear Depth Pass
    const auto linearDepthPassInputs = LinearDepthPass::Inputs(deferredFrameTransform, deferredFramebuffer).hasVarying();
    const auto linearDepthPassOutputs = addJob<LinearDepthPass>("LinearDepth", linearDepthPassInputs);
    const auto linearDepthTarget = linearDepthPassOutputs.getN<LinearDepthPass::Outputs>(0);
    const auto linearDepthTexture = linearDepthPassOutputs.getN<LinearDepthPass::Outputs>(2);
    const auto halfLinearDepthTexture = linearDepthPassOutputs.getN<LinearDepthPass::Outputs>(3);
    const auto halfNormalTexture = linearDepthPassOutputs.getN<LinearDepthPass::Outputs>(4);


    // Curvature pass
    const auto surfaceGeometryPassInputs = SurfaceGeometryPass::Inputs(deferredFrameTransform, deferredFramebuffer, linearDepthTarget).hasVarying();
    const auto surfaceGeometryPassOutputs = addJob<SurfaceGeometryPass>("SurfaceGeometry", surfaceGeometryPassInputs);
    const auto surfaceGeometryFramebuffer = surfaceGeometryPassOutputs.getN<SurfaceGeometryPass::Outputs>(0);
    const auto curvatureFramebuffer = surfaceGeometryPassOutputs.getN<SurfaceGeometryPass::Outputs>(1);
    const auto midCurvatureNormalFramebuffer = surfaceGeometryPassOutputs.getN<SurfaceGeometryPass::Outputs>(2);
    const auto lowCurvatureNormalFramebuffer = surfaceGeometryPassOutputs.getN<SurfaceGeometryPass::Outputs>(3);

    // Simply update the scattering resource
    const auto scatteringResource = addJob<SubsurfaceScattering>("Scattering");
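    // Note (added for clarity): the scattering resource produced here (diffusion profile,
    // pre-integrated scattering table and Beckmann specular map) is consumed further down
    // as part of deferredLightingInputs when the RenderDeferred job is added.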

    // AO job
    addJob<AmbientOcclusionEffect>("AmbientOcclusion");

@ -110,27 +146,40 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
    // Draw Lights just add the lights to the current list of lights to deal with. Not really a gpu job for now.
    addJob<DrawLight>("DrawLight", lights);

    // DeferredBuffer is complete, now let's shade it into the LightingBuffer
    addJob<RenderDeferred>("RenderDeferred");
    const auto deferredLightingInputs = RenderDeferred::Inputs(deferredFrameTransform, deferredFramebuffer, lightingModel,
        surfaceGeometryFramebuffer, lowCurvatureNormalFramebuffer, scatteringResource).hasVarying();

    // AA job to be revisited
    addJob<Antialiasing>("Antialiasing");
    // DeferredBuffer is complete, now let's shade it into the LightingBuffer
    addJob<RenderDeferred>("RenderDeferred", deferredLightingInputs);

    // Use Stencil and draw background in Lighting buffer to complete filling in the opaque
    const auto backgroundInputs = DrawBackgroundDeferred::Inputs(background, lightingModel).hasVarying();
    addJob<DrawBackgroundDeferred>("DrawBackgroundDeferred", backgroundInputs);

    // Render transparent objects forward in LightingBuffer
    addJob<DrawDeferred>("DrawTransparentDeferred", transparents, shapePlumber);

    const auto transparentsInputs = DrawDeferred::Inputs(transparents, lightingModel).hasVarying();
    addJob<DrawDeferred>("DrawTransparentDeferred", transparentsInputs, shapePlumber);

    const auto toneAndPostRangeTimer = addJob<BeginGPURangeTimer>("BeginToneAndPostRangeTimer");

    // Lighting Buffer ready for tone mapping
    addJob<ToneMappingDeferred>("ToneMapping");
    const auto toneMappingInputs = render::Varying(ToneMappingDeferred::Inputs(lightingFramebuffer, primaryFramebuffer));
    addJob<ToneMappingDeferred>("ToneMapping", toneMappingInputs);

    // Overlays
    addJob<DrawOverlay3D>("DrawOverlay3DOpaque", overlayOpaques, true);
    addJob<DrawOverlay3D>("DrawOverlay3DTransparent", overlayTransparents, false);

    const auto overlayOpaquesInputs = DrawOverlay3D::Inputs(overlayOpaques, lightingModel).hasVarying();
    const auto overlayTransparentsInputs = DrawOverlay3D::Inputs(overlayTransparents, lightingModel).hasVarying();
    addJob<DrawOverlay3D>("DrawOverlay3DOpaque", overlayOpaquesInputs, true);
    addJob<DrawOverlay3D>("DrawOverlay3DTransparent", overlayTransparentsInputs, false);


    // Debugging stages
    {
        addJob<DebugSubsurfaceScattering>("DebugScattering", deferredLightingInputs);

        // Debugging Deferred buffer job
        addJob<DebugDeferredBuffer>("DebugDeferredBuffer");
        const auto debugFramebuffers = render::Varying(DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, lowCurvatureNormalFramebuffer));
        addJob<DebugDeferredBuffer>("DebugDeferredBuffer", debugFramebuffers);

        // Scene Octree Debugging job
        {

@ -147,11 +196,17 @@ RenderDeferredTask::RenderDeferredTask(CullFunctor cullFunctor) {
        }
    }

    // FIXME: Hit effect is never used, let's hide it for now, probably a more generic way to add custom post process effects
    // addJob<HitEffect>("HitEffect");

    // AA job to be revisited
    addJob<Antialiasing>("Antialiasing", primaryFramebuffer);

    addJob<EndGPURangeTimer>("ToneAndPostRangeTimer", toneAndPostRangeTimer);

    // Blit!
    addJob<Blit>("Blit");
    addJob<Blit>("Blit", primaryFramebuffer);

    // addJob<EndGPURangeTimer>("RangeTimer", fullFrameRangeTimer);

}

void RenderDeferredTask::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {

@ -167,21 +222,45 @@ void RenderDeferredTask::run(const SceneContextPointer& sceneContext, const Rend
        return;
    }

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);

    for (auto job : _jobs) {
        job.run(sceneContext, renderContext);
    }
}

void DrawDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const ItemBounds& inItems) {
void BeginGPURangeTimer::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, gpu::RangeTimerPointer& timer) {
    timer = _gpuTimer;
    gpu::doInBatch(renderContext->args->_context, [&](gpu::Batch& batch) {
        _gpuTimer->begin(batch);
    });
}

void EndGPURangeTimer::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const gpu::RangeTimerPointer& timer) {
    gpu::doInBatch(renderContext->args->_context, [&](gpu::Batch& batch) {
        timer->end(batch);
    });

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
    config->gpuTime = timer->getAverage();
}


void DrawDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);

    const auto& inItems = inputs.get0();
    const auto& lightingModel = inputs.get1();

    RenderArgs* args = renderContext->args;

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;

        // Setup camera, projection and viewport for all items
        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);


@ -193,6 +272,9 @@ void DrawDeferred::run(const SceneContextPointer& sceneContext, const RenderCont
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);

        // Setup lighting model for all items;
        batch.setUniformBuffer(render::ShapePipeline::Slot::LIGHTING_MODEL, lightingModel->getParametersBuffer());

        renderShapes(sceneContext, renderContext, _shapePlumber, inItems, _maxDrawn);
        args->_batch = nullptr;
    });

@ -200,16 +282,21 @@ void DrawDeferred::run(const SceneContextPointer& sceneContext, const RenderCont
    config->setNumDrawn((int)inItems.size());
}

void DrawStateSortDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const ItemBounds& inItems) {
void DrawStateSortDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);

    const auto& inItems = inputs.get0();
    const auto& lightingModel = inputs.get1();

    RenderArgs* args = renderContext->args;

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;

        // Setup camera, projection and viewport for all items
        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);


@ -221,6 +308,9 @@ void DrawStateSortDeferred::run(const SceneContextPointer& sceneContext, const R
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);

        // Setup lighting model for all items;
        batch.setUniformBuffer(render::ShapePipeline::Slot::LIGHTING_MODEL, lightingModel->getParametersBuffer());

        if (_stateSort) {
            renderStateSortShapes(sceneContext, renderContext, _shapePlumber, inItems, _maxDrawn);
        } else {

@ -238,12 +328,15 @@ DrawOverlay3D::DrawOverlay3D(bool opaque) :
    initOverlay3DPipelines(*_shapePlumber);
}

void DrawOverlay3D::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const render::ItemBounds& inItems) {
void DrawOverlay3D::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);

    const auto& inItems = inputs.get0();
    const auto& lightingModel = inputs.get1();

    config->setNumDrawn((int)inItems.size());
    emit config->numDrawnChanged();


@ -274,21 +367,35 @@ void DrawOverlay3D::run(const SceneContextPointer& sceneContext, const RenderCon
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);

        // Setup lighting model for all items;
        batch.setUniformBuffer(render::ShapePipeline::Slot::LIGHTING_MODEL, lightingModel->getParametersBuffer());

        renderShapes(sceneContext, renderContext, _shapePlumber, inItems, _maxDrawn);
        args->_batch = nullptr;
    });
}
}

gpu::PipelinePointer DrawStencilDeferred::_opaquePipeline;
const gpu::PipelinePointer& DrawStencilDeferred::getOpaquePipeline() {

gpu::PipelinePointer DrawStencilDeferred::getOpaquePipeline() {
    if (!_opaquePipeline) {
        initStencilPipeline(_opaquePipeline);
        const gpu::int8 STENCIL_OPAQUE = 1;
        auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
        auto ps = gpu::Shader::createPixel(std::string(drawOpaqueStencil_frag));
        auto program = gpu::Shader::createProgram(vs, ps);
        gpu::Shader::makeProgram((*program));

        auto state = std::make_shared<gpu::State>();
        state->setDepthTest(true, false, gpu::LESS_EQUAL);
        state->setStencilTest(true, 0xFF, gpu::State::StencilTest(STENCIL_OPAQUE, 0xFF, gpu::ALWAYS, gpu::State::STENCIL_OP_REPLACE, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_REPLACE));
        state->setColorWriteMask(0);

        _opaquePipeline = gpu::Pipeline::create(program, state);
    }
    return _opaquePipeline;
}

void DrawStencilDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {
void DrawStencilDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const DeferredFramebufferPointer& deferredFramebuffer) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());


@ -297,7 +404,8 @@ void DrawStencilDeferred::run(const SceneContextPointer& sceneContext, const Ren
    doInBatch(args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;

        auto deferredFboColorDepthStencil = DependencyManager::get<FramebufferCache>()->getDeferredFramebufferDepthColor();
        auto deferredFboColorDepthStencil = deferredFramebuffer->getDeferredFramebufferDepthColor();


        batch.enableStereo(false);


@ -314,21 +422,23 @@ void DrawStencilDeferred::run(const SceneContextPointer& sceneContext, const Ren
    args->_batch = nullptr;
}

void DrawBackgroundDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const ItemBounds& inItems) {
void DrawBackgroundDeferred::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const Inputs& inputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    const auto& inItems = inputs.get0();
    const auto& lightingModel = inputs.get1();
    if (!lightingModel->isBackgroundEnabled()) {
        return;
    }

    RenderArgs* args = renderContext->args;
    doInBatch(args->_context, [&](gpu::Batch& batch) {
        args->_batch = &batch;
        _gpuTimer.begin(batch);

        auto lightingFBO = DependencyManager::get<FramebufferCache>()->getLightingFramebuffer();
        // _gpuTimer.begin(batch);

        batch.enableSkybox(true);

        batch.setFramebuffer(lightingFBO);


        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);


@ -341,14 +451,14 @@ void DrawBackgroundDeferred::run(const SceneContextPointer& sceneContext, const
        batch.setViewTransform(viewMat);

        renderItems(sceneContext, renderContext, inItems);
        _gpuTimer.end(batch);
        // _gpuTimer.end(batch);
    });
    args->_batch = nullptr;

    std::static_pointer_cast<Config>(renderContext->jobConfig)->gpuTime = _gpuTimer.getAverage();
    // std::static_pointer_cast<Config>(renderContext->jobConfig)->gpuTime = _gpuTimer.getAverage();
}

void Blit::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {
void Blit::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext, const gpu::FramebufferPointer& srcFramebuffer) {
    assert(renderContext->args);
    assert(renderContext->args->_context);


@ -364,8 +474,7 @@ void Blit::run(const SceneContextPointer& sceneContext, const RenderContextPoint
    int height = renderArgs->_viewport.w;

    // Blit primary to blit FBO
    auto framebufferCache = DependencyManager::get<FramebufferCache>();
    auto primaryFbo = framebufferCache->getPrimaryFramebuffer();
    auto primaryFbo = srcFramebuffer;

    gpu::doInBatch(renderArgs->_context, [&](gpu::Batch& batch) {
        batch.setFramebuffer(blitFbo);
@ -14,28 +14,46 @@

#include <gpu/Pipeline.h>
#include <render/CullTask.h>
#include "LightingModel.h"

class SetupDeferred {

class BeginGPURangeTimer {
public:
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);

    using JobModel = render::Job::Model<SetupDeferred>;
    using JobModel = render::Job::ModelO<BeginGPURangeTimer, gpu::RangeTimerPointer>;

    BeginGPURangeTimer() : _gpuTimer(std::make_shared<gpu::RangeTimer>()) {}

    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, gpu::RangeTimerPointer& timer);

protected:
    gpu::RangeTimerPointer _gpuTimer;
};


class PrepareDeferred {
class GPURangeTimerConfig : public render::Job::Config {
    Q_OBJECT
    Q_PROPERTY(double gpuTime READ getGpuTime)
public:
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);

    using JobModel = render::Job::Model<PrepareDeferred>;
    double getGpuTime() { return gpuTime; }

protected:
    friend class EndGPURangeTimer;
    double gpuTime;
};

class EndGPURangeTimer {
public:
    using Config = GPURangeTimerConfig;
    using JobModel = render::Job::ModelI<EndGPURangeTimer, gpu::RangeTimerPointer, Config>;

    EndGPURangeTimer() {}

    void configure(const Config& config) {}
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const gpu::RangeTimerPointer& timer);

protected:
};
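// Usage sketch (illustrative, not part of the change): a task brackets a group of jobs with these
// two timers by feeding the varying produced by BeginGPURangeTimer into EndGPURangeTimer, e.g.
//   const auto t = addJob<BeginGPURangeTimer>("BeginOpaqueRangeTimer");
//   ... jobs to be measured ...
//   addJob<EndGPURangeTimer>("OpaqueRangeTimer", t);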

class RenderDeferred {
public:
    using JobModel = render::Job::Model<RenderDeferred>;

    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);
};

class DrawConfig : public render::Job::Config {
    Q_OBJECT

@ -59,13 +77,14 @@ protected:

class DrawDeferred {
public:
    using Inputs = render::VaryingSet2<render::ItemBounds, LightingModelPointer>;
    using Config = DrawConfig;
    using JobModel = render::Job::ModelI<DrawDeferred, render::ItemBounds, Config>;
    using JobModel = render::Job::ModelI<DrawDeferred, Inputs, Config>;

    DrawDeferred(render::ShapePlumberPointer shapePlumber) : _shapePlumber{ shapePlumber } {}

    void configure(const Config& config) { _maxDrawn = config.maxDrawn; }
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const render::ItemBounds& inItems);
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs);

protected:
    render::ShapePlumberPointer _shapePlumber;

@ -95,13 +114,15 @@ protected:

class DrawStateSortDeferred {
public:
    using Inputs = render::VaryingSet2<render::ItemBounds, LightingModelPointer>;

    using Config = DrawStateSortConfig;
    using JobModel = render::Job::ModelI<DrawStateSortDeferred, render::ItemBounds, Config>;
    using JobModel = render::Job::ModelI<DrawStateSortDeferred, Inputs, Config>;

    DrawStateSortDeferred(render::ShapePlumberPointer shapePlumber) : _shapePlumber{ shapePlumber } {}

    void configure(const Config& config) { _maxDrawn = config.maxDrawn; _stateSort = config.stateSort; }
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const render::ItemBounds& inItems);
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs);

protected:
    render::ShapePlumberPointer _shapePlumber;

@ -109,15 +130,17 @@ protected:
    bool _stateSort;
};

class DeferredFramebuffer;
class DrawStencilDeferred {
public:
    using JobModel = render::Job::Model<DrawStencilDeferred>;
    using JobModel = render::Job::ModelI<DrawStencilDeferred, std::shared_ptr<DeferredFramebuffer>>;

    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);
    static const gpu::PipelinePointer& getOpaquePipeline();
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const std::shared_ptr<DeferredFramebuffer>& deferredFramebuffer);

protected:
    static gpu::PipelinePointer _opaquePipeline; //lazy evaluation hence mutable
    gpu::PipelinePointer _opaquePipeline;

    gpu::PipelinePointer getOpaquePipeline();
};

class DrawBackgroundDeferredConfig : public render::Job::Config {

@ -133,11 +156,13 @@ protected:

class DrawBackgroundDeferred {
public:
    using Inputs = render::VaryingSet2<render::ItemBounds, LightingModelPointer>;

    using Config = DrawBackgroundDeferredConfig;
    using JobModel = render::Job::ModelI<DrawBackgroundDeferred, render::ItemBounds, Config>;
    using JobModel = render::Job::ModelI<DrawBackgroundDeferred, Inputs, Config>;

    void configure(const Config& config) {}
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const render::ItemBounds& inItems);
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs);

protected:
    gpu::RangeTimer _gpuTimer;

@ -163,13 +188,15 @@ protected:

class DrawOverlay3D {
public:
    using Inputs = render::VaryingSet2<render::ItemBounds, LightingModelPointer>;

    using Config = DrawOverlay3DConfig;
    using JobModel = render::Job::ModelI<DrawOverlay3D, render::ItemBounds, Config>;
    using JobModel = render::Job::ModelI<DrawOverlay3D, Inputs, Config>;

    DrawOverlay3D(bool opaque);

    void configure(const Config& config) { _maxDrawn = config.maxDrawn; }
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const render::ItemBounds& inItems);
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs);

protected:
    render::ShapePlumberPointer _shapePlumber;

@ -179,18 +206,34 @@ protected:

class Blit {
public:
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);
    using JobModel = render::Job::ModelI<Blit, gpu::FramebufferPointer>;

    using JobModel = render::Job::Model<Blit>;
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& srcFramebuffer);
};

class RenderDeferredTaskConfig : public render::Task::Config {
    Q_OBJECT
    Q_PROPERTY(double gpuTime READ getGpuTime)
public:
    double getGpuTime() { return gpuTime; }

protected:
    friend class RenderDeferredTask;
    double gpuTime;
};

class RenderDeferredTask : public render::Task {
public:
    using Config = RenderDeferredTaskConfig;
    RenderDeferredTask(render::CullFunctor cullFunctor);

    void configure(const Config& config) {}
    void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);

    using JobModel = Model<RenderDeferredTask>;
    using JobModel = Model<RenderDeferredTask, Config>;

protected:
    gpu::RangeTimer _gpuTimer;
};

#endif // hifi_RenderDeferredTask_h
@ -45,25 +45,9 @@
#include "overlay3D_unlit_frag.h"
#include "overlay3D_translucent_unlit_frag.h"

#include "drawOpaqueStencil_frag.h"

using namespace render;

void initStencilPipeline(gpu::PipelinePointer& pipeline) {
    const gpu::int8 STENCIL_OPAQUE = 1;
    auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
    auto ps = gpu::Shader::createPixel(std::string(drawOpaqueStencil_frag));
    auto program = gpu::Shader::createProgram(vs, ps);
    gpu::Shader::makeProgram((*program));

    auto state = std::make_shared<gpu::State>();
    state->setDepthTest(true, false, gpu::LESS_EQUAL);
    state->setStencilTest(true, 0xFF, gpu::State::StencilTest(STENCIL_OPAQUE, 0xFF, gpu::ALWAYS, gpu::State::STENCIL_OP_REPLACE, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_REPLACE));
    state->setColorWriteMask(0);

    pipeline = gpu::Pipeline::create(program, state);
}

gpu::BufferView getDefaultMaterialBuffer() {
    model::Material::Schema schema;
    schema._albedo = vec3(1.0f);
@ -85,8 +85,7 @@ void RenderShadowMap::run(const render::SceneContextPointer& sceneContext, const
    });
}

// The shadow task *must* use this base ctor to initialize with its own Config, see Task.h
RenderShadowTask::RenderShadowTask(CullFunctor cullFunctor) : Task(std::make_shared<Config>()) {
RenderShadowTask::RenderShadowTask(CullFunctor cullFunctor) {
    cullFunctor = cullFunctor ? cullFunctor : [](const RenderArgs*, const AABox&){ return true; };

    // Prepare the ShapePipeline
593
libraries/render-utils/src/SubsurfaceScattering.cpp
Normal file
@ -0,0 +1,593 @@
//
//  SubsurfaceScattering.cpp
//  libraries/render-utils/src/
//
//  Created by Sam Gateau 6/3/2016.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "SubsurfaceScattering.h"

#include <gpu/Context.h>
#include <gpu/StandardShaderLib.h>

#include "FramebufferCache.h"

#include "DeferredLightingEffect.h"

#include "subsurfaceScattering_makeProfile_frag.h"
#include "subsurfaceScattering_makeLUT_frag.h"
#include "subsurfaceScattering_makeSpecularBeckmann_frag.h"

#include "subsurfaceScattering_drawScattering_frag.h"

enum ScatteringShaderBufferSlots {
    ScatteringTask_FrameTransformSlot = 0,
    ScatteringTask_ParamSlot,
    ScatteringTask_LightSlot,
};
enum ScatteringShaderMapSlots {
    ScatteringTask_ScatteringTableSlot = 0,
    ScatteringTask_CurvatureMapSlot,
    ScatteringTask_DiffusedCurvatureMapSlot,
    ScatteringTask_NormalMapSlot,

    ScatteringTask_AlbedoMapSlot,
    ScatteringTask_LinearMapSlot,

    ScatteringTask_IBLMapSlot,

};

SubsurfaceScatteringResource::SubsurfaceScatteringResource() {
    Parameters parameters;
    _parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) &parameters));

}

void SubsurfaceScatteringResource::setBentNormalFactors(const glm::vec4& rgbsBentFactors) {
    if (rgbsBentFactors != getBentNormalFactors()) {
        _parametersBuffer.edit<Parameters>().normalBentInfo = rgbsBentFactors;
    }
}

glm::vec4 SubsurfaceScatteringResource::getBentNormalFactors() const {
    return _parametersBuffer.get<Parameters>().normalBentInfo;
}

void SubsurfaceScatteringResource::setCurvatureFactors(const glm::vec2& sbCurvatureFactors) {
    if (sbCurvatureFactors != getCurvatureFactors()) {
        _parametersBuffer.edit<Parameters>().curvatureInfo = sbCurvatureFactors;
    }
}

glm::vec2 SubsurfaceScatteringResource::getCurvatureFactors() const {
    return _parametersBuffer.get<Parameters>().curvatureInfo;
}


void SubsurfaceScatteringResource::setLevel(float level) {
    if (level != getLevel()) {
        _parametersBuffer.edit<Parameters>().level = level;
    }
}
float SubsurfaceScatteringResource::getLevel() const {
    return _parametersBuffer.get<Parameters>().level;
}

void SubsurfaceScatteringResource::setShowBRDF(bool show) {
    if (show != isShowBRDF()) {
        _parametersBuffer.edit<Parameters>().showBRDF = show;
    }
}
bool SubsurfaceScatteringResource::isShowBRDF() const {
    return (bool)_parametersBuffer.get<Parameters>().showBRDF;
}

void SubsurfaceScatteringResource::setShowCurvature(bool show) {
    if (show != isShowCurvature()) {
        _parametersBuffer.edit<Parameters>().showCurvature = show;
    }
}
bool SubsurfaceScatteringResource::isShowCurvature() const {
    return (bool)_parametersBuffer.get<Parameters>().showCurvature;
}

void SubsurfaceScatteringResource::setShowDiffusedNormal(bool show) {
    if (show != isShowDiffusedNormal()) {
        _parametersBuffer.edit<Parameters>().showDiffusedNormal = show;
    }
}
bool SubsurfaceScatteringResource::isShowDiffusedNormal() const {
    return (bool)_parametersBuffer.get<Parameters>().showDiffusedNormal;
}

void SubsurfaceScatteringResource::generateScatteringTable(RenderArgs* args) {
    if (!_scatteringProfile) {
        _scatteringProfile = generateScatteringProfile(args);
    }
    if (!_scatteringTable) {
        _scatteringTable = generatePreIntegratedScattering(_scatteringProfile, args);
    }
    if (!_scatteringSpecular) {
        _scatteringSpecular = generateScatteringSpecularBeckmann(args);
    }
}

SubsurfaceScattering::SubsurfaceScattering() {
    _scatteringResource = std::make_shared<SubsurfaceScatteringResource>();
}

void SubsurfaceScattering::configure(const Config& config) {

    glm::vec4 bentInfo(config.bentRed, config.bentGreen, config.bentBlue, config.bentScale);
    _scatteringResource->setBentNormalFactors(bentInfo);

    glm::vec2 curvatureInfo(config.curvatureOffset, config.curvatureScale);
    _scatteringResource->setCurvatureFactors(curvatureInfo);

    _scatteringResource->setLevel((float)config.enableScattering);
    _scatteringResource->setShowBRDF(config.showScatteringBRDF);
    _scatteringResource->setShowCurvature(config.showCurvature);
    _scatteringResource->setShowDiffusedNormal(config.showDiffusedNormal);
}

void SubsurfaceScattering::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, Outputs& outputs) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    if (!_scatteringResource->getScatteringTable()) {
        _scatteringResource->generateScatteringTable(renderContext->args);
    }

    outputs = _scatteringResource;
}

#ifdef GENERATE_SCATTERING_RESOURCE_ON_CPU

// Reference: http://www.altdevblogaday.com/2011/12/31/skin-shading-in-unity3d/
#include <cstdio>
#include <cmath>
#include <algorithm>

#define _PI 3.14159265358979523846

using namespace std;

double gaussian(float v, float r) {
    double g = (1.0 / sqrt(2.0 * _PI * v)) * exp(-(r*r) / (2.0 * v));
    return g;
}

vec3 scatter(double r) {
    // Values from GPU Gems 3 "Advanced Skin Rendering".
    // Originally taken from real life samples.
    static const double profile[][4] = {
        { 0.0064, 0.233, 0.455, 0.649 },
        { 0.0484, 0.100, 0.336, 0.344 },
        { 0.1870, 0.118, 0.198, 0.000 },
        { 0.5670, 0.113, 0.007, 0.007 },
        { 1.9900, 0.358, 0.004, 0.000 },
        { 7.4100, 0.078, 0.000, 0.000 }
    };
    static const int profileNum = 6;
    vec3 ret(0.0);
    for (int i = 0; i < profileNum; i++) {
        double g = gaussian(profile[i][0] * 1.414f, r);
        ret.x += g * profile[i][1];
        ret.y += g * profile[i][2];
        ret.z += g * profile[i][3];
    }

    return ret;
}
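// Note (added for clarity): scatter(r) evaluates the GPU Gems 3 three-channel diffusion profile
// as a sum of six Gaussians at distance r; integrate() below convolves that profile around a
// ring of radius skinRadius to pre-integrate the diffuse falloff stored in the scattering LUT.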

vec3 integrate(double cosTheta, double skinRadius) {
    // Angle from lighting direction.
    double theta = acos(cosTheta);
    vec3 totalWeights(0.0);
    vec3 totalLight(0.0);
    vec3 skinColour(1.0);

    double a = -(_PI);

    double inc = 0.005;

    while (a <= (_PI)) {
        double sampleAngle = theta + a;
        double diffuse = cos(sampleAngle);
        if (diffuse < 0.0) diffuse = 0.0;
        if (diffuse > 1.0) diffuse = 1.0;

        // Distance.
        double sampleDist = abs(2.0 * skinRadius * sin(a * 0.5));

        // Profile Weight.
        vec3 weights = scatter(sampleDist);

        totalWeights += weights;
        totalLight.x += diffuse * weights.x * (skinColour.x * skinColour.x);
        totalLight.y += diffuse * weights.y * (skinColour.y * skinColour.y);
        totalLight.z += diffuse * weights.z * (skinColour.z * skinColour.z);
        a += inc;
    }

    vec3 result;
    result.x = totalLight.x / totalWeights.x;
    result.y = totalLight.y / totalWeights.y;
    result.z = totalLight.z / totalWeights.z;

    return result;
}

void diffuseScatter(gpu::TexturePointer& lut) {
    int width = lut->getWidth();
    int height = lut->getHeight();

    const int COMPONENT_COUNT = 4;
    std::vector<unsigned char> bytes(COMPONENT_COUNT * height * width);

    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            // Lookup by: x: NDotL y: 1 / r
            float y = 2.0 * 1.0 / ((j + 1.0) / (double)height);
            float x = ((i / (double)width) * 2.0) - 1.0;
            vec3 val = integrate(x, y);

            // Convert to linear
            // val.x = sqrt(val.x);
            // val.y = sqrt(val.y);
            // val.z = sqrt(val.z);

            // Convert to 24-bit image.
            unsigned char valI[3];
            if (val.x > 1.0) val.x = 1.0;
            if (val.y > 1.0) val.y = 1.0;
            if (val.z > 1.0) val.z = 1.0;
            valI[0] = (unsigned char)(val.x * 256.0);
            valI[1] = (unsigned char)(val.y * 256.0);
            valI[2] = (unsigned char)(val.z * 256.0);

            bytes[COMPONENT_COUNT * index] = valI[0];
            bytes[COMPONENT_COUNT * index + 1] = valI[1];
            bytes[COMPONENT_COUNT * index + 2] = valI[2];
            bytes[COMPONENT_COUNT * index + 3] = 255.0;

            index++;
        }
    }

    lut->assignStoredMip(0, gpu::Element::COLOR_RGBA_32, bytes.size(), bytes.data());
}


void diffuseProfile(gpu::TexturePointer& profile) {
    int width = profile->getWidth();
    int height = profile->getHeight();

    const int COMPONENT_COUNT = 4;
    std::vector<unsigned char> bytes(COMPONENT_COUNT * height * width);

    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            float y = (double)(i + 1.0) / (double)width;
            vec3 val = scatter(y * 2.0f);

            // Convert to 24-bit image.
            unsigned char valI[3];
            if (val.x > 1.0) val.x = 1.0;
            if (val.y > 1.0) val.y = 1.0;
            if (val.z > 1.0) val.z = 1.0;
            valI[0] = (unsigned char)(val.x * 255.0);
            valI[1] = (unsigned char)(val.y * 255.0);
            valI[2] = (unsigned char)(val.z * 255.0);

            bytes[COMPONENT_COUNT * index] = valI[0];
            bytes[COMPONENT_COUNT * index + 1] = valI[1];
            bytes[COMPONENT_COUNT * index + 2] = valI[2];
            bytes[COMPONENT_COUNT * index + 3] = 255.0;

            index++;
        }
    }


    profile->assignStoredMip(0, gpu::Element::COLOR_RGBA_32, bytes.size(), bytes.data());
}

#endif

void diffuseProfileGPU(gpu::TexturePointer& profileMap, RenderArgs* args) {
    int width = profileMap->getWidth();
    int height = profileMap->getHeight();

    gpu::PipelinePointer makePipeline;
    {
        auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
        auto ps = gpu::Shader::createPixel(std::string(subsurfaceScattering_makeProfile_frag));
        gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);

        gpu::Shader::BindingSet slotBindings;
        gpu::Shader::makeProgram(*program, slotBindings);

        gpu::StatePointer state = gpu::StatePointer(new gpu::State());

        makePipeline = gpu::Pipeline::create(program, state);
    }

    auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
    makeFramebuffer->setRenderBuffer(0, profileMap);

    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        batch.enableStereo(false);

        batch.setViewportTransform(glm::ivec4(0, 0, width, height));

        batch.setFramebuffer(makeFramebuffer);
        batch.setPipeline(makePipeline);
        batch.draw(gpu::TRIANGLE_STRIP, 4);
        batch.setResourceTexture(0, nullptr);
        batch.setPipeline(nullptr);
        batch.setFramebuffer(nullptr);
    });
}


void diffuseScatterGPU(const gpu::TexturePointer& profileMap, gpu::TexturePointer& lut, RenderArgs* args) {
    int width = lut->getWidth();
    int height = lut->getHeight();

    gpu::PipelinePointer makePipeline;
    {
        auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
        auto ps = gpu::Shader::createPixel(std::string(subsurfaceScattering_makeLUT_frag));
        gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);

        gpu::Shader::BindingSet slotBindings;
        slotBindings.insert(gpu::Shader::Binding(std::string("scatteringProfile"), 0));
        gpu::Shader::makeProgram(*program, slotBindings);

        gpu::StatePointer state = gpu::StatePointer(new gpu::State());

        makePipeline = gpu::Pipeline::create(program, state);
    }

    auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
    makeFramebuffer->setRenderBuffer(0, lut);

    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        batch.enableStereo(false);

        batch.setViewportTransform(glm::ivec4(0, 0, width, height));

        batch.setFramebuffer(makeFramebuffer);
        batch.setPipeline(makePipeline);
        batch.setResourceTexture(0, profileMap);
        batch.draw(gpu::TRIANGLE_STRIP, 4);
        batch.setResourceTexture(0, nullptr);
        batch.setPipeline(nullptr);
        batch.setFramebuffer(nullptr);

    });
}

void computeSpecularBeckmannGPU(gpu::TexturePointer& beckmannMap, RenderArgs* args) {
    int width = beckmannMap->getWidth();
    int height = beckmannMap->getHeight();

    gpu::PipelinePointer makePipeline;
    {
        auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
        auto ps = gpu::Shader::createPixel(std::string(subsurfaceScattering_makeSpecularBeckmann_frag));
        gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);

        gpu::Shader::BindingSet slotBindings;
        gpu::Shader::makeProgram(*program, slotBindings);

        gpu::StatePointer state = gpu::StatePointer(new gpu::State());

        makePipeline = gpu::Pipeline::create(program, state);
    }

    auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
    makeFramebuffer->setRenderBuffer(0, beckmannMap);

    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        batch.enableStereo(false);

        batch.setViewportTransform(glm::ivec4(0, 0, width, height));

        batch.setFramebuffer(makeFramebuffer);
        batch.setPipeline(makePipeline);
        batch.draw(gpu::TRIANGLE_STRIP, 4);
        batch.setResourceTexture(0, nullptr);
        batch.setPipeline(nullptr);
        batch.setFramebuffer(nullptr);
    });
}

gpu::TexturePointer SubsurfaceScatteringResource::generateScatteringProfile(RenderArgs* args) {
    const int PROFILE_RESOLUTION = 512;
    // const auto pixelFormat = gpu::Element::COLOR_SRGBA_32;
    const auto pixelFormat = gpu::Element::COLOR_R11G11B10;
    auto profileMap = gpu::TexturePointer(gpu::Texture::create2D(pixelFormat, PROFILE_RESOLUTION, 1, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
|
||||
diffuseProfileGPU(profileMap, args);
|
||||
return profileMap;
|
||||
}
|
||||
|
||||
gpu::TexturePointer SubsurfaceScatteringResource::generatePreIntegratedScattering(const gpu::TexturePointer& profile, RenderArgs* args) {
|
||||
|
||||
const int TABLE_RESOLUTION = 512;
|
||||
// const auto pixelFormat = gpu::Element::COLOR_SRGBA_32;
|
||||
const auto pixelFormat = gpu::Element::COLOR_R11G11B10;
|
||||
auto scatteringLUT = gpu::TexturePointer(gpu::Texture::create2D(pixelFormat, TABLE_RESOLUTION, TABLE_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
|
||||
//diffuseScatter(scatteringLUT);
|
||||
diffuseScatterGPU(profile, scatteringLUT, args);
|
||||
return scatteringLUT;
|
||||
}
|
||||
|
||||
gpu::TexturePointer SubsurfaceScatteringResource::generateScatteringSpecularBeckmann(RenderArgs* args) {
|
||||
const int SPECULAR_RESOLUTION = 256;
|
||||
auto beckmannMap = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32 /*gpu::Element(gpu::SCALAR, gpu::HALF, gpu::RGB)*/, SPECULAR_RESOLUTION, SPECULAR_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
|
||||
computeSpecularBeckmannGPU(beckmannMap, args);
|
||||
return beckmannMap;
|
||||
}
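The header further down declares SubsurfaceScatteringResource::generateScatteringTable(RenderArgs*), whose body is outside this hunk. Given the three static generators above, a minimal sketch of such a function, assuming it only lazily fills the cached textures (an assumption, not the code from this change), could look like:
// Sketch only: assumes a lazy fill of the three cached textures using the
// static generators defined above; the real implementation is not shown here.
void SubsurfaceScatteringResource::generateScatteringTable(RenderArgs* args) {
    if (!_scatteringProfile) {
        _scatteringProfile = generateScatteringProfile(args);
    }
    if (!_scatteringTable) {
        _scatteringTable = generatePreIntegratedScattering(_scatteringProfile, args);
    }
    if (!_scatteringSpecular) {
        _scatteringSpecular = generateScatteringSpecularBeckmann(args);
    }
}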
|
||||
|
||||
DebugSubsurfaceScattering::DebugSubsurfaceScattering() {
|
||||
}
|
||||
|
||||
void DebugSubsurfaceScattering::configure(const Config& config) {
|
||||
|
||||
_showProfile = config.showProfile;
|
||||
_showLUT = config.showLUT;
|
||||
_showSpecularTable = config.showSpecularTable;
|
||||
_showCursorPixel = config.showCursorPixel;
|
||||
_debugCursorTexcoord = config.debugCursorTexcoord;
|
||||
}
|
||||
|
||||
|
||||
|
||||
gpu::PipelinePointer DebugSubsurfaceScattering::getScatteringPipeline() {
|
||||
if (!_scatteringPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
|
||||
auto ps = gpu::Shader::createPixel(std::string(subsurfaceScattering_drawScattering_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), ScatteringTask_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("scatteringParamsBuffer"), ScatteringTask_ParamSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("lightBuffer"), ScatteringTask_LightSlot));
|
||||
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("scatteringLUT"), ScatteringTask_ScatteringTableSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("curvatureMap"), ScatteringTask_CurvatureMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("diffusedCurvatureMap"), ScatteringTask_DiffusedCurvatureMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), ScatteringTask_NormalMapSlot));
|
||||
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("albedoMap"), ScatteringTask_AlbedoMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("linearDepthMap"), ScatteringTask_LinearMapSlot));
|
||||
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("skyboxMap"), ScatteringTask_IBLMapSlot));
|
||||
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
_scatteringPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
return _scatteringPipeline;
|
||||
}
|
||||
|
||||
|
||||
gpu::PipelinePointer _showLUTPipeline;
|
||||
gpu::PipelinePointer getShowLUTPipeline();
|
||||
gpu::PipelinePointer DebugSubsurfaceScattering::getShowLUTPipeline() {
|
||||
if (!_showLUTPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawUnitQuadTexcoordVS();
|
||||
auto ps = gpu::StandardShaderLib::getDrawTextureOpaquePS();
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
_showLUTPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
return _showLUTPipeline;
|
||||
}
|
||||
|
||||
|
||||
void DebugSubsurfaceScattering::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs) {
|
||||
assert(renderContext->args);
|
||||
assert(renderContext->args->hasViewFrustum());
|
||||
|
||||
RenderArgs* args = renderContext->args;
|
||||
|
||||
|
||||
auto& frameTransform = inputs.get0();
|
||||
auto& deferredFramebuffer = inputs.get1();
|
||||
|
||||
auto& surfaceGeometryFramebuffer = inputs.get3();
|
||||
auto curvatureFramebuffer = surfaceGeometryFramebuffer->getCurvatureFramebuffer();
|
||||
auto linearDepthTexture = surfaceGeometryFramebuffer->getLinearDepthTexture();
|
||||
|
||||
auto& diffusedFramebuffer = inputs.get4();
|
||||
auto& scatteringResource = inputs.get5();
|
||||
|
||||
if (!scatteringResource) {
|
||||
return;
|
||||
}
|
||||
auto scatteringProfile = scatteringResource->getScatteringProfile();
|
||||
auto scatteringTable = scatteringResource->getScatteringTable();
|
||||
auto scatteringSpecular = scatteringResource->getScatteringSpecular();
|
||||
|
||||
|
||||
|
||||
|
||||
const auto theLight = DependencyManager::get<DeferredLightingEffect>()->getLightStage().lights[0];
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
|
||||
|
||||
auto viewportSize = std::min(args->_viewport.z, args->_viewport.w) >> 1;
|
||||
auto offsetViewport = viewportSize * 0.1;
|
||||
|
||||
if (_showProfile) {
|
||||
batch.setViewportTransform(glm::ivec4(0, 0, viewportSize, offsetViewport));
|
||||
batch.setPipeline(getShowLUTPipeline());
|
||||
batch.setResourceTexture(0, scatteringProfile);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
}
|
||||
|
||||
if (_showLUT) {
|
||||
batch.setViewportTransform(glm::ivec4(0, offsetViewport * 1.5, viewportSize, viewportSize));
|
||||
batch.setPipeline(getShowLUTPipeline());
|
||||
batch.setResourceTexture(0, scatteringTable);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
if (_showCursorPixel) {
|
||||
|
||||
auto debugScatteringPipeline = getScatteringPipeline();
|
||||
batch.setPipeline(debugScatteringPipeline);
|
||||
|
||||
Transform model;
|
||||
model.setTranslation(glm::vec3(0.0, offsetViewport * 1.5 / args->_viewport.w, 0.0));
|
||||
model.setScale(glm::vec3(viewportSize / (float)args->_viewport.z, viewportSize / (float)args->_viewport.w, 1.0));
|
||||
batch.setModelTransform(model);
|
||||
|
||||
batch.setUniformBuffer(ScatteringTask_FrameTransformSlot, frameTransform->getFrameTransformBuffer());
|
||||
batch.setUniformBuffer(ScatteringTask_ParamSlot, scatteringResource->getParametersBuffer());
|
||||
if (theLight->light) {
|
||||
batch.setUniformBuffer(ScatteringTask_LightSlot, theLight->light->getSchemaBuffer());
|
||||
}
|
||||
batch.setResourceTexture(ScatteringTask_ScatteringTableSlot, scatteringTable);
|
||||
batch.setResourceTexture(ScatteringTask_CurvatureMapSlot, curvatureFramebuffer->getRenderBuffer(0));
|
||||
batch.setResourceTexture(ScatteringTask_DiffusedCurvatureMapSlot, diffusedFramebuffer->getRenderBuffer(0));
|
||||
batch.setResourceTexture(ScatteringTask_NormalMapSlot, deferredFramebuffer->getDeferredNormalTexture());
|
||||
batch.setResourceTexture(ScatteringTask_AlbedoMapSlot, deferredFramebuffer->getDeferredColorTexture());
|
||||
batch.setResourceTexture(ScatteringTask_LinearMapSlot, linearDepthTexture);
|
||||
|
||||
|
||||
batch._glUniform2f(debugScatteringPipeline->getProgram()->getUniforms().findLocation("uniformCursorTexcoord"), _debugCursorTexcoord.x, _debugCursorTexcoord.y);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
}
|
||||
}
|
||||
|
||||
if (_showSpecularTable) {
|
||||
batch.setViewportTransform(glm::ivec4(viewportSize + offsetViewport * 0.5, 0, viewportSize * 0.5, viewportSize * 0.5));
|
||||
batch.setPipeline(getShowLUTPipeline());
|
||||
batch.setResourceTexture(0, scatteringSpecular);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
}
|
||||
|
||||
batch.setViewportTransform(args->_viewport);
|
||||
|
||||
});
|
||||
}
|
189
libraries/render-utils/src/SubsurfaceScattering.h
Normal file
|
@ -0,0 +1,189 @@
|
|||
//
|
||||
// SubsurfaceScattering.h
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 6/3/2016.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_SubsurfaceScattering_h
|
||||
#define hifi_SubsurfaceScattering_h
|
||||
|
||||
#include <DependencyManager.h>
|
||||
|
||||
#include "render/DrawTask.h"
|
||||
#include "DeferredFrameTransform.h"
|
||||
#include "DeferredFramebuffer.h"
|
||||
#include "SurfaceGeometryPass.h"
|
||||
#include "LightingModel.h"
|
||||
|
||||
class SubsurfaceScatteringResource {
|
||||
public:
|
||||
using UniformBufferView = gpu::BufferView;
|
||||
|
||||
SubsurfaceScatteringResource();
|
||||
|
||||
void setBentNormalFactors(const glm::vec4& rgbsBentFactors);
|
||||
glm::vec4 getBentNormalFactors() const;
|
||||
|
||||
void setCurvatureFactors(const glm::vec2& sbCurvatureFactors);
|
||||
glm::vec2 getCurvatureFactors() const;
|
||||
|
||||
void setLevel(float level);
|
||||
float getLevel() const;
|
||||
|
||||
|
||||
void setShowBRDF(bool show);
|
||||
bool isShowBRDF() const;
|
||||
void setShowCurvature(bool show);
|
||||
bool isShowCurvature() const;
|
||||
void setShowDiffusedNormal(bool show);
|
||||
bool isShowDiffusedNormal() const;
|
||||
|
||||
UniformBufferView getParametersBuffer() const { return _parametersBuffer; }
|
||||
|
||||
gpu::TexturePointer getScatteringProfile() const { return _scatteringProfile; }
|
||||
gpu::TexturePointer getScatteringTable() const { return _scatteringTable; }
|
||||
gpu::TexturePointer getScatteringSpecular() const { return _scatteringSpecular; }
|
||||
|
||||
void generateScatteringTable(RenderArgs* args);
|
||||
|
||||
static gpu::TexturePointer generateScatteringProfile(RenderArgs* args);
|
||||
static gpu::TexturePointer generatePreIntegratedScattering(const gpu::TexturePointer& profile, RenderArgs* args);
|
||||
static gpu::TexturePointer generateScatteringSpecularBeckmann(RenderArgs* args);
|
||||
|
||||
protected:
|
||||
|
||||
|
||||
// Class describing the uniform buffer holding the subsurface scattering parameters
|
||||
// It is updated every frame
|
||||
class Parameters {
|
||||
public:
|
||||
glm::vec4 normalBentInfo{ 1.5f, 0.8f, 0.3f, 1.5f };
|
||||
glm::vec2 curvatureInfo{ 0.08f, 0.8f };
|
||||
float level{ 1.0f };
|
||||
float showBRDF{ 0.0f };
|
||||
float showCurvature{ 0.0f };
|
||||
float showDiffusedNormal{ 0.0f };
|
||||
float spare1{ 0.0f };
|
||||
float spare2{ 0.0f };
|
||||
|
||||
|
||||
Parameters() {}
|
||||
};
|
||||
UniformBufferView _parametersBuffer;
|
||||
|
||||
|
||||
|
||||
gpu::TexturePointer _scatteringProfile;
|
||||
gpu::TexturePointer _scatteringTable;
|
||||
gpu::TexturePointer _scatteringSpecular;
|
||||
};
|
||||
|
||||
using SubsurfaceScatteringResourcePointer = std::shared_ptr<SubsurfaceScatteringResource>;
|
||||
|
||||
|
||||
|
||||
class SubsurfaceScatteringConfig : public render::Job::Config {
|
||||
Q_OBJECT
|
||||
Q_PROPERTY(float bentRed MEMBER bentRed NOTIFY dirty)
|
||||
Q_PROPERTY(float bentGreen MEMBER bentGreen NOTIFY dirty)
|
||||
Q_PROPERTY(float bentBlue MEMBER bentBlue NOTIFY dirty)
|
||||
Q_PROPERTY(float bentScale MEMBER bentScale NOTIFY dirty)
|
||||
|
||||
Q_PROPERTY(float curvatureOffset MEMBER curvatureOffset NOTIFY dirty)
|
||||
Q_PROPERTY(float curvatureScale MEMBER curvatureScale NOTIFY dirty)
|
||||
|
||||
Q_PROPERTY(bool enableScattering MEMBER enableScattering NOTIFY dirty)
|
||||
Q_PROPERTY(bool showScatteringBRDF MEMBER showScatteringBRDF NOTIFY dirty)
|
||||
Q_PROPERTY(bool showCurvature MEMBER showCurvature NOTIFY dirty)
|
||||
Q_PROPERTY(bool showDiffusedNormal MEMBER showDiffusedNormal NOTIFY dirty)
|
||||
|
||||
public:
|
||||
SubsurfaceScatteringConfig() : render::Job::Config(true) {}
|
||||
|
||||
float bentRed{ 1.5f };
|
||||
float bentGreen{ 0.8f };
|
||||
float bentBlue{ 0.3f };
|
||||
float bentScale{ 1.5f };
|
||||
|
||||
float curvatureOffset{ 0.08f };
|
||||
float curvatureScale{ 0.9f };
|
||||
|
||||
bool enableScattering{ true };
|
||||
bool showScatteringBRDF{ false };
|
||||
bool showCurvature{ false };
|
||||
bool showDiffusedNormal{ false };
|
||||
|
||||
signals:
|
||||
void dirty();
|
||||
};
|
||||
|
||||
class SubsurfaceScattering {
|
||||
public:
|
||||
//using Inputs = render::VaryingSet4<DeferredFrameTransformPointer, gpu::FramebufferPointer, gpu::FramebufferPointer, SubsurfaceScatteringResourcePointer>;
|
||||
using Outputs = SubsurfaceScatteringResourcePointer;
|
||||
using Config = SubsurfaceScatteringConfig;
|
||||
using JobModel = render::Job::ModelO<SubsurfaceScattering, Outputs, Config>;
|
||||
|
||||
SubsurfaceScattering();
|
||||
|
||||
void configure(const Config& config);
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, Outputs& outputs);
|
||||
|
||||
private:
|
||||
SubsurfaceScatteringResourcePointer _scatteringResource;
|
||||
};
|
||||
|
||||
|
||||
|
||||
class DebugSubsurfaceScatteringConfig : public render::Job::Config {
|
||||
Q_OBJECT
|
||||
|
||||
Q_PROPERTY(bool showProfile MEMBER showProfile NOTIFY dirty)
|
||||
Q_PROPERTY(bool showLUT MEMBER showLUT NOTIFY dirty)
|
||||
Q_PROPERTY(bool showSpecularTable MEMBER showSpecularTable NOTIFY dirty)
|
||||
Q_PROPERTY(bool showCursorPixel MEMBER showCursorPixel NOTIFY dirty)
|
||||
Q_PROPERTY(glm::vec2 debugCursorTexcoord MEMBER debugCursorTexcoord NOTIFY dirty)
|
||||
public:
|
||||
DebugSubsurfaceScatteringConfig() : render::Job::Config(true) {}
|
||||
|
||||
bool showProfile{ false };
|
||||
bool showLUT{ false };
|
||||
bool showSpecularTable{ false };
|
||||
bool showCursorPixel{ false };
|
||||
glm::vec2 debugCursorTexcoord{ 0.5, 0.5 };
|
||||
|
||||
signals:
|
||||
void dirty();
|
||||
};
|
||||
|
||||
class DebugSubsurfaceScattering {
|
||||
public:
|
||||
using Inputs = render::VaryingSet6<DeferredFrameTransformPointer, DeferredFramebufferPointer, LightingModelPointer, SurfaceGeometryFramebufferPointer, gpu::FramebufferPointer, SubsurfaceScatteringResourcePointer>;
|
||||
using Config = DebugSubsurfaceScatteringConfig;
|
||||
using JobModel = render::Job::ModelI<DebugSubsurfaceScattering, Inputs, Config>;
|
||||
|
||||
DebugSubsurfaceScattering();
|
||||
|
||||
void configure(const Config& config);
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs);
|
||||
|
||||
private:
|
||||
|
||||
gpu::PipelinePointer _scatteringPipeline;
|
||||
gpu::PipelinePointer getScatteringPipeline();
|
||||
|
||||
gpu::PipelinePointer _showLUTPipeline;
|
||||
gpu::PipelinePointer getShowLUTPipeline();
|
||||
bool _showProfile{ false };
|
||||
bool _showLUT{ false };
|
||||
bool _showSpecularTable{ false };
|
||||
bool _showCursorPixel{ false };
|
||||
glm::vec2 _debugCursorTexcoord;
|
||||
};
|
||||
|
||||
#endif // hifi_SubsurfaceScattering_h
|
226
libraries/render-utils/src/SubsurfaceScattering.slh
Normal file
|
@ -0,0 +1,226 @@
|
|||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 6/8/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
<@if not SUBSURFACE_SCATTERING_SLH@>
|
||||
<@def SUBSURFACE_SCATTERING_SLH@>
|
||||
|
||||
<@func declareSubsurfaceScatteringProfileSource()@>
|
||||
|
||||
float gaussian(float v, float r) {
|
||||
const float _PI = 3.14159265358979523846;
|
||||
return (1.0 / sqrt(2.0 * _PI * v)) * exp(-(r*r) / (2.0 * v));
|
||||
}
|
||||
|
||||
vec3 scatter(float r) {
|
||||
// r is the distance expressed in millimeter
|
||||
// returns the scatter reflectance
|
||||
// Values from GPU Gems 3 "Advanced Skin Rendering".
|
||||
// Originally taken from real life samples.
|
||||
const vec4 profile[6] = vec4[6](
|
||||
vec4(0.0064, 0.233, 0.455, 0.649),
|
||||
vec4(0.0484, 0.100, 0.336, 0.344),
|
||||
vec4(0.1870, 0.118, 0.198, 0.000),
|
||||
vec4(0.5670, 0.113, 0.007, 0.007),
|
||||
vec4(1.9900, 0.358, 0.004, 0.000),
|
||||
vec4(7.4100, 0.078, 0.000, 0.000)
|
||||
);
|
||||
const int profileNum = 6;
|
||||
|
||||
vec3 ret = vec3(0.0);
|
||||
for (int i = 0; i < profileNum; i++) {
|
||||
float v = profile[i].x * 1.414;
|
||||
float g = gaussian(v, r);
|
||||
ret += g * profile[i].yzw;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
<@endfunc@>
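Written out, the scatter() source above is the familiar sum-of-Gaussians skin diffusion profile, with r in millimetres and (v_i, w_i) the variance / RGB-weight pairs of the six-entry table:
R(r) = \sum_{i=1}^{6} w_i \, G(1.414\, v_i, r), \qquad G(v, r) = \frac{1}{\sqrt{2 \pi v}}\, e^{-r^2 / (2 v)}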
|
||||
|
||||
<@func declareSubsurfaceScatteringGenerateProfileMap()@>
|
||||
<$declareSubsurfaceScatteringProfileSource()$>
|
||||
|
||||
vec3 generateProfile(vec2 uv) {
|
||||
return scatter(uv.x * 2.0);
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
<@func declareSubsurfaceScatteringProfileMap()@>
|
||||
|
||||
uniform sampler2D scatteringProfile;
|
||||
|
||||
vec3 scatter(float r) {
|
||||
return texture(scatteringProfile, vec2(r * 0.5, 0.5)).rgb;
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
|
||||
<@func declareSkinSpecularLighting()@>
|
||||
|
||||
uniform sampler2D scatteringSpecularBeckmann;
|
||||
|
||||
float fetchSpecularBeckmann(float ndoth, float roughness) {
|
||||
return pow( 2.0 * texture(scatteringSpecularBeckmann, vec2(ndoth, roughness)).r, 10.0);
|
||||
}
|
||||
|
||||
float fresnelReflectance(vec3 H, vec3 V, float Fo) {
|
||||
float base = 1.0 - dot(V, H);
|
||||
float exponential = pow(base, 5.0);
|
||||
return exponential + Fo * (1.0 - exponential);
|
||||
}
|
||||
|
||||
float skinSpecular(vec3 N, vec3 L, vec3 V, float roughness, float intensity) {
|
||||
float result = 0.0;
|
||||
float ndotl = dot(N, L);
|
||||
if (ndotl > 0.0) {
|
||||
vec3 h = L + V;
|
||||
vec3 H = normalize(h);
|
||||
float ndoth = dot(N, H);
|
||||
float PH = fetchSpecularBeckmann(ndoth, roughness);
|
||||
float F = fresnelReflectance(H, V, 0.028);
|
||||
float frSpec = max(PH * F / dot(h, h), 0.0);
|
||||
result = ndotl * intensity * frSpec;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
<@endfunc@>
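In equation form, the specular term above is a Kelemen/Szirmay-Kalos style skin highlight (the name is inferred from the shape of the code and the Beckmann texture, it is not stated in this hunk): with h = L + V, H = h / |h|, P_H the value fetched from scatteringSpecularBeckmann, and F the Schlick Fresnel term with F_0 = 0.028,
f_{spec} = \max\!\left( \frac{P_H(N \cdot H,\ roughness)\; F(H, V)}{h \cdot h},\ 0 \right), \qquad result = (N \cdot L) \cdot intensity \cdot f_{spec}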
|
||||
|
||||
<@func declareSubsurfaceScatteringIntegrate(NumIntegrationSteps)@>
|
||||
|
||||
|
||||
vec3 integrate(float cosTheta, float skinRadius) {
|
||||
// Angle from lighting direction.
|
||||
float theta = acos(cosTheta);
|
||||
vec3 totalWeights = vec3(0.0);
|
||||
vec3 totalLight = vec3(0.0);
|
||||
|
||||
const float _PI = 3.14159265358979523846;
|
||||
const float step = 2.0 * _PI / <$NumIntegrationSteps$>;
|
||||
float a = -(_PI);
|
||||
|
||||
|
||||
while (a <= (_PI)) {
|
||||
float sampleAngle = theta + a;
|
||||
float diffuse = clamp(cos(sampleAngle), 0.0, 1.0);
|
||||
//if (diffuse < 0.0) diffuse = 0.0;
|
||||
//if (diffuse > 1.0) diffuse = 1.0;
|
||||
|
||||
// Distance.
|
||||
float sampleDist = abs(2.0 * skinRadius * sin(a * 0.5));
|
||||
|
||||
// Profile Weight.
|
||||
vec3 weights = scatter(sampleDist);
|
||||
|
||||
totalWeights += weights;
|
||||
totalLight += diffuse * weights;
|
||||
a += step;
|
||||
}
|
||||
|
||||
vec3 result = (totalLight / totalWeights);
|
||||
return clamp(result, vec3(0.0), vec3(1.0));
|
||||
|
||||
}
|
||||
<@endfunc@>
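The integrate() function above is the usual pre-integration of the diffusion profile around a circle of radius skinRadius; as a continuous integral (the code approximates it with NumIntegrationSteps uniform samples of the offset angle x):
D(\theta, r) = \frac{\int_{-\pi}^{\pi} \operatorname{saturate}(\cos(\theta + x))\; R(2 r \sin(x/2))\, dx}{\int_{-\pi}^{\pi} R(2 r \sin(x/2))\, dx}
This is the quantity stored in the scatteringLUT sampled by fetchBRDF() below, indexed by u = ½(N·L) + ½ and v = 2·curvature (roughly 2/r).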
|
||||
|
||||
<@func declareSubsurfaceScatteringResource()@>
|
||||
|
||||
uniform sampler2D scatteringLUT;
|
||||
|
||||
vec3 fetchBRDF(float LdotN, float curvature) {
|
||||
return texture(scatteringLUT, vec2( clamp(LdotN * 0.5 + 0.5, 0.0, 1.0), clamp(2 * curvature, 0.0, 1.0))).xyz;
|
||||
}
|
||||
|
||||
vec3 fetchBRDFSpectrum(vec3 LdotNSpectrum, float curvature) {
|
||||
return vec3(
|
||||
fetchBRDF(LdotNSpectrum.r, curvature).r,
|
||||
fetchBRDF(LdotNSpectrum.g, curvature).g,
|
||||
fetchBRDF(LdotNSpectrum.b, curvature).b);
|
||||
}
|
||||
|
||||
// Subsurface Scattering parameters
|
||||
struct ScatteringParameters {
|
||||
vec4 normalBendInfo; // R, G, B, factor
|
||||
vec4 curvatureInfo; // Offset, Scale, level
|
||||
vec4 debugFlags;
|
||||
};
|
||||
|
||||
uniform subsurfaceScatteringParametersBuffer {
|
||||
ScatteringParameters parameters;
|
||||
};
|
||||
|
||||
vec3 getBendFactor() {
|
||||
return parameters.normalBendInfo.xyz * parameters.normalBendInfo.w;
|
||||
}
|
||||
|
||||
float getScatteringLevel() {
|
||||
return parameters.curvatureInfo.z;
|
||||
}
|
||||
|
||||
bool showBRDF() {
|
||||
return parameters.curvatureInfo.w > 0.0;
|
||||
}
|
||||
|
||||
bool showCurvature() {
|
||||
return parameters.debugFlags.x > 0.0;
|
||||
}
|
||||
bool showDiffusedNormal() {
|
||||
return parameters.debugFlags.y > 0.0;
|
||||
}
|
||||
|
||||
|
||||
float tuneCurvatureUnsigned(float curvature) {
|
||||
return abs(curvature) * parameters.curvatureInfo.y + parameters.curvatureInfo.x;
|
||||
}
|
||||
|
||||
float unpackCurvature(float packedCurvature) {
|
||||
return (packedCurvature * 2 - 1);
|
||||
}
|
||||
|
||||
vec3 evalScatteringBentNdotL(vec3 normal, vec3 midNormal, vec3 lowNormal, vec3 lightDir) {
|
||||
vec3 bendFactorSpectrum = getBendFactor();
|
||||
// vec3 rN = normalize(mix(normal, lowNormal, bendFactorSpectrum.x));
|
||||
vec3 rN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.x));
|
||||
vec3 gN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.y));
|
||||
vec3 bN = normalize(mix(midNormal, lowNormal, bendFactorSpectrum.z));
|
||||
|
||||
vec3 NdotLSpectrum = vec3(dot(rN, lightDir), dot(gN, lightDir), dot(bN, lightDir));
|
||||
|
||||
return NdotLSpectrum;
|
||||
}
|
||||
|
||||
|
||||
|
||||
<@endfunc@>
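For reference, evalScatteringBentNdotL() above bends the normal toward the diffused (low-frequency) normal by a different amount per color channel; with \beta = normalBendInfo.rgb \cdot normalBendInfo.w it evaluates
N_c = \operatorname{normalize}\big( \operatorname{mix}(N_{mid}, N_{low}, \beta_c) \big), \qquad (N \cdot L)_c = N_c \cdot L, \quad c \in \{r, g, b\}
and this per-channel N·L spectrum is what evalSkinBRDF() looks up in the pre-integrated table.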
|
||||
|
||||
<@func declareSubsurfaceScatteringBRDF()@>
|
||||
<$declareSubsurfaceScatteringResource()$>
|
||||
|
||||
vec3 evalSkinBRDF(vec3 lightDir, vec3 normal, vec3 midNormal, vec3 lowNormal, float curvature) {
|
||||
if (showDiffusedNormal()) {
|
||||
return lowNormal * 0.5 + vec3(0.5);
|
||||
}
|
||||
if (showCurvature()) {
|
||||
return (curvature > 0 ? vec3(curvature, 0.0, 0.0) : vec3(0.0, 0.0, -curvature));
|
||||
}
|
||||
|
||||
vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, lightDir);
|
||||
|
||||
float tunedCurvature = tuneCurvatureUnsigned(curvature);
|
||||
|
||||
vec3 brdf = fetchBRDFSpectrum(bentNdotL, tunedCurvature);
|
||||
return brdf;
|
||||
}
|
||||
|
||||
<@endfunc@>
|
||||
|
||||
<@endif@>
|
16
libraries/render-utils/src/SurfaceGeometry.slh
Normal file
|
@ -0,0 +1,16 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 6/3/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include DeferredTransform.slh@>
|
||||
<$declareDeferredFrameTransform()$>
|
||||
|
||||
|
||||
|
553
libraries/render-utils/src/SurfaceGeometryPass.cpp
Normal file
|
@ -0,0 +1,553 @@
|
|||
//
|
||||
// SurfaceGeometryPass.cpp
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 6/3/2016.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "SurfaceGeometryPass.h"
|
||||
|
||||
#include <limits>
|
||||
|
||||
#include <gpu/Context.h>
|
||||
#include <gpu/StandardShaderLib.h>
|
||||
|
||||
|
||||
const int DepthLinearPass_FrameTransformSlot = 0;
|
||||
const int DepthLinearPass_DepthMapSlot = 0;
|
||||
const int DepthLinearPass_NormalMapSlot = 1;
|
||||
|
||||
const int SurfaceGeometryPass_FrameTransformSlot = 0;
|
||||
const int SurfaceGeometryPass_ParamsSlot = 1;
|
||||
const int SurfaceGeometryPass_DepthMapSlot = 0;
|
||||
const int SurfaceGeometryPass_NormalMapSlot = 1;
|
||||
|
||||
#include "surfaceGeometry_makeLinearDepth_frag.h"
|
||||
#include "surfaceGeometry_downsampleDepthNormal_frag.h"
|
||||
|
||||
#include "surfaceGeometry_makeCurvature_frag.h"
|
||||
|
||||
|
||||
|
||||
LinearDepthFramebuffer::LinearDepthFramebuffer() {
|
||||
}
|
||||
|
||||
|
||||
void LinearDepthFramebuffer::updatePrimaryDepth(const gpu::TexturePointer& depthBuffer) {
|
||||
//If the depth buffer or size changed, we need to delete our FBOs
|
||||
bool reset = false;
|
||||
if ((_primaryDepthTexture != depthBuffer)) {
|
||||
_primaryDepthTexture = depthBuffer;
|
||||
reset = true;
|
||||
}
|
||||
if (_primaryDepthTexture) {
|
||||
auto newFrameSize = glm::ivec2(_primaryDepthTexture->getDimensions());
|
||||
if (_frameSize != newFrameSize) {
|
||||
_frameSize = newFrameSize;
|
||||
_halfFrameSize = newFrameSize >> 1;
|
||||
|
||||
reset = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (reset) {
|
||||
clear();
|
||||
}
|
||||
}
|
||||
|
||||
void LinearDepthFramebuffer::clear() {
|
||||
_linearDepthFramebuffer.reset();
|
||||
_linearDepthTexture.reset();
|
||||
}
|
||||
|
||||
void LinearDepthFramebuffer::allocate() {
|
||||
|
||||
auto width = _frameSize.x;
|
||||
auto height = _frameSize.y;
|
||||
|
||||
// For Linear Depth:
|
||||
_linearDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), width, height,
|
||||
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
|
||||
// _linearDepthTexture->autoGenerateMips(1);
|
||||
_linearDepthFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_linearDepthFramebuffer->setRenderBuffer(0, _linearDepthTexture);
|
||||
_linearDepthFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, _primaryDepthTexture->getTexelFormat());
|
||||
|
||||
// For Downsampling:
|
||||
_halfLinearDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
|
||||
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
|
||||
|
||||
_halfNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
|
||||
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
|
||||
|
||||
_downsampleFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_downsampleFramebuffer->setRenderBuffer(0, _halfLinearDepthTexture);
|
||||
_downsampleFramebuffer->setRenderBuffer(1, _halfNormalTexture);
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer LinearDepthFramebuffer::getLinearDepthFramebuffer() {
|
||||
if (!_linearDepthFramebuffer) {
|
||||
allocate();
|
||||
}
|
||||
return _linearDepthFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer LinearDepthFramebuffer::getLinearDepthTexture() {
|
||||
if (!_linearDepthTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _linearDepthTexture;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer LinearDepthFramebuffer::getDownsampleFramebuffer() {
|
||||
if (!_downsampleFramebuffer) {
|
||||
allocate();
|
||||
}
|
||||
return _downsampleFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer LinearDepthFramebuffer::getHalfLinearDepthTexture() {
|
||||
if (!_halfLinearDepthTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _halfLinearDepthTexture;
|
||||
}
|
||||
|
||||
gpu::TexturePointer LinearDepthFramebuffer::getHalfNormalTexture() {
|
||||
if (!_halfNormalTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _halfNormalTexture;
|
||||
}
|
||||
|
||||
|
||||
LinearDepthPass::LinearDepthPass() {
|
||||
}
|
||||
|
||||
void LinearDepthPass::configure(const Config& config) {
|
||||
}
|
||||
|
||||
void LinearDepthPass::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
|
||||
assert(renderContext->args);
|
||||
assert(renderContext->args->hasViewFrustum());
|
||||
|
||||
RenderArgs* args = renderContext->args;
|
||||
|
||||
const auto frameTransform = inputs.get0();
|
||||
const auto deferredFramebuffer = inputs.get1();
|
||||
|
||||
if (!_linearDepthFramebuffer) {
|
||||
_linearDepthFramebuffer = std::make_shared<LinearDepthFramebuffer>();
|
||||
}
|
||||
_linearDepthFramebuffer->updatePrimaryDepth(deferredFramebuffer->getPrimaryDepthTexture());
|
||||
|
||||
auto depthBuffer = deferredFramebuffer->getPrimaryDepthTexture();
|
||||
auto normalTexture = deferredFramebuffer->getDeferredNormalTexture();
|
||||
|
||||
auto linearDepthFBO = _linearDepthFramebuffer->getLinearDepthFramebuffer();
|
||||
auto linearDepthTexture = _linearDepthFramebuffer->getLinearDepthTexture();
|
||||
|
||||
auto downsampleFBO = _linearDepthFramebuffer->getDownsampleFramebuffer();
|
||||
auto halfLinearDepthTexture = _linearDepthFramebuffer->getHalfLinearDepthTexture();
|
||||
auto halfNormalTexture = _linearDepthFramebuffer->getHalfNormalTexture();
|
||||
|
||||
outputs.edit0() = _linearDepthFramebuffer;
|
||||
outputs.edit1() = linearDepthFBO;
|
||||
outputs.edit2() = linearDepthTexture;
|
||||
outputs.edit3() = halfLinearDepthTexture;
|
||||
outputs.edit4() = halfNormalTexture;
|
||||
|
||||
auto linearDepthPipeline = getLinearDepthPipeline();
|
||||
auto downsamplePipeline = getDownsamplePipeline();
|
||||
|
||||
auto depthViewport = args->_viewport;
|
||||
auto halfViewport = depthViewport >> 1;
|
||||
float clearLinearDepth = args->getViewFrustum().getFarClip() * 2.0f;
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
_gpuTimer.begin(batch);
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setViewportTransform(depthViewport);
|
||||
batch.setProjectionTransform(glm::mat4());
|
||||
batch.setViewTransform(Transform());
|
||||
batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(_linearDepthFramebuffer->getDepthFrameSize(), depthViewport));
|
||||
|
||||
batch.setUniformBuffer(DepthLinearPass_FrameTransformSlot, frameTransform->getFrameTransformBuffer());
|
||||
|
||||
// LinearDepth
|
||||
batch.setFramebuffer(linearDepthFBO);
|
||||
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(clearLinearDepth, 0.0f, 0.0f, 0.0f));
|
||||
batch.setPipeline(linearDepthPipeline);
|
||||
batch.setResourceTexture(DepthLinearPass_DepthMapSlot, depthBuffer);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
// Downsample
|
||||
batch.setViewportTransform(halfViewport);
|
||||
|
||||
batch.setFramebuffer(downsampleFBO);
|
||||
batch.setResourceTexture(DepthLinearPass_DepthMapSlot, linearDepthTexture);
|
||||
batch.setResourceTexture(DepthLinearPass_NormalMapSlot, normalTexture);
|
||||
batch.setPipeline(downsamplePipeline);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
_gpuTimer.end(batch);
|
||||
});
|
||||
|
||||
auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
|
||||
config->gpuTime = _gpuTimer.getAverage();
|
||||
}
|
||||
|
||||
|
||||
const gpu::PipelinePointer& LinearDepthPass::getLinearDepthPipeline() {
|
||||
if (!_linearDepthPipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
|
||||
auto ps = gpu::Shader::createPixel(std::string(surfaceGeometry_makeLinearDepth_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), DepthLinearPass_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), DepthLinearPass_DepthMapSlot));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
// Stencil test the linear depth pass so it only runs on object pixels, not the background
|
||||
state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
|
||||
|
||||
state->setColorWriteMask(true, false, false, false);
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_linearDepthPipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
return _linearDepthPipeline;
|
||||
}
|
||||
|
||||
|
||||
const gpu::PipelinePointer& LinearDepthPass::getDownsamplePipeline() {
|
||||
if (!_downsamplePipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
|
||||
auto ps = gpu::Shader::createPixel(std::string(surfaceGeometry_downsampleDepthNormal_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), DepthLinearPass_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("linearDepthMap"), DepthLinearPass_DepthMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), DepthLinearPass_NormalMapSlot));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
state->setColorWriteMask(true, true, true, false);
|
||||
|
||||
// Good to go add the brand new pipeline
|
||||
_downsamplePipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
return _downsamplePipeline;
|
||||
}
|
||||
|
||||
|
||||
//#define USE_STENCIL_TEST
|
||||
|
||||
SurfaceGeometryFramebuffer::SurfaceGeometryFramebuffer() {
|
||||
}
|
||||
|
||||
void SurfaceGeometryFramebuffer::updateLinearDepth(const gpu::TexturePointer& linearDepthBuffer) {
|
||||
//If the depth buffer or size changed, we need to delete our FBOs
|
||||
bool reset = false;
|
||||
if ((_linearDepthTexture != linearDepthBuffer)) {
|
||||
_linearDepthTexture = linearDepthBuffer;
|
||||
reset = true;
|
||||
}
|
||||
if (_linearDepthTexture) {
|
||||
auto newFrameSize = glm::ivec2(_linearDepthTexture->getDimensions());
|
||||
if (_frameSize != newFrameSize) {
|
||||
_frameSize = newFrameSize;
|
||||
reset = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (reset) {
|
||||
clear();
|
||||
}
|
||||
}
|
||||
|
||||
void SurfaceGeometryFramebuffer::clear() {
|
||||
_curvatureFramebuffer.reset();
|
||||
_curvatureTexture.reset();
|
||||
_lowCurvatureFramebuffer.reset();
|
||||
_lowCurvatureTexture.reset();
|
||||
_blurringFramebuffer.reset();
|
||||
_blurringTexture.reset();
|
||||
}
|
||||
|
||||
gpu::TexturePointer SurfaceGeometryFramebuffer::getLinearDepthTexture() {
|
||||
return _linearDepthTexture;
|
||||
}
|
||||
|
||||
void SurfaceGeometryFramebuffer::allocate() {
|
||||
|
||||
auto width = _frameSize.x;
|
||||
auto height = _frameSize.y;
|
||||
|
||||
_curvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
|
||||
_curvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_curvatureFramebuffer->setRenderBuffer(0, _curvatureTexture);
|
||||
|
||||
_lowCurvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
|
||||
_lowCurvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_lowCurvatureFramebuffer->setRenderBuffer(0, _lowCurvatureTexture);
|
||||
|
||||
_blurringTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
|
||||
_blurringFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_blurringFramebuffer->setRenderBuffer(0, _blurringTexture);
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer SurfaceGeometryFramebuffer::getCurvatureFramebuffer() {
|
||||
if (!_curvatureFramebuffer) {
|
||||
allocate();
|
||||
}
|
||||
return _curvatureFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer SurfaceGeometryFramebuffer::getCurvatureTexture() {
|
||||
if (!_curvatureTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _curvatureTexture;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer SurfaceGeometryFramebuffer::getLowCurvatureFramebuffer() {
|
||||
if (!_lowCurvatureFramebuffer) {
|
||||
allocate();
|
||||
}
|
||||
return _lowCurvatureFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer SurfaceGeometryFramebuffer::getLowCurvatureTexture() {
|
||||
if (!_lowCurvatureTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _lowCurvatureTexture;
|
||||
}
|
||||
|
||||
gpu::FramebufferPointer SurfaceGeometryFramebuffer::getBlurringFramebuffer() {
|
||||
if (!_blurringFramebuffer) {
|
||||
allocate();
|
||||
}
|
||||
return _blurringFramebuffer;
|
||||
}
|
||||
|
||||
gpu::TexturePointer SurfaceGeometryFramebuffer::getBlurringTexture() {
|
||||
if (!_blurringTexture) {
|
||||
allocate();
|
||||
}
|
||||
return _blurringTexture;
|
||||
}
|
||||
|
||||
void SurfaceGeometryFramebuffer::setResolutionLevel(int resolutionLevel) {
|
||||
if (resolutionLevel != getResolutionLevel()) {
|
||||
clear();
|
||||
_resolutionLevel = resolutionLevel;
|
||||
}
|
||||
}
|
||||
|
||||
SurfaceGeometryPass::SurfaceGeometryPass() :
|
||||
_diffusePass(false)
|
||||
{
|
||||
Parameters parameters;
|
||||
_parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) &parameters));
|
||||
}
|
||||
|
||||
void SurfaceGeometryPass::configure(const Config& config) {
|
||||
const float CM_TO_M = 0.01f;
|
||||
|
||||
if ((config.depthThreshold * CM_TO_M) != getCurvatureDepthThreshold()) {
|
||||
_parametersBuffer.edit<Parameters>().curvatureInfo.x = config.depthThreshold * CM_TO_M;
|
||||
}
|
||||
|
||||
if (config.basisScale != getCurvatureBasisScale()) {
|
||||
_parametersBuffer.edit<Parameters>().curvatureInfo.y = config.basisScale;
|
||||
}
|
||||
|
||||
if (config.curvatureScale != getCurvatureScale()) {
|
||||
_parametersBuffer.edit<Parameters>().curvatureInfo.w = config.curvatureScale;
|
||||
}
|
||||
|
||||
if (!_surfaceGeometryFramebuffer) {
|
||||
_surfaceGeometryFramebuffer = std::make_shared<SurfaceGeometryFramebuffer>();
|
||||
}
|
||||
|
||||
_surfaceGeometryFramebuffer->setResolutionLevel(config.resolutionLevel);
|
||||
if (config.resolutionLevel != getResolutionLevel()) {
|
||||
_parametersBuffer.edit<Parameters>().resolutionInfo.w = config.resolutionLevel;
|
||||
}
|
||||
|
||||
auto filterRadius = (getResolutionLevel() > 0 ? config.diffuseFilterScale / 2.0f : config.diffuseFilterScale);
|
||||
_diffusePass.getParameters()->setFilterRadiusScale(filterRadius);
|
||||
_diffusePass.getParameters()->setDepthThreshold(config.diffuseDepthThreshold);
|
||||
|
||||
}
|
||||
|
||||
|
||||
void SurfaceGeometryPass::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
|
||||
assert(renderContext->args);
|
||||
assert(renderContext->args->hasViewFrustum());
|
||||
|
||||
RenderArgs* args = renderContext->args;
|
||||
|
||||
const auto frameTransform = inputs.get0();
|
||||
const auto deferredFramebuffer = inputs.get1();
|
||||
const auto linearDepthFramebuffer = inputs.get2();
|
||||
|
||||
|
||||
auto linearDepthTexture = linearDepthFramebuffer->getLinearDepthTexture();
|
||||
auto normalTexture = deferredFramebuffer->getDeferredNormalTexture();
|
||||
auto sourceViewport = args->_viewport;
|
||||
auto curvatureViewport = sourceViewport;
|
||||
|
||||
if (_surfaceGeometryFramebuffer->getResolutionLevel() > 0) {
|
||||
linearDepthTexture = linearDepthFramebuffer->getHalfLinearDepthTexture();
|
||||
normalTexture = linearDepthFramebuffer->getHalfNormalTexture();
|
||||
curvatureViewport = curvatureViewport >> _surfaceGeometryFramebuffer->getResolutionLevel();
|
||||
}
|
||||
|
||||
if (!_surfaceGeometryFramebuffer) {
|
||||
_surfaceGeometryFramebuffer = std::make_shared<SurfaceGeometryFramebuffer>();
|
||||
}
|
||||
_surfaceGeometryFramebuffer->updateLinearDepth(linearDepthTexture);
|
||||
|
||||
auto curvatureFramebuffer = _surfaceGeometryFramebuffer->getCurvatureFramebuffer();
|
||||
auto curvatureTexture = _surfaceGeometryFramebuffer->getCurvatureTexture();
|
||||
#ifdef USE_STENCIL_TEST
|
||||
if (curvatureFramebuffer->getDepthStencilBuffer() != deferredFramebuffer->getPrimaryDepthTexture()) {
|
||||
curvatureFramebuffer->setDepthStencilBuffer(deferredFramebuffer->getPrimaryDepthTexture(), deferredFramebuffer->getPrimaryDepthTexture()->getTexelFormat());
|
||||
}
|
||||
#endif
|
||||
|
||||
auto lowCurvatureFramebuffer = _surfaceGeometryFramebuffer->getLowCurvatureFramebuffer();
|
||||
auto lowCurvatureTexture = _surfaceGeometryFramebuffer->getLowCurvatureTexture();
|
||||
|
||||
auto blurringFramebuffer = _surfaceGeometryFramebuffer->getBlurringFramebuffer();
|
||||
auto blurringTexture = _surfaceGeometryFramebuffer->getBlurringTexture();
|
||||
|
||||
outputs.edit0() = _surfaceGeometryFramebuffer;
|
||||
outputs.edit1() = curvatureFramebuffer;
|
||||
outputs.edit2() = curvatureFramebuffer;
|
||||
outputs.edit3() = lowCurvatureFramebuffer;
|
||||
|
||||
auto curvaturePipeline = getCurvaturePipeline();
|
||||
auto diffuseVPipeline = _diffusePass.getBlurVPipeline();
|
||||
auto diffuseHPipeline = _diffusePass.getBlurHPipeline();
|
||||
|
||||
_diffusePass.getParameters()->setWidthHeight(curvatureViewport.z, curvatureViewport.w, args->_context->isStereo());
|
||||
glm::ivec2 textureSize(curvatureTexture->getDimensions());
|
||||
_diffusePass.getParameters()->setTexcoordTransform(gpu::Framebuffer::evalSubregionTexcoordTransformCoefficients(textureSize, curvatureViewport));
|
||||
_diffusePass.getParameters()->setDepthPerspective(args->getViewFrustum().getProjection()[1][1]);
|
||||
_diffusePass.getParameters()->setLinearDepthPosFar(args->getViewFrustum().getFarClip());
|
||||
|
||||
|
||||
gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
|
||||
_gpuTimer.begin(batch);
|
||||
batch.enableStereo(false);
|
||||
|
||||
batch.setProjectionTransform(glm::mat4());
|
||||
batch.setViewTransform(Transform());
|
||||
|
||||
batch.setViewportTransform(curvatureViewport);
|
||||
batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(_surfaceGeometryFramebuffer->getSourceFrameSize(), curvatureViewport));
|
||||
|
||||
// Curvature pass
|
||||
batch.setUniformBuffer(SurfaceGeometryPass_FrameTransformSlot, frameTransform->getFrameTransformBuffer());
|
||||
batch.setUniformBuffer(SurfaceGeometryPass_ParamsSlot, _parametersBuffer);
|
||||
batch.setFramebuffer(curvatureFramebuffer);
|
||||
// We can avoid the clear by writing the same clear value from the makeCurvature shader; performance is the same or no worse
|
||||
#ifdef USE_STENCIL_TEST
|
||||
// Except if stenciling out
|
||||
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0));
|
||||
#endif
|
||||
batch.setPipeline(curvaturePipeline);
|
||||
batch.setResourceTexture(SurfaceGeometryPass_DepthMapSlot, linearDepthTexture);
|
||||
batch.setResourceTexture(SurfaceGeometryPass_NormalMapSlot, normalTexture);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
|
||||
batch.setResourceTexture(SurfaceGeometryPass_DepthMapSlot, nullptr);
|
||||
batch.setResourceTexture(SurfaceGeometryPass_NormalMapSlot, nullptr);
|
||||
batch.setUniformBuffer(SurfaceGeometryPass_ParamsSlot, nullptr);
|
||||
batch.setUniformBuffer(SurfaceGeometryPass_FrameTransformSlot, nullptr);
|
||||
|
||||
// Diffusion pass
|
||||
const int BlurTask_ParamsSlot = 0;
|
||||
const int BlurTask_SourceSlot = 0;
|
||||
const int BlurTask_DepthSlot = 1;
|
||||
batch.setUniformBuffer(BlurTask_ParamsSlot, _diffusePass.getParameters()->_parametersBuffer);
|
||||
|
||||
batch.setResourceTexture(BlurTask_DepthSlot, linearDepthTexture);
|
||||
|
||||
batch.setFramebuffer(blurringFramebuffer);
|
||||
batch.setPipeline(diffuseVPipeline);
|
||||
batch.setResourceTexture(BlurTask_SourceSlot, curvatureTexture);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
batch.setFramebuffer(curvatureFramebuffer);
|
||||
batch.setPipeline(diffuseHPipeline);
|
||||
batch.setResourceTexture(BlurTask_SourceSlot, blurringTexture);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
batch.setFramebuffer(blurringFramebuffer);
|
||||
batch.setPipeline(diffuseVPipeline);
|
||||
batch.setResourceTexture(BlurTask_SourceSlot, curvatureTexture);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
batch.setFramebuffer(lowCurvatureFramebuffer);
|
||||
batch.setPipeline(diffuseHPipeline);
|
||||
batch.setResourceTexture(BlurTask_SourceSlot, blurringTexture);
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
|
||||
batch.setResourceTexture(BlurTask_SourceSlot, nullptr);
|
||||
batch.setResourceTexture(BlurTask_DepthSlot, nullptr);
|
||||
batch.setUniformBuffer(BlurTask_ParamsSlot, nullptr);
|
||||
|
||||
_gpuTimer.end(batch);
|
||||
});
|
||||
|
||||
|
||||
auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
|
||||
config->gpuTime = _gpuTimer.getAverage();
|
||||
}
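As a reading aid for the diffusion section of the batch above: the two V/H pairs ping-pong the separable depth-aware Gaussian so that, writing B_V and B_H for the vertical and horizontal passes and C for the raw curvature target,
C_{mid} = (B_H \circ B_V)(C), \qquad C_{low} = (B_H \circ B_V)(C_{mid})
i.e. after the pass the curvature framebuffer holds the once-diffused ("mid") curvature and the low-curvature framebuffer holds the twice-diffused one.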
|
||||
|
||||
|
||||
const gpu::PipelinePointer& SurfaceGeometryPass::getCurvaturePipeline() {
|
||||
if (!_curvaturePipeline) {
|
||||
auto vs = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
|
||||
auto ps = gpu::Shader::createPixel(std::string(surfaceGeometry_makeCurvature_frag));
|
||||
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
|
||||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), SurfaceGeometryPass_FrameTransformSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("surfaceGeometryParamsBuffer"), SurfaceGeometryPass_ParamsSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), SurfaceGeometryPass_DepthMapSlot));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), SurfaceGeometryPass_NormalMapSlot));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
|
||||
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
|
||||
|
||||
#ifdef USE_STENCIL_TEST
|
||||
// Stencil test the curvature pass so it only runs on object pixels, not the background
|
||||
state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
|
||||
#endif
|
||||
// Good to go add the brand new pipeline
|
||||
_curvaturePipeline = gpu::Pipeline::create(program, state);
|
||||
}
|
||||
|
||||
return _curvaturePipeline;
|
||||
}
|
226
libraries/render-utils/src/SurfaceGeometryPass.h
Normal file
|
@ -0,0 +1,226 @@
|
|||
//
|
||||
// SurfaceGeometryPass.h
|
||||
// libraries/render-utils/src/
|
||||
//
|
||||
// Created by Sam Gateau 6/3/2016.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_SurfaceGeometryPass_h
|
||||
#define hifi_SurfaceGeometryPass_h
|
||||
|
||||
#include <DependencyManager.h>
|
||||
|
||||
#include "render/DrawTask.h"
|
||||
#include "render/BlurTask.h"
|
||||
#include "DeferredFrameTransform.h"
|
||||
#include "DeferredFramebuffer.h"
|
||||
|
||||
|
||||
// LinearDepthFramebuffer is a helper class gathering in one place the framebuffers and targets describing the surface geometry linear depth
|
||||
// from a z buffer
|
||||
class LinearDepthFramebuffer {
|
||||
public:
|
||||
LinearDepthFramebuffer();
|
||||
|
||||
gpu::FramebufferPointer getLinearDepthFramebuffer();
|
||||
gpu::TexturePointer getLinearDepthTexture();
|
||||
|
||||
gpu::FramebufferPointer getDownsampleFramebuffer();
|
||||
gpu::TexturePointer getHalfLinearDepthTexture();
|
||||
gpu::TexturePointer getHalfNormalTexture();
|
||||
|
||||
// Update the depth buffer which will drive the allocation of all the other resources according to its size.
|
||||
void updatePrimaryDepth(const gpu::TexturePointer& depthBuffer);
|
||||
gpu::TexturePointer getPrimaryDepthTexture();
|
||||
const glm::ivec2& getDepthFrameSize() const { return _frameSize; }
|
||||
|
||||
void setResolutionLevel(int level);
|
||||
int getResolutionLevel() const { return _resolutionLevel; }
|
||||
|
||||
protected:
|
||||
void clear();
|
||||
void allocate();
|
||||
|
||||
gpu::TexturePointer _primaryDepthTexture;
|
||||
|
||||
gpu::FramebufferPointer _linearDepthFramebuffer;
|
||||
gpu::TexturePointer _linearDepthTexture;
|
||||
|
||||
gpu::FramebufferPointer _downsampleFramebuffer;
|
||||
gpu::TexturePointer _halfLinearDepthTexture;
|
||||
gpu::TexturePointer _halfNormalTexture;
|
||||
|
||||
|
||||
glm::ivec2 _frameSize;
|
||||
glm::ivec2 _halfFrameSize;
|
||||
int _resolutionLevel{ 0 };
|
||||
};
|
||||
|
||||
using LinearDepthFramebufferPointer = std::shared_ptr<LinearDepthFramebuffer>;
|
||||
|
||||
|
||||
class LinearDepthPassConfig : public render::Job::Config {
|
||||
Q_OBJECT
|
||||
Q_PROPERTY(double gpuTime READ getGpuTime)
|
||||
public:
|
||||
LinearDepthPassConfig() : render::Job::Config(true) {}
|
||||
|
||||
double getGpuTime() { return gpuTime; }
|
||||
|
||||
double gpuTime{ 0.0 };
|
||||
|
||||
signals:
|
||||
void dirty();
|
||||
};
|
||||
|
||||
class LinearDepthPass {
|
||||
public:
|
||||
using Inputs = render::VaryingSet2<DeferredFrameTransformPointer, DeferredFramebufferPointer>;
|
||||
using Outputs = render::VaryingSet5<LinearDepthFramebufferPointer, gpu::FramebufferPointer, gpu::TexturePointer, gpu::TexturePointer, gpu::TexturePointer>;
|
||||
using Config = LinearDepthPassConfig;
|
||||
using JobModel = render::Job::ModelIO<LinearDepthPass, Inputs, Outputs, Config>;
|
||||
|
||||
LinearDepthPass();
|
||||
|
||||
void configure(const Config& config);
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs);
|
||||
|
||||
private:
|
||||
typedef gpu::BufferView UniformBufferView;
|
||||
|
||||
LinearDepthFramebufferPointer _linearDepthFramebuffer;
|
||||
|
||||
const gpu::PipelinePointer& getLinearDepthPipeline();
|
||||
gpu::PipelinePointer _linearDepthPipeline;
|
||||
|
||||
const gpu::PipelinePointer& getDownsamplePipeline();
|
||||
gpu::PipelinePointer _downsamplePipeline;
|
||||
|
||||
gpu::RangeTimer _gpuTimer;
|
||||
};
|
||||
|
||||
|
||||
// SurfaceGeometryFramebuffer is a helper class gathering in one place the framebuffers and targets describing the surface geometry linear depth and curvature generated
|
||||
// from a z buffer and a normal buffer
|
||||
class SurfaceGeometryFramebuffer {
|
||||
public:
|
||||
SurfaceGeometryFramebuffer();
|
||||
|
||||
gpu::FramebufferPointer getCurvatureFramebuffer();
|
||||
gpu::TexturePointer getCurvatureTexture();
|
||||
|
||||
gpu::FramebufferPointer getLowCurvatureFramebuffer();
|
||||
gpu::TexturePointer getLowCurvatureTexture();
|
||||
|
||||
gpu::FramebufferPointer getBlurringFramebuffer();
|
||||
gpu::TexturePointer getBlurringTexture();
|
||||
|
||||
// Update the source framebuffer size which will drive the allocation of all the other resources.
|
||||
void updateLinearDepth(const gpu::TexturePointer& linearDepthBuffer);
|
||||
gpu::TexturePointer getLinearDepthTexture();
|
||||
const glm::ivec2& getSourceFrameSize() const { return _frameSize; }
|
||||
|
||||
void setResolutionLevel(int level);
|
||||
int getResolutionLevel() const { return _resolutionLevel; }
|
||||
|
||||
protected:
|
||||
void clear();
|
||||
void allocate();
|
||||
|
||||
gpu::TexturePointer _linearDepthTexture;
|
||||
|
||||
gpu::FramebufferPointer _curvatureFramebuffer;
|
||||
gpu::TexturePointer _curvatureTexture;
|
||||
|
||||
gpu::FramebufferPointer _blurringFramebuffer;
|
||||
gpu::TexturePointer _blurringTexture;
|
||||
|
||||
gpu::FramebufferPointer _lowCurvatureFramebuffer;
|
||||
gpu::TexturePointer _lowCurvatureTexture;
|
||||
|
||||
glm::ivec2 _frameSize;
|
||||
int _resolutionLevel{ 0 };
|
||||
};
|
||||
|
||||
using SurfaceGeometryFramebufferPointer = std::shared_ptr<SurfaceGeometryFramebuffer>;
|
||||
|
||||
class SurfaceGeometryPassConfig : public render::Job::Config {
|
||||
Q_OBJECT
|
||||
Q_PROPERTY(float depthThreshold MEMBER depthThreshold NOTIFY dirty)
|
||||
Q_PROPERTY(float basisScale MEMBER basisScale NOTIFY dirty)
|
||||
Q_PROPERTY(float curvatureScale MEMBER curvatureScale NOTIFY dirty)
|
||||
Q_PROPERTY(int resolutionLevel MEMBER resolutionLevel NOTIFY dirty)
|
||||
|
||||
Q_PROPERTY(float diffuseFilterScale MEMBER diffuseFilterScale NOTIFY dirty)
|
||||
Q_PROPERTY(float diffuseDepthThreshold MEMBER diffuseDepthThreshold NOTIFY dirty)
|
||||
|
||||
Q_PROPERTY(double gpuTime READ getGpuTime)
|
||||
public:
|
||||
SurfaceGeometryPassConfig() : render::Job::Config(true) {}
|
||||
|
||||
float depthThreshold{ 5.0f }; // centimeters
|
||||
float basisScale{ 1.0f };
|
||||
float curvatureScale{ 10.0f };
|
||||
int resolutionLevel{ 1 };
|
||||
float diffuseFilterScale{ 0.2f };
|
||||
float diffuseDepthThreshold{ 1.0f };
|
||||
|
||||
double getGpuTime() { return gpuTime; }
|
||||
|
||||
double gpuTime{ 0.0 };
|
||||
|
||||
signals:
|
||||
void dirty();
|
||||
};
|
||||
|
||||
class SurfaceGeometryPass {
|
||||
public:
|
||||
using Inputs = render::VaryingSet3<DeferredFrameTransformPointer, DeferredFramebufferPointer, LinearDepthFramebufferPointer>;
|
||||
using Outputs = render::VaryingSet4<SurfaceGeometryFramebufferPointer, gpu::FramebufferPointer, gpu::FramebufferPointer, gpu::FramebufferPointer>;
|
||||
using Config = SurfaceGeometryPassConfig;
|
||||
using JobModel = render::Job::ModelIO<SurfaceGeometryPass, Inputs, Outputs, Config>;
|
||||
|
||||
SurfaceGeometryPass();
|
||||
|
||||
void configure(const Config& config);
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs);
|
||||
|
||||
|
||||
float getCurvatureDepthThreshold() const { return _parametersBuffer.get<Parameters>().curvatureInfo.x; }
|
||||
float getCurvatureBasisScale() const { return _parametersBuffer.get<Parameters>().curvatureInfo.y; }
|
||||
float getCurvatureScale() const { return _parametersBuffer.get<Parameters>().curvatureInfo.w; }
|
||||
int getResolutionLevel() const { return (int)_parametersBuffer.get<Parameters>().resolutionInfo.w; }
|
||||
|
||||
private:
|
||||
typedef gpu::BufferView UniformBufferView;
|
||||
|
||||
// Class describing the uniform buffer with all the parameters common to the surface geometry shaders
|
||||
class Parameters {
|
||||
public:
|
||||
// Resolution info
|
||||
glm::vec4 resolutionInfo { 0.0f, 0.0f, 0.0f, 1.0f }; // By default, curvature & diffusion run at half resolution
|
||||
// Curvature algorithm
|
||||
glm::vec4 curvatureInfo{ 0.0f };
|
||||
|
||||
Parameters() {}
|
||||
};
|
||||
gpu::BufferView _parametersBuffer;
|
||||
|
||||
|
||||
SurfaceGeometryFramebufferPointer _surfaceGeometryFramebuffer;
|
||||
|
||||
const gpu::PipelinePointer& getCurvaturePipeline();
|
||||
|
||||
gpu::PipelinePointer _curvaturePipeline;
|
||||
|
||||
render::BlurGaussianDepthAware _diffusePass;
|
||||
|
||||
|
||||
gpu::RangeTimer _gpuTimer;
|
||||
};
|
||||
|
||||
#endif // hifi_SurfaceGeometryPass_h
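The resolutionLevel property above controls how much smaller than the source frame the curvature targets are allocated (the default config uses level 1, i.e. half resolution, matching the "half res" default in the parameters buffer). A minimal CPU sketch of that relationship, assuming a simple power-of-two reduction per level; the helper and type names below are illustrative only, not engine API.

#include <algorithm>
#include <cstdio>

// Hypothetical helper: derive the curvature buffer size from the source
// frame size and a resolution level (0 = full res, 1 = half res, ...).
struct FrameSize { int width; int height; };

FrameSize evalCurvatureBufferSize(FrameSize source, int resolutionLevel) {
    FrameSize result;
    result.width  = std::max(1, source.width  >> resolutionLevel);
    result.height = std::max(1, source.height >> resolutionLevel);
    return result;
}

int main() {
    FrameSize frame{ 1920, 1080 };
    // Default SurfaceGeometryPassConfig uses resolutionLevel = 1 (half res).
    FrameSize curvature = evalCurvatureBufferSize(frame, 1);
    std::printf("curvature buffer: %dx%d\n", curvature.width, curvature.height);
    return 0;
}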
|
|
@ -18,6 +18,8 @@
|
|||
|
||||
#include "FramebufferCache.h"
|
||||
|
||||
#include "toneMapping_frag.h"
|
||||
|
||||
const int ToneMappingEffect_ParamsSlot = 0;
|
||||
const int ToneMappingEffect_LightingMapSlot = 0;
|
||||
|
||||
|
@ -27,67 +29,7 @@ ToneMappingEffect::ToneMappingEffect() {
|
|||
}
|
||||
|
||||
void ToneMappingEffect::init() {
|
||||
const char BlitTextureGamma_frag[] = R"SCRIBE(
|
||||
// Generated on Sat Oct 24 09:34:37 2015
|
||||
//
|
||||
// Draw texture 0 fetched at texcoord.xy
|
||||
//
|
||||
// Created by Sam Gateau on 6/22/2015
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
struct ToneMappingParams {
|
||||
vec4 _exp_2powExp_s0_s1;
|
||||
ivec4 _toneCurve_s0_s1_s2;
|
||||
};
|
||||
|
||||
const float INV_GAMMA_22 = 1.0 / 2.2;
|
||||
const int ToneCurveNone = 0;
|
||||
const int ToneCurveGamma22 = 1;
|
||||
const int ToneCurveReinhard = 2;
|
||||
const int ToneCurveFilmic = 3;
|
||||
|
||||
uniform toneMappingParamsBuffer {
|
||||
ToneMappingParams params;
|
||||
};
|
||||
float getTwoPowExposure() {
|
||||
return params._exp_2powExp_s0_s1.y;
|
||||
}
|
||||
int getToneCurve() {
|
||||
return params._toneCurve_s0_s1_s2.x;
|
||||
}
|
||||
|
||||
uniform sampler2D colorMap;
|
||||
|
||||
in vec2 varTexCoord0;
|
||||
out vec4 outFragColor;
|
||||
|
||||
void main(void) {
|
||||
vec4 fragColorRaw = texture(colorMap, varTexCoord0);
|
||||
vec3 fragColor = fragColorRaw.xyz;
|
||||
|
||||
vec3 srcColor = fragColor * getTwoPowExposure();
|
||||
|
||||
int toneCurve = getToneCurve();
|
||||
vec3 tonedColor = srcColor;
|
||||
if (toneCurve == ToneCurveFilmic) {
|
||||
vec3 x = max(vec3(0.0), srcColor-0.004);
|
||||
tonedColor = (x * (6.2 * x + 0.5)) / (x * (6.2 * x + 1.7) + 0.06);
|
||||
} else if (toneCurve == ToneCurveReinhard) {
|
||||
tonedColor = srcColor/(1.0 + srcColor);
|
||||
tonedColor = pow(tonedColor, vec3(INV_GAMMA_22));
|
||||
} else if (toneCurve == ToneCurveGamma22) {
|
||||
tonedColor = pow(srcColor, vec3(INV_GAMMA_22));
|
||||
} // else None toned = src
|
||||
|
||||
outFragColor = vec4(tonedColor, 1.0);
|
||||
}
|
||||
|
||||
)SCRIBE";
|
||||
auto blitPS = gpu::ShaderPointer(gpu::Shader::createPixel(std::string(BlitTextureGamma_frag)));
|
||||
auto blitPS = gpu::ShaderPointer(gpu::Shader::createPixel(std::string(toneMapping_frag)));
|
||||
|
||||
auto blitVS = gpu::StandardShaderLib::getDrawViewportQuadTransformTexcoordVS();
|
||||
auto blitProgram = gpu::ShaderPointer(gpu::Shader::createProgram(blitVS, blitPS));
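The inline shader removed above (and the toneMapping_frag it is replaced by) applies one of four tone curves to the exposed color: None, Gamma 2.2, Reinhard, or Filmic. As a reference, here is the same per-channel curve math evaluated on the CPU; the constants are copied from the shader, the function names are only illustrative.

#include <cmath>
#include <cstdio>

const float INV_GAMMA_22 = 1.0f / 2.2f;

float toneGamma22(float c)  { return std::pow(c, INV_GAMMA_22); }
float toneReinhard(float c) { return std::pow(c / (1.0f + c), INV_GAMMA_22); }
float toneFilmic(float c) {
    float x = std::max(0.0f, c - 0.004f);
    // This filmic fit bakes the gamma correction in, so no final pow() is needed.
    return (x * (6.2f * x + 0.5f)) / (x * (6.2f * x + 1.7f) + 0.06f);
}

int main() {
    const float samples[] = { 0.05f, 0.18f, 1.0f, 4.0f };
    for (float c : samples) {
        std::printf("%5.2f -> gamma %.3f, reinhard %.3f, filmic %.3f\n",
                    c, toneGamma22(c), toneReinhard(c), toneFilmic(c));
    }
    return 0;
}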
|
||||
|
@ -102,26 +44,28 @@ void ToneMappingEffect::init() {
|
|||
}
|
||||
|
||||
void ToneMappingEffect::setExposure(float exposure) {
|
||||
_parametersBuffer.edit<Parameters>()._exposure = exposure;
|
||||
_parametersBuffer.edit<Parameters>()._twoPowExposure = pow(2.0, exposure);
|
||||
auto& params = _parametersBuffer.get<Parameters>();
|
||||
if (params._exposure != exposure) {
|
||||
_parametersBuffer.edit<Parameters>()._exposure = exposure;
|
||||
_parametersBuffer.edit<Parameters>()._twoPowExposure = pow(2.0, exposure);
|
||||
}
|
||||
}
|
||||
|
||||
void ToneMappingEffect::setToneCurve(ToneCurve curve) {
|
||||
_parametersBuffer.edit<Parameters>()._toneCurve = curve;
|
||||
auto& params = _parametersBuffer.get<Parameters>();
|
||||
if (params._toneCurve != curve) {
|
||||
_parametersBuffer.edit<Parameters>()._toneCurve = curve;
|
||||
}
|
||||
}
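setExposure above caches both the exposure value and 2^exposure, so the shader can scale the lighting buffer with a single multiply; the exposure is effectively expressed in photographic stops. A quick CPU illustration of that scale factor:

#include <cmath>
#include <cstdio>

int main() {
    // One stop up doubles the scene brightness, one stop down halves it.
    const float exposures[] = { -2.0f, -1.0f, 0.0f, 1.0f, 2.0f };
    for (float exposure : exposures) {
        float twoPowExposure = std::pow(2.0f, exposure);
        std::printf("exposure %+4.1f stops -> scale %.2f\n", exposure, twoPowExposure);
    }
    return 0;
}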
|
||||
|
||||
void ToneMappingEffect::render(RenderArgs* args) {
|
||||
void ToneMappingEffect::render(RenderArgs* args, const gpu::TexturePointer& lightingBuffer, const gpu::FramebufferPointer& destinationFramebuffer) {
|
||||
if (!_blitLightBuffer) {
|
||||
init();
|
||||
}
|
||||
auto framebufferCache = DependencyManager::get<FramebufferCache>();
|
||||
auto framebufferSize = glm::ivec2(lightingBuffer->getDimensions());
|
||||
gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
|
||||
batch.enableStereo(false);
|
||||
QSize framebufferSize = framebufferCache->getFrameBufferSize();
|
||||
|
||||
auto lightingBuffer = framebufferCache->getLightingTexture();
|
||||
auto destFbo = framebufferCache->getPrimaryFramebuffer();
|
||||
batch.setFramebuffer(destFbo);
|
||||
batch.setFramebuffer(destinationFramebuffer);
|
||||
|
||||
// FIXME: Generate the Luminosity map
|
||||
//batch.generateTextureMips(lightingBuffer);
|
||||
|
@ -129,17 +73,8 @@ void ToneMappingEffect::render(RenderArgs* args) {
|
|||
batch.setViewportTransform(args->_viewport);
|
||||
batch.setProjectionTransform(glm::mat4());
|
||||
batch.setViewTransform(Transform());
|
||||
{
|
||||
float sMin = args->_viewport.x / (float)framebufferSize.width();
|
||||
float sWidth = args->_viewport.z / (float)framebufferSize.width();
|
||||
float tMin = args->_viewport.y / (float)framebufferSize.height();
|
||||
float tHeight = args->_viewport.w / (float)framebufferSize.height();
|
||||
Transform model;
|
||||
batch.setPipeline(_blitLightBuffer);
|
||||
model.setTranslation(glm::vec3(sMin, tMin, 0.0));
|
||||
model.setScale(glm::vec3(sWidth, tHeight, 1.0));
|
||||
batch.setModelTransform(model);
|
||||
}
|
||||
batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(framebufferSize, args->_viewport));
|
||||
batch.setPipeline(_blitLightBuffer);
|
||||
|
||||
batch.setUniformBuffer(ToneMappingEffect_ParamsSlot, _parametersBuffer);
|
||||
batch.setResourceTexture(ToneMappingEffect_LightingMapSlot, lightingBuffer);
|
||||
|
@ -149,15 +84,13 @@ void ToneMappingEffect::render(RenderArgs* args) {
|
|||
|
||||
|
||||
void ToneMappingDeferred::configure(const Config& config) {
|
||||
if (config.exposure >= 0.0f) {
|
||||
_toneMappingEffect.setExposure(config.exposure);
|
||||
}
|
||||
|
||||
if (config.curve >= 0) {
|
||||
_toneMappingEffect.setToneCurve((ToneMappingEffect::ToneCurve)config.curve);
|
||||
}
|
||||
_toneMappingEffect.setExposure(config.exposure);
|
||||
_toneMappingEffect.setToneCurve((ToneMappingEffect::ToneCurve)config.curve);
|
||||
}
|
||||
|
||||
void ToneMappingDeferred::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
|
||||
_toneMappingEffect.render(renderContext->args);
|
||||
void ToneMappingDeferred::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs) {
|
||||
|
||||
auto lightingBuffer = inputs.get0()->getRenderBuffer(0);
|
||||
auto destFbo = inputs.get1();
|
||||
_toneMappingEffect.render(renderContext->args, lightingBuffer, destFbo);
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ public:
|
|||
ToneMappingEffect();
|
||||
virtual ~ToneMappingEffect() {}
|
||||
|
||||
void render(RenderArgs* args);
|
||||
void render(RenderArgs* args, const gpu::TexturePointer& lightingBuffer, const gpu::FramebufferPointer& destinationBuffer);
|
||||
|
||||
void setExposure(float exposure);
|
||||
float getExposure() const { return _parametersBuffer.get<Parameters>()._exposure; }
|
||||
|
@ -71,7 +71,7 @@ class ToneMappingConfig : public render::Job::Config {
|
|||
public:
|
||||
ToneMappingConfig() : render::Job::Config(true) {}
|
||||
|
||||
void setExposure(float newExposure) { exposure = std::max(0.0f, newExposure); emit dirty(); }
|
||||
void setExposure(float newExposure) { exposure = newExposure; emit dirty(); }
|
||||
void setCurve(int newCurve) { curve = std::max((int)ToneMappingEffect::None, std::min((int)ToneMappingEffect::Filmic, newCurve)); emit dirty(); }
|
||||
|
||||
|
||||
|
@ -83,11 +83,13 @@ signals:
|
|||
|
||||
class ToneMappingDeferred {
|
||||
public:
|
||||
// Inputs: lightingFramebuffer, destinationFramebuffer
|
||||
using Inputs = render::VaryingSet2<gpu::FramebufferPointer, gpu::FramebufferPointer>;
|
||||
using Config = ToneMappingConfig;
|
||||
using JobModel = render::Job::Model<ToneMappingDeferred, Config>;
|
||||
using JobModel = render::Job::ModelI<ToneMappingDeferred, Inputs, Config>;
|
||||
|
||||
void configure(const Config& config);
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);
|
||||
void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const Inputs& inputs);
|
||||
|
||||
ToneMappingEffect _toneMappingEffect;
|
||||
};
|
||||
|
|
|
@ -14,9 +14,14 @@
|
|||
|
||||
<@include DeferredBufferRead.slh@>
|
||||
|
||||
uniform sampler2D pyramidMap;
|
||||
uniform sampler2D linearDepthMap;
|
||||
uniform sampler2D halfLinearDepthMap;
|
||||
uniform sampler2D halfNormalMap;
|
||||
uniform sampler2D occlusionMap;
|
||||
uniform sampler2D occlusionBlurredMap;
|
||||
uniform sampler2D curvatureMap;
|
||||
uniform sampler2D diffusedCurvatureMap;
|
||||
uniform sampler2D scatteringMap;
|
||||
|
||||
in vec2 uv;
|
||||
out vec4 outFragColor;
|
||||
|
|
|
@ -5,18 +5,31 @@
|
|||
// deferred_light.vert
|
||||
// vertex shader
|
||||
//
|
||||
// Created by Andrzej Kapolka on 9/18/14.
|
||||
// Created by Sam Gateau on 6/16/16.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include gpu/Inputs.slh@>
|
||||
|
||||
out vec2 _texCoord0;
|
||||
|
||||
uniform vec4 texcoordFrameTransform;
|
||||
|
||||
void main(void) {
|
||||
_texCoord0 = inTexCoord0.st;
|
||||
gl_Position = inPosition;
|
||||
const float depth = 1.0;
|
||||
const vec4 UNIT_QUAD[4] = vec4[4](
|
||||
vec4(-1.0, -1.0, depth, 1.0),
|
||||
vec4(1.0, -1.0, depth, 1.0),
|
||||
vec4(-1.0, 1.0, depth, 1.0),
|
||||
vec4(1.0, 1.0, depth, 1.0)
|
||||
);
|
||||
vec4 pos = UNIT_QUAD[gl_VertexID];
|
||||
|
||||
_texCoord0 = (pos.xy + 1) * 0.5;
|
||||
|
||||
_texCoord0 *= texcoordFrameTransform.zw;
|
||||
_texCoord0 += texcoordFrameTransform.xy;
|
||||
|
||||
gl_Position = pos;
|
||||
}
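The rewritten deferred_light vertex shader no longer consumes a vertex buffer: it indexes a constant UNIT_QUAD with gl_VertexID and derives the texcoord from the clip-space corner, then remaps it by texcoordFrameTransform (offset in .xy, scale in .zw). The same lookup written out on the CPU for the four vertex IDs, purely as a sketch of the mapping:

#include <cstdio>

struct Vec2 { float x, y; };

int main() {
    // Same corners as the UNIT_QUAD constant in the shader (z = depth = 1.0).
    const Vec2 UNIT_QUAD[4] = { {-1.0f, -1.0f}, {1.0f, -1.0f}, {-1.0f, 1.0f}, {1.0f, 1.0f} };
    // texcoordFrameTransform: offset in .xy, scale in .zw; identity shown here.
    const Vec2 tcOffset{ 0.0f, 0.0f }, tcScale{ 1.0f, 1.0f };

    for (int vertexID = 0; vertexID < 4; ++vertexID) {
        Vec2 pos = UNIT_QUAD[vertexID];
        // Clip space [-1,1] -> texcoord [0,1], scale first, then offset (as in the shader).
        Vec2 uv{ (pos.x + 1.0f) * 0.5f * tcScale.x + tcOffset.x,
                 (pos.y + 1.0f) * 0.5f * tcScale.y + tcOffset.y };
        std::printf("vertex %d: pos(%+.0f,%+.0f) uv(%.1f,%.1f)\n", vertexID, pos.x, pos.y, uv.x, uv.y);
    }
    return 0;
}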
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
// deferred_light_limited.vert
|
||||
// vertex shader
|
||||
//
|
||||
// Created by Andrzej Kapolka on 9/19/14.
|
||||
// Created by Sam Gateau on 6/16/16.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
|
@ -18,17 +18,40 @@
|
|||
|
||||
<$declareStandardTransform()$>
|
||||
|
||||
uniform mat4 texcoordMat;
|
||||
uniform vec4 sphereParam;
|
||||
|
||||
out vec4 _texCoord0;
|
||||
|
||||
void main(void) {
|
||||
// standard transform
|
||||
TransformCamera cam = getTransformCamera();
|
||||
TransformObject obj = getTransformObject();
|
||||
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>;
|
||||
if (sphereParam.w != 0.0) {
|
||||
|
||||
vec4 projected = gl_Position / gl_Position.w;
|
||||
_texCoord0 = vec4(dot(projected, texcoordMat[0]) * gl_Position.w,
|
||||
dot(projected, texcoordMat[1]) * gl_Position.w, 0.0, gl_Position.w);
|
||||
// standard transform
|
||||
TransformCamera cam = getTransformCamera();
|
||||
TransformObject obj = getTransformObject();
|
||||
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>;
|
||||
|
||||
vec4 projected = gl_Position / gl_Position.w;
|
||||
projected.xy = (projected.xy + 1.0) * 0.5;
|
||||
|
||||
if (cam_isStereo()) {
|
||||
projected.x = 0.5 * (projected.x + cam_getStereoSide());
|
||||
}
|
||||
_texCoord0 = vec4(projected.xy, 0.0, 1.0) * gl_Position.w;
|
||||
} else {
|
||||
const float depth = -1.0; //Draw at near plane
|
||||
const vec4 UNIT_QUAD[4] = vec4[4](
|
||||
vec4(-1.0, -1.0, depth, 1.0),
|
||||
vec4(1.0, -1.0, depth, 1.0),
|
||||
vec4(-1.0, 1.0, depth, 1.0),
|
||||
vec4(1.0, 1.0, depth, 1.0)
|
||||
);
|
||||
vec4 pos = UNIT_QUAD[gl_VertexID];
|
||||
|
||||
_texCoord0 = vec4((pos.xy + 1) * 0.5, 0.0, 1.0);
|
||||
|
||||
if (cam_isStereo()) {
|
||||
_texCoord0.x = 0.5 * (_texCoord0.x + cam_getStereoSide());
|
||||
}
|
||||
gl_Position = pos;
|
||||
}
|
||||
}
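For the stereo path above, the texcoord is additionally squeezed into one half of the side-by-side frame: x' = 0.5 * (x + side). Side 0 maps into [0, 0.5] and side 1 into [0.5, 1] (presumably the left and right eyes respectively). A small numeric check of that remap, illustrative only:

#include <cstdio>

// Remap a mono texcoord x in [0,1] into the half of a side-by-side
// stereo framebuffer selected by 'side' (0 or 1).
float remapStereoX(float x, int side) {
    return 0.5f * (x + static_cast<float>(side));
}

int main() {
    for (int side = 0; side <= 1; ++side) {
        std::printf("side %d: 0.0 -> %.2f, 0.5 -> %.2f, 1.0 -> %.2f\n",
                    side, remapStereoX(0.0f, side), remapStereoX(0.5f, side), remapStereoX(1.0f, side));
    }
    return 0;
}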
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
|
||||
<$declareStandardTransform()$>
|
||||
|
||||
uniform mat4 texcoordMat;
|
||||
uniform vec4 coneParam;
|
||||
|
||||
out vec4 _texCoord0;
|
||||
|
@ -38,14 +37,33 @@ void main(void) {
|
|||
} else {
|
||||
coneVertex.z = 0.0;
|
||||
}
|
||||
|
||||
|
||||
// standard transform
|
||||
TransformCamera cam = getTransformCamera();
|
||||
TransformObject obj = getTransformObject();
|
||||
<$transformModelToClipPos(cam, obj, coneVertex, gl_Position)$>;
|
||||
vec4 projected = gl_Position / gl_Position.w;
|
||||
projected.xy = (projected.xy + 1.0) * 0.5;
|
||||
|
||||
if (cam_isStereo()) {
|
||||
projected.x = 0.5 * (projected.x + cam_getStereoSide());
|
||||
}
|
||||
_texCoord0 = vec4(projected.xy, 0.0, 1.0) * gl_Position.w;
|
||||
} else {
|
||||
const float depth = -1.0; //Draw at near plane
|
||||
const vec4 UNIT_QUAD[4] = vec4[4](
|
||||
vec4(-1.0, -1.0, depth, 1.0),
|
||||
vec4(1.0, -1.0, depth, 1.0),
|
||||
vec4(-1.0, 1.0, depth, 1.0),
|
||||
vec4(1.0, 1.0, depth, 1.0)
|
||||
);
|
||||
vec4 pos = UNIT_QUAD[gl_VertexID];
|
||||
|
||||
_texCoord0 = vec4((pos.xy + 1) * 0.5, 0.0, 1.0);
|
||||
if (cam_isStereo()) {
|
||||
_texCoord0.x = 0.5 * (_texCoord0.x + cam_getStereoSide());
|
||||
}
|
||||
gl_Position = pos;
|
||||
}
|
||||
|
||||
// standard transform
|
||||
TransformCamera cam = getTransformCamera();
|
||||
TransformObject obj = getTransformObject();
|
||||
<$transformModelToClipPos(cam, obj, coneVertex, gl_Position)$>;
|
||||
|
||||
vec4 projected = gl_Position / gl_Position.w;
|
||||
_texCoord0 = vec4(dot(projected, texcoordMat[0]) * gl_Position.w,
|
||||
dot(projected, texcoordMat[1]) * gl_Position.w, 0.0, gl_Position.w);
|
||||
}
|
||||
|
|
|
@ -12,43 +12,50 @@
|
|||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
|
||||
<@include DeferredBufferRead.slh@>
|
||||
<@include DeferredGlobalLight.slh@>
|
||||
|
||||
<$declareEvalLightmappedColor()$>
|
||||
<$declareEvalAmbientSphereGlobalColor()$>
|
||||
|
||||
<$declareEvalAmbientSphereGlobalColor(supportScattering)$>
|
||||
|
||||
|
||||
in vec2 _texCoord0;
|
||||
out vec4 _fragColor;
|
||||
|
||||
void main(void) {
|
||||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFrameTransform deferredTransform = getDeferredFrameTransform();
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
float shadowAttenuation = 1.0;
|
||||
|
||||
if (frag.mode == FRAG_MODE_UNLIT) {
|
||||
_fragColor = vec4(frag.diffuse, 1.0);
|
||||
discard;
|
||||
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
discard;
|
||||
} else {
|
||||
vec3 color = evalAmbientSphereGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.metallic,
|
||||
frag.emissive,
|
||||
frag.roughness);
|
||||
_fragColor = vec4(color, frag.normalVal.a);
|
||||
vec4 midNormalCurvature;
|
||||
vec4 lowNormalCurvature;
|
||||
if (frag.mode == FRAG_MODE_SCATTERING) {
|
||||
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
|
||||
}
|
||||
|
||||
vec3 color = evalAmbientSphereGlobalColor(
|
||||
getViewInverse(),
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.albedo,
|
||||
frag.fresnel,
|
||||
frag.metallic,
|
||||
frag.roughness,
|
||||
frag.scattering,
|
||||
midNormalCurvature,
|
||||
lowNormalCurvature);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,40 +17,42 @@
|
|||
<@include DeferredGlobalLight.slh@>
|
||||
|
||||
<$declareEvalLightmappedColor()$>
|
||||
<$declareEvalAmbientSphereGlobalColor()$>
|
||||
<$declareEvalAmbientSphereGlobalColor(isScattering)$>
|
||||
|
||||
in vec2 _texCoord0;
|
||||
out vec4 _fragColor;
|
||||
|
||||
void main(void) {
|
||||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFrameTransform deferredTransform = getDeferredFrameTransform();
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
vec4 worldPos = deferredTransform.viewInverse * vec4(frag.position.xyz, 1.0);
|
||||
vec4 worldPos = getViewInverse() * vec4(frag.position.xyz, 1.0);
|
||||
float shadowAttenuation = evalShadowAttenuation(worldPos);
|
||||
|
||||
if (frag.mode == FRAG_MODE_UNLIT) {
|
||||
_fragColor = vec4(frag.diffuse, 1.0);
|
||||
discard;
|
||||
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
discard;
|
||||
} else {
|
||||
vec4 midNormalCurvature;
|
||||
vec4 lowNormalCurvature;
|
||||
if (frag.mode == FRAG_MODE_SCATTERING) {
|
||||
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
|
||||
}
|
||||
vec3 color = evalAmbientSphereGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
getViewInverse(),
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.albedo,
|
||||
frag.fresnel,
|
||||
frag.metallic,
|
||||
frag.emissive,
|
||||
frag.roughness);
|
||||
_fragColor = vec4(color, frag.normalVal.a);
|
||||
frag.roughness,
|
||||
frag.scattering,
|
||||
midNormalCurvature,
|
||||
lowNormalCurvature);
|
||||
|
||||
_fragColor = vec4(color, 1.0);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,34 +22,26 @@ in vec2 _texCoord0;
|
|||
out vec4 _fragColor;
|
||||
|
||||
void main(void) {
|
||||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFrameTransform deferredTransform = getDeferredFrameTransform();
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
float shadowAttenuation = 1.0;
|
||||
|
||||
// Light mapped or not ?
|
||||
if (frag.mode == FRAG_MODE_UNLIT) {
|
||||
_fragColor = vec4(frag.diffuse, 1.0);
|
||||
discard;
|
||||
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
discard;
|
||||
} else {
|
||||
vec3 color = evalAmbientGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
getViewInverse(),
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.albedo,
|
||||
frag.fresnel,
|
||||
frag.metallic,
|
||||
frag.emissive,
|
||||
frag.roughness);
|
||||
_fragColor = vec4(color, frag.normalVal.a);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,35 +23,27 @@ in vec2 _texCoord0;
|
|||
out vec4 _fragColor;
|
||||
|
||||
void main(void) {
|
||||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFrameTransform deferredTransform = getDeferredFrameTransform();
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
vec4 worldPos = deferredTransform.viewInverse * vec4(frag.position.xyz, 1.0);
|
||||
vec4 worldPos = getViewInverse() * vec4(frag.position.xyz, 1.0);
|
||||
float shadowAttenuation = evalShadowAttenuation(worldPos);
|
||||
|
||||
// Light mapped or not ?
|
||||
if (frag.mode == FRAG_MODE_UNLIT) {
|
||||
_fragColor = vec4(frag.diffuse, 1.0);
|
||||
discard;
|
||||
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
discard;
|
||||
} else {
|
||||
vec3 color = evalAmbientGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
getViewInverse(),
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.albedo,
|
||||
frag.fresnel,
|
||||
frag.metallic,
|
||||
frag.emissive,
|
||||
frag.roughness);
|
||||
_fragColor = vec4(color, frag.normalVal.a);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,41 +16,42 @@
|
|||
<@include DeferredGlobalLight.slh@>
|
||||
|
||||
<$declareEvalLightmappedColor()$>
|
||||
<$declareEvalSkyboxGlobalColor()$>
|
||||
<$declareEvalSkyboxGlobalColor(isScattering)$>
|
||||
|
||||
in vec2 _texCoord0;
|
||||
out vec4 _fragColor;
|
||||
|
||||
void main(void) {
|
||||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFrameTransform deferredTransform = getDeferredFrameTransform();
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
float shadowAttenuation = 1.0;
|
||||
|
||||
// Light mapped or not ?
|
||||
if (frag.mode == FRAG_MODE_UNLIT) {
|
||||
_fragColor = vec4(frag.diffuse, 1.0);
|
||||
discard;
|
||||
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
discard;
|
||||
} else {
|
||||
vec3 color = evalSkyboxGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
vec4 midNormalCurvature;
|
||||
vec4 lowNormalCurvature;
|
||||
if (frag.mode == FRAG_MODE_SCATTERING) {
|
||||
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
|
||||
}
|
||||
vec3 color = evalSkyboxGlobalColor(
|
||||
getViewInverse(),
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.albedo,
|
||||
frag.fresnel,
|
||||
frag.metallic,
|
||||
frag.emissive,
|
||||
frag.roughness);
|
||||
frag.roughness,
|
||||
frag.scattering,
|
||||
midNormalCurvature,
|
||||
lowNormalCurvature);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
|
||||
_fragColor = vec4(color, frag.normalVal.a);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,42 +17,44 @@
|
|||
<@include DeferredGlobalLight.slh@>
|
||||
|
||||
<$declareEvalLightmappedColor()$>
|
||||
<$declareEvalSkyboxGlobalColor()$>
|
||||
<$declareEvalSkyboxGlobalColor(isScattering)$>
|
||||
|
||||
in vec2 _texCoord0;
|
||||
out vec4 _fragColor;
|
||||
|
||||
void main(void) {
|
||||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFrameTransform deferredTransform = getDeferredFrameTransform();
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, _texCoord0);
|
||||
|
||||
vec4 worldPos = deferredTransform.viewInverse * vec4(frag.position.xyz, 1.0);
|
||||
vec4 worldPos = getViewInverse() * vec4(frag.position.xyz, 1.0);
|
||||
float shadowAttenuation = evalShadowAttenuation(worldPos);
|
||||
|
||||
// Light mapped or not ?
|
||||
if (frag.mode == FRAG_MODE_UNLIT) {
|
||||
_fragColor = vec4(frag.diffuse, 1.0);
|
||||
discard;
|
||||
} else if (frag.mode == FRAG_MODE_LIGHTMAPPED) {
|
||||
vec3 color = evalLightmappedColor(
|
||||
deferredTransform.viewInverse,
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.specularVal.xyz);
|
||||
_fragColor = vec4(color, 1.0);
|
||||
discard;
|
||||
} else {
|
||||
vec4 midNormalCurvature;
|
||||
vec4 lowNormalCurvature;
|
||||
if (frag.mode == FRAG_MODE_SCATTERING) {
|
||||
unpackMidLowNormalCurvature(_texCoord0, midNormalCurvature, lowNormalCurvature);
|
||||
}
|
||||
vec3 color = evalSkyboxGlobalColor(
|
||||
deferredTransform.viewInverse,
|
||||
getViewInverse(),
|
||||
shadowAttenuation,
|
||||
frag.obscurance,
|
||||
frag.position.xyz,
|
||||
frag.normal,
|
||||
frag.diffuse,
|
||||
frag.albedo,
|
||||
frag.fresnel,
|
||||
frag.metallic,
|
||||
frag.emissive,
|
||||
frag.roughness);
|
||||
frag.roughness,
|
||||
frag.scattering,
|
||||
midNormalCurvature,
|
||||
lowNormalCurvature);
|
||||
|
||||
_fragColor = vec4(color, frag.normalVal.a);
|
||||
|
||||
_fragColor = vec4(color, 1.0);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -45,6 +45,8 @@ void main(void) {
|
|||
vec3 emissive = getMaterialEmissive(mat);
|
||||
<$evalMaterialEmissive(emissiveTex, emissive, matKey, emissive)$>;
|
||||
|
||||
float scattering = getMaterialScattering(mat);
|
||||
|
||||
packDeferredFragment(
|
||||
normalize(_normal.xyz),
|
||||
opacity,
|
||||
|
@ -52,5 +54,6 @@ void main(void) {
|
|||
roughness,
|
||||
getMaterialMetallic(mat),
|
||||
emissive,
|
||||
occlusionTex);
|
||||
occlusionTex,
|
||||
scattering);
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
<@include model/Material.slh@>
|
||||
|
||||
<@include MaterialTextures.slh@>
|
||||
<$declareMaterialTextures(ALBEDO, ROUGHNESS, NORMAL, _SCRIBE_NULL, EMISSIVE, OCCLUSION)$>
|
||||
<$declareMaterialTextures(ALBEDO, ROUGHNESS, NORMAL, _SCRIBE_NULL, EMISSIVE, OCCLUSION, SCATTERING)$>
|
||||
|
||||
in vec4 _position;
|
||||
in vec2 _texCoord0;
|
||||
|
@ -29,7 +29,7 @@ in vec3 _color;
|
|||
void main(void) {
|
||||
Material mat = getMaterial();
|
||||
int matKey = getMaterialKey(mat);
|
||||
<$fetchMaterialTexturesCoord0(matKey, _texCoord0, albedoTex, roughnessTex, normalTex, _SCRIBE_NULL, emissiveTex)$>
|
||||
<$fetchMaterialTexturesCoord0(matKey, _texCoord0, albedoTex, roughnessTex, normalTex, _SCRIBE_NULL, emissiveTex, scatteringTex)$>
|
||||
<$fetchMaterialTexturesCoord1(matKey, _texCoord1, occlusionTex)$>
|
||||
|
||||
float opacity = 1.0;
|
||||
|
@ -49,6 +49,9 @@ void main(void) {
|
|||
vec3 viewNormal;
|
||||
<$tangentToViewSpace(normalTex, _normal, _tangent, viewNormal)$>
|
||||
|
||||
float scattering = getMaterialScattering(mat);
|
||||
<$evalMaterialScattering(scatteringTex, scattering, matKey, scattering)$>;
|
||||
|
||||
packDeferredFragment(
|
||||
viewNormal,
|
||||
opacity,
|
||||
|
@ -56,5 +59,6 @@ void main(void) {
|
|||
roughness,
|
||||
getMaterialMetallic(mat),
|
||||
emissive,
|
||||
occlusionTex);
|
||||
occlusionTex,
|
||||
scattering);
|
||||
}
|
||||
|
|
|
@ -52,6 +52,7 @@ void main(void) {
|
|||
float metallic = getMaterialMetallic(mat);
|
||||
<$evalMaterialMetallic(metallicTex, metallic, matKey, metallic)$>;
|
||||
|
||||
float scattering = getMaterialScattering(mat);
|
||||
|
||||
packDeferredFragment(
|
||||
normalize(viewNormal.xyz),
|
||||
|
@ -60,5 +61,6 @@ void main(void) {
|
|||
roughness,
|
||||
metallic,
|
||||
emissive,
|
||||
occlusionTex);
|
||||
occlusionTex,
|
||||
scattering);
|
||||
}
|
||||
|
|
|
@ -49,6 +49,8 @@ void main(void) {
|
|||
float metallic = getMaterialMetallic(mat);
|
||||
<$evalMaterialMetallic(metallicTex, metallic, matKey, metallic)$>;
|
||||
|
||||
float scattering = getMaterialScattering(mat);
|
||||
|
||||
packDeferredFragment(
|
||||
normalize(_normal),
|
||||
opacity,
|
||||
|
@ -56,5 +58,6 @@ void main(void) {
|
|||
roughness,
|
||||
metallic,
|
||||
emissive,
|
||||
occlusionTex);
|
||||
occlusionTex,
|
||||
scattering);
|
||||
}
|
||||
|
|
|
@ -50,11 +50,17 @@ void main(void) {
|
|||
<$evalMaterialRoughness(roughnessTex, roughness, matKey, roughness)$>;
|
||||
|
||||
float metallic = getMaterialMetallic(mat);
|
||||
vec3 fresnel = vec3(0.03); // Default dielectric fresnel value
|
||||
if (metallic <= 0.5) {
|
||||
metallic = 0.0;
|
||||
} else {
|
||||
fresnel = albedo;
|
||||
metallic = 1.0;
|
||||
}
|
||||
|
||||
vec3 emissive = getMaterialEmissive(mat);
|
||||
<$evalMaterialEmissive(emissiveTex, emissive, matKey, emissive)$>;
|
||||
|
||||
|
||||
vec3 fragPosition = _position.xyz;
|
||||
vec3 fragNormal = normalize(_normal);
|
||||
|
||||
|
@ -67,6 +73,7 @@ void main(void) {
|
|||
fragPosition,
|
||||
fragNormal,
|
||||
albedo,
|
||||
fresnel,
|
||||
metallic,
|
||||
emissive,
|
||||
roughness, opacity),
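The hunk above collapses metallic to a binary choice: at or below 0.5 the surface is treated as a dielectric with the default 0.03 fresnel reflectance, above it as a metal whose fresnel color is its albedo. A sketch of that selection on the CPU; the struct and helper names are illustrative:

#include <cstdio>

struct Vec3 { float r, g, b; };

// Binary metallic model used by this shader: dielectrics get a fixed F0,
// metals reuse their albedo as the fresnel reflectance.
void evalFresnelAndMetallic(Vec3 albedo, float& metallic, Vec3& fresnel) {
    if (metallic <= 0.5f) {
        metallic = 0.0f;
        fresnel = { 0.03f, 0.03f, 0.03f }; // default dielectric F0
    } else {
        metallic = 1.0f;
        fresnel = albedo;                  // metals tint their reflection
    }
}

int main() {
    Vec3 gold{ 1.0f, 0.71f, 0.29f };
    Vec3 fresnel;
    float metallic = 0.9f;
    evalFresnelAndMetallic(gold, metallic, fresnel);
    std::printf("metallic %.1f, F0 = (%.2f, %.2f, %.2f)\n", metallic, fresnel.r, fresnel.g, fresnel.b);
    return 0;
}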
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
|
||||
<@include MaterialTextures.slh@>
|
||||
<$declareMaterialTextures(ALBEDO, ROUGHNESS, _SCRIBE_NULL, _SCRIBE_NULL, EMISSIVE, OCCLUSION)$>
|
||||
<@include LightingModel.slh@>
|
||||
|
||||
in vec2 _texCoord0;
|
||||
in vec3 _color;
|
||||
|
@ -35,5 +36,5 @@ void main(void) {
|
|||
<$evalMaterialAlbedo(albedoTex, albedo, matKey, albedo)$>;
|
||||
albedo *= _color;
|
||||
|
||||
_fragColor = vec4(albedo, opacity);
|
||||
_fragColor = vec4(albedo * isUnlitEnabled(), opacity);
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
//
|
||||
|
||||
<@include DeferredBufferWrite.slh@>
|
||||
<@include LightingModel.slh@>
|
||||
<@include model/Material.slh@>
|
||||
|
||||
<@include MaterialTextures.slh@>
|
||||
|
@ -40,5 +41,5 @@ void main(void) {
|
|||
packDeferredFragmentUnlit(
|
||||
normalize(_normal),
|
||||
opacity,
|
||||
albedo);
|
||||
albedo * isUnlitEnabled());
|
||||
}
|
||||
|
|
|
@ -11,13 +11,18 @@
|
|||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include DeferredLighting.slh@>
|
||||
|
||||
<@include model/Light.slh@>
|
||||
|
||||
<@include LightingModel.slh@>
|
||||
|
||||
<@include LightDirectional.slh@>
|
||||
<$declareLightingDirectional()$>
|
||||
|
||||
<@include gpu/Transform.slh@>
|
||||
<$declareStandardCameraTransform()$>
|
||||
|
||||
vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 specular, float roughness, float opacity) {
|
||||
vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 fresnel, float roughness, float opacity) {
|
||||
|
||||
// Need the light now
|
||||
Light light = getLight();
|
||||
|
@ -30,9 +35,12 @@ vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 a
|
|||
|
||||
vec3 color = opacity * albedo * getLightColor(light) * getLightAmbientIntensity(light);
|
||||
|
||||
vec4 shading = evalFragShading(fragNormal, -getLightDirection(light), fragEyeDir, metallic, specular, roughness);
|
||||
|
||||
color += vec3(albedo * shading.w * opacity + shading.rgb) * shadowAttenuation * getLightColor(light) * getLightIntensity(light);
|
||||
// Directional
|
||||
vec3 directionalDiffuse;
|
||||
vec3 directionalSpecular;
|
||||
evalLightingDirectional(directionalDiffuse, directionalSpecular, light, fragEyeDir, fragNormal, roughness, metallic, fresnel, albedo, shadowAttenuation);
|
||||
color += directionalDiffuse * isDiffuseEnabled() * isDirectionalEnabled();
|
||||
color += directionalSpecular * isSpecularEnabled() * isDirectionalEnabled();
|
||||
|
||||
return vec4(color, opacity);
|
||||
}
|
||||
|
|
|
@ -12,13 +12,17 @@
|
|||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include DeferredLighting.slh@>
|
||||
<@include model/Light.slh@>
|
||||
|
||||
<@include LightingModel.slh@>
|
||||
|
||||
<@include LightDirectional.slh@>
|
||||
<$declareLightingDirectional()$>
|
||||
|
||||
<@include gpu/Transform.slh@>
|
||||
<$declareStandardCameraTransform()$>
|
||||
|
||||
vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 specular, float roughness, float opacity) {
|
||||
vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 albedo, float metallic, vec3 fresnel, float roughness, float opacity) {
|
||||
|
||||
// Need the light now
|
||||
Light light = getLight();
|
||||
|
@ -31,9 +35,12 @@ vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 a
|
|||
|
||||
vec3 color = opacity * albedo * getLightColor(light) * getLightAmbientIntensity(light);
|
||||
|
||||
vec4 shading = evalFragShading(fragNormal, -getLightDirection(light), fragEyeDir, metallic, specular, roughness);
|
||||
|
||||
color += vec3(albedo * shading.w * opacity + shading.rgb) * shadowAttenuation * getLightColor(light) * getLightIntensity(light);
|
||||
// Directional
|
||||
vec3 directionalDiffuse;
|
||||
vec3 directionalSpecular;
|
||||
evalLightingDirectional(directionalDiffuse, directionalSpecular, light, fragEyeDir, fragNormal, roughness, metallic, fresnel, albedo, shadowAttenuation);
|
||||
color += directionalDiffuse;
|
||||
color += directionalSpecular / opacity;
|
||||
|
||||
return vec4(color, opacity);
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
// point_light.frag
|
||||
// fragment shader
|
||||
//
|
||||
// Created by Andrzej Kapolka on 9/18/14.
|
||||
// Created by Sam Gateau on 9/18/15.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
|
@ -15,29 +15,36 @@
|
|||
// Everything about deferred buffer
|
||||
<@include DeferredBufferRead.slh@>
|
||||
|
||||
// Everything about deferred lighting
|
||||
<@include DeferredLighting.slh@>
|
||||
<$declareDeferredCurvature()$>
|
||||
|
||||
// Everything about light
|
||||
<@include model/Light.slh@>
|
||||
|
||||
<@include LightingModel.slh@>
|
||||
|
||||
<@include LightPoint.slh@>
|
||||
<$declareLightingPoint(supportScattering)$>
|
||||
|
||||
|
||||
uniform vec4 texcoordFrameTransform;
|
||||
|
||||
in vec4 _texCoord0;
|
||||
out vec4 _fragColor;
|
||||
|
||||
void main(void) {
|
||||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFrameTransform deferredTransform = getDeferredFrameTransform();
|
||||
|
||||
// Grab the fragment data from the uv
|
||||
vec2 texCoord = _texCoord0.st / _texCoord0.q;
|
||||
texCoord *= texcoordFrameTransform.zw;
|
||||
texCoord += texcoordFrameTransform.xy;
|
||||
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, texCoord);
|
||||
|
||||
if (frag.mode == FRAG_MODE_UNLIT) {
|
||||
discard;
|
||||
}
|
||||
|
||||
mat4 invViewMat = deferredTransform.viewInverse;
|
||||
|
||||
// Kill if in front of the light volume
|
||||
float depth = frag.depthVal;
|
||||
if (depth < gl_FragCoord.z) {
|
||||
|
@ -47,38 +54,33 @@ void main(void) {
|
|||
// Need the light now
|
||||
Light light = getLight();
|
||||
|
||||
// Make the Light vector going from fragment to light center in world space
|
||||
// Frag pos in world
|
||||
mat4 invViewMat = getViewInverse();
|
||||
vec4 fragPos = invViewMat * frag.position;
|
||||
vec3 fragLightVec = getLightPosition(light) - fragPos.xyz;
|
||||
|
||||
// Kill if too far from the light center
|
||||
if (dot(fragLightVec, fragLightVec) > getLightCutoffSquareRadius(light)) {
|
||||
// Clip against the light volume and make the light vector going from the fragment to the light center in world space
|
||||
vec4 fragLightVecLen2;
|
||||
if (!clipFragToLightVolumePoint(light, fragPos.xyz, fragLightVecLen2)) {
|
||||
discard;
|
||||
}
|
||||
|
||||
// Alright, we're valid in the volume
|
||||
float fragLightDistance = length(fragLightVec);
|
||||
vec3 fragLightDir = fragLightVec / fragLightDistance;
|
||||
|
||||
// Eval shading
|
||||
vec3 fragNormal = vec3(frag.normal);
|
||||
// Frag to eye vec
|
||||
vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);
|
||||
vec3 fragEyeDir = normalize(fragEyeVector.xyz);
|
||||
vec4 shading = evalFragShading(fragNormal, fragLightDir, fragEyeDir, frag.metallic, frag.specular, frag.roughness);
|
||||
|
||||
// Eval attenuation
|
||||
float radialAttenuation = evalLightAttenuation(light, fragLightDistance);
|
||||
|
||||
// Final Lighting color
|
||||
vec3 fragColor = (shading.w * frag.diffuse + shading.xyz);
|
||||
_fragColor = vec4(fragColor * radialAttenuation * getLightColor(light) * getLightIntensity(light) * frag.obscurance, 0.0);
|
||||
|
||||
if (getLightShowContour(light) > 0.0) {
|
||||
// Show edge
|
||||
float edge = abs(2.0 * ((getLightRadius(light) - fragLightDistance) / (0.1)) - 1.0);
|
||||
if (edge < 1) {
|
||||
float edgeCoord = exp2(-8.0*edge*edge);
|
||||
_fragColor = vec4(edgeCoord * edgeCoord * getLightShowContour(light) * getLightColor(light), 0.0);
|
||||
}
|
||||
vec3 diffuse;
|
||||
vec3 specular;
|
||||
vec4 midNormalCurvature;
|
||||
vec4 lowNormalCurvature;
|
||||
if (frag.mode == FRAG_MODE_SCATTERING) {
|
||||
unpackMidLowNormalCurvature(texCoord, midNormalCurvature, lowNormalCurvature);
|
||||
}
|
||||
evalLightingPoint(diffuse, specular, light,
|
||||
fragLightVecLen2.xyz, fragEyeDir, frag.normal, frag.roughness,
|
||||
frag.metallic, frag.fresnel, frag.albedo, 1.0,
|
||||
frag.scattering, midNormalCurvature, lowNormalCurvature);
|
||||
|
||||
_fragColor.rgb += diffuse;
|
||||
_fragColor.rgb += specular;
|
||||
}
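clipFragToLightVolumePoint replaces the old explicit cutoff-radius test here: it rejects fragments outside the point light's cutoff sphere and, judging by the name fragLightVecLen2, hands back the fragment-to-light vector together with its squared length so the lighting code does not recompute it. A CPU sketch of that kind of test, under the assumption that the cutoff is a squared radius as in the removed code; the function name below is illustrative, not the engine's:

#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

static Vec3 sub(Vec3 a, Vec3 b) { return { a.x - b.x, a.y - b.y, a.z - b.z }; }
static float dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

// Returns false when the fragment lies outside the light's cutoff sphere;
// otherwise fills the fragment-to-light vector and its squared length.
bool clipFragToPointLight(Vec3 lightPos, float cutoffRadius, Vec3 fragPos,
                          Vec3& fragLightVec, float& len2) {
    fragLightVec = sub(lightPos, fragPos);
    len2 = dot(fragLightVec, fragLightVec);
    return len2 <= cutoffRadius * cutoffRadius;
}

int main() {
    Vec3 lightPos{ 0.0f, 2.0f, 0.0f };
    Vec3 frag{ 1.0f, 0.0f, 0.0f };
    Vec3 lightVec; float len2;
    bool lit = clipFragToPointLight(lightPos, 3.0f, frag, lightVec, len2);
    std::printf("lit=%d, distance=%.2f\n", lit, std::sqrt(len2));
    return 0;
}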
|
||||
|
|
|
@ -41,7 +41,7 @@ void main(void) {
|
|||
#ifdef PROCEDURAL_V1
|
||||
specular = getProceduralColor().rgb;
|
||||
// Procedural Shaders are expected to be Gamma corrected so let's bring back the RGB in linear space for the rest of the pipeline
|
||||
specular = pow(specular, vec3(2.2));
|
||||
//specular = pow(specular, vec3(2.2));
|
||||
emissiveAmount = 1.0;
|
||||
#else
|
||||
emissiveAmount = getProceduralColors(diffuse, specular, shininess);
|
||||
|
@ -54,6 +54,6 @@ void main(void) {
|
|||
normal, 1.0, diffuse, max(0, 1.0 - shininess / 128.0), DEFAULT_METALLIC, specular, specular);
|
||||
} else {
|
||||
packDeferredFragment(
|
||||
normal, 1.0, diffuse, max(0, 1.0 - shininess / 128.0), length(specular), DEFAULT_EMISSIVE, DEFAULT_OCCLUSION);
|
||||
normal, 1.0, diffuse, max(0, 1.0 - shininess / 128.0), length(specular), DEFAULT_EMISSIVE, DEFAULT_OCCLUSION, DEFAULT_SCATTERING);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -36,5 +36,6 @@ void main(void) {
|
|||
DEFAULT_ROUGHNESS,
|
||||
DEFAULT_METALLIC,
|
||||
DEFAULT_EMISSIVE,
|
||||
DEFAULT_OCCLUSION);
|
||||
DEFAULT_OCCLUSION,
|
||||
DEFAULT_SCATTERING);
|
||||
}
|
|
@ -5,7 +5,7 @@
|
|||
// spot_light.frag
|
||||
// fragment shader
|
||||
//
|
||||
// Created by Andrzej Kapolka on 9/18/14.
|
||||
// Created by Sam Gateau on 9/18/15.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
|
@ -15,29 +15,36 @@
|
|||
// Everything about deferred buffer
|
||||
<@include DeferredBufferRead.slh@>
|
||||
|
||||
// Everything about deferred lighting
|
||||
<@include DeferredLighting.slh@>
|
||||
<$declareDeferredCurvature()$>
|
||||
|
||||
// Everything about light
|
||||
<@include model/Light.slh@>
|
||||
|
||||
<@include LightingModel.slh@>
|
||||
|
||||
<@include LightSpot.slh@>
|
||||
<$declareLightingSpot(supportScattering)$>
|
||||
|
||||
uniform vec4 texcoordFrameTransform;
|
||||
|
||||
in vec4 _texCoord0;
|
||||
out vec4 _fragColor;
|
||||
|
||||
void main(void) {
|
||||
|
||||
DeferredTransform deferredTransform = getDeferredTransform();
|
||||
DeferredFrameTransform deferredTransform = getDeferredFrameTransform();
|
||||
|
||||
// Grab the fragment data from the uv
|
||||
vec2 texCoord = _texCoord0.st / _texCoord0.q;
|
||||
texCoord *= texcoordFrameTransform.zw;
|
||||
texCoord += texcoordFrameTransform.xy;
|
||||
|
||||
DeferredFragment frag = unpackDeferredFragment(deferredTransform, texCoord);
|
||||
|
||||
if (frag.mode == FRAG_MODE_UNLIT) {
|
||||
discard;
|
||||
}
|
||||
|
||||
mat4 invViewMat = deferredTransform.viewInverse;
|
||||
|
||||
// Kill if in front of the light volume
|
||||
float depth = frag.depthVal;
|
||||
if (depth < gl_FragCoord.z) {
|
||||
|
@ -47,50 +54,36 @@ void main(void) {
|
|||
// Need the light now
|
||||
Light light = getLight();
|
||||
|
||||
// Make the Light vector going from fragment to light center in world space
|
||||
// Frag pos in world
|
||||
mat4 invViewMat = getViewInverse();
|
||||
vec4 fragPos = invViewMat * frag.position;
|
||||
vec3 fragLightVec = getLightPosition(light) - fragPos.xyz;
|
||||
|
||||
// Kill if too far from the light center
|
||||
if (dot(fragLightVec, fragLightVec) > getLightCutoffSquareRadius(light)) {
|
||||
// Clip against the light volume and make the light vector going from the fragment to the light center in world space
|
||||
vec4 fragLightVecLen2;
|
||||
vec4 fragLightDirLen;
|
||||
float cosSpotAngle;
|
||||
if (!clipFragToLightVolumeSpot(light, fragPos.xyz, fragLightVecLen2, fragLightDirLen, cosSpotAngle)) {
|
||||
discard;
|
||||
}
|
||||
|
||||
// Alright, we're valid in the volume
|
||||
float fragLightDistance = length(fragLightVec);
|
||||
vec3 fragLightDir = fragLightVec / fragLightDistance;
|
||||
|
||||
// Kill if not in the spot light (ah ah !)
|
||||
vec3 lightSpotDir = getLightDirection(light);
|
||||
float cosSpotAngle = max(-dot(fragLightDir, lightSpotDir), 0.0);
|
||||
if (cosSpotAngle < getLightSpotAngleCos(light)) {
|
||||
discard;
|
||||
}
|
||||
|
||||
// Eval shading
|
||||
vec3 fragNormal = vec3(frag.normal);
|
||||
// Frag to eye vec
|
||||
vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);
|
||||
vec3 fragEyeDir = normalize(fragEyeVector.xyz);
|
||||
vec4 shading = evalFragShading(fragNormal, fragLightDir, fragEyeDir, frag.metallic, frag.specular, frag.roughness);
|
||||
|
||||
// Eval attenuation
|
||||
float radialAttenuation = evalLightAttenuation(light, fragLightDistance);
|
||||
float angularAttenuation = evalLightSpotAttenuation(light, cosSpotAngle);
|
||||
|
||||
// Final Lighting color
|
||||
vec3 fragColor = (shading.w * frag.diffuse + shading.xyz);
|
||||
_fragColor = vec4(fragColor * angularAttenuation * radialAttenuation * getLightColor(light) * getLightIntensity(light) * frag.obscurance, 0.0);
|
||||
|
||||
if (getLightShowContour(light) > 0.0) {
|
||||
// Show edges
|
||||
float edgeDistR = (getLightRadius(light) - fragLightDistance);
|
||||
float edgeDistS = dot(fragLightDistance * vec2(cosSpotAngle, sqrt(1.0 - cosSpotAngle * cosSpotAngle)), -getLightSpotOutsideNormal2(light));
|
||||
float edgeDist = min(edgeDistR, edgeDistS);
|
||||
float edge = abs(2.0 * (edgeDist / (0.1)) - 1.0);
|
||||
if (edge < 1) {
|
||||
float edgeCoord = exp2(-8.0*edge*edge);
|
||||
_fragColor = vec4(edgeCoord * edgeCoord * getLightColor(light), 0.0);
|
||||
}
|
||||
vec3 diffuse;
|
||||
vec3 specular;
|
||||
vec4 midNormalCurvature;
|
||||
vec4 lowNormalCurvature;
|
||||
if (frag.mode == FRAG_MODE_SCATTERING) {
|
||||
unpackMidLowNormalCurvature(texCoord, midNormalCurvature, lowNormalCurvature);
|
||||
}
|
||||
evalLightingSpot(diffuse, specular, light,
|
||||
fragLightDirLen.xyzw, cosSpotAngle, fragEyeDir, frag.normal, frag.roughness,
|
||||
frag.metallic, frag.fresnel, frag.albedo, 1.0,
|
||||
frag.scattering, midNormalCurvature, lowNormalCurvature);
|
||||
|
||||
_fragColor.rgb += diffuse;
|
||||
_fragColor.rgb += specular;
|
||||
}
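clipFragToLightVolumeSpot adds the cone test on top of the radius test: the fragment survives only if the angle between the light-to-fragment direction and the spot axis stays inside the spot cone, compared via cosines exactly as the removed inline code did. A sketch of that check, assuming the same cosine comparison; names below are illustrative:

#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

static float dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
static Vec3 normalize(Vec3 v) {
    float l = std::sqrt(dot(v, v));
    return { v.x / l, v.y / l, v.z / l };
}

// fragLightDir points from the fragment toward the light; spotDir is the
// direction the spot light shines in. The fragment is inside the cone when
// the cosine of the angle to the axis exceeds the spot's cosine threshold.
bool insideSpotCone(Vec3 fragLightDir, Vec3 spotDir, float spotAngleCos, float& cosSpotAngle) {
    cosSpotAngle = std::max(-dot(normalize(fragLightDir), normalize(spotDir)), 0.0f);
    return cosSpotAngle >= spotAngleCos;
}

int main() {
    Vec3 fragToLight{ 0.0f, 1.0f, 0.2f };
    Vec3 spotDir{ 0.0f, -1.0f, 0.0f };
    float cosAngle;
    bool inside = insideSpotCone(fragToLight, spotDir, std::cos(0.5f), cosAngle);
    std::printf("inside=%d, cos=%.3f\n", inside, cosAngle);
    return 0;
}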
|
||||
|
||||
|
|
|
@ -112,7 +112,7 @@ void main(void) {
|
|||
|
||||
// From now on, ssC is the pixel pos in the side
|
||||
ssC.x -= side.y;
|
||||
vec2 fragPos = (vec2(ssC) + 0.5) / getStereoSideWidth();
|
||||
vec2 fragPos = (vec2(ssC) + 0.5) / getStereoSideWidth();
|
||||
|
||||
// The position and normal of the pixel fragment in Eye space
|
||||
vec3 Cp = evalEyePositionFromZeye(side.x, Zeye, fragPos);
|
||||
|
|
|
@ -0,0 +1,108 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 6/8/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
|
||||
<@include DeferredBufferRead.slh@>
|
||||
<@include model/Light.slh@>
|
||||
|
||||
<$declareDeferredCurvature()$>
|
||||
|
||||
<@include SubsurfaceScattering.slh@>
|
||||
<$declareSubsurfaceScatteringBRDF()$>
|
||||
|
||||
in vec2 varTexCoord0;
|
||||
out vec4 _fragColor;
|
||||
|
||||
uniform vec2 uniformCursorTexcoord = vec2(0.5);
|
||||
|
||||
//uniform vec3 uniformLightVector = vec3(1.0);
|
||||
|
||||
vec3 evalScatteringBRDF(vec2 texcoord) {
|
||||
DeferredFragment fragment = unpackDeferredFragmentNoPosition(texcoord);
|
||||
|
||||
vec3 normal = fragment.normal; // .getWorldNormal(varTexCoord0);
|
||||
vec4 blurredCurvature = fetchCurvature(texcoord);
|
||||
vec4 diffusedCurvature = fetchDiffusedCurvature(texcoord);
|
||||
vec3 midNormal = normalize((blurredCurvature.xyz - 0.5f) * 2.0f);
|
||||
vec3 lowNormal = normalize((diffusedCurvature.xyz - 0.5f) * 2.0f);
|
||||
float curvature = unpackCurvature(diffusedCurvature.w);
|
||||
|
||||
|
||||
// Transform directions to worldspace
|
||||
vec3 fragNormal = vec3((normal));
|
||||
|
||||
// Get light
|
||||
Light light = getLight();
|
||||
vec3 fresnel = vec3(0.028); // Default dielectric fresnel value for skin
|
||||
float metallic = 0.0;
|
||||
|
||||
vec3 fragLightDir = -normalize(getLightDirection(light));
|
||||
|
||||
|
||||
vec3 brdf = evalSkinBRDF(fragLightDir, fragNormal, midNormal, lowNormal, curvature);
|
||||
|
||||
return brdf;
|
||||
}
|
||||
|
||||
vec3 drawScatteringTableUV(vec2 cursor, vec2 texcoord) {
|
||||
DeferredFragment fragment = unpackDeferredFragmentNoPosition(cursor);
|
||||
|
||||
vec3 normal = fragment.normal; // .getWorldNormal(varTexCoord0);
|
||||
vec4 blurredCurvature = fetchCurvature(cursor);
|
||||
vec4 diffusedCurvature = fetchDiffusedCurvature(cursor);
|
||||
vec3 midNormal = normalize((blurredCurvature.xyz - 0.5f) * 2.0f);
|
||||
vec3 lowNormal = normalize((diffusedCurvature.xyz - 0.5f) * 2.0f);
|
||||
float curvature = unpackCurvature(diffusedCurvature.w);
|
||||
|
||||
// Get light
|
||||
Light light = getLight();
|
||||
vec3 fresnel = vec3(0.028); // Default dielectric fresnel value for skin
|
||||
|
||||
vec3 fragLightDir = -normalize(getLightDirection(light));
|
||||
|
||||
vec3 bentNdotL = evalScatteringBentNdotL(normal, midNormal, lowNormal, fragLightDir);
|
||||
|
||||
// return clamp(bentNdotL * 0.5 + 0.5, 0.0, 1.0);
|
||||
|
||||
vec3 distance = vec3(0.0);
|
||||
for (int c = 0; c < 3; c++) {
|
||||
vec2 BRDFuv = vec2(clamp(bentNdotL[c] * 0.5 + 0.5, 0.0, 1.0), clamp(2 * curvature, 0.0, 1.0));
|
||||
vec2 delta = BRDFuv - texcoord;
|
||||
distance[c] = 1.0 - dot(delta, delta);
|
||||
}
|
||||
|
||||
distance *= distance;
|
||||
|
||||
float threshold = 0.999;
|
||||
vec3 color = vec3(0.0);
|
||||
bool keep = false;
|
||||
for (int c = 0; c < 3; c++) {
|
||||
if (distance[c] > threshold) {
|
||||
keep = true;
|
||||
color[c] += 1.0;
|
||||
}
|
||||
}
|
||||
|
||||
if (!keep)
|
||||
discard;
|
||||
|
||||
return color;
|
||||
}
|
||||
|
||||
void main(void) {
|
||||
// _fragColor = vec4(evalScatteringBRDF(varTexCoord0), 1.0);
|
||||
// _fragColor = vec4(uniformCursorTexcoord, 0.0, 1.0);
|
||||
|
||||
_fragColor = vec4(drawScatteringTableUV(uniformCursorTexcoord, varTexCoord0), 1.0);
|
||||
}
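drawScatteringTableUV above is a debugging aid: it projects the per-channel bent N·L and the surface curvature into the same UV space the scattering lookup table is indexed with, and lights up the LUT texels the cursor pixel would sample. The mapping itself is just a remap of N·L from [-1, 1] to [0, 1] plus a curvature scale, sketched here per channel on the CPU:

#include <algorithm>
#include <cstdio>

struct Vec2 { float u, v; };

// Map a per-channel bent N.L and the surface curvature to scattering LUT
// coordinates, mirroring the clamp/remap used by the debug shader.
Vec2 scatteringTableUV(float bentNdotL, float curvature) {
    Vec2 uv;
    uv.u = std::min(1.0f, std::max(0.0f, bentNdotL * 0.5f + 0.5f)); // N.L in [-1,1] -> [0,1]
    uv.v = std::min(1.0f, std::max(0.0f, 2.0f * curvature));        // curvature axis
    return uv;
}

int main() {
    float bentNdotL[3] = { -0.2f, 0.1f, 0.4f }; // red, green, blue channels
    float curvature = 0.3f;
    for (int c = 0; c < 3; ++c) {
        Vec2 uv = scatteringTableUV(bentNdotL[c], curvature);
        std::printf("channel %d -> LUT uv (%.2f, %.2f)\n", c, uv.u, uv.v);
    }
    return 0;
}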
|
||||
|
||||
|
27 libraries/render-utils/src/subsurfaceScattering_makeLUT.slf (new file)
|
@ -0,0 +1,27 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 6/8/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include SubsurfaceScattering.slh@>
|
||||
<$declareSubsurfaceScatteringProfileSource()$>
|
||||
<$declareSubsurfaceScatteringIntegrate(2000)$>
|
||||
|
||||
in vec2 varTexCoord0;
|
||||
out vec4 outFragColor;
|
||||
|
||||
void main(void) {
|
||||
|
||||
// Lookup by: x: NDotL y: 1 / r
|
||||
//float y = 2.0 * 1.0 / ((j + 1.0) / (double)height);
|
||||
//float x = ((i / (double)width) * 2.0) - 1.0;
|
||||
|
||||
outFragColor = vec4(integrate(varTexCoord0.x * 2.0 - 1.0, 2.0 / varTexCoord0.y), 1.0);
|
||||
}
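The LUT generator above maps its output texel coordinates to the two integration parameters: u becomes N·L over [-1, 1], and v feeds the second argument as 2/v (the shader's own comment labels that axis "1 / r", i.e. an inverse curvature radius). Iterating the mapping on the CPU makes the table layout explicit; the tiny table size below is arbitrary:

#include <cstdio>

int main() {
    const int width = 4, height = 4; // tiny table just to show the mapping
    for (int j = 0; j < height; ++j) {
        for (int i = 0; i < width; ++i) {
            // Texel center in [0,1], same convention as varTexCoord0.
            float u = (i + 0.5f) / width;
            float v = (j + 0.5f) / height;
            float NdotL = u * 2.0f - 1.0f;   // x axis: N.L in [-1,1]
            float curvatureArg = 2.0f / v;   // second argument to integrate(); axis labeled "1/r" in the shader
            std::printf("texel(%d,%d): NdotL=%+.2f arg=%.2f\n", i, j, NdotL, curvatureArg);
        }
    }
    return 0;
}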
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 6/27/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include SubsurfaceScattering.slh@>
|
||||
<$declareSubsurfaceScatteringGenerateProfileMap()$>
|
||||
|
||||
in vec2 varTexCoord0;
|
||||
out vec4 outFragColor;
|
||||
|
||||
void main(void) {
|
||||
outFragColor = vec4(generateProfile(varTexCoord0.xy), 1.0);
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 6/30/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
|
||||
in vec2 varTexCoord0;
|
||||
out vec4 outFragColor;
|
||||
|
||||
float specularBeckmann(float ndoth, float roughness) {
|
||||
float alpha = acos(ndoth);
|
||||
float ta = tan(alpha);
|
||||
float val = 1.0 / (roughness * roughness * pow(ndoth, 4.0)) * exp(-(ta * ta) / (roughness * roughness));
|
||||
return val;
|
||||
}
|
||||
|
||||
void main(void) {
|
||||
outFragColor = vec4(vec3(0.5 * pow( specularBeckmann(varTexCoord0.x, varTexCoord0.y), 0.1)), 1.0);
|
||||
}
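specularBeckmann above is the classic Beckmann distribution written in terms of tan(alpha); the shader stores 0.5 * pow(D, 0.1) into a texture so the skin shader can read the specular lobe from a LUT instead of evaluating it per pixel. The same evaluation on the CPU, with an arbitrary fixed roughness for the printout:

#include <cmath>
#include <cstdio>

// Beckmann microfacet distribution, parameterized by N.H and roughness,
// written exactly like the shader's specularBeckmann().
float specularBeckmann(float ndoth, float roughness) {
    float alpha = std::acos(ndoth);
    float ta = std::tan(alpha);
    return 1.0f / (roughness * roughness * std::pow(ndoth, 4.0f))
           * std::exp(-(ta * ta) / (roughness * roughness));
}

int main() {
    const float samples[] = { 0.5f, 0.8f, 0.95f, 0.99f };
    for (float ndoth : samples) {
        float d = specularBeckmann(ndoth, 0.35f);
        // Same remap the shader stores in the LUT texel.
        float stored = 0.5f * std::pow(d, 0.1f);
        std::printf("N.H=%.2f  D=%10.4f  stored=%.3f\n", ndoth, d, stored);
    }
    return 0;
}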
|
|
@ -0,0 +1,47 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 6/3/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
<@include gpu/PackedNormal.slh@>
|
||||
|
||||
uniform sampler2D linearDepthMap;
|
||||
uniform sampler2D normalMap;
|
||||
|
||||
in vec2 varTexCoord0;
|
||||
|
||||
out vec4 outLinearDepth;
|
||||
out vec4 outNormal;
|
||||
|
||||
void main(void) {
|
||||
// Gather 2 by 2 quads from texture
|
||||
|
||||
// Try different filters for Z
|
||||
// vec4 Zeyes = textureGather(linearDepthMap, varTexCoord0, 0);
|
||||
// float Zeye = min(min(Zeyes.x, Zeyes.y), min(Zeyes.z, Zeyes.w));
|
||||
float Zeye = texture(linearDepthMap, varTexCoord0).x;
|
||||
|
||||
vec4 rawNormalsX = textureGather(normalMap, varTexCoord0, 0);
|
||||
vec4 rawNormalsY = textureGather(normalMap, varTexCoord0, 1);
|
||||
vec4 rawNormalsZ = textureGather(normalMap, varTexCoord0, 2);
|
||||
|
||||
|
||||
vec3 normal = vec3(0.0);
|
||||
normal += unpackNormal(vec3(rawNormalsX[0], rawNormalsY[0], rawNormalsZ[0]));
|
||||
normal += unpackNormal(vec3(rawNormalsX[1], rawNormalsY[1], rawNormalsZ[1]));
|
||||
normal += unpackNormal(vec3(rawNormalsX[2], rawNormalsY[2], rawNormalsZ[2]));
|
||||
normal += unpackNormal(vec3(rawNormalsX[3], rawNormalsY[3], rawNormalsZ[3]));
|
||||
|
||||
normal = normalize(normal);
|
||||
|
||||
outLinearDepth = vec4(Zeye, 0.0, 0.0, 0.0);
|
||||
outNormal = vec4((normal + vec3(1.0)) * 0.5, 0.0);
|
||||
}
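The half-resolution pass above keeps a single depth tap but averages the four packed normals under each output texel (gathered per component, unpacked, summed) and renormalizes, which gives a stable normal for the curvature pass. The averaging step on the CPU, with a plain array standing in for textureGather; the inputs are assumed to already be unpacked normals:

#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

static Vec3 add(Vec3 a, Vec3 b) { return { a.x + b.x, a.y + b.y, a.z + b.z }; }
static Vec3 normalize(Vec3 v) {
    float l = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
    return { v.x / l, v.y / l, v.z / l };
}

int main() {
    // Four already-unpacked normals of a 2x2 quad (what the shader gets per
    // component from textureGather, after unpackNormal()).
    Vec3 quad[4] = { { 0.0f, 0.0f, 1.0f }, { 0.1f, 0.0f, 0.99f },
                     { 0.0f, 0.1f, 0.99f }, { 0.05f, 0.05f, 0.99f } };
    Vec3 sum{ 0.0f, 0.0f, 0.0f };
    for (const Vec3& n : quad) { sum = add(sum, n); }
    Vec3 averaged = normalize(sum);
    std::printf("downsampled normal: (%.3f, %.3f, %.3f)\n", averaged.x, averaged.y, averaged.z);
    return 0;
}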
|
||||
|
232 libraries/render-utils/src/surfaceGeometry_makeCurvature.slf (new file)
|
@ -0,0 +1,232 @@
|
|||
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
//
|
||||
// Created by Sam Gateau on 6/3/16.
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
<@include DeferredTransform.slh@>
|
||||
<$declareDeferredFrameTransform()$>
|
||||
|
||||
<@include gpu/PackedNormal.slh@>
|
||||
|
||||
struct SurfaceGeometryParams {
|
||||
// Resolution info
|
||||
vec4 resolutionInfo;
|
||||
// Curvature algorithm
|
||||
vec4 curvatureInfo;
|
||||
};
|
||||
|
||||
uniform surfaceGeometryParamsBuffer {
|
||||
SurfaceGeometryParams params;
|
||||
};
|
||||
|
||||
float getCurvatureDepthThreshold() {
|
||||
return params.curvatureInfo.x;
|
||||
}
|
||||
|
||||
float getCurvatureBasisScale() {
|
||||
return params.curvatureInfo.y;
|
||||
}
|
||||
|
||||
float getCurvatureScale() {
|
||||
return params.curvatureInfo.w;
|
||||
}
|
||||
|
||||
bool isFullResolution() {
|
||||
return params.resolutionInfo.w == 0.0;
|
||||
}
|
||||
|
||||
|
||||
uniform sampler2D linearDepthMap;
|
||||
float getZEye(ivec2 pixel) {
|
||||
return -texelFetch(linearDepthMap, pixel, 0).x;
|
||||
}
|
||||
float getZEyeLinear(vec2 texcoord) {
|
||||
return -texture(linearDepthMap, texcoord).x;
|
||||
}
|
||||
|
||||
vec2 sideToFrameTexcoord(vec2 side, vec2 texcoordPos) {
|
||||
return vec2((texcoordPos.x + side.x) * side.y, texcoordPos.y);
|
||||
}
|
||||
|
||||
uniform sampler2D normalMap;
|
||||
|
||||
vec3 getRawNormal(vec2 texcoord) {
|
||||
return texture(normalMap, texcoord).xyz;
|
||||
}
|
||||
|
||||
vec3 getWorldNormal(vec2 texcoord) {
|
||||
vec3 rawNormal = getRawNormal(texcoord);
|
||||
if (isFullResolution()) {
|
||||
return unpackNormal(rawNormal);
|
||||
} else {
|
||||
return normalize((rawNormal - vec3(0.5)) * 2.0);
|
||||
}
|
||||
}
|
||||
|
||||
vec3 getWorldNormalDiff(vec2 texcoord, vec2 delta) {
|
||||
return getWorldNormal(texcoord + delta) - getWorldNormal(texcoord - delta);
|
||||
}
|
||||
|
||||
float getEyeDepthDiff(vec2 texcoord, vec2 delta) {
|
||||
return getZEyeLinear(texcoord + delta) - getZEyeLinear(texcoord - delta);
|
||||
}
|


in vec2 varTexCoord0;
out vec4 outFragColor;

void main(void) {
    // Pixel being shaded
    ivec2 pixelPos;
    vec2 texcoordPos;
    ivec4 stereoSide;
    ivec2 framePixelPos = getPixelPosTexcoordPosAndSide(gl_FragCoord.xy, pixelPos, texcoordPos, stereoSide);
    vec2 stereoSideClip = vec2(stereoSide.x, (isStereo() ? 0.5 : 1.0));

    // The texcoords to fetch in the deferred textures are the exact UVs coming from the vertex shader
    // sideToFrameTexcoord(stereoSideClip, texcoordPos);
    vec2 frameTexcoordPos = varTexCoord0;

    // Fetch the z under the pixel (stereo or not)
    float Zeye = getZEye(framePixelPos);
    if (Zeye <= -getPosLinearDepthFar()) {
        outFragColor = vec4(1.0, 0.0, 0.0, 0.0);
        return;
    }

    float nearPlaneScale = 0.5 * getProjectionNear();

    vec3 worldNormal = getWorldNormal(frameTexcoordPos);

    // The position of the pixel fragment in eye space, then in world space
    vec3 eyePos = evalEyePositionFromZeye(stereoSide.x, Zeye, texcoordPos);
    // vec3 worldPos = (frameTransform._viewInverse * vec4(eyePos, 1.0)).xyz;

    /* if (texcoordPos.y > 0.5) {
        outFragColor = vec4(fract(10.0 * worldPos.xyz), 1.0);
    } else {
        outFragColor = vec4(fract(10.0 * eyePos.xyz), 1.0);
    }*/
    // return;

    // Calculate the perspective scale.
    // Clamp to 0.5
    // float perspectiveScale = max(0.5, (-getProjScaleEye() / Zeye));
    float perspectiveScale = max(0.5, (-getCurvatureBasisScale() * getProjectionNear() / Zeye));
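    // perspectiveScale keeps the sampling footprint roughly constant in eye space: it grows
    // as 1 / |Zeye| for close surfaces and is clamped so distant pixels still use at least
    // half a texel of offset for the finite differences below.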

    // Calculate dF/du and dF/dv
    vec2 viewportScale = perspectiveScale * getInvWidthHeight();
    vec2 du = vec2( viewportScale.x * (stereoSide.w > 0.0 ? 0.5 : 1.0), 0.0f );
    vec2 dv = vec2( 0.0f, viewportScale.y );

    vec4 dFdu = vec4(getWorldNormalDiff(frameTexcoordPos, du), getEyeDepthDiff(frameTexcoordPos, du));
    vec4 dFdv = vec4(getWorldNormalDiff(frameTexcoordPos, dv), getEyeDepthDiff(frameTexcoordPos, dv));
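    // Reject differences taken across a depth discontinuity: step() zeroes dFdu / dFdv when
    // the depth difference stored in .w exceeds the configured threshold, so curvature does
    // not bleed across silhouettes.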
    float threshold = getCurvatureDepthThreshold();
    dFdu *= step(abs(dFdu.w), threshold);
    dFdv *= step(abs(dFdv.w), threshold);

    // Calculate ( du/dx, du/dy, du/dz ) and ( dv/dx, dv/dy, dv/dz )
    // Eval px, py, pz world positions of the basis centered on the world pos of the fragment
    float axeLength = nearPlaneScale;

    vec3 ax = (frameTransform._view[0].xyz * axeLength);
    vec3 ay = (frameTransform._view[1].xyz * axeLength);
    vec3 az = (frameTransform._view[2].xyz * axeLength);

    vec4 px = vec4(eyePos + ax, 0.0);
    vec4 py = vec4(eyePos + ay, 0.0);
    vec4 pz = vec4(eyePos + az, 0.0);

    /* if (texcoordPos.y > 0.5) {
        outFragColor = vec4(fract(px.xyz), 1.0);
    } else {
        outFragColor = vec4(fract(eyePos.xyz), 1.0);
    }*/
    // return;

    /* In case an axis end point goes behind the midway near plane (this shouldn't happen):
    if (px.z >= -nearPlaneScale) {
        outFragColor = vec4(1.0, 0.0, 0.0, 1.0);
        return;
    } else if (py.z >= -nearPlaneScale) {
        outFragColor = vec4(0.0, 1.0, 0.0, 1.0);
        return;
    } else if (pz.z >= -nearPlaneScale) {
        outFragColor = vec4(0.0, 0.0, 1.0, 1.0);
        return;
    }*/

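    // Project the basis end points px, py, pz to screen space and compare them with the
    // fragment's own projected position: the resulting 2D offsets measure how much the
    // screen-space (u, v) position changes when moving along each eye-space axis, which
    // feeds the chain rule below as du/dx, dv/dx, and so on.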
    // Project px, py, pz to homogeneous clip space
    // mat4 viewProj = getProjection(stereoSide.x);
    mat4 viewProj = getProjectionMono();
    px = viewProj * px;
    py = viewProj * py;
    pz = viewProj * pz;

    // then to normalized clip space
    px.xy /= px.w;
    py.xy /= py.w;
    pz.xy /= pz.w;

    vec2 nclipPos = (texcoordPos - 0.5) * 2.0;

    //vec4 clipPos = frameTransform._projection[stereoSide.x] * vec4(eyePos, 1.0);
    vec4 clipPos = getProjectionMono() * vec4(eyePos, 1.0);
    nclipPos = clipPos.xy / clipPos.w;

    /* if (texcoordPos.y > 0.5) {
       // outFragColor = vec4(fract(10.0 * worldPos.xyz), 1.0);
        outFragColor = vec4(fract(10.0 * (nclipPos)), 0.0, 1.0);

    } else {
        outFragColor = vec4(fract(10.0 * (clipPos.xy / clipPos.w)), 0.0, 1.0);
      //  outFragColor = vec4(nclipPos * 0.5 + 0.5, 0.0, 1.0);
    }*/
    //return;

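    // Screen-space displacement of each basis end point relative to the fragment, rescaled by
    // 1 / perspectiveScale so it is commensurate with the footprint used for dFdu / dFdv.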
    float pixPerspectiveScaleInv = 1.0 / (perspectiveScale);
    px.xy = (px.xy - nclipPos) * pixPerspectiveScaleInv;
    py.xy = (py.xy - nclipPos) * pixPerspectiveScaleInv;
    pz.xy = (pz.xy - nclipPos) * pixPerspectiveScaleInv;

    /* if (texcoordPos.y > 0.5) {
       // outFragColor = vec4(fract(10.0 * worldPos.xyz), 1.0);
        outFragColor = vec4(fract(10.0 * (px.xy)), 0.0, 1.0);

    } else {
        outFragColor = vec4(fract(10.0 * (py.xy)), 0.0, 1.0);
      //  outFragColor = vec4(nclipPos * 0.5 + 0.5, 0.0, 1.0);
    }*/
    // return;

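    // Chain rule: dF/dx = dF/du * du/dx + dF/dv * dv/dx (and likewise for y and z), where
    // px.xy, py.xy, pz.xy supply the per-axis (du, dv) terms computed above.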
    // Calculate dF/dx, dF/dy and dF/dz using chain rule
    vec4 dFdx = dFdu * px.x + dFdv * px.y;
    vec4 dFdy = dFdu * py.x + dFdv * py.y;
    vec4 dFdz = dFdu * pz.x + dFdv * pz.y;

    vec3 trace = vec3(dFdx.x, dFdy.y, dFdz.z);

    /*if (dot(trace, trace) > params.curvatureInfo.w) {
        outFragColor = vec4(dFdx.x, dFdy.y, dFdz.z, 1.0);
        return;
    }*/
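    // trace holds the diagonal of the normal's spatial gradient (dNx/dx, dNy/dy, dNz/dz), so
    // averaging its components gives a divergence-of-normal estimate of the mean curvature,
    // scaled by curvatureInfo.w and packed to [0, 1] in the alpha channel below.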

    // Calculate the mean curvature
    float meanCurvature = ((trace.x + trace.y + trace.z) * 0.33333333333333333) * params.curvatureInfo.w;

    outFragColor = vec4(vec3(worldNormal + 1.0) * 0.5, (meanCurvature + 1.0) * 0.5);
}
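For reference, a minimal sketch of how a later pass might decode the buffer written by this shader. The sampler and function names here are illustrative assumptions, not part of this change; the math is simply the inverse of the packing above.

    // Hypothetical consumer-side helpers (names assumed for illustration only).
    uniform sampler2D curvatureMap; // assumed binding of the normal + curvature buffer

    vec3 unpackCurvatureNormal(vec4 texel) {
        // Inverse of vec3(worldNormal + 1.0) * 0.5
        return normalize(texel.xyz * 2.0 - 1.0);
    }

    float unpackCurvature(vec4 texel) {
        // Inverse of (meanCurvature + 1.0) * 0.5
        return texel.w * 2.0 - 1.0;
    }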