Mirror of https://github.com/JulianGro/overte.git (synced 2025-04-25 15:13:09 +02:00)
Fixing the AO results at the border of the frame by guarding the fetch out of the frame
parent 8d1ab01018
commit 16573357d8
6 changed files with 100 additions and 68 deletions
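In short, the occlusion and blur shaders now carry the stereo side bounds along with each fetch and refuse to sample texels that lie outside the current side or the frame. The sketch below condenses the idea; guardTap, sideWidth and frameHeight are illustrative names, while the mirror-the-offset behaviour follows the guarded fetch added to getOffsetPosition in the diff.

// Illustrative GLSL sketch of the border guard (not the literal diff code).
// ssC is the center pixel, ssOffset the sampling offset inside one stereo side.
ivec2 guardTap(ivec2 ssC, ivec2 ssOffset, int sideWidth, int frameHeight) {
    ivec2 ssP = ssC + ssOffset;
    // When the tap would land outside the side or the frame, mirror the offset
    // back toward the center pixel instead of fetching undefined texels.
    ssP.x = (ssP.x < 0 || ssP.x >= sideWidth) ? ssC.x - ssOffset.x : ssP.x;
    ssP.y = (ssP.y < 0 || ssP.y >= frameHeight) ? ssC.y - ssOffset.y : ssP.y;
    return ssP;
}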
@@ -57,7 +57,7 @@ namespace gpu {
 protected:
-    static const int QUERY_QUEUE_SIZE = 4;
+    static const int QUERY_QUEUE_SIZE { 4 };

     gpu::Queries _timerQueries;
     int _headIndex = -1;
@@ -48,7 +48,7 @@ public:
     // corner case when radius is 0 or under
     if (samplingRadius <= 0) {
-        coefs[0] = 1.0;
+        coefs[0] = 1.0f;
         return coefs;
     }
@@ -140,12 +140,11 @@ const gpu::PipelinePointer& AmbientOcclusionEffect::getOcclusionPipeline() {
         gpu::StatePointer state = gpu::StatePointer(new gpu::State());

-        // Stencil test all the ao passes for objects pixels only, not the background
+        // Stencil test the ao passes for objects pixels only, not the background
         state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));

         state->setColorWriteMask(true, true, true, false);
-        //state->setColorWriteMask(true, true, true, true);

         // Good to go add the brand new pipeline
         _occlusionPipeline = gpu::Pipeline::create(program, state);
     }
@@ -168,7 +167,7 @@ const gpu::PipelinePointer& AmbientOcclusionEffect::getHBlurPipeline() {
         gpu::StatePointer state = gpu::StatePointer(new gpu::State());

         // Stencil test all the ao passes for objects pixels only, not the background
-        state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
+        //state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));

         state->setColorWriteMask(true, true, true, false);
@@ -194,7 +193,7 @@ const gpu::PipelinePointer& AmbientOcclusionEffect::getVBlurPipeline() {
         gpu::StatePointer state = gpu::StatePointer(new gpu::State());

         // Stencil test all the ao passes for objects pixels only, not the background
-        state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));
+        //state->setStencilTest(true, 0xFF, gpu::State::StencilTest(0, 0xFF, gpu::NOT_EQUAL, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP, gpu::State::STENCIL_OP_KEEP));

         // Vertical blur write just the final result Occlusion value in the alpha channel
         state->setColorWriteMask(true, true, true, false);
@@ -207,13 +206,13 @@ const gpu::PipelinePointer& AmbientOcclusionEffect::getVBlurPipeline() {

 void AmbientOcclusionEffect::setDepthInfo(float nearZ, float farZ) {
-    _frameTransformBuffer.edit<FrameTransform>()._depthInfo = glm::vec4(nearZ*farZ, farZ -nearZ, -farZ, 0.0f);
+    _frameTransformBuffer.edit<FrameTransform>().depthInfo = glm::vec4(nearZ*farZ, farZ -nearZ, -farZ, 0.0f);
 }

 void AmbientOcclusionEffect::setResolutionLevel(int level) {
     level = std::max(0, std::min(level, 4));
     if (level != getResolutionLevel()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._resolutionInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().resolutionInfo;
         current.x = (float)level;

         // Communicate the change to the Framebuffer cache
@@ -222,68 +221,77 @@ void AmbientOcclusionEffect::setResolutionLevel(int level) {
 }

 void AmbientOcclusionEffect::setRadius(float radius) {
+    const double RADIUS_POWER = 6.0;
     radius = std::max(0.01f, radius);
     if (radius != getRadius()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._radiusInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().radiusInfo;
         current.x = radius;
         current.y = radius * radius;
-        current.z = (float)(1.0 / pow((double)radius, 6.0));
+        current.z = (float)(1.0 / pow((double)radius, RADIUS_POWER));
     }
 }

 void AmbientOcclusionEffect::setLevel(float level) {
     level = std::max(0.01f, level);
     if (level != getLevel()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._radiusInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().radiusInfo;
         current.w = level;
     }
 }

 void AmbientOcclusionEffect::setDithering(bool enabled) {
     if (enabled != isDitheringEnabled()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._ditheringInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().ditheringInfo;
         current.x = (float)enabled;
     }
 }

+void AmbientOcclusionEffect::setBordering(bool enabled) {
+    if (enabled != isBorderingEnabled()) {
+        auto& current = _parametersBuffer.edit<Parameters>().ditheringInfo;
+        current.w = (float)enabled;
+    }
+}
+
 void AmbientOcclusionEffect::setFalloffBias(float bias) {
     bias = std::max(0.0f, std::min(bias, 0.2f));
     if (bias != getFalloffBias()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._ditheringInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().ditheringInfo;
         current.z = (float)bias;
     }
 }

 void AmbientOcclusionEffect::setNumSamples(int numSamples) {
-    numSamples = std::max(1.f, (float) numSamples);
+    numSamples = std::max(1.0f, (float) numSamples);
     if (numSamples != getNumSamples()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._sampleInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().sampleInfo;
         current.x = numSamples;
-        current.y = 1.0 / numSamples;
+        current.y = 1.0f / numSamples;
     }
 }

 void AmbientOcclusionEffect::setNumSpiralTurns(float numTurns) {
-    numTurns = std::max(0.f, (float)numTurns);
+    numTurns = std::max(0.0f, (float)numTurns);
     if (numTurns != getNumSpiralTurns()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._sampleInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().sampleInfo;
         current.z = numTurns;
     }
 }

 void AmbientOcclusionEffect::setEdgeSharpness(float sharpness) {
-    sharpness = std::max(0.f, (float)sharpness);
+    sharpness = std::max(0.0f, (float)sharpness);
     if (sharpness != getEdgeSharpness()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._blurInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().blurInfo;
         current.x = sharpness;
     }
 }

 void AmbientOcclusionEffect::setBlurRadius(int radius) {
-    radius = std::max(0, std::min(6, radius));
+    const int MAX_BLUR_RADIUS = 6;
+    radius = std::max(0, std::min(MAX_BLUR_RADIUS, radius));
     if (radius != getBlurRadius()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._blurInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().blurInfo;
         current.y = (float)radius;
         updateGaussianDistribution();
     }
@@ -292,7 +300,7 @@ void AmbientOcclusionEffect::setBlurRadius(int radius) {
 void AmbientOcclusionEffect::setBlurDeviation(float deviation) {
     deviation = std::max(0.0f, deviation);
     if (deviation != getBlurDeviation()) {
-        auto& current = _parametersBuffer.edit<Parameters>()._blurInfo;
+        auto& current = _parametersBuffer.edit<Parameters>().blurInfo;
         current.z = deviation;
         updateGaussianDistribution();
     }
@@ -326,7 +334,7 @@ void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext
     // Update the depth info with near and far (same for stereo)
     setDepthInfo(args->_viewFrustum->getNearClip(), args->_viewFrustum->getFarClip());

-    _frameTransformBuffer.edit<FrameTransform>()._pixelInfo = args->_viewport;
+    _frameTransformBuffer.edit<FrameTransform>().pixelInfo = args->_viewport;
     //_parametersBuffer.edit<Parameters>()._ditheringInfo.y += 0.25f;

     // Running in stero ?
@@ -335,8 +343,8 @@ void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext
         // Eval the mono projection
         mat4 monoProjMat;
         args->_viewFrustum->evalProjectionMatrix(monoProjMat);
-        _frameTransformBuffer.edit<FrameTransform>()._projection[0] = monoProjMat;
-        _frameTransformBuffer.edit<FrameTransform>()._stereoInfo = glm::vec4(0.0f, args->_viewport.z, 0.0f, 0.0f);
+        _frameTransformBuffer.edit<FrameTransform>().projection[0] = monoProjMat;
+        _frameTransformBuffer.edit<FrameTransform>().stereoInfo = glm::vec4(0.0f, (float)args->_viewport.z, 0.0f, 0.0f);

     } else {
@@ -348,10 +356,10 @@ void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext
         for (int i = 0; i < 2; i++) {
             // Compose the mono Eye space to Stereo clip space Projection Matrix
             auto sideViewMat = projMats[i] * eyeViews[i];
-            _frameTransformBuffer.edit<FrameTransform>()._projection[i] = sideViewMat;
+            _frameTransformBuffer.edit<FrameTransform>().projection[i] = sideViewMat;
         }

-        _frameTransformBuffer.edit<FrameTransform>()._stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);
+        _frameTransformBuffer.edit<FrameTransform>().stereoInfo = glm::vec4(1.0f, (float)(args->_viewport.z >> 1), 0.0f, 1.0f);

     }
@@ -370,8 +378,8 @@ void AmbientOcclusionEffect::run(const render::SceneContextPointer& sceneContext
     batch.setViewTransform(Transform());

     Transform model;
-    model.setTranslation(glm::vec3(sMin, tMin, 0.0));
-    model.setScale(glm::vec3(sWidth, tHeight, 1.0));
+    model.setTranslation(glm::vec3(sMin, tMin, 0.0f));
+    model.setScale(glm::vec3(sWidth, tHeight, 1.0f));
     batch.setModelTransform(model);

     batch.setUniformBuffer(AmbientOcclusionEffect_FrameTransformSlot, _frameTransformBuffer);
@@ -25,42 +25,47 @@ public:
     void run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext);

     void setResolutionLevel(int level);
-    int getResolutionLevel() const { return _parametersBuffer.get<Parameters>()._resolutionInfo.x; }
+    int getResolutionLevel() const { return _parametersBuffer.get<Parameters>().resolutionInfo.x; }

     void setRadius(float radius);
-    float getRadius() const { return _parametersBuffer.get<Parameters>()._radiusInfo.x; }
+    float getRadius() const { return _parametersBuffer.get<Parameters>().radiusInfo.x; }

     // Obscurance level which intensify or dim down the obscurance effect
     void setLevel(float level);
-    float getLevel() const { return _parametersBuffer.get<Parameters>()._radiusInfo.w; }
+    float getLevel() const { return _parametersBuffer.get<Parameters>().radiusInfo.w; }

     // On to randomize the distribution of rays per pixel, should always be true
     void setDithering(bool enabled);
-    bool isDitheringEnabled() const { return _parametersBuffer.get<Parameters>()._ditheringInfo.x; }
+    bool isDitheringEnabled() const { return _parametersBuffer.get<Parameters>().ditheringInfo.x; }

+    // On to avoid evaluating information from non existing pixels Out of the frame, should always be true
+    void setBordering(bool enabled);
+    bool isBorderingEnabled() const { return _parametersBuffer.get<Parameters>().ditheringInfo.w; }
+
     // Faloff Bias
     void setFalloffBias(float bias);
-    int getFalloffBias() const { return (int)_parametersBuffer.get<Parameters>()._ditheringInfo.z; }
+    int getFalloffBias() const { return (int)_parametersBuffer.get<Parameters>().ditheringInfo.z; }

     // Number of samples per pixel to evaluate the Obscurance
     void setNumSamples(int numSamples);
-    int getNumSamples() const { return (int)_parametersBuffer.get<Parameters>()._sampleInfo.x; }
+    int getNumSamples() const { return (int)_parametersBuffer.get<Parameters>().sampleInfo.x; }

     // Number of spiral turns defining an angle span to distribute the samples ray directions
     void setNumSpiralTurns(float numTurns);
-    float getNumSpiralTurns() const { return _parametersBuffer.get<Parameters>()._sampleInfo.z; }
+    float getNumSpiralTurns() const { return _parametersBuffer.get<Parameters>().sampleInfo.z; }

     // Edge blurring setting
     void setEdgeSharpness(float sharpness);
-    int getEdgeSharpness() const { return (int)_parametersBuffer.get<Parameters>()._blurInfo.x; }
+    int getEdgeSharpness() const { return (int)_parametersBuffer.get<Parameters>().blurInfo.x; }

     // Blurring Radius
     // 0 means no blurring
+    const int MAX_BLUR_RADIUS = 6;
     void setBlurRadius(int radius);
-    int getBlurRadius() const { return (int)_parametersBuffer.get<Parameters>()._blurInfo.y; }
+    int getBlurRadius() const { return (int)_parametersBuffer.get<Parameters>().blurInfo.y; }

     void setBlurDeviation(float deviation);
-    float getBlurDeviation() const { return _parametersBuffer.get<Parameters>()._blurInfo.z; }
+    float getBlurDeviation() const { return _parametersBuffer.get<Parameters>().blurInfo.z; }

     double getGPUTime() const { return _gpuTimer.getAverage(); }
@@ -79,13 +84,13 @@ private:
     class FrameTransform {
     public:
         // Pixel info is { viemport width height and stereo on off}
-        glm::vec4 _pixelInfo;
+        glm::vec4 pixelInfo;
         // Depth info is { n.f, f - n, -f}
-        glm::vec4 _depthInfo;
+        glm::vec4 depthInfo;
         // Stereo info
-        glm::vec4 _stereoInfo{ 0.0 };
+        glm::vec4 stereoInfo { 0.0 };
         // Mono proj matrix or Left and Right proj matrix going from Mono Eye space to side clip space
-        glm::mat4 _projection[2];
+        glm::mat4 projection[2];

         FrameTransform() {}
     };
@@ -95,15 +100,15 @@ private:
     class Parameters {
     public:
         // Resolution info
-        glm::vec4 _resolutionInfo{ -1.0, 0.0, 0.0, 0.0 };
+        glm::vec4 resolutionInfo { -1.0f, 0.0f, 0.0f, 0.0f };
         // radius info is { R, R^2, 1 / R^6, ObscuranceScale}
-        glm::vec4 _radiusInfo{ 0.5, 0.5 * 0.5, 1.0 / (0.25 * 0.25 * 0.25), 1.0 };
+        glm::vec4 radiusInfo{ 0.5f, 0.5f * 0.5f, 1.0f / (0.25f * 0.25f * 0.25f), 1.0f };
         // Dithering info
-        glm::vec4 _ditheringInfo{ 0.0, 0.0, 0.01, 0.0 };
+        glm::vec4 ditheringInfo { 0.0f, 0.0f, 0.01f, 0.0f };
         // Sampling info
-        glm::vec4 _sampleInfo{ 11.0, 1.0/11.0, 7.0, 1.0 };
+        glm::vec4 sampleInfo { 11.0f, 1.0/11.0f, 7.0f, 1.0f };
         // Blurring info
-        glm::vec4 _blurInfo{ 1.0, 3.0, 2.0, 0.0 };
+        glm::vec4 blurInfo { 1.0f, 3.0f, 2.0f, 0.0f };
         // gaussian distribution coefficients first is the sampling radius (max is 6)
         const static int GAUSSIAN_COEFS_LENGTH = 8;
         float _gaussianCoefs[GAUSSIAN_COEFS_LENGTH];
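These vec4-packed C++ classes are what the shaders read back as uniform blocks; the shader hunks below access them as params._ditheringInfo, params._radiusInfo and so on. A rough GLSL-side view of the Parameters layout, for orientation only (the block and struct names here are assumptions, and the float[8] gaussian coefficients are shown packed as two vec4s):

// Assumed GLSL mirror of the C++ Parameters block above (names illustrative).
struct AmbientOcclusionParams {
    vec4 _resolutionInfo;   // x: resolution level
    vec4 _radiusInfo;       // x: R, y: R^2, z: 1/R^6, w: obscurance level
    vec4 _ditheringInfo;    // x: dithering on, z: falloff bias, w: bordering on
    vec4 _sampleInfo;       // x: numSamples, y: 1/numSamples, z: spiral turns
    vec4 _blurInfo;         // x: edge sharpness, y: blur radius, z: deviation
    vec4 _gaussianCoefs[2]; // 8 floats, packing assumed
};
uniform ambientOcclusionParamsBuffer {
    AmbientOcclusionParams params;
};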
@@ -126,7 +126,7 @@ void FramebufferCache::resizeAmbientOcclusionBuffers() {
     auto width = _frameBufferSize.width() >> _AOResolutionLevel;
     auto height = _frameBufferSize.height() >> _AOResolutionLevel;
     auto colorFormat = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGB);
-    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_POINT_MAG_LINEAR);
+    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
     auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format

     _occlusionTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
@@ -77,9 +77,9 @@ float getStereoSideWidth() {
     return float(int(frameTransform._stereoInfo.y) >> getResolutionLevel());
 }

-ivec2 getStereoSideInfo(int xPos) {
+ivec3 getStereoSideInfo(int xPos) {
     int sideWidth = int(getStereoSideWidth());
-    return (xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth) );
+    return ivec3(xPos < sideWidth ? ivec2(0, 0) : ivec2(1, sideWidth), sideWidth);
 }
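The side info grows from an ivec2 to an ivec3 so callers no longer need to recompute the side width when bounding their fetches. A short usage sketch; the component meanings are read off the return statement above, and insideSide is an illustrative name:

// side.x: 0 for the left eye side, 1 for the right eye side
// side.y: x offset of that side in the full framebuffer (0 or sideWidth)
// side.z: width of one side in pixels
ivec3 side = getStereoSideInfo(int(gl_FragCoord.x));
bool insideSide = int(gl_FragCoord.x) >= side.y && int(gl_FragCoord.x) < side.y + side.z;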
@@ -112,6 +112,9 @@ float isDitheringEnabled() {
 float getFrameDithering() {
     return params._ditheringInfo.y;
 }
+float isBorderingEnabled() {
+    return params._ditheringInfo.w;
+}

 float getFalloffBias() {
     return params._ditheringInfo.z;
@@ -180,8 +183,13 @@ vec2 fetchOcclusionDepth(ivec2 coords) {

 const int RADIUS_SCALE = 2;

-vec2 evalTapWeightedValue(int r, ivec2 ssC, float key) {
+vec2 evalTapWeightedValue(ivec3 side, int r, ivec2 ssC, float key) {
     ivec2 tapOffset = <$axis$> * (r * RADIUS_SCALE);
     ivec2 ssP = (ssC + tapOffset);
+
+    if ((ssP.x < side.y || ssP.x >= side.z + side.y) || (ssP.y < 0 || ssP.y >= int(getWidthHeight().y))) {
+        return vec2(0.0);
+    }
     vec2 tapOZ = fetchOcclusionDepth(ssC + tapOffset);

     // spatial domain: offset gaussian tap
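Note that the two passes guard differently: the blur taps above are simply rejected and contribute vec2(0.0), while the occlusion taps further down are mirrored back inside the frame. A condensed sketch of the rejection test, with sideStart, sideWidth and frameHeight standing in for side.y, side.z and getWidthHeight().y:

// Returns true when a blur tap can be fetched without leaving the frame.
bool tapInsideFrame(ivec2 ssP, int sideStart, int sideWidth, int frameHeight) {
    bool insideSide = (ssP.x >= sideStart) && (ssP.x < sideStart + sideWidth);
    bool insideRows = (ssP.y >= 0) && (ssP.y < frameHeight);
    return insideSide && insideRows; // otherwise the tap weight stays zero
}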
@@ -196,6 +204,9 @@ vec2 evalTapWeightedValue(int r, ivec2 ssC, float key) {
 vec3 getBlurredOcclusion(vec2 coord) {
     ivec2 ssC = ivec2(coord);

+    // Stereo side info
+    ivec3 side = getStereoSideInfo(ssC.x);
+
     vec3 rawSample;
     vec2 occlusionDepth = fetchOcclusionDepthRaw(ssC, rawSample);
     float key = occlusionDepth.y;
@@ -208,11 +219,11 @@ vec3 getBlurredOcclusion(vec2 coord) {
     int blurRadius = getBlurRadius();
     // negative side first
     for (int r = -blurRadius; r <= -1; ++r) {
-        weightedSums += evalTapWeightedValue(r, ssC, key);
+        weightedSums += evalTapWeightedValue(side, r, ssC, key);
     }
     // then positive side
     for (int r = 1; r <= blurRadius; ++r) {
-        weightedSums += evalTapWeightedValue(r, ssC, key);
+        weightedSums += evalTapWeightedValue(side, r, ssC, key);
     }

     // Final normalization
@@ -24,11 +24,11 @@ float getZEye(ivec2 pixel) {
     return -texelFetch(pyramidMap, pixel, getResolutionLevel()).x;
 }

-vec3 evalEyePositionFromZeye(ivec2 side, float Zeye, vec2 texcoord) {
+vec3 evalEyePositionFromZeye(int side, float Zeye, vec2 texcoord) {
     // compute the view space position using the depth
     // basically manually pick the proj matrix components to do the inverse
-    float Xe = (-Zeye * (texcoord.x * 2.0 - 1.0) - Zeye * frameTransform._projection[side.x][2][0] - frameTransform._projection[side.x][3][0]) / frameTransform._projection[side.x][0][0];
-    float Ye = (-Zeye * (texcoord.y * 2.0 - 1.0) - Zeye * frameTransform._projection[side.x][2][1] - frameTransform._projection[side.x][3][1]) / frameTransform._projection[side.x][1][1];
+    float Xe = (-Zeye * (texcoord.x * 2.0 - 1.0) - Zeye * frameTransform._projection[side][2][0] - frameTransform._projection[side][3][0]) / frameTransform._projection[side][0][0];
+    float Ye = (-Zeye * (texcoord.y * 2.0 - 1.0) - Zeye * frameTransform._projection[side][2][1] - frameTransform._projection[side][3][1]) / frameTransform._projection[side][1][1];
     return vec3(Xe, Ye, Zeye);
 }
@@ -50,12 +50,18 @@ vec2 tapLocation(int sampleNumber, float spinAngle, out float ssR){
     return vec2(cos(angle), sin(angle));
 }

-vec3 getOffsetPosition(ivec2 side, ivec2 ssC, vec2 unitOffset, float ssR) {
+vec3 getOffsetPosition(ivec3 side, ivec2 ssC, vec2 unitOffset, float ssR) {
     // Derivation:
     // mipLevel = floor(log(ssR / MAX_OFFSET));
     int mipLevel = clamp(findMSB(int(ssR)) - LOG_MAX_OFFSET, 0, MAX_MIP_LEVEL);

-    ivec2 ssP = ivec2(ssR * unitOffset) + ssC;
+    ivec2 ssOffset = ivec2(ssR * unitOffset);
+    ivec2 ssP = ssOffset + ssC;
+    if (bool(isBorderingEnabled())) {
+        ssP.x = ((ssP.x < 0 || ssP.x >= side.z) ? ssC.x - ssOffset.x : ssP.x);
+        ssP.y = ((ssP.y < 0 || ssP.y >= int(getWidthHeight().y)) ? ssC.y - ssOffset.y : ssP.y);
+    }

+    ivec2 ssPFull = ivec2(ssP.x + side.y, ssP.y);

     vec3 P;
@@ -66,17 +72,19 @@ vec3 getOffsetPosition(ivec2 side, ivec2 ssC, vec2 unitOffset, float ssR) {
     P.z = -texelFetch(pyramidMap, mipP, getResolutionLevel() + mipLevel).r;

     // Offset to pixel center
-    vec2 tapUV = (vec2(ssP) + vec2(0.5)) / getStereoSideWidth();
-    P = evalEyePositionFromZeye(side, P.z, tapUV);
+    vec2 tapUV = (vec2(ssP) + vec2(0.5)) / float(side.z);
+    P = evalEyePositionFromZeye(side.x, P.z, tapUV);
     return P;
 }

-float sampleAO(in ivec2 side, in ivec2 ssC, in vec3 C, in vec3 n_C, in float ssDiskRadius, in int tapIndex, in float randomPatternRotationAngle) {
+float sampleAO(in ivec3 side, in ivec2 ssC, in vec3 C, in vec3 n_C, in float ssDiskRadius, in int tapIndex, in float randomPatternRotationAngle) {
     // Offset on the unit disk, spun for this pixel
     float ssR;
     vec2 unitOffset = tapLocation(tapIndex, randomPatternRotationAngle, ssR);
     ssR *= ssDiskRadius;

     // The occluding point in camera space
     vec3 Q = getOffsetPosition(side, ssC, unitOffset, ssR);
@@ -98,14 +106,14 @@ void main(void) {
     float Zeye = getZEye(ssC);

     // Stereo side info
-    ivec2 side = getStereoSideInfo(ssC.x);
+    ivec3 side = getStereoSideInfo(ssC.x);

     // From now on, ssC is the pixel pos in the side
     ssC.x -= side.y;
     vec2 fragPos = (vec2(ssC) + 0.5) / getStereoSideWidth();

     // The position and normal of the pixel fragment in Eye space
-    vec3 Cp = evalEyePositionFromZeye(side, Zeye, fragPos);
+    vec3 Cp = evalEyePositionFromZeye(side.x, Zeye, fragPos);
     vec3 Cn = evalEyeNormal(Cp);

     // Choose the screen-space sample radius
@@ -125,12 +133,12 @@ void main(void) {

     // Bilateral box-filter over a quad for free, respecting depth edges
     // (the difference that this makes is subtle)
-    if (abs(dFdx(Cp.z)) < 0.02) {
+    /* if (abs(dFdx(Cp.z)) < 0.02) {
         A -= dFdx(A) * ((ssC.x & 1) - 0.5);
     }
     if (abs(dFdy(Cp.z)) < 0.02) {
         A -= dFdy(A) * ((ssC.y & 1) - 0.5);
-    }
+    }*/

     outFragColor = vec4(packOcclusionDepth(A, CSZToDephtKey(Cp.z)), 1.0);