Mirror of https://github.com/lubosz/overte.git (synced 2025-04-11 13:42:07 +02:00)

Merge branch 'master' of https://github.com/highfidelity/hifi into updateControlsReference-case-9539

Commit f7e906440f
28 changed files with 270 additions and 133 deletions
@@ -263,6 +263,12 @@ Item {
}
StatText {
text: "GPU: " + root.gpuFrameTime.toFixed(1) + " ms"
}
StatText {
text: "GPU (Per pixel): " + root.gpuFrameTimePerPixel.toFixed(5) + " ns/pp"
}
StatText {
text: "GPU frame size: " + root.gpuFrameSize.x + " x " + root.gpuFrameSize.y
}
StatText {
text: "Triangles: " + root.triangles +
@@ -333,7 +333,13 @@ void Stats::updateStats(bool force) {
}

auto gpuContext = qApp->getGPUContext();

auto displayPlugin = qApp->getActiveDisplayPlugin();
if (displayPlugin) {
QVector2D dims(displayPlugin->getRecommendedRenderSize().x, displayPlugin->getRecommendedRenderSize().y);
dims *= displayPlugin->getRenderResolutionScale();
STAT_UPDATE(gpuFrameSize, dims);
STAT_UPDATE(gpuFrameTimePerPixel, (float)(gpuContext->getFrameTimerGPUAverage()*1000000.0 / double(dims.x()*dims.y())));
}
// Update Frame timing (in ms)
STAT_UPDATE(gpuFrameTime, (float)gpuContext->getFrameTimerGPUAverage());
STAT_UPDATE(batchFrameTime, (float)gpuContext->getFrameTimerBatchAverage());
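The per-pixel figure above is just the averaged GPU frame time converted from milliseconds to nanoseconds and divided by the rendered pixel count. A standalone sketch of that arithmetic, with made-up numbers (a 5 ms frame at 1920x1080, not values from this commit):

    #include <cstdio>

    int main() {
        double gpuFrameMs = 5.0;                 // hypothetical average GPU frame time in ms
        double width = 1920.0, height = 1080.0;  // hypothetical recommended render size
        double nsPerPixel = gpuFrameMs * 1000000.0 / (width * height);
        std::printf("GPU (Per pixel): %.5f ns/pp\n", nsPerPixel);  // ~2.41 ns/pp
        return 0;
    }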
@@ -276,7 +276,9 @@ class Stats : public QQuickItem {
STATS_PROPERTY(int, gpuTextureExternalMemory, 0)
STATS_PROPERTY(QString, gpuTextureMemoryPressureState, QString())
STATS_PROPERTY(int, gpuFreeMemory, 0)
STATS_PROPERTY(QVector2D, gpuFrameSize, QVector2D(0,0))
STATS_PROPERTY(float, gpuFrameTime, 0)
STATS_PROPERTY(float, gpuFrameTimePerPixel, 0)
STATS_PROPERTY(float, batchFrameTime, 0)
STATS_PROPERTY(float, engineFrameTime, 0)
STATS_PROPERTY(float, avatarSimulationTime, 0)
@@ -962,6 +964,20 @@ signals:
*/
void gpuFrameTimeChanged();

/**jsdoc
* Triggered when the value of the <code>gpuFrameSize</code> property changes.
* @function Stats.gpuFrameSizeChanged
* @returns {Signal}
*/
void gpuFrameSizeChanged();

/**jsdoc
* Triggered when the value of the <code>gpuFrameTimePerPixel</code> property changes.
* @function Stats.gpuFrameTimePerPixelChanged
* @returns {Signal}
*/
void gpuFrameTimePerPixelChanged();

/**jsdoc
* Triggered when the value of the <code>batchFrameTime</code> property changes.
* @function Stats.batchFrameTimeChanged
@@ -97,6 +97,10 @@ static const float CONTEXT_OVERLAY_UNHOVERED_COLORPULSE = 1.0f;

void ContextOverlayInterface::setEnabled(bool enabled) {
_enabled = enabled;
if (!enabled) {
// Destroy any potentially-active ContextOverlays when disabling the interface
createOrDestroyContextOverlay(EntityItemID(), PointerEvent());
}
}

void ContextOverlayInterface::clickDownOnEntity(const EntityItemID& entityItemID, const PointerEvent& event) {
@@ -2414,11 +2414,7 @@ bool EntityItem::shouldSuppressLocationEdits() const {
}

// if any of the ancestors are MyAvatar, suppress
if (isChildOfMyAvatar()) {
return true;
}

return false;
return isChildOfMyAvatar();
}

QList<EntityDynamicPointer> EntityItem::getActionsOfType(EntityDynamicType typeToGet) const {
@@ -177,9 +177,9 @@ void Haze::setHazeBaseReference(const float hazeBaseReference) {

void Haze::setHazeBackgroundBlend(const float hazeBackgroundBlend) {
auto& params = _hazeParametersBuffer.get<Parameters>();

if (params.hazeBackgroundBlend != hazeBackgroundBlend) {
_hazeParametersBuffer.edit<Parameters>().hazeBackgroundBlend = hazeBackgroundBlend;
auto newBlend = 1.0f - hazeBackgroundBlend;
if (params.hazeBackgroundBlend != newBlend) {
_hazeParametersBuffer.edit<Parameters>().hazeBackgroundBlend = newBlend;
}
}
@@ -234,7 +234,7 @@ void EntityMotionState::getWorldTransform(btTransform& worldTrans) const {
return;
}
assert(entityTreeIsLocked());
if (_motionType == MOTION_TYPE_KINEMATIC && !_entity->hasAncestorOfType(NestableType::Avatar)) {
if (_motionType == MOTION_TYPE_KINEMATIC) {
BT_PROFILE("kinematicIntegration");
// This is physical kinematic motion which steps strictly by the subframe count
// of the physics simulation and uses full gravity for acceleration.

@@ -327,13 +327,6 @@ bool EntityMotionState::remoteSimulationOutOfSync(uint32_t simulationStep) {
return true;
}

bool parentTransformSuccess;
Transform localToWorld = _entity->getParentTransform(parentTransformSuccess);
Transform worldToLocal;
if (parentTransformSuccess) {
localToWorld.evalInverse(worldToLocal);
}

int numSteps = simulationStep - _lastStep;
float dt = (float)(numSteps) * PHYSICS_ENGINE_FIXED_SUBSTEP;

@@ -361,6 +354,10 @@ bool EntityMotionState::remoteSimulationOutOfSync(uint32_t simulationStep) {
return true;
}

if (_body->isStaticOrKinematicObject()) {
return false;
}

_lastStep = simulationStep;
if (glm::length2(_serverVelocity) > 0.0f) {
// the entity-server doesn't know where avatars are, so it doesn't do simple extrapolation for children of

@@ -388,6 +385,12 @@ bool EntityMotionState::remoteSimulationOutOfSync(uint32_t simulationStep) {
// TODO: compensate for _worldOffset offset here

// compute position error
bool parentTransformSuccess;
Transform localToWorld = _entity->getParentTransform(parentTransformSuccess);
Transform worldToLocal;
if (parentTransformSuccess) {
localToWorld.evalInverse(worldToLocal);
}

btTransform worldTrans = _body->getWorldTransform();
glm::vec3 position = worldToLocal.transform(bulletToGLM(worldTrans.getOrigin()));
@@ -407,20 +410,23 @@ bool EntityMotionState::remoteSimulationOutOfSync(uint32_t simulationStep) {

if (glm::length2(_serverAngularVelocity) > 0.0f) {
// compute rotation error
float attenuation = powf(1.0f - _body->getAngularDamping(), dt);
_serverAngularVelocity *= attenuation;
//

// Bullet caps the effective rotation velocity inside its rotation integration step, therefore
// we must integrate with the same algorithm and timestep in order to achieve similar results.
for (int i = 0; i < numSteps; ++i) {
_serverRotation = glm::normalize(computeBulletRotationStep(_serverAngularVelocity,
PHYSICS_ENGINE_FIXED_SUBSTEP) * _serverRotation);
float attenuation = powf(1.0f - _body->getAngularDamping(), PHYSICS_ENGINE_FIXED_SUBSTEP);
_serverAngularVelocity *= attenuation;
glm::quat rotation = computeBulletRotationStep(_serverAngularVelocity, PHYSICS_ENGINE_FIXED_SUBSTEP);
for (int i = 1; i < numSteps; ++i) {
_serverAngularVelocity *= attenuation;
rotation = computeBulletRotationStep(_serverAngularVelocity, PHYSICS_ENGINE_FIXED_SUBSTEP) * rotation;
}
_serverRotation = glm::normalize(rotation * _serverRotation);
const float MIN_ROTATION_DOT = 0.99999f; // This corresponds to about 0.5 degrees of rotation
glm::quat actualRotation = worldToLocal.getRotation() * bulletToGLM(worldTrans.getRotation());
return (fabsf(glm::dot(actualRotation, _serverRotation)) < MIN_ROTATION_DOT);
}
const float MIN_ROTATION_DOT = 0.99999f; // This corresponds to about 0.5 degrees of rotation
glm::quat actualRotation = worldToLocal.getRotation() * bulletToGLM(worldTrans.getRotation());

return (fabsf(glm::dot(actualRotation, _serverRotation)) < MIN_ROTATION_DOT);
return false;
}

bool EntityMotionState::shouldSendUpdate(uint32_t simulationStep) {
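The MIN_ROTATION_DOT comment above can be sanity-checked directly: for two unit quaternions the absolute dot product equals cos(theta/2) of the relative rotation, so a threshold of 0.99999 flags a drift of roughly half a degree. A standalone check (standard C++ only; the constants besides the threshold are mine):

    #include <cmath>
    #include <cstdio>

    int main() {
        const float MIN_ROTATION_DOT = 0.99999f;               // threshold used in the code above
        const float RADIANS_TO_DEGREES = 180.0f / 3.14159265f;
        float theta = 2.0f * std::acos(MIN_ROTATION_DOT);      // relative rotation angle at the threshold
        std::printf("threshold angle = %.3f degrees\n", theta * RADIANS_TO_DEGREES);  // ~0.51 degrees
        return 0;
    }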
@@ -59,7 +59,10 @@ void PhysicalEntitySimulation::addEntityInternal(EntityItemPointer entity) {
_entitiesToAddToPhysics.insert(entity);
}
} else if (canBeKinematic && entity->isMovingRelativeToParent()) {
_simpleKinematicEntities.insert(entity);
SetOfEntities::iterator itr = _simpleKinematicEntities.find(entity);
if (itr == _simpleKinematicEntities.end()) {
_simpleKinematicEntities.insert(entity);
}
}
}

@@ -150,7 +153,10 @@ void PhysicalEntitySimulation::changeEntityInternal(EntityItemPointer entity) {
removeOwnershipData(motionState);
_entitiesToRemoveFromPhysics.insert(entity);
if (canBeKinematic && entity->isMovingRelativeToParent()) {
_simpleKinematicEntities.insert(entity);
SetOfEntities::iterator itr = _simpleKinematicEntities.find(entity);
if (itr == _simpleKinematicEntities.end()) {
_simpleKinematicEntities.insert(entity);
}
}
} else {
_incomingChanges.insert(motionState);
@@ -160,11 +166,20 @@ void PhysicalEntitySimulation::changeEntityInternal(EntityItemPointer entity) {
// The intent is for this object to be in the PhysicsEngine, but it has no MotionState yet.
// Perhaps its shape has changed and it can now be added?
_entitiesToAddToPhysics.insert(entity);
_simpleKinematicEntities.remove(entity); // just in case it's non-physical-kinematic
SetOfEntities::iterator itr = _simpleKinematicEntities.find(entity);
if (itr != _simpleKinematicEntities.end()) {
_simpleKinematicEntities.erase(itr);
}
} else if (canBeKinematic && entity->isMovingRelativeToParent()) {
_simpleKinematicEntities.insert(entity);
SetOfEntities::iterator itr = _simpleKinematicEntities.find(entity);
if (itr == _simpleKinematicEntities.end()) {
_simpleKinematicEntities.insert(entity);
}
} else {
_simpleKinematicEntities.remove(entity); // just in case it's non-physical-kinematic
SetOfEntities::iterator itr = _simpleKinematicEntities.find(entity);
if (itr != _simpleKinematicEntities.end()) {
_simpleKinematicEntities.erase(itr);
}
}
}
@@ -212,7 +227,6 @@ const VectorOfMotionStates& PhysicalEntitySimulation::getObjectsToRemoveFromPhys
assert(motionState);
// TODO: clean this up; just an extra check to avoid the crash that shouldn't happen
if (motionState) {

_entitiesToAddToPhysics.remove(entity);
if (entity->isDead() && entity->getElement()) {
_deadEntities.insert(entity);
@@ -255,7 +269,10 @@ void PhysicalEntitySimulation::getObjectsToAddToPhysics(VectorOfMotionStates& re
// this entity should no longer be on the internal _entitiesToAddToPhysics
entityItr = _entitiesToAddToPhysics.erase(entityItr);
if (entity->isMovingRelativeToParent()) {
_simpleKinematicEntities.insert(entity);
SetOfEntities::iterator itr = _simpleKinematicEntities.find(entity);
if (itr == _simpleKinematicEntities.end()) {
_simpleKinematicEntities.insert(entity);
}
}
} else if (entity->isReadyToComputeShape()) {
ShapeInfo shapeInfo;

@@ -375,19 +392,21 @@ void PhysicalEntitySimulation::handleChangedMotionStates(const VectorOfMotionSta
}

void PhysicalEntitySimulation::addOwnershipBid(EntityMotionState* motionState) {
if (!getEntityTree()->isServerlessMode()) {
motionState->initForBid();
motionState->sendBid(_entityPacketSender, _physicsEngine->getNumSubsteps());
_bids.push_back(motionState);
_nextBidExpiry = glm::min(_nextBidExpiry, motionState->getNextBidExpiry());
if (getEntityTree()->isServerlessMode()) {
return;
}
motionState->initForBid();
motionState->sendBid(_entityPacketSender, _physicsEngine->getNumSubsteps());
_bids.push_back(motionState);
_nextBidExpiry = glm::min(_nextBidExpiry, motionState->getNextBidExpiry());
}

void PhysicalEntitySimulation::addOwnership(EntityMotionState* motionState) {
if (!getEntityTree()->isServerlessMode()) {
motionState->initForOwned();
_owned.push_back(motionState);
if (getEntityTree()->isServerlessMode()) {
return;
}
motionState->initForOwned();
_owned.push_back(motionState);
}

void PhysicalEntitySimulation::sendOwnershipBids(uint32_t numSubsteps) {

@@ -426,7 +445,9 @@ void PhysicalEntitySimulation::sendOwnershipBids(uint32_t numSubsteps) {
}

void PhysicalEntitySimulation::sendOwnedUpdates(uint32_t numSubsteps) {
bool serverlessMode = getEntityTree()->isServerlessMode();
if (getEntityTree()->isServerlessMode()) {
return;
}
PROFILE_RANGE_EX(simulation_physics, "Update", 0x00000000, (uint64_t)_owned.size());
uint32_t i = 0;
while (i < _owned.size()) {

@@ -438,7 +459,7 @@ void PhysicalEntitySimulation::sendOwnedUpdates(uint32_t numSubsteps) {
}
_owned.remove(i);
} else {
if (!serverlessMode && _owned[i]->shouldSendUpdate(numSubsteps)) {
if (_owned[i]->shouldSendUpdate(numSubsteps)) {
_owned[i]->sendUpdate(_entityPacketSender, numSubsteps);
}
++i;
libraries/render-utils/src/BloomApply.shared.slh — new file (16 lines)
@@ -0,0 +1,16 @@
// glsl / C++ compatible source as interface for BloomApply
#ifdef __cplusplus
# define BA_VEC3 glm::vec3
#else
# define BA_VEC3 vec3
#endif

struct Parameters
{
BA_VEC3 _intensities;
};

// <@if 1@>
// Trigger Scribe include
// <@endif@> <!def that !>
//
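The header above is written once and consumed twice: the C++ side includes it directly (BA_VEC3 expands to glm::vec3) and the Scribe-processed shader pulls it in via <@include BloomApply.shared.slh@> (BA_VEC3 expands to vec3), so both sides agree on the Parameters members. A minimal standalone sketch of the same trick, independent of the hifi build and with illustrative names (the real code feeds this struct into a gpu::StructBuffer and a std140 uniform block):

    #include <glm/glm.hpp>

    #ifdef __cplusplus
    #   define BA_VEC3 glm::vec3
    #else
    #   define BA_VEC3 vec3
    #endif

    struct Parameters {
        BA_VEC3 _intensities;   // one weight per bloom blur level
    };

    int main() {
        Parameters p{ BA_VEC3(1.0f / 3.0f) };   // evenly split intensity, as BloomApply::configure does
        return p._intensities.x > 0.0f ? 0 : 1;  // only shows that the C++ branch compiles and is usable
    }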
@@ -9,11 +9,15 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include BloomApply.shared.slh@>

uniform sampler2D blurMap0;
uniform sampler2D blurMap1;
uniform sampler2D blurMap2;
uniform vec3 intensity;

layout(std140) uniform parametersBuffer {
Parameters parameters;
};

in vec2 varTexCoord0;
out vec4 outFragColor;

@@ -23,5 +27,5 @@ void main(void) {
vec4 blur1 = texture(blurMap1, varTexCoord0);
vec4 blur2 = texture(blurMap2, varTexCoord0);

outFragColor = vec4(blur0.rgb*intensity.x + blur1.rgb*intensity.y + blur2.rgb*intensity.z, 1.0f);
outFragColor = vec4(blur0.rgb*parameters._intensities.x + blur1.rgb*parameters._intensities.y + blur2.rgb*parameters._intensities.z, 1.0f);
}
@@ -21,13 +21,15 @@

#define BLOOM_BLUR_LEVEL_COUNT 3

BloomThreshold::BloomThreshold(unsigned int downsamplingFactor) :
_downsamplingFactor(downsamplingFactor) {
BloomThreshold::BloomThreshold(unsigned int downsamplingFactor) {
assert(downsamplingFactor > 0);
_parameters.edit()._sampleCount = downsamplingFactor;
}

void BloomThreshold::configure(const Config& config) {
_threshold = config.threshold;
if (_parameters.get()._threshold != config.threshold) {
_parameters.edit()._threshold = config.threshold;
}
}

void BloomThreshold::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {

@@ -43,10 +45,11 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons

auto inputBuffer = inputFrameBuffer->getRenderBuffer(0);
auto bufferSize = gpu::Vec2u(inputBuffer->getDimensions());
const auto downSamplingFactor = _parameters.get()._sampleCount;

// Downsample resolution
bufferSize.x /= _downsamplingFactor;
bufferSize.y /= _downsamplingFactor;
bufferSize.x /= downSamplingFactor;
bufferSize.y /= downSamplingFactor;

if (!_outputBuffer || _outputBuffer->getSize() != bufferSize) {
auto colorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(inputBuffer->getTexelFormat(), bufferSize.x, bufferSize.y,

@@ -54,10 +57,12 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons

_outputBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("BloomThreshold"));
_outputBuffer->setRenderBuffer(0, colorTexture);

_parameters.edit()._deltaUV = { 1.0f / bufferSize.x, 1.0f / bufferSize.y };
}

static const int COLOR_MAP_SLOT = 0;
static const int THRESHOLD_SLOT = 1;
static const int PARAMETERS_SLOT = 1;

if (!_pipeline) {
auto vs = gpu::StandardShaderLib::getDrawTransformUnitQuadVS();

@@ -66,7 +71,7 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons

gpu::Shader::BindingSet slotBindings;
slotBindings.insert(gpu::Shader::Binding("colorMap", COLOR_MAP_SLOT));
slotBindings.insert(gpu::Shader::Binding("threshold", THRESHOLD_SLOT));
slotBindings.insert(gpu::Shader::Binding("parametersBuffer", PARAMETERS_SLOT));
gpu::Shader::makeProgram(*program, slotBindings);

gpu::StatePointer state = gpu::StatePointer(new gpu::State());

@@ -86,21 +91,26 @@ void BloomThreshold::run(const render::RenderContextPointer& renderContext, cons

batch.setFramebuffer(_outputBuffer);
batch.setResourceTexture(COLOR_MAP_SLOT, inputBuffer);
batch._glUniform1f(THRESHOLD_SLOT, _threshold);
batch.setUniformBuffer(PARAMETERS_SLOT, _parameters);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});

outputs = _outputBuffer;
}

BloomApply::BloomApply() : _intensities{ 1.0f, 1.0f, 1.0f } {
BloomApply::BloomApply() {

}

void BloomApply::configure(const Config& config) {
_intensities.x = config.intensity / 3.0f;
_intensities.y = _intensities.x;
_intensities.z = _intensities.x;
const auto newIntensity = config.intensity / 3.0f;

if (_parameters.get()._intensities.x != newIntensity) {
auto& parameters = _parameters.edit();
parameters._intensities.x = newIntensity;
parameters._intensities.y = newIntensity;
parameters._intensities.z = newIntensity;
}
}

void BloomApply::run(const render::RenderContextPointer& renderContext, const Inputs& inputs) {

@@ -111,7 +121,7 @@ void BloomApply::run(const render::RenderContextPointer& renderContext, const In
static const auto BLUR0_SLOT = 0;
static const auto BLUR1_SLOT = 1;
static const auto BLUR2_SLOT = 2;
static const auto INTENSITY_SLOT = 3;
static const auto PARAMETERS_SLOT = 0;

if (!_pipeline) {
auto vs = gpu::StandardShaderLib::getDrawTransformUnitQuadVS();

@@ -122,7 +132,7 @@ void BloomApply::run(const render::RenderContextPointer& renderContext, const In
slotBindings.insert(gpu::Shader::Binding("blurMap0", BLUR0_SLOT));
slotBindings.insert(gpu::Shader::Binding("blurMap1", BLUR1_SLOT));
slotBindings.insert(gpu::Shader::Binding("blurMap2", BLUR2_SLOT));
slotBindings.insert(gpu::Shader::Binding("intensity", INTENSITY_SLOT));
slotBindings.insert(gpu::Shader::Binding("parametersBuffer", PARAMETERS_SLOT));
gpu::Shader::makeProgram(*program, slotBindings);

gpu::StatePointer state = gpu::StatePointer(new gpu::State());

@@ -151,7 +161,7 @@ void BloomApply::run(const render::RenderContextPointer& renderContext, const In
batch.setResourceTexture(BLUR0_SLOT, blur0FB->getRenderBuffer(0));
batch.setResourceTexture(BLUR1_SLOT, blur1FB->getRenderBuffer(0));
batch.setResourceTexture(BLUR2_SLOT, blur2FB->getRenderBuffer(0));
batch._glUniform3f(INTENSITY_SLOT, _intensities.x, _intensities.y, _intensities.z);
batch.setUniformBuffer(PARAMETERS_SLOT, _parameters);
batch.draw(gpu::TRIANGLE_STRIP, 4);
});
}
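Both configure() methods above follow the same pattern: read the current value with _parameters.get() and only touch _parameters.edit() when something actually changed, presumably so the GPU-side buffer is only re-uploaded when needed. A standalone sketch of that guard with a mock buffer type (the real class is hifi's gpu::StructBuffer; this mock only illustrates the idea):

    #include <cstdio>

    template <typename T>
    class MockStructBuffer {                          // stand-in for gpu::StructBuffer<T>, not the real API
    public:
        const T& get() const { return _value; }
        T& edit() { _dirty = true; return _value; }   // in this mock, editing flags the data as needing upload
        bool isDirty() const { return _dirty; }
    private:
        T _value{};
        bool _dirty{ false };
    };

    struct Parameters { float _threshold{ 0.25f }; };

    int main() {
        MockStructBuffer<Parameters> parameters;
        float configured = 0.25f;                          // same value as already stored
        if (parameters.get()._threshold != configured) {   // guard: skip the redundant edit
            parameters.edit()._threshold = configured;
        }
        std::printf("needs upload: %s\n", parameters.isDirty() ? "yes" : "no");   // "no"
        return 0;
    }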
@@ -61,10 +61,11 @@ public:

private:

#include "BloomThreshold.shared.slh"

gpu::FramebufferPointer _outputBuffer;
gpu::PipelinePointer _pipeline;
float _threshold;
unsigned int _downsamplingFactor;
gpu::StructBuffer<Parameters> _parameters;
};

@@ -95,8 +96,10 @@ public:

private:

#include "BloomApply.shared.slh"

gpu::PipelinePointer _pipeline;
glm::vec3 _intensities;
gpu::StructBuffer<Parameters> _parameters;
};

class BloomDraw {
libraries/render-utils/src/BloomThreshold.shared.slh — new file (18 lines)
@@ -0,0 +1,18 @@
// glsl / C++ compatible source as interface for BloomThreshold
#ifdef __cplusplus
# define BT_VEC2 glm::vec2
#else
# define BT_VEC2 vec2
#endif

struct Parameters
{
BT_VEC2 _deltaUV;
float _threshold;
int _sampleCount;
};

// <@if 1@>
// Trigger Scribe include
// <@endif@> <!def that !>
//
@@ -9,37 +9,35 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include BloomThreshold.shared.slh@>

uniform sampler2D colorMap;
uniform float threshold;
layout(std140) uniform parametersBuffer {
Parameters parameters;
};

in vec2 varTexCoord0;
out vec4 outFragColor;

#define DOWNSAMPLING_FACTOR 4
#define SAMPLE_COUNT (DOWNSAMPLING_FACTOR/2)

void main(void) {
vec2 deltaX = dFdx(varTexCoord0) / SAMPLE_COUNT;
vec2 deltaY = dFdy(varTexCoord0) / SAMPLE_COUNT;
vec2 startUv = varTexCoord0;
vec4 maskedColor = vec4(0,0,0,0);

for (int y=0 ; y<SAMPLE_COUNT ; y++) {
for (int y=0 ; y<parameters._sampleCount ; y++) {
vec2 uv = startUv;

for (int x=0 ; x<SAMPLE_COUNT ; x++) {
for (int x=0 ; x<parameters._sampleCount ; x++) {
vec4 color = texture(colorMap, uv);
float luminance = (color.r+color.g+color.b) / 3.0;
float mask = clamp((luminance-threshold)*0.25, 0, 1);
float mask = clamp((luminance-parameters._threshold)*0.25, 0, 1);

color *= mask;
maskedColor += color;
uv += deltaX;
uv.x += parameters._deltaUV.x;
}

startUv += deltaY;
startUv.y += parameters._deltaUV.y;
}
maskedColor /= SAMPLE_COUNT*SAMPLE_COUNT;
maskedColor /= parameters._sampleCount * parameters._sampleCount;
outFragColor = vec4(maskedColor.rgb, 1.0);
}
@@ -235,15 +235,14 @@ vec3 evalGlobalLightingAlphaBlendedWithHaze(

// Haze
if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {
vec4 colorV4 = computeHazeColor(
vec4(color, 1.0), // fragment original color
vec4 hazeColor = computeHazeColor(
positionES, // fragment position in eye coordinates
fragPositionWS, // fragment position in world coordinates
invViewMat[3].xyz, // eye position in world coordinates
lightDirection // keylight direction vector in world coordinates
);

color = colorV4.rgb;
color = mix(color.rgb, hazeColor.rgb, hazeColor.a);
}

return color;

@@ -273,15 +272,14 @@ vec3 evalGlobalLightingAlphaBlendedWithHaze(

// Haze
if ((isHazeEnabled() > 0.0) && (hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {
vec4 colorV4 = computeHazeColor(
vec4(color, 1.0), // fragment original color
vec4 hazeColor = computeHazeColor(
positionES, // fragment position in eye coordinates
positionWS, // fragment position in world coordinates
invViewMat[3].xyz, // eye position in world coordinates
lightDirection // keylight direction vector
);

color = colorV4.rgb;
color = mix(color.rgb, hazeColor.rgb, hazeColor.a);
}

return color;
@@ -107,11 +107,11 @@ void MakeHaze::run(const render::RenderContextPointer& renderContext, graphics::
haze = _haze;
}

// Buffer slots
const int HazeEffect_ParamsSlot = 0;
const int HazeEffect_TransformBufferSlot = 1;
const int HazeEffect_ColorMapSlot = 2;
const int HazeEffect_LinearDepthMapSlot = 3;
const int HazeEffect_LightingMapSlot = 4;
// Texture slots
const int HazeEffect_LinearDepthMapSlot = 0;

void DrawHaze::configure(const Config& config) {
}

@@ -122,11 +122,10 @@ void DrawHaze::run(const render::RenderContextPointer& renderContext, const Inpu
return;
}

const auto inputBuffer = inputs.get1()->getRenderBuffer(0);
const auto outputBuffer = inputs.get1();
const auto framebuffer = inputs.get2();
const auto transformBuffer = inputs.get3();

auto outputBuffer = inputs.get4();
const auto lightingModel = inputs.get4();

auto depthBuffer = framebuffer->getLinearDepthTexture();
@@ -139,6 +138,10 @@ void DrawHaze::run(const render::RenderContextPointer& renderContext, const Inpu
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);
gpu::StatePointer state = gpu::StatePointer(new gpu::State());

state->setBlendFunction(true,
gpu::State::SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::INV_SRC_ALPHA,
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);

// Mask out haze on the tablet
PrepareStencil::testMask(*state);

@@ -148,15 +151,15 @@ void DrawHaze::run(const render::RenderContextPointer& renderContext, const Inpu
gpu::Shader::BindingSet slotBindings;
slotBindings.insert(gpu::Shader::Binding(std::string("hazeBuffer"), HazeEffect_ParamsSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("deferredFrameTransformBuffer"), HazeEffect_TransformBufferSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("colorMap"), HazeEffect_ColorMapSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("lightingModelBuffer"), render::ShapePipeline::Slot::LIGHTING_MODEL));
slotBindings.insert(gpu::Shader::Binding(std::string("linearDepthMap"), HazeEffect_LinearDepthMapSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("keyLightBuffer"), HazeEffect_LightingMapSlot));
slotBindings.insert(gpu::Shader::Binding(std::string("keyLightBuffer"), render::ShapePipeline::Slot::KEY_LIGHT));
gpu::Shader::makeProgram(*program, slotBindings);
});
});
}

auto sourceFramebufferSize = glm::ivec2(inputBuffer->getDimensions());
auto outputFramebufferSize = glm::ivec2(outputBuffer->getSize());

gpu::doInBatch("DrawHaze::run", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
@@ -165,7 +168,7 @@ void DrawHaze::run(const render::RenderContextPointer& renderContext, const Inpu
batch.setViewportTransform(args->_viewport);
batch.setProjectionTransform(glm::mat4());
batch.resetViewTransform();
batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(sourceFramebufferSize, args->_viewport));
batch.setModelTransform(gpu::Framebuffer::evalSubregionTexcoordTransform(outputFramebufferSize, args->_viewport));

batch.setPipeline(_hazePipeline);

@@ -181,17 +184,17 @@ void DrawHaze::run(const render::RenderContextPointer& renderContext, const Inpu
}

batch.setUniformBuffer(HazeEffect_TransformBufferSlot, transformBuffer->getFrameTransformBuffer());
batch.setUniformBuffer(render::ShapePipeline::Slot::LIGHTING_MODEL, lightingModel->getParametersBuffer());

auto lightStage = args->_scene->getStage<LightStage>();
if (lightStage) {
graphics::LightPointer keyLight;
keyLight = lightStage->getCurrentKeyLight();
if (keyLight) {
batch.setUniformBuffer(HazeEffect_LightingMapSlot, keyLight->getLightSchemaBuffer());
batch.setUniformBuffer(render::ShapePipeline::Slot::KEY_LIGHT, keyLight->getLightSchemaBuffer());
}
}

batch.setResourceTexture(HazeEffect_ColorMapSlot, inputBuffer);
batch.setResourceTexture(HazeEffect_LinearDepthMapSlot, depthBuffer);

batch.draw(gpu::TRIANGLE_STRIP, 4);
@@ -22,6 +22,7 @@
#include <graphics/Haze.h>

#include "SurfaceGeometryPass.h"
#include "LightingModel.h"

using LinearDepthFramebufferPointer = std::shared_ptr<LinearDepthFramebuffer>;

@@ -159,7 +160,7 @@ public:

class DrawHaze {
public:
using Inputs = render::VaryingSet5<graphics::HazePointer, gpu::FramebufferPointer, LinearDepthFramebufferPointer, DeferredFrameTransformPointer, gpu::FramebufferPointer>;
using Inputs = render::VaryingSet5<graphics::HazePointer, gpu::FramebufferPointer, LinearDepthFramebufferPointer, DeferredFrameTransformPointer, LightingModelPointer>;
using Config = HazeConfig;
using JobModel = render::Job::ModelI<DrawHaze, Inputs, Config>;
@@ -228,15 +228,14 @@ vec3 evalGlobalLightingAlphaBlendedWithHaze(
// Haze
// FIXME - temporarily removed until we support it for forward...
/* if ((hazeParams.hazeMode & HAZE_MODE_IS_ACTIVE) == HAZE_MODE_IS_ACTIVE) {
vec4 colorV4 = computeHazeColor(
vec4(color, 1.0), // fragment original color
vec4 hazeColor = computeHazeColor(
positionES, // fragment position in eye coordinates
fragPositionWS, // fragment position in world coordinates
invViewMat[3].xyz, // eye position in world coordinates
lightDirection // keylight direction vector
);

color = colorV4.rgb;
color = mix(color.rgb, hazeColor.rgb, hazeColor.a);
}*/

return color;
@@ -22,7 +22,6 @@

<@include Haze.slh@>

uniform sampler2D colorMap;
uniform sampler2D linearDepthMap;

vec4 unpackPositionFromZeye(vec2 texcoord) {

@@ -46,7 +45,6 @@ void main(void) {
discard;
}

vec4 fragColor = texture(colorMap, varTexCoord0);
vec4 fragPositionES = unpackPositionFromZeye(varTexCoord0);

mat4 viewInverse = getViewInverse();

@@ -56,5 +54,8 @@ void main(void) {
Light light = getKeyLight();
vec3 lightDirectionWS = getLightDirection(light);

outFragColor = computeHazeColor(fragColor, fragPositionES.xyz, fragPositionWS.xyz, eyePositionWS.xyz, lightDirectionWS);
outFragColor = computeHazeColor(fragPositionES.xyz, fragPositionWS.xyz, eyePositionWS.xyz, lightDirectionWS);
if (outFragColor.a < 1e-4) {
discard;
}
}
@@ -92,22 +92,21 @@ vec3 computeHazeColorKeyLightAttenuation(vec3 color, vec3 lightDirectionWS, vec3
}

// Input:
// fragColor - fragment original color
// fragPositionES - fragment position in eye coordinates
// fragPositionWS - fragment position in world coordinates
// eyePositionWS - eye position in world coordinates
// Output:
// fragment colour after haze effect
// haze colour and alpha contains haze blend factor
//
// General algorithm taken from http://www.iquilezles.org/www/articles/fog/fog.htm, with permission
//
vec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {
vec4 computeHazeColor(vec3 fragPositionES, vec3 fragPositionWS, vec3 eyePositionWS, vec3 lightDirectionWS) {
// Distance to fragment
float distance = length(fragPositionES);
float eyeWorldHeight = eyePositionWS.y;

// Convert haze colour from uniform into a vec4
vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);
vec4 hazeColor = vec4(hazeParams.hazeColor, 1.0);

// Use the haze colour for the glare colour, if blend is not enabled
vec4 blendedHazeColor;

@@ -149,13 +148,13 @@ vec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS,
vec3 hazeAmount = 1.0 - exp(-hazeIntegral);

// Compute color after haze effect
potentialFragColor = mix(fragColor, vec4(1.0, 1.0, 1.0, 1.0), vec4(hazeAmount, 1.0));
potentialFragColor = vec4(1.0, 1.0, 1.0, hazeAmount);
} else if ((hazeParams.hazeMode & HAZE_MODE_IS_ALTITUDE_BASED) != HAZE_MODE_IS_ALTITUDE_BASED) {
// Haze is based only on range
float hazeAmount = 1.0 - exp(-distance * hazeParams.hazeRangeFactor);

// Compute color after haze effect
potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);
potentialFragColor = vec4(blendedHazeColor.rgb, hazeAmount);
} else {
// Haze is based on both range and altitude
// Taken from www.crytek.com/download/GDC2007_RealtimeAtmoFxInGamesRev.ppt

@@ -181,16 +180,14 @@ vec4 computeHazeColor(vec4 fragColor, vec3 fragPositionES, vec3 fragPositionWS,
float hazeAmount = 1.0 - exp(-hazeIntegral);

// Compute color after haze effect
potentialFragColor = mix(fragColor, blendedHazeColor, hazeAmount);
potentialFragColor = vec4(blendedHazeColor.rgb, hazeAmount);
}

// Mix with background at far range
const float BLEND_DISTANCE = 27000.0f;
vec4 outFragColor;
vec4 outFragColor = potentialFragColor;
if (distance > BLEND_DISTANCE) {
outFragColor = mix(potentialFragColor, fragColor, hazeParams.backgroundBlend);
} else {
outFragColor = potentialFragColor;
outFragColor.a *= hazeParams.backgroundBlend;
}

return outFragColor;
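The refactor above changes the contract of computeHazeColor(): instead of blending the fragment colour inside the function, it now returns the haze colour in .rgb and the blend factor in .a, and each caller composites with mix(color, hazeColor.rgb, hazeColor.a). A standalone sketch of that compositing step using glm (the values below are made up, not outputs of the haze model):

    #include <glm/glm.hpp>
    #include <cstdio>

    int main() {
        glm::vec3 fragColor(0.2f, 0.4f, 0.6f);     // hypothetical lit fragment colour
        glm::vec4 haze(0.7f, 0.75f, 0.8f, 0.35f);  // haze colour (.rgb) plus blend factor (.a)
        // Same linear blend the old code performed internally: frag * (1 - a) + haze * a
        glm::vec3 composited = glm::mix(fragColor, glm::vec3(haze), haze.a);
        std::printf("composited = %.3f %.3f %.3f\n", composited.x, composited.y, composited.z);
        return 0;
    }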
@@ -117,6 +117,9 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c
assert(renderContext->args->hasViewFrustum());
auto& inShapes = inputs.get0();

const int BOUNDS_SLOT = 0;
const int PARAMETERS_SLOT = 1;

if (!_stencilMaskPipeline || !_stencilMaskFillPipeline) {
gpu::StatePointer state = gpu::StatePointer(new gpu::State());
state->setDepthTest(true, false, gpu::LESS_EQUAL);

@@ -135,6 +138,8 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c
gpu::ShaderPointer program = gpu::Shader::createProgram(vs, ps);

gpu::Shader::BindingSet slotBindings;
slotBindings.insert(gpu::Shader::Binding(std::string("ssbo0Buffer"), BOUNDS_SLOT));
slotBindings.insert(gpu::Shader::Binding(std::string("parametersBuffer"), PARAMETERS_SLOT));
gpu::Shader::makeProgram(*program, slotBindings);

_stencilMaskPipeline = gpu::Pipeline::create(program, state);

@@ -214,6 +219,15 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c

_boundsBuffer->setData(itemBounds.size() * sizeof(render::ItemBound), (const gpu::Byte*) itemBounds.data());

const auto securityMargin = 2.0f;
const float blurPixelWidth = 2.0f * securityMargin * HighlightSharedParameters::getBlurPixelWidth(highlight._style, args->_viewport.w);
const auto framebufferSize = ressources->getSourceFrameSize();
const glm::vec2 highlightWidth = { blurPixelWidth / framebufferSize.x, blurPixelWidth / framebufferSize.y };

if (highlightWidth != _outlineWidth.get()) {
_outlineWidth.edit() = highlightWidth;
}

gpu::doInBatch("DrawHighlightMask::run::end", args->_context, [&](gpu::Batch& batch) {
// Setup camera, projection and viewport for all items
batch.setViewportTransform(args->_viewport);

@@ -221,15 +235,10 @@ void DrawHighlightMask::run(const render::RenderContextPointer& renderContext, c
batch.setViewTransform(viewMat);

// Draw stencil mask with object bounding boxes
const auto highlightWidthLoc = _stencilMaskPipeline->getProgram()->getUniforms().findLocation("outlineWidth");
const auto securityMargin = 2.0f;
const float blurPixelWidth = 2.0f * securityMargin * HighlightSharedParameters::getBlurPixelWidth(highlight._style, args->_viewport.w);
const auto framebufferSize = ressources->getSourceFrameSize();

auto stencilPipeline = highlight._style.isFilled() ? _stencilMaskFillPipeline : _stencilMaskPipeline;
batch.setPipeline(stencilPipeline);
batch.setResourceBuffer(0, _boundsBuffer);
batch._glUniform2f(highlightWidthLoc, blurPixelWidth / framebufferSize.x, blurPixelWidth / framebufferSize.y);
batch.setResourceBuffer(BOUNDS_SLOT, _boundsBuffer);
batch.setUniformBuffer(PARAMETERS_SLOT, _outlineWidth);
static const int NUM_VERTICES_PER_CUBE = 36;
batch.draw(gpu::TRIANGLES, NUM_VERTICES_PER_CUBE * (gpu::uint32) itemBounds.size(), 0);
});
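The outline width above is a pixel quantity converted to texture-coordinate units by dividing by the source frame size, and it is now written into a small uniform struct only when it changes rather than being pushed as a loose glUniform every frame. A standalone sketch of just the width computation, with made-up inputs (the real per-style blur width comes from HighlightSharedParameters::getBlurPixelWidth):

    #include <glm/glm.hpp>
    #include <cstdio>

    int main() {
        const float securityMargin = 2.0f;
        float styleBlurPixels = 3.0f;                   // hypothetical per-style blur width in pixels
        glm::vec2 framebufferSize(1920.0f, 1080.0f);    // hypothetical source frame size
        float blurPixelWidth = 2.0f * securityMargin * styleBlurPixels;
        glm::vec2 outlineWidth(blurPixelWidth / framebufferSize.x, blurPixelWidth / framebufferSize.y);
        std::printf("outlineWidth = (%.5f, %.5f)\n", outlineWidth.x, outlineWidth.y);  // (0.00625, 0.01111)
        return 0;
    }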
@@ -127,6 +127,7 @@ protected:
render::ShapePlumberPointer _shapePlumber;
HighlightSharedParametersPointer _sharedParameters;
gpu::BufferPointer _boundsBuffer;
gpu::StructBuffer<glm::vec2> _outlineWidth;

static gpu::PipelinePointer _stencilMaskPipeline;
static gpu::PipelinePointer _stencilMaskFillPipeline;

@@ -40,7 +40,9 @@ ItemBound getItemBound(int i) {
}
#endif

uniform vec2 outlineWidth;
uniform parametersBuffer {
vec2 outlineWidth;
};

void main(void) {
const vec3 UNIT_BOX_VERTICES[8] = vec3[8](
@@ -174,7 +174,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
// Similar to light stage, background stage has been filled by several potential render items and resolved for the frame in this job
task.addJob<DrawBackgroundStage>("DrawBackgroundDeferred", lightingModel);

const auto drawHazeInputs = render::Varying(DrawHaze::Inputs(hazeModel, lightingFramebuffer, linearDepthTarget, deferredFrameTransform, lightingFramebuffer));
const auto drawHazeInputs = render::Varying(DrawHaze::Inputs(hazeModel, lightingFramebuffer, linearDepthTarget, deferredFrameTransform, lightingModel));
task.addJob<DrawHaze>("DrawHazeDeferred", drawHazeInputs);

// Render transparent objects forward in LightingBuffer
@@ -42,22 +42,27 @@ glm::quat computeBulletRotationStep(const glm::vec3& angularVelocity, float time
// Exponential map
// google for "Practical Parameterization of Rotations Using the Exponential Map", F. Sebastian Grassia

float speed = glm::length(angularVelocity);
glm::vec3 axis = angularVelocity;
float angle = glm::length(axis) * timeStep;
// limit the angular motion because the exponential approximation fails for large steps
const float ANGULAR_MOTION_THRESHOLD = 0.5f * PI_OVER_TWO;
if (speed * timeStep > ANGULAR_MOTION_THRESHOLD) {
speed = ANGULAR_MOTION_THRESHOLD / timeStep;
if (angle > ANGULAR_MOTION_THRESHOLD) {
angle = ANGULAR_MOTION_THRESHOLD;
}

glm::vec3 axis = angularVelocity;
if (speed < 0.001f) {
// use Taylor's expansions of sync function
axis *= (0.5f * timeStep - (timeStep * timeStep * timeStep) * (0.020833333333f * speed * speed));
const float MIN_ANGLE = 0.001f;
if (angle < MIN_ANGLE) {
// for small angles use Taylor's expansion of sin(x):
// sin(x) = x - (x^3)/(3!) + ...
// where: x = angle/2
// sin(angle/2) = angle/2 - (angle*angle*angle)/48
// but (angle = speed * timeStep) and we want to normalize the axis by dividing by speed
// which gives us:
axis *= timeStep * (0.5f - 0.020833333333f * angle * angle);
} else {
// sync(speed) = sin(c * speed)/t
axis *= (sinf(0.5f * speed * timeStep) / speed );
axis *= (sinf(0.5f * angle) * timeStep / angle);
}
return glm::quat(cosf(0.5f * speed * timeStep), axis.x, axis.y, axis.z);
return glm::quat(cosf(0.5f * angle), axis.x, axis.y, axis.z);
}
/* end Bullet code derivation*/
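The refactor above expresses the exponential map in terms of the total step angle a = |w| * dt: the resulting quaternion is (cos(a/2), sin(a/2) * w/|w|), and for very small angles sin(a/2)/|w| is replaced by its Taylor expansion dt * (0.5 - a*a/48) so the axis is never divided by a near-zero speed. A standalone check of the new form against glm::angleAxis (the large-angle clamp is left out here for brevity; the test values are mine):

    #include <cmath>
    #include <cstdio>
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    static glm::quat rotationStep(const glm::vec3& angularVelocity, float timeStep) {
        glm::vec3 axis = angularVelocity;
        float angle = glm::length(axis) * timeStep;
        const float MIN_ANGLE = 0.001f;
        if (angle < MIN_ANGLE) {
            axis *= timeStep * (0.5f - 0.020833333333f * angle * angle);   // 1/48 ~= 0.0208333
        } else {
            axis *= (sinf(0.5f * angle) * timeStep / angle);               // sin(a/2) / |w|
        }
        return glm::quat(cosf(0.5f * angle), axis.x, axis.y, axis.z);
    }

    int main() {
        glm::vec3 w(0.0f, 2.0f, 0.0f);      // 2 rad/s about +Y
        float dt = 1.0f / 90.0f;            // one physics substep
        glm::quat stepped = rotationStep(w, dt);
        glm::quat reference = glm::angleAxis(glm::length(w) * dt, glm::normalize(w));
        std::printf("dot with glm::angleAxis reference: %.6f\n", glm::dot(stepped, reference));  // ~1.000000
        return 0;
    }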
@@ -670,12 +670,13 @@ triggerPressMapping.from(Controller.Standard.RT).peek().to(makePressHandler(Cont
triggerPressMapping.from(Controller.Standard.LT).peek().to(makePressHandler(Controller.Standard.LeftHand));

function tabletVisibilityChanged() {
if (!tablet.tabletShown) {
if (!tablet.tabletShown && onPalScreen) {
ContextOverlay.enabled = true;
tablet.gotoHomeScreen();
}
}

var wasOnPalScreen = false;
var onPalScreen = false;
var PAL_QML_SOURCE = "hifi/Pal.qml";
function onTabletButtonClicked() {

@@ -706,6 +707,7 @@ function wireEventBridge(on) {
}

function onTabletScreenChanged(type, url) {
wasOnPalScreen = onPalScreen;
onPalScreen = (type === "QML" && url === PAL_QML_SOURCE);
wireEventBridge(onPalScreen);
// for toolbar mode: change button to active when window is first opened, false otherwise.

@@ -729,7 +731,9 @@ function onTabletScreenChanged(type, url) {
populateNearbyUserList();
} else {
off();
ContextOverlay.enabled = true;
if (wasOnPalScreen) {
ContextOverlay.enabled = true;
}
}
}
@@ -60,7 +60,7 @@ Rectangle {
// "Spectator" text
HifiStylesUit.RalewaySemiBold {
id: titleBarText;
text: "Spectator Camera 2.2";
text: "Spectator Camera 2.3";
// Anchors
anchors.left: parent.left;
anchors.leftMargin: 30;
@@ -74,6 +74,7 @@
"collisionMask": 7,
"dynamic": false,
"modelURL": Script.resolvePath("spectator-camera.fbx"),
"name": "Spectator Camera",
"registrationPoint": {
"x": 0.56,
"y": 0.545,

@@ -102,6 +103,18 @@
position: cameraPosition,
localOnly: true
});

// Remove the existing camera model from the domain if one exists.
// It's easy for this to happen if the user crashes while the Spectator Camera is on.
// We do this down here (after the new one is rezzed) so that we don't accidentally delete
// the newly-rezzed model.
var entityIDs = Entities.findEntitiesByName("Spectator Camera", MyAvatar.position, 100, false);
entityIDs.forEach(function (currentEntityID) {
var currentEntityOwner = Entities.getEntityProperties(currentEntityID, ['owningAvatarID']).owningAvatarID;
if (currentEntityOwner === MyAvatar.sessionUUID && currentEntityID !== camera) {
Entities.deleteEntity(currentEntityID);
}
});
}

// Function Name: spectatorCameraOff()