Mirror of https://github.com/overte-org/overte.git, synced 2025-04-14 15:47:02 +02:00

Merge pull request #7846 from hyperlogic/tony/improved-avatar-debug-draw

Eye tracking bug fix and debug rendering improvements

Commit 8fecb51dde: 20 changed files with 161 additions and 218 deletions

Changed paths: interface/src, libraries/animation/src, libraries/render-utils/src, libraries/shared/src
@@ -45,11 +45,13 @@
#include <ResourceScriptingInterface.h>
#include <AccountManager.h>
#include <AddressManager.h>
#include <AnimDebugDraw.h>
#include <BuildInfo.h>
#include <AssetClient.h>
#include <AutoUpdater.h>
#include <AudioInjectorManager.h>
#include <CursorManager.h>
#include <DebugDraw.h>
#include <DeferredLightingEffect.h>
#include <display-plugins/DisplayPlugin.h>
#include <EntityScriptingInterface.h>

@@ -101,7 +103,7 @@
#include <Preferences.h>
#include <display-plugins/CompositorHelper.h>

#include "AnimDebugDraw.h"

#include "AudioClient.h"
#include "audio/AudioScope.h"
#include "avatar/AvatarManager.h"

@@ -3038,9 +3040,9 @@ void Application::updateLOD() const {
}
}

void Application::pushPreRenderLambda(void* key, std::function<void()> func) {
std::unique_lock<std::mutex> guard(_preRenderLambdasLock);
_preRenderLambdas[key] = func;
void Application::pushPostUpdateLambda(void* key, std::function<void()> func) {
std::unique_lock<std::mutex> guard(_postUpdateLambdasLock);
_postUpdateLambdas[key] = func;
}

// Called during Application::update immediately before AvatarManager::updateMyAvatar, updating my data that is then sent to everyone.

@@ -3562,15 +3564,19 @@ void Application::update(float deltaTime) {
}
}

avatarManager->postUpdate(deltaTime);

{
PROFILE_RANGE_EX("PreRenderLambdas", 0xffff0000, (uint64_t)0);

std::unique_lock<std::mutex> guard(_preRenderLambdasLock);
for (auto& iter : _preRenderLambdas) {
std::unique_lock<std::mutex> guard(_postUpdateLambdasLock);
for (auto& iter : _postUpdateLambdas) {
iter.second();
}
_preRenderLambdas.clear();
_postUpdateLambdas.clear();
}

AnimDebugDraw::getInstance().update();
}
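For context, the renamed pushPostUpdateLambda hook above implements a keyed, run-once-per-tick queue: a later push with the same key overwrites an earlier one, and everything queued runs once during Application::update and is then cleared. Below is a minimal, standalone sketch of that pattern; the PostUpdateQueue class is hypothetical and not the engine's API.

    // Hypothetical, self-contained sketch of the keyed post-update queue shown in the diff above.
    #include <functional>
    #include <map>
    #include <mutex>

    class PostUpdateQueue {
    public:
        void push(void* key, std::function<void()> func) {
            std::unique_lock<std::mutex> guard(_lock);
            _lambdas[key] = func; // a later push with the same key replaces the earlier lambda
        }
        void runAndClear() { // called once per update tick
            std::unique_lock<std::mutex> guard(_lock);
            for (auto& entry : _lambdas) {
                entry.second();
            }
            _lambdas.clear();
        }
    private:
        std::map<void*, std::function<void()>> _lambdas;
        std::mutex _lock;
    };

The Model.cpp hunk further down shows the intended usage: each Model registers with its own this pointer as the key, so repeated registrations within one frame collapse into a single callback.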
@@ -3992,13 +3998,10 @@ namespace render {

void Application::displaySide(RenderArgs* renderArgs, Camera& theCamera, bool selfAvatarOnly) {

// FIXME: This preRender call is temporary until we create a separate render::scene for the mirror rendering.
// FIXME: This preDisplayRender call is temporary until we create a separate render::scene for the mirror rendering.
// Then we can move this logic into the Avatar::simulate call.
auto myAvatar = getMyAvatar();
myAvatar->preRender(renderArgs);

// Update animation debug draw renderer
AnimDebugDraw::getInstance().update();
myAvatar->preDisplaySide(renderArgs);

activeRenderingThread = QThread::currentThread();
PROFILE_RANGE(__FUNCTION__);
@@ -209,7 +209,7 @@ public:
render::EnginePointer getRenderEngine() override { return _renderEngine; }
gpu::ContextPointer getGPUContext() const { return _gpuContext; }

virtual void pushPreRenderLambda(void* key, std::function<void()> func) override;
virtual void pushPostUpdateLambda(void* key, std::function<void()> func) override;

const QRect& getMirrorViewRect() const { return _mirrorViewRect; }

@@ -508,8 +508,8 @@ private:

QThread* _deadlockWatchdogThread;

std::map<void*, std::function<void()>> _preRenderLambdas;
std::mutex _preRenderLambdasLock;
std::map<void*, std::function<void()>> _postUpdateLambdas;
std::mutex _postUpdateLambdasLock;

std::atomic<uint32_t> _fullSceneReceivedCounter { 0 }; // how many times have we received a full-scene octree stats packet
uint32_t _fullSceneCounterAtLastPhysicsCheck { 0 }; // _fullSceneReceivedCounter last time we checked physics ready
@@ -480,10 +480,8 @@ Menu::Menu() {
avatarManager.data(), SLOT(setShouldShowReceiveStats(bool)));

addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderBoundingCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderLookAtVectors, 0, false);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderLookAtTargets, 0, false);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderFocusIndicator, 0, false);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::ShowWhosLookingAtMe, 0, false);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderMyLookAtVectors, 0, false);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderOtherLookAtVectors, 0, false);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::FixGaze, 0, false);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::AnimDebugDrawDefaultPose, 0, false,
avatar, SLOT(setEnableDebugDrawDefaultPose(bool)));
@@ -147,9 +147,8 @@ namespace MenuOption {
const QString ReloadAllScripts = "Reload All Scripts";
const QString ReloadContent = "Reload Content (Clears all caches)";
const QString RenderBoundingCollisionShapes = "Show Bounding Collision Shapes";
const QString RenderFocusIndicator = "Show Eye Focus";
const QString RenderLookAtTargets = "Show Look-at Targets";
const QString RenderLookAtVectors = "Show Look-at Vectors";
const QString RenderMyLookAtVectors = "Show My Eye Vectors";
const QString RenderOtherLookAtVectors = "Show Other Eye Vectors";
const QString RenderMaxTextureMemory = "Maximum Texture Memory";
const QString RenderMaxTextureAutomatic = "Automatic Texture Memory";
const QString RenderMaxTexture64MB = "64 MB";

@@ -174,7 +173,6 @@ namespace MenuOption {
const QString ShowDSConnectTable = "Show Domain Connection Timing";
const QString ShowBordersEntityNodes = "Show Entity Nodes";
const QString ShowRealtimeEntityStats = "Show Realtime Entity Stats";
const QString ShowWhosLookingAtMe = "Show Who's Looking at Me";
const QString StandingHMDSensorMode = "Standing HMD Sensor Mode";
const QString SimulateEyeTracking = "Simulate";
const QString SMIEyeTracking = "SMI Eye Tracking";
@@ -31,6 +31,7 @@
#include <SharedUtil.h>
#include <TextRenderer3D.h>
#include <TextureCache.h>
#include <DebugDraw.h>

#include "Application.h"
#include "Avatar.h"

@@ -66,11 +67,6 @@ namespace render {
}
template <> void payloadRender(const AvatarSharedPointer& avatar, RenderArgs* args) {
auto avatarPtr = static_pointer_cast<Avatar>(avatar);
bool renderLookAtVectors = Menu::getInstance()->isOptionChecked(MenuOption::RenderLookAtVectors);
avatarPtr->setDisplayingLookatVectors(renderLookAtVectors);
bool renderLookAtTarget = Menu::getInstance()->isOptionChecked(MenuOption::RenderLookAtTargets);
avatarPtr->setDisplayingLookatTarget(renderLookAtTarget);

if (avatarPtr->isInitialized() && args) {
PROFILE_RANGE_BATCH(*args->_batch, "renderAvatarPayload");
avatarPtr->render(args, qApp->getCamera()->getPosition());

@@ -323,6 +319,39 @@ void Avatar::updateRenderItem(render::PendingChanges& pendingChanges) {
}
}

void Avatar::postUpdate(float deltaTime) {

bool renderLookAtVectors;
if (isMyAvatar()) {
renderLookAtVectors = Menu::getInstance()->isOptionChecked(MenuOption::RenderMyLookAtVectors);
} else {
renderLookAtVectors = Menu::getInstance()->isOptionChecked(MenuOption::RenderOtherLookAtVectors);
}

if (renderLookAtVectors) {
const float EYE_RAY_LENGTH = 10.0;
const glm::vec4 BLUE(0.0f, 0.0f, 1.0f, 1.0f);
const glm::vec4 RED(1.0f, 0.0f, 0.0f, 1.0f);

int leftEyeJoint = getJointIndex("LeftEye");
glm::vec3 leftEyePosition;
glm::quat leftEyeRotation;

if (_skeletonModel->getJointPositionInWorldFrame(leftEyeJoint, leftEyePosition) &&
_skeletonModel->getJointRotationInWorldFrame(leftEyeJoint, leftEyeRotation)) {
DebugDraw::getInstance().drawRay(leftEyePosition, leftEyePosition + leftEyeRotation * Vectors::UNIT_Z * EYE_RAY_LENGTH, BLUE);
}

int rightEyeJoint = getJointIndex("RightEye");
glm::vec3 rightEyePosition;
glm::quat rightEyeRotation;
if (_skeletonModel->getJointPositionInWorldFrame(rightEyeJoint, rightEyePosition) &&
_skeletonModel->getJointRotationInWorldFrame(rightEyeJoint, rightEyeRotation)) {
DebugDraw::getInstance().drawRay(rightEyePosition, rightEyePosition + rightEyeRotation * Vectors::UNIT_Z * EYE_RAY_LENGTH, RED);
}
}
}

void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
auto& batch = *renderArgs->_batch;
PROFILE_RANGE_BATCH(batch, __FUNCTION__);
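The new Avatar::postUpdate above replaces the old immediate-mode look-at rendering with one-frame debug rays. As an illustration of the same pattern, the sketch below (the function name and color are made up) queues a single gaze ray from a world-space eye pose; because the renderer consumes and clears the ray list every frame, the ray has to be re-issued on each frame it should remain visible.

    // Illustrative only: queue a one-frame, world-space gaze ray the same way Avatar::postUpdate does.
    #include <DebugDraw.h>
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    void drawGazeRay(const glm::vec3& eyePositionWorld, const glm::quat& eyeRotationWorld) {
        const float EYE_RAY_LENGTH = 10.0f;              // meters, arbitrary for illustration
        const glm::vec4 CYAN(0.0f, 1.0f, 1.0f, 1.0f);
        // The diff uses Vectors::UNIT_Z as the eye joint's forward axis; (0, 0, 1) is the plain-glm equivalent.
        glm::vec3 forward = eyeRotationWorld * glm::vec3(0.0f, 0.0f, 1.0f);
        DebugDraw::getInstance().drawRay(eyePositionWorld, eyePositionWorld + forward * EYE_RAY_LENGTH, CYAN);
    }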
@@ -402,22 +431,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
float distanceToTarget = glm::length(toTarget);

{
// glow when moving far away
const float GLOW_DISTANCE = 20.0f;
const float GLOW_MAX_LOUDNESS = 2500.0f;
const float MAX_GLOW = 0.5f;

float GLOW_FROM_AVERAGE_LOUDNESS = ((this == DependencyManager::get<AvatarManager>()->getMyAvatar())
? 0.0f
: MAX_GLOW * getHeadData()->getAudioLoudness() / GLOW_MAX_LOUDNESS);
GLOW_FROM_AVERAGE_LOUDNESS = 0.0f;

float glowLevel = _moving && distanceToTarget > GLOW_DISTANCE && renderArgs->_renderMode == RenderArgs::NORMAL_RENDER_MODE
? 1.0f
: GLOW_FROM_AVERAGE_LOUDNESS;

// render body
renderBody(renderArgs, glowLevel);
fixupModelsInScene();

if (renderArgs->_renderMode != RenderArgs::SHADOW_RENDER_MODE) {
// add local lights

@@ -441,64 +455,6 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
const float BOUNDING_SHAPE_ALPHA = 0.7f;
_skeletonModel->renderBoundingCollisionShapes(*renderArgs->_batch, getUniformScale(), BOUNDING_SHAPE_ALPHA);
}

// If this is the avatar being looked at, render a little ball above their head
if (_isLookAtTarget && Menu::getInstance()->isOptionChecked(MenuOption::RenderFocusIndicator)) {
static const float INDICATOR_OFFSET = 0.22f;
static const float INDICATOR_RADIUS = 0.03f;
static const glm::vec4 LOOK_AT_INDICATOR_COLOR = { 0.8f, 0.0f, 0.0f, 0.75f };
glm::vec3 avatarPosition = getPosition();
glm::vec3 position = glm::vec3(avatarPosition.x, getDisplayNamePosition().y + INDICATOR_OFFSET, avatarPosition.z);
PROFILE_RANGE_BATCH(batch, __FUNCTION__":renderFocusIndicator");
Transform transform;
transform.setTranslation(position);
transform.postScale(INDICATOR_RADIUS);
batch.setModelTransform(transform);
DependencyManager::get<GeometryCache>()->renderSolidSphereInstance(batch, LOOK_AT_INDICATOR_COLOR);
}

// If the avatar is looking at me, indicate that they are
if (getHead()->isLookingAtMe() && Menu::getInstance()->isOptionChecked(MenuOption::ShowWhosLookingAtMe)) {
PROFILE_RANGE_BATCH(batch, __FUNCTION__":renderLookingAtMe");
const glm::vec3 LOOKING_AT_ME_COLOR = { 1.0f, 1.0f, 1.0f };
const float LOOKING_AT_ME_ALPHA_START = 0.8f;
const float LOOKING_AT_ME_DURATION = 0.5f; // seconds
quint64 now = usecTimestampNow();
float alpha = LOOKING_AT_ME_ALPHA_START
* (1.0f - ((float)(now - getHead()->getLookingAtMeStarted()))
/ (LOOKING_AT_ME_DURATION * (float)USECS_PER_SECOND));
if (alpha > 0.0f) {
if (_skeletonModel->isLoaded()) {
const auto& geometry = _skeletonModel->getFBXGeometry();
const float DEFAULT_EYE_DIAMETER = 0.048f; // Typical human eye
const float RADIUS_INCREMENT = 0.005f;
batch.setModelTransform(Transform());

glm::vec3 position = getHead()->getLeftEyePosition();
Transform transform;
transform.setTranslation(position);
float eyeDiameter = geometry.leftEyeSize;
if (eyeDiameter == 0.0f) {
eyeDiameter = DEFAULT_EYE_DIAMETER;
}

batch.setModelTransform(Transform(transform).postScale(eyeDiameter * getUniformScale() / 2.0f + RADIUS_INCREMENT));
DependencyManager::get<GeometryCache>()->renderSolidSphereInstance(batch,
glm::vec4(LOOKING_AT_ME_COLOR, alpha));

position = getHead()->getRightEyePosition();
transform.setTranslation(position);
eyeDiameter = geometry.rightEyeSize;
if (eyeDiameter == 0.0f) {
eyeDiameter = DEFAULT_EYE_DIAMETER;
}
batch.setModelTransform(Transform(transform).postScale(eyeDiameter * getUniformScale() / 2.0f + RADIUS_INCREMENT));
DependencyManager::get<GeometryCache>()->renderSolidSphereInstance(batch,
glm::vec4(LOOKING_AT_ME_COLOR, alpha));

}
}
}
}

const float DISPLAYNAME_DISTANCE = 20.0f;
@@ -556,11 +512,6 @@ void Avatar::fixupModelsInScene() {
scene->enqueuePendingChanges(pendingChanges);
}

void Avatar::renderBody(RenderArgs* renderArgs, float glowLevel) {
fixupModelsInScene();
getHead()->renderLookAts(renderArgs);
}

bool Avatar::shouldRenderHead(const RenderArgs* renderArgs) const {
return true;
}
@@ -77,9 +77,9 @@ public:

void updateRenderItem(render::PendingChanges& pendingChanges);

virtual void postUpdate(float deltaTime);

//setters
void setDisplayingLookatVectors(bool displayingLookatVectors) { getHead()->setRenderLookatVectors(displayingLookatVectors); }
void setDisplayingLookatTarget(bool displayingLookatTarget) { getHead()->setRenderLookatTarget(displayingLookatTarget); }
void setIsLookAtTarget(const bool isLookAtTarget) { _isLookAtTarget = isLookAtTarget; }
bool getIsLookAtTarget() const { return _isLookAtTarget; }
//getters

@@ -232,7 +232,6 @@ protected:

Transform calculateDisplayNameTransform(const ViewFrustum& view, const glm::vec3& textPosition) const;
void renderDisplayName(gpu::Batch& batch, const ViewFrustum& view, const glm::vec3& textPosition) const;
virtual void renderBody(RenderArgs* renderArgs, float glowLevel = 0.0f);
virtual bool shouldRenderHead(const RenderArgs* renderArgs) const;
virtual void fixupModelsInScene();

@@ -251,7 +250,7 @@ private:
bool _initialized;
bool _shouldAnimate { true };
bool _shouldSkipRender { false };
bool _isLookAtTarget;
bool _isLookAtTarget { false };

float getBoundingRadius() const;
@@ -156,6 +156,15 @@ void AvatarManager::updateOtherAvatars(float deltaTime) {
simulateAvatarFades(deltaTime);
}

void AvatarManager::postUpdate(float deltaTime) {
auto hashCopy = getHashCopy();
AvatarHash::iterator avatarIterator = hashCopy.begin();
for (avatarIterator = hashCopy.begin(); avatarIterator != hashCopy.end(); avatarIterator++) {
auto avatar = std::static_pointer_cast<Avatar>(avatarIterator.value());
avatar->postUpdate(deltaTime);
}
}

void AvatarManager::simulateAvatarFades(float deltaTime) {
QVector<AvatarSharedPointer>::iterator fadingIterator = _avatarFades.begin();
@@ -44,6 +44,8 @@ public:
void updateMyAvatar(float deltaTime);
void updateOtherAvatars(float deltaTime);

void postUpdate(float deltaTime);

void clearOtherAvatars();
void clearAllAvatars();
@@ -46,8 +46,6 @@ Head::Head(Avatar* owningAvatar) :
_mouth3(0.0f),
_mouth4(0.0f),
_mouthTime(0.0f),
_renderLookatVectors(false),
_renderLookatTarget(false),
_saccade(0.0f, 0.0f, 0.0f),
_saccadeTarget(0.0f, 0.0f, 0.0f),
_leftEyeBlinkVelocity(0.0f),

@@ -316,19 +314,6 @@ void Head::relaxLean(float deltaTime) {
_deltaLeanForward *= relaxationFactor;
}

void Head::renderLookAts(RenderArgs* renderArgs) {
renderLookAts(renderArgs, _leftEyePosition, _rightEyePosition);
}

void Head::renderLookAts(RenderArgs* renderArgs, glm::vec3 leftEyePosition, glm::vec3 rightEyePosition) {
if (_renderLookatVectors) {
renderLookatVectors(renderArgs, leftEyePosition, rightEyePosition, getCorrectedLookAtPosition());
}
if (_renderLookatTarget) {
renderLookatTarget(renderArgs, getCorrectedLookAtPosition());
}
}

void Head::setScale (float scale) {
if (_scale == scale) {
return;

@@ -439,31 +424,3 @@ void Head::addLeanDeltas(float sideways, float forward) {
_deltaLeanSideways += sideways;
_deltaLeanForward += forward;
}

void Head::renderLookatVectors(RenderArgs* renderArgs, glm::vec3 leftEyePosition, glm::vec3 rightEyePosition, glm::vec3 lookatPosition) {
auto& batch = *renderArgs->_batch;
auto transform = Transform{};
batch.setModelTransform(transform);
// FIXME: THe line width of 2.0f is not supported anymore, we ll need a workaround

glm::vec4 startColor(0.2f, 0.2f, 0.2f, 1.0f);
glm::vec4 endColor(1.0f, 1.0f, 1.0f, 0.0f);

auto geometryCache = DependencyManager::get<GeometryCache>();
geometryCache->bindSimpleProgram(batch);
geometryCache->renderLine(batch, leftEyePosition, lookatPosition, startColor, endColor, _leftEyeLookAtID);
geometryCache->renderLine(batch, rightEyePosition, lookatPosition, startColor, endColor, _rightEyeLookAtID);
}

void Head::renderLookatTarget(RenderArgs* renderArgs, glm::vec3 lookatPosition) {
auto& batch = *renderArgs->_batch;
auto transform = Transform{};
transform.setTranslation(lookatPosition);

auto geometryCache = DependencyManager::get<GeometryCache>();
const float LOOK_AT_TARGET_RADIUS = 0.075f;
transform.postScale(LOOK_AT_TARGET_RADIUS);
const glm::vec4 LOOK_AT_TARGET_COLOR = { 0.8f, 0.0f, 0.0f, 0.75f };
batch.setModelTransform(transform);
geometryCache->renderSolidSphereInstance(batch, LOOK_AT_TARGET_COLOR);
}
@@ -36,10 +36,6 @@ public:
void setPosition(glm::vec3 position) { _position = position; }
void setAverageLoudness(float averageLoudness) { _averageLoudness = averageLoudness; }
void setReturnToCenter (bool returnHeadToCenter) { _returnHeadToCenter = returnHeadToCenter; }
void setRenderLookatVectors(bool onOff) { _renderLookatVectors = onOff; }
void setRenderLookatTarget(bool onOff) { _renderLookatTarget = onOff; }
void renderLookAts(RenderArgs* renderArgs);
void renderLookAts(RenderArgs* renderArgs, glm::vec3 leftEyePosition, glm::vec3 rightEyePosition);

/// \return orientationBase+Delta
glm::quat getFinalOrientationInLocalFrame() const;

@@ -49,7 +45,7 @@ public:

/// \return orientationBody * orientationBasePitch
glm::quat getCameraOrientation () const;

void setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition);
glm::vec3 getCorrectedLookAtPosition();
void clearCorrectedLookAtPosition() { _isLookingAtMe = false; }

@@ -65,9 +61,9 @@ public:
glm::vec3 getFrontDirection() const { return getOrientation() * IDENTITY_FRONT; }
float getFinalLeanSideways() const { return _leanSideways + _deltaLeanSideways; }
float getFinalLeanForward() const { return _leanForward + _deltaLeanForward; }

glm::quat getEyeRotation(const glm::vec3& eyePosition) const;

const glm::vec3& getRightEyePosition() const { return _rightEyePosition; }
const glm::vec3& getLeftEyePosition() const { return _leftEyePosition; }
glm::vec3 getRightEarPosition() const { return _rightEyePosition + (getRightDirection() * EYE_EAR_GAP) + (getFrontDirection() * -EYE_EAR_GAP); }

@@ -84,10 +80,10 @@ public:

void setDeltaYaw(float yaw) { _deltaYaw = yaw; }
float getDeltaYaw() const { return _deltaYaw; }

void setDeltaRoll(float roll) { _deltaRoll = roll; }
float getDeltaRoll() const { return _deltaRoll; }

virtual void setFinalYaw(float finalYaw);
virtual void setFinalPitch(float finalPitch);
virtual void setFinalRoll(float finalRoll);

@@ -99,7 +95,7 @@ public:
void addLeanDeltas(float sideways, float forward);

float getTimeWithoutTalking() const { return _timeWithoutTalking; }

private:
glm::vec3 calculateAverageEyePosition() const { return _leftEyePosition + (_rightEyePosition - _leftEyePosition ) * 0.5f; }

@@ -113,7 +109,7 @@ private:
glm::vec3 _leftEyePosition;
glm::vec3 _rightEyePosition;
glm::vec3 _eyePosition;

float _scale;
float _lastLoudness;
float _longTermAverageLoudness;

@@ -124,8 +120,7 @@ private:
float _mouth3;
float _mouth4;
float _mouthTime;
bool _renderLookatVectors;
bool _renderLookatTarget;

glm::vec3 _saccade;
glm::vec3 _saccadeTarget;
float _leftEyeBlinkVelocity;

@@ -145,15 +140,13 @@ private:
bool _isLookingAtMe;
quint64 _lookingAtMeStarted;
quint64 _wasLastLookingAtMe;

glm::vec3 _correctedLookAtPosition;

int _leftEyeLookAtID;
int _rightEyeLookAtID;

// private methods
void renderLookatVectors(RenderArgs* renderArgs, glm::vec3 leftEyePosition, glm::vec3 rightEyePosition, glm::vec3 lookatPosition);
void renderLookatTarget(RenderArgs* renderArgs, glm::vec3 lookatPosition);
void calculateMouthShapes();
void applyEyelidOffset(glm::quat headOrientation);
};
@@ -1270,35 +1270,6 @@ void MyAvatar::attach(const QString& modelURL, const QString& jointName,
Avatar::attach(modelURL, jointName, translation, rotation, scale, isSoft, allowDuplicates, useSaved);
}

void MyAvatar::renderBody(RenderArgs* renderArgs, float glowLevel) {

if (!_skeletonModel->isRenderable()) {
return; // wait until all models are loaded
}

fixupModelsInScene();

// This is drawing the lookat vectors from our avatar to wherever we're looking.
if (qApp->isHMDMode()) {
glm::vec3 cameraPosition = qApp->getCamera()->getPosition();

glm::mat4 headPose = qApp->getActiveDisplayPlugin()->getHeadPose();
glm::mat4 leftEyePose = qApp->getActiveDisplayPlugin()->getEyeToHeadTransform(Eye::Left);
leftEyePose = leftEyePose * headPose;
glm::vec3 leftEyePosition = extractTranslation(leftEyePose);
glm::mat4 rightEyePose = qApp->getActiveDisplayPlugin()->getEyeToHeadTransform(Eye::Right);
rightEyePose = rightEyePose * headPose;
glm::vec3 rightEyePosition = extractTranslation(rightEyePose);
glm::vec3 headPosition = extractTranslation(headPose);

getHead()->renderLookAts(renderArgs,
cameraPosition + getOrientation() * (leftEyePosition - headPosition),
cameraPosition + getOrientation() * (rightEyePosition - headPosition));
} else {
getHead()->renderLookAts(renderArgs);
}
}

void MyAvatar::setVisibleInSceneIfReady(Model* model, render::ScenePointer scene, bool visible) {
if (model->isActive() && model->isRenderable()) {
model->setVisibleInScene(visible, scene);

@@ -1355,10 +1326,11 @@ void MyAvatar::destroyAnimGraph() {
_rig->destroyAnimGraph();
}

void MyAvatar::preRender(RenderArgs* renderArgs) {
void MyAvatar::postUpdate(float deltaTime) {

Avatar::postUpdate(deltaTime);

render::ScenePointer scene = qApp->getMain3DScene();

if (_skeletonModel->initWhenReady(scene)) {
initHeadBones();
_skeletonModel->setCauterizeBoneSet(_headBoneSet);

@@ -1408,7 +1380,12 @@ void MyAvatar::preRender(RenderArgs* renderArgs) {

DebugDraw::getInstance().updateMyAvatarPos(getPosition());
DebugDraw::getInstance().updateMyAvatarRot(getOrientation());
}

void MyAvatar::preDisplaySide(RenderArgs* renderArgs) {

// toggle using the cauterizedBones depending on where the camera is and the rendering pass type.
const bool shouldDrawHead = shouldRenderHead(renderArgs);
if (shouldDrawHead != _prevShouldDrawHead) {
_skeletonModel->setCauterizeBones(!shouldDrawHead);
@@ -96,7 +96,8 @@ public:

Q_INVOKABLE void reset(bool andRecenter = false);
void update(float deltaTime);
void preRender(RenderArgs* renderArgs);
virtual void postUpdate(float deltaTime) override;
void preDisplaySide(RenderArgs* renderArgs);

const glm::mat4& getHMDSensorMatrix() const { return _hmdSensorMatrix; }
const glm::vec3& getHMDSensorPosition() const { return _hmdSensorPosition; }

@@ -310,7 +311,6 @@ private:
void simulate(float deltaTime);
void updateFromTrackers(float deltaTime);
virtual void render(RenderArgs* renderArgs, const glm::vec3& cameraPositio) override;
virtual void renderBody(RenderArgs* renderArgs, float glowLevel = 0.0f) override;
virtual bool shouldRenderHead(const RenderArgs* renderArgs) const override;
void setShouldRenderLocally(bool shouldRender) { _shouldRender = shouldRender; setEnableMeshVisible(shouldRender); }
bool getShouldRenderLocally() const { return _shouldRender; }
@@ -13,6 +13,7 @@
#include <QMultiMap>

#include <recording/Deck.h>
#include <DebugDraw.h>

#include "Application.h"
#include "Avatar.h"

@@ -92,7 +93,6 @@ void SkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {

Head* head = _owningAvatar->getHead();

// make sure lookAt is not too close to face (avoid crosseyes)
glm::vec3 lookAt = _owningAvatar->isMyAvatar() ? head->getLookAtPosition() : head->getCorrectedLookAtPosition();
glm::vec3 focusOffset = lookAt - _owningAvatar->getHead()->getEyePosition();
@@ -1057,20 +1057,30 @@ void Rig::updateNeckJoint(int index, const HeadParameters& params) {
}

void Rig::updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm::quat& modelRotation, const glm::quat& worldHeadOrientation, const glm::vec3& lookAtSpot, const glm::vec3& saccade) {

// TODO: does not properly handle avatar scale.

if (isIndexValid(index)) {
glm::mat4 rigToWorld = createMatFromQuatAndPos(modelRotation, modelTranslation);
glm::mat4 worldToRig = glm::inverse(rigToWorld);
glm::vec3 zAxis = glm::normalize(_internalPoseSet._absolutePoses[index].trans - transformPoint(worldToRig, lookAtSpot));
glm::vec3 lookAtVector = glm::normalize(transformPoint(worldToRig, lookAtSpot) - _internalPoseSet._absolutePoses[index].trans);

glm::quat desiredQuat = rotationBetween(IDENTITY_FRONT, zAxis);
glm::quat headQuat;
int headIndex = indexOfJoint("Head");
glm::quat headQuat;
if (headIndex >= 0) {
headQuat = _internalPoseSet._absolutePoses[headIndex].rot;
}

glm::vec3 headUp = headQuat * Vectors::UNIT_Y;
glm::vec3 z, y, x;
generateBasisVectors(lookAtVector, headUp, z, y, x);
glm::mat3 m(glm::cross(y, z), y, z);
glm::quat desiredQuat = glm::normalize(glm::quat_cast(m));

glm::quat deltaQuat = desiredQuat * glm::inverse(headQuat);

// limit rotation
// limit swing rotation of the deltaQuat by a 30 degree cone.
// TODO: use swing twist decomposition constraint instead, for off axis rotation clamping.
const float MAX_ANGLE = 30.0f * RADIANS_PER_DEGREE;
if (fabsf(glm::angle(deltaQuat)) > MAX_ANGLE) {
deltaQuat = glm::angleAxis(glm::clamp(glm::angle(deltaQuat), -MAX_ANGLE, MAX_ANGLE), glm::axis(deltaQuat));
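The tail of this hunk limits how far the eye may swing away from the head by capping the angle of deltaQuat at 30 degrees while keeping its axis. A self-contained version of that clamp, using only glm (the helper name is made up; this is a sketch of the idea, not the engine's exact code):

    // Sketch: cap a rotation's angle while preserving its axis, as the 30-degree eye cone above does.
    #include <cmath>
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    glm::quat clampRotationAngle(const glm::quat& delta, float maxAngleRadians) {
        float angle = glm::angle(delta);          // rotation angle in radians
        if (fabsf(angle) <= maxAngleRadians) {
            return delta;                         // already inside the cone
        }
        glm::vec3 axis = glm::axis(delta);        // unit-length rotation axis
        float clamped = glm::clamp(angle, -maxAngleRadians, maxAngleRadians);
        return glm::angleAxis(clamped, axis);     // same axis, capped angle
    }

As the in-code TODO notes, a proper swing-twist decomposition would clamp only the off-axis (swing) component; clamping the full rotation angle is the simpler approximation the commit ships with.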
@@ -46,7 +46,7 @@ public:
virtual render::ScenePointer getMain3DScene() = 0;
virtual render::EnginePointer getRenderEngine() = 0;

virtual void pushPreRenderLambda(void* key, std::function<void()> func) = 0;
virtual void pushPostUpdateLambda(void* key, std::function<void()> func) = 0;

// FIXME - we shouldn't assume that there's a single instance of an AbstractViewStateInterface
static AbstractViewStateInterface* instance();
@@ -307,6 +307,16 @@ static void addLink(const AnimPose& rootPose, const AnimPose& pose, const AnimPo
}
}

static void addLine(const glm::vec3& start, const glm::vec3& end, const glm::vec4& color, Vertex*& v) {
uint32_t colorInt = toRGBA(color);
v->pos = start;
v->rgba = colorInt;
v++;
v->pos = end;
v->rgba = colorInt;
v++;
}

void AnimDebugDraw::update() {

render::ScenePointer scene = AbstractViewStateInterface::instance()->getMain3DScene();

@@ -319,6 +329,7 @@ void AnimDebugDraw::update() {

const size_t VERTICES_PER_BONE = (6 + (NUM_CIRCLE_SLICES * 2) * 3);
const size_t VERTICES_PER_LINK = 8 * 2;
const size_t VERTICES_PER_RAY = 2;

const float BONE_RADIUS = 0.01f; // 1 cm
const float POSE_RADIUS = 0.1f; // 10 cm

@@ -342,6 +353,7 @@ void AnimDebugDraw::update() {
numVerts += (int)markerMap.size() * VERTICES_PER_BONE;
auto myAvatarMarkerMap = DebugDraw::getInstance().getMyAvatarMarkerMap();
numVerts += (int)myAvatarMarkerMap.size() * VERTICES_PER_BONE;
numVerts += (int)DebugDraw::getInstance().getRays().size() * VERTICES_PER_RAY;

// allocate verts!
data._vertexBuffer->resize(sizeof(Vertex) * numVerts);

@@ -390,6 +402,12 @@ void AnimDebugDraw::update() {
addBone(myAvatarPose, AnimPose(glm::vec3(1), rot, pos), radius, v);
}

// draw rays from shared DebugDraw singleton
for (auto& iter : DebugDraw::getInstance().getRays()) {
addLine(std::get<0>(iter), std::get<1>(iter), std::get<2>(iter), v);
}
DebugDraw::getInstance().clearRays();

assert(numVerts == (v - verts));

render::Item::Bound theBound;
@@ -132,7 +132,7 @@ void Model::updateRenderItems() {
// the application will ensure only the last lambda is actually invoked.
void* key = (void*)this;
std::weak_ptr<Model> weakSelf = shared_from_this();
AbstractViewStateInterface::instance()->pushPreRenderLambda(key, [weakSelf]() {
AbstractViewStateInterface::instance()->pushPostUpdateLambda(key, [weakSelf]() {

// do nothing, if the model has already been destroyed.
auto self = weakSelf.lock();
@@ -23,6 +23,11 @@ DebugDraw::~DebugDraw() {

}

// world space line, drawn only once
void DebugDraw::drawRay(const glm::vec3& start, const glm::vec3& end, const glm::vec4& color) {
_rays.push_back(Ray(start, end, color));
}

void DebugDraw::addMarker(const std::string& key, const glm::quat& rotation, const glm::vec3& position, const glm::vec4& color) {
_markers[key] = MarkerInfo(rotation, position, color);
}
@@ -13,6 +13,7 @@
#include <unordered_map>
#include <tuple>
#include <string>
#include <vector>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

@@ -23,16 +24,21 @@ public:
DebugDraw();
~DebugDraw();

// world space maker
// world space line, drawn only once
void drawRay(const glm::vec3& start, const glm::vec3& end, const glm::vec4& color);

// world space maker, marker drawn every frame until it is removed.
void addMarker(const std::string& key, const glm::quat& rotation, const glm::vec3& position, const glm::vec4& color);
void removeMarker(const std::string& key);

// myAvatar relative marker
// myAvatar relative marker, maker is drawn every frame until it is removed.
void addMyAvatarMarker(const std::string& key, const glm::quat& rotation, const glm::vec3& position, const glm::vec4& color);
void removeMyAvatarMarker(const std::string& key);

using MarkerInfo = std::tuple<glm::quat, glm::vec3, glm::vec4>;
using MarkerMap = std::unordered_map<std::string, MarkerInfo>;
using Ray = std::tuple<glm::vec3, glm::vec3, glm::vec4>;
using Rays = std::vector<Ray>;

//
// accessors used by renderer

@@ -44,12 +50,15 @@ public:
const glm::vec3& getMyAvatarPos() const { return _myAvatarPos; }
void updateMyAvatarRot(const glm::quat& rot) { _myAvatarRot = rot; }
const glm::quat& getMyAvatarRot() const { return _myAvatarRot; }
const Rays getRays() const { return _rays; }
void clearRays() { _rays.clear(); }

protected:
MarkerMap _markers;
MarkerMap _myAvatarMarkers;
glm::quat _myAvatarRot;
glm::vec3 _myAvatarPos;
Rays _rays;
};

#endif // hifi_DebugDraw_h
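A small usage sketch of the DebugDraw API declared above (the calling function is hypothetical): drawRay queues a world-space line that the renderer consumes through getRays() and clearRays() on the next frame, while addMarker and addMyAvatarMarker keep drawing a marker every frame until the matching remove call.

    // Hypothetical caller demonstrating the DebugDraw singleton declared above.
    #include <DebugDraw.h>
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    void debugDrawExample(const glm::vec3& jointPos, const glm::quat& jointRot) {
        auto& debugDraw = DebugDraw::getInstance();

        // One-frame line: re-issue it on every frame it should stay visible.
        debugDraw.drawRay(jointPos, jointPos + glm::vec3(0.0f, 1.0f, 0.0f), glm::vec4(0.0f, 1.0f, 0.0f, 1.0f));

        // Persistent world-space marker, keyed by name, drawn until removed.
        debugDraw.addMarker("myJoint", jointRot, jointPos, glm::vec4(1.0f, 0.0f, 0.0f, 1.0f));
        // ... later, when the marker is no longer wanted:
        debugDraw.removeMarker("myJoint");
    }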
@@ -431,13 +431,27 @@ glm::vec3 transformVectorFull(const glm::mat4& m, const glm::vec3& v) {
void generateBasisVectors(const glm::vec3& primaryAxis, const glm::vec3& secondaryAxis,
glm::vec3& uAxisOut, glm::vec3& vAxisOut, glm::vec3& wAxisOut) {

// primaryAxis & secondaryAxis must not be zero.
#ifndef NDEBUG
const float MIN_LENGTH_SQUARED = 1.0e-6f;
#endif
assert(fabsf(glm::length2(primaryAxis) > MIN_LENGTH_SQUARED));
assert(fabsf(glm::length2(secondaryAxis) > MIN_LENGTH_SQUARED));

uAxisOut = glm::normalize(primaryAxis);
wAxisOut = glm::cross(uAxisOut, secondaryAxis);
if (glm::length(wAxisOut) > 0.0f) {
wAxisOut = glm::normalize(wAxisOut);
} else {
wAxisOut = glm::normalize(glm::cross(uAxisOut, glm::vec3(0, 1, 0)));
glm::vec3 normSecondary = glm::normalize(secondaryAxis);

// if secondaryAxis is parallel with the primaryAxis, pick another axis.
const float EPSILON = 1.0e-4f;
if (fabsf(fabsf(glm::dot(uAxisOut, secondaryAxis)) - 1.0f) > EPSILON) {
// pick a better secondaryAxis.
normSecondary = glm::vec3(1.0f, 0.0f, 0.0f);
if (fabsf(fabsf(glm::dot(uAxisOut, secondaryAxis)) - 1.0f) > EPSILON) {
normSecondary = glm::vec3(0.0f, 1.0f, 0.0f);
}
}

wAxisOut = glm::normalize(glm::cross(uAxisOut, secondaryAxis));
vAxisOut = glm::cross(wAxisOut, uAxisOut);
}
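For reference, this is how the Rig::updateEyeJoint hunk earlier uses the repaired generateBasisVectors to turn a look-at direction plus the head's up vector into an orientation. The wrapper below is a hypothetical standalone restatement, not code from the commit.

    // Hypothetical wrapper: build a look-at orientation from a forward direction and an up hint,
    // the same way Rig::updateEyeJoint combines generateBasisVectors with glm::quat_cast.
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Declared in the library's GLMHelpers header; repeated here only so the sketch is self-contained.
    void generateBasisVectors(const glm::vec3& primaryAxis, const glm::vec3& secondaryAxis,
                              glm::vec3& uAxisOut, glm::vec3& vAxisOut, glm::vec3& wAxisOut);

    glm::quat lookAtOrientation(const glm::vec3& forward, const glm::vec3& up) {
        glm::vec3 z, y, x;
        generateBasisVectors(forward, up, z, y, x);   // z = normalized forward, y = re-orthogonalized up
        glm::mat3 m(glm::cross(y, z), y, z);          // columns: X = y cross z, Y = y, Z = z
        return glm::normalize(glm::quat_cast(m));     // identity when forward = +Z and up = +Y
    }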