Merge branch 'master' of github.com:highfidelity/hifi into improve-rotation-jitter
commit 724f849d88
13 changed files with 154 additions and 130 deletions
Binary file not shown.
@@ -21,9 +21,9 @@
 #include <PIDController.h>
 #include <SimpleMovingAverage.h>
 #include <shared/RateCounter.h>
-#include <avatars-renderer/AvatarMotionState.h>
 #include <avatars-renderer/ScriptAvatar.h>

+#include "AvatarMotionState.h"
 #include "MyAvatar.h"

 class AudioInjector;
@@ -14,10 +14,10 @@

 #include <QSet>

+#include <avatars-renderer/Avatar.h>
 #include <ObjectMotionState.h>
 #include <BulletUtil.h>

-#include "Avatar.h"

 class AvatarMotionState : public ObjectMotionState {
 public:
@@ -291,6 +291,11 @@ QByteArray MyAvatar::toByteArrayStateful(AvatarDataDetail dataDetail) {
 }

 void MyAvatar::resetSensorsAndBody() {
+    if (QThread::currentThread() != thread()) {
+        QMetaObject::invokeMethod(this, "resetSensorsAndBody");
+        return;
+    }
+
     qApp->getActiveDisplayPlugin()->resetSensors();
     reset(true, false, true);
 }
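
Note: the guard added above is the standard Qt pattern for making a method safe to call from any thread: if the caller is not on the object's owning thread, the call is queued back to that thread through the meta-object system and the current invocation returns. A minimal sketch, using a hypothetical QObject subclass named Worker (the method must be a slot or Q_INVOKABLE for the string-based invokeMethod to resolve it):

    #include <QObject>
    #include <QThread>

    class Worker : public QObject {
        Q_OBJECT
    public:
        Q_INVOKABLE void doWork() {
            if (QThread::currentThread() != thread()) {
                // Queue the call on the thread that owns this object, then bail out.
                QMetaObject::invokeMethod(this, "doWork");
                return;
            }
            // From here on we are guaranteed to be running on the owning thread.
        }
    };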
@@ -44,14 +44,17 @@ glm::quat MyHead::getCameraOrientation() const {
 void MyHead::simulate(float deltaTime) {
     auto player = DependencyManager::get<recording::Deck>();
     // Only use face trackers when not playing back a recording.
-    if (!player->isPlaying()) {
+    if (player->isPlaying()) {
+        Parent::simulate(deltaTime);
+    } else {
+        computeAudioLoudness(deltaTime);
+
         FaceTracker* faceTracker = qApp->getActiveFaceTracker();
-        _isFaceTrackerConnected = faceTracker != NULL && !faceTracker->isMuted();
+        _isFaceTrackerConnected = faceTracker && !faceTracker->isMuted();
         if (_isFaceTrackerConnected) {
             _transientBlendshapeCoefficients = faceTracker->getBlendshapeCoefficients();

             if (typeid(*faceTracker) == typeid(DdeFaceTracker)) {

                 if (Menu::getInstance()->isOptionChecked(MenuOption::UseAudioForMouth)) {
                     calculateMouthShapes(deltaTime);

@@ -68,9 +71,19 @@ void MyHead::simulate(float deltaTime) {
                 }
                 applyEyelidOffset(getFinalOrientationInWorldFrame());
             }
-        }
+        } else {
+            computeFaceMovement(deltaTime);
+        }
+
         auto eyeTracker = DependencyManager::get<EyeTracker>();
-        _isEyeTrackerConnected = eyeTracker->isTracking();
+        _isEyeTrackerConnected = eyeTracker && eyeTracker->isTracking();
+        if (_isEyeTrackerConnected) {
+            // TODO? figure out where EyeTracker data harvested. Move it here?
+            _saccade = glm::vec3();
+        } else {
+            computeEyeMovement(deltaTime);
+        }
+
     }
-    Parent::simulate(deltaTime);
+    computeEyePosition();
 }
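
Note: both condition changes above replace an unguarded (or NULL-style) check with a plain pointer test; `eyeTracker->isTracking()` would dereference a null result, while `eyeTracker && eyeTracker->isTracking()` short-circuits. A minimal sketch of the idiom, with a hypothetical stand-in type rather than the real EyeTracker:

    #include <memory>

    struct TrackerLike { bool isTracking() const { return true; } };

    bool isConnected(const std::shared_ptr<TrackerLike>& tracker) {
        // && short-circuits: isTracking() runs only when tracker is non-null,
        // so a missing dependency reads as "not connected" instead of crashing.
        return tracker && tracker->isTracking();
    }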
@@ -1,6 +1,6 @@
 set(TARGET_NAME avatars-renderer)
 AUTOSCRIBE_SHADER_LIB(gpu model render render-utils)
 setup_hifi_library(Widgets Network Script)
-link_hifi_libraries(shared gpu model animation physics model-networking script-engine render image render-utils)
+link_hifi_libraries(shared gpu model animation model-networking script-engine render image render-utils)

 target_bullet()
@@ -23,9 +23,10 @@

 #include "Avatar.h"

+const float NORMAL_HZ = 60.0f; // the update rate the constant values were tuned for
+
 using namespace std;

-static bool fixGaze { false };
 static bool disableEyelidAdjustment { false };

 Head::Head(Avatar* owningAvatar) :
@@ -42,17 +43,11 @@ void Head::reset() {
     _baseYaw = _basePitch = _baseRoll = 0.0f;
 }

-void Head::simulate(float deltaTime) {
-    const float NORMAL_HZ = 60.0f; // the update rate the constant values were tuned for
-
+void Head::computeAudioLoudness(float deltaTime) {
     // grab the audio loudness from the owning avatar, if we have one
-    float audioLoudness = 0.0f;
-
-    if (_owningAvatar) {
-        audioLoudness = _owningAvatar->getAudioLoudness();
-    }
-
-    // Update audio trailing average for rendering facial animations
+    float audioLoudness = _owningAvatar ? _owningAvatar->getAudioLoudness() : 0.0f;
+
+    // Update audio trailing average for rendering facial animations
     const float AUDIO_AVERAGING_SECS = 0.05f;
     const float AUDIO_LONG_TERM_AVERAGING_SECS = 30.0f;
     _averageLoudness = glm::mix(_averageLoudness, audioLoudness, glm::min(deltaTime / AUDIO_AVERAGING_SECS, 1.0f));
@@ -63,116 +58,114 @@ void Head::simulate(float deltaTime) {
         _longTermAverageLoudness = glm::mix(_longTermAverageLoudness, _averageLoudness, glm::min(deltaTime / AUDIO_LONG_TERM_AVERAGING_SECS, 1.0f));
     }

-    if (!_isFaceTrackerConnected) {
-        if (!_isEyeTrackerConnected) {
-            // Update eye saccades
-            const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
-            const float AVERAGE_SACCADE_INTERVAL = 6.0f;
-            const float MICROSACCADE_MAGNITUDE = 0.002f;
-            const float SACCADE_MAGNITUDE = 0.04f;
-            const float NOMINAL_FRAME_RATE = 60.0f;
-
-            if (randFloat() < deltaTime / AVERAGE_MICROSACCADE_INTERVAL) {
-                _saccadeTarget = MICROSACCADE_MAGNITUDE * randVector();
-            } else if (randFloat() < deltaTime / AVERAGE_SACCADE_INTERVAL) {
-                _saccadeTarget = SACCADE_MAGNITUDE * randVector();
-            }
-            _saccade += (_saccadeTarget - _saccade) * pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
-        } else {
-            _saccade = glm::vec3();
-        }
-
-        // Detect transition from talking to not; force blink after that and a delay
-        bool forceBlink = false;
-        const float TALKING_LOUDNESS = 100.0f;
-        const float BLINK_AFTER_TALKING = 0.25f;
-        _timeWithoutTalking += deltaTime;
-        if ((_averageLoudness - _longTermAverageLoudness) > TALKING_LOUDNESS) {
-            _timeWithoutTalking = 0.0f;
-        } else if (_timeWithoutTalking - deltaTime < BLINK_AFTER_TALKING && _timeWithoutTalking >= BLINK_AFTER_TALKING) {
-            forceBlink = true;
-        }
-
-        // Update audio attack data for facial animation (eyebrows and mouth)
-        float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
-        _audioAttack = audioAttackAveragingRate * _audioAttack +
-            (1.0f - audioAttackAveragingRate) * fabs((audioLoudness - _longTermAverageLoudness) - _lastLoudness);
-        _lastLoudness = (audioLoudness - _longTermAverageLoudness);
-
-        const float BROW_LIFT_THRESHOLD = 100.0f;
-        if (_audioAttack > BROW_LIFT_THRESHOLD) {
-            _browAudioLift += sqrtf(_audioAttack) * 0.01f;
-        }
-        _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);
-
-        const float BLINK_SPEED = 10.0f;
-        const float BLINK_SPEED_VARIABILITY = 1.0f;
-        const float BLINK_START_VARIABILITY = 0.25f;
-        const float FULLY_OPEN = 0.0f;
-        const float FULLY_CLOSED = 1.0f;
-        if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
-            // no blinking when brows are raised; blink less with increasing loudness
-            const float BASE_BLINK_RATE = 15.0f / 60.0f;
-            const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
-            if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(fabs(_averageLoudness - _longTermAverageLoudness)) *
-                ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
-                _leftEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
-                _rightEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
-                if (randFloat() < 0.5f) {
-                    _leftEyeBlink = BLINK_START_VARIABILITY;
-                } else {
-                    _rightEyeBlink = BLINK_START_VARIABILITY;
-                }
-            }
-        } else {
-            _leftEyeBlink = glm::clamp(_leftEyeBlink + _leftEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
-            _rightEyeBlink = glm::clamp(_rightEyeBlink + _rightEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
-
-            if (_leftEyeBlink == FULLY_CLOSED) {
-                _leftEyeBlinkVelocity = -BLINK_SPEED;
-
-            } else if (_leftEyeBlink == FULLY_OPEN) {
-                _leftEyeBlinkVelocity = 0.0f;
-            }
-            if (_rightEyeBlink == FULLY_CLOSED) {
-                _rightEyeBlinkVelocity = -BLINK_SPEED;
-
-            } else if (_rightEyeBlink == FULLY_OPEN) {
-                _rightEyeBlinkVelocity = 0.0f;
-            }
-        }
-
-        // use data to update fake Faceshift blendshape coefficients
-        calculateMouthShapes(deltaTime);
-        FaceTracker::updateFakeCoefficients(_leftEyeBlink,
-            _rightEyeBlink,
-            _browAudioLift,
-            _audioJawOpen,
-            _mouth2,
-            _mouth3,
-            _mouth4,
-            _transientBlendshapeCoefficients);
-
-        applyEyelidOffset(getOrientation());
-
-    } else {
-        _saccade = glm::vec3();
-    }
-    if (fixGaze) { // if debug menu turns off, use no saccade
-        _saccade = glm::vec3();
-    }
-
-    _leftEyePosition = _rightEyePosition = getPosition();
-    _eyePosition = getPosition();
-
-    if (_owningAvatar) {
-        auto skeletonModel = static_cast<Avatar*>(_owningAvatar)->getSkeletonModel();
-        if (skeletonModel) {
-            skeletonModel->getEyePositions(_leftEyePosition, _rightEyePosition);
-        }
-    }
-
-    _eyePosition = calculateAverageEyePosition();
+    float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
+    _audioAttack = audioAttackAveragingRate * _audioAttack +
+        (1.0f - audioAttackAveragingRate) * fabs((audioLoudness - _longTermAverageLoudness) - _lastLoudness);
+    _lastLoudness = (audioLoudness - _longTermAverageLoudness);
+}
+
+void Head::computeEyeMovement(float deltaTime) {
+    // Update eye saccades
+    const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
+    const float AVERAGE_SACCADE_INTERVAL = 6.0f;
+    const float MICROSACCADE_MAGNITUDE = 0.002f;
+    const float SACCADE_MAGNITUDE = 0.04f;
+    const float NOMINAL_FRAME_RATE = 60.0f;
+
+    if (randFloat() < deltaTime / AVERAGE_MICROSACCADE_INTERVAL) {
+        _saccadeTarget = MICROSACCADE_MAGNITUDE * randVector();
+    } else if (randFloat() < deltaTime / AVERAGE_SACCADE_INTERVAL) {
+        _saccadeTarget = SACCADE_MAGNITUDE * randVector();
+    }
+    _saccade += (_saccadeTarget - _saccade) * pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
+
+    // Detect transition from talking to not; force blink after that and a delay
+    bool forceBlink = false;
+    const float TALKING_LOUDNESS = 100.0f;
+    const float BLINK_AFTER_TALKING = 0.25f;
+    _timeWithoutTalking += deltaTime;
+    if ((_averageLoudness - _longTermAverageLoudness) > TALKING_LOUDNESS) {
+        _timeWithoutTalking = 0.0f;
+    } else if (_timeWithoutTalking - deltaTime < BLINK_AFTER_TALKING && _timeWithoutTalking >= BLINK_AFTER_TALKING) {
+        forceBlink = true;
+    }
+
+    const float BLINK_SPEED = 10.0f;
+    const float BLINK_SPEED_VARIABILITY = 1.0f;
+    const float BLINK_START_VARIABILITY = 0.25f;
+    const float FULLY_OPEN = 0.0f;
+    const float FULLY_CLOSED = 1.0f;
+    if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
+        // no blinking when brows are raised; blink less with increasing loudness
+        const float BASE_BLINK_RATE = 15.0f / 60.0f;
+        const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
+        if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(fabs(_averageLoudness - _longTermAverageLoudness)) *
+            ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
+            _leftEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
+            _rightEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
+            if (randFloat() < 0.5f) {
+                _leftEyeBlink = BLINK_START_VARIABILITY;
+            } else {
+                _rightEyeBlink = BLINK_START_VARIABILITY;
+            }
+        }
+    } else {
+        _leftEyeBlink = glm::clamp(_leftEyeBlink + _leftEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
+        _rightEyeBlink = glm::clamp(_rightEyeBlink + _rightEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
+
+        if (_leftEyeBlink == FULLY_CLOSED) {
+            _leftEyeBlinkVelocity = -BLINK_SPEED;
+
+        } else if (_leftEyeBlink == FULLY_OPEN) {
+            _leftEyeBlinkVelocity = 0.0f;
+        }
+        if (_rightEyeBlink == FULLY_CLOSED) {
+            _rightEyeBlinkVelocity = -BLINK_SPEED;
+
+        } else if (_rightEyeBlink == FULLY_OPEN) {
+            _rightEyeBlinkVelocity = 0.0f;
+        }
+    }
+
+    applyEyelidOffset(getOrientation());
+}
+
+void Head::computeFaceMovement(float deltaTime) {
+    // Update audio attack data for facial animation (eyebrows and mouth)
+    const float BROW_LIFT_THRESHOLD = 100.0f;
+    if (_audioAttack > BROW_LIFT_THRESHOLD) {
+        _browAudioLift += sqrtf(_audioAttack) * 0.01f;
+    }
+    _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);
+
+    // use data to update fake Faceshift blendshape coefficients
+    calculateMouthShapes(deltaTime);
+    FaceTracker::updateFakeCoefficients(_leftEyeBlink,
+        _rightEyeBlink,
+        _browAudioLift,
+        _audioJawOpen,
+        _mouth2,
+        _mouth3,
+        _mouth4,
+        _transientBlendshapeCoefficients);
+}
+
+void Head::computeEyePosition() {
+    _leftEyePosition = _rightEyePosition = getPosition();
+
+    if (_owningAvatar) {
+        auto skeletonModel = static_cast<Avatar*>(_owningAvatar)->getSkeletonModel();
+        if (skeletonModel) {
+            skeletonModel->getEyePositions(_leftEyePosition, _rightEyePosition);
+        }
+    }
+    _eyePosition = 0.5f * (_leftEyePosition + _rightEyePosition);
+}
+
+void Head::simulate(float deltaTime) {
+    computeAudioLoudness(deltaTime);
+    computeFaceMovement(deltaTime);
+    computeEyeMovement(deltaTime);
+    computeEyePosition();
 }

 void Head::calculateMouthShapes(float deltaTime) {
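
Note: the smoothing that computeAudioLoudness() keeps using is a frame-rate-independent exponential moving average: glm::mix(average, sample, min(deltaTime / tau, 1.0f)) moves toward the sample at a rate set by the averaging time constant, and the clamp keeps one long frame from overshooting the target. A standalone sketch of the same arithmetic (hypothetical helper, not part of the commit):

    #include <algorithm>

    float movingAverage(float average, float sample, float deltaTime, float averagingSecs) {
        float alpha = std::min(deltaTime / averagingSecs, 1.0f); // per-frame blend factor
        return average + (sample - average) * alpha;             // same as glm::mix(average, sample, alpha)
    }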
@@ -83,7 +83,10 @@ public:
     float getTimeWithoutTalking() const { return _timeWithoutTalking; }

 protected:
-    glm::vec3 calculateAverageEyePosition() const { return _leftEyePosition + (_rightEyePosition - _leftEyePosition ) * 0.5f; }
+    void computeAudioLoudness(float deltaTime);
+    void computeEyeMovement(float deltaTime);
+    void computeFaceMovement(float deltaTime);
+    void computeEyePosition();

     // disallow copies of the Head, copy of owning Avatar is disallowed too
     Head(const Head&);
@@ -445,7 +445,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
     if (hasFaceTrackerInfo) {
         auto startSection = destinationBuffer;
         auto faceTrackerInfo = reinterpret_cast<AvatarDataPacket::FaceTrackerInfo*>(destinationBuffer);
-        auto blendshapeCoefficients = _headData->getSummedBlendshapeCoefficients();
+        const auto& blendshapeCoefficients = _headData->getSummedBlendshapeCoefficients();

         faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink;
         faceTrackerInfo->rightEyeBlink = _headData->_rightEyeBlink;
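
Note: `const auto&` here binds directly to the getter's return value instead of deducing a QVector<float> and copying it; even with Qt's implicitly shared containers, the reference avoids the reference-count churn of a copy. A sketch, assuming (as the change suggests) that the getter returns a reference:

    #include <QVector>

    const QVector<float>& coefficients(); // hypothetical getter returning a reference

    void use() {
        auto copied = coefficients();        // auto drops the reference: makes a (refcounted) copy
        const auto& viewed = coefficients(); // binds to the existing vector: no copy at all
        Q_UNUSED(copied);
        Q_UNUSED(viewed);
    }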
@@ -1538,7 +1538,14 @@ void AvatarData::processAvatarIdentity(const Identity& identity, bool& identityC

     // use the timestamp from this identity, since we want to honor the updated times in "server clock"
     // this will overwrite any changes we made locally to this AvatarData's _identityUpdatedAt
-    _identityUpdatedAt = identity.updatedAt - clockSkew;
+    // Additionally, ensure that the timestamp that we try to record isn't negative, as
+    // "_identityUpdatedAt" is an *unsigned* 64-bit integer. Furthermore, negative timestamps
+    // wouldn't make sense.
+    if (identity.updatedAt > clockSkew) {
+        _identityUpdatedAt = identity.updatedAt - clockSkew;
+    } else {
+        _identityUpdatedAt = 0;
+    }
 }

 QByteArray AvatarData::identityByteArray() const {
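
Note: the guard above exists because `_identityUpdatedAt` is unsigned; when `clockSkew` exceeds `identity.updatedAt`, the naive subtraction wraps around to a huge value instead of going negative. A self-contained illustration with hypothetical numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t updatedAt = 100;               // hypothetical timestamps, not values from the commit
        uint64_t clockSkew = 250;
        uint64_t naive = updatedAt - clockSkew; // wraps around to 18446744073709551466
        uint64_t safe = (updatedAt > clockSkew) ? updatedAt - clockSkew : 0; // clamps to 0, as the fix does
        printf("naive=%llu safe=%llu\n", (unsigned long long)naive, (unsigned long long)safe);
        return 0;
    }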
@@ -28,12 +28,6 @@ HeadData::HeadData(AvatarData* owningAvatar) :
     _basePitch(0.0f),
     _baseRoll(0.0f),
     _lookAtPosition(0.0f, 0.0f, 0.0f),
-    _isFaceTrackerConnected(false),
-    _isEyeTrackerConnected(false),
-    _leftEyeBlink(0.0f),
-    _rightEyeBlink(0.0f),
-    _averageLoudness(0.0f),
-    _browAudioLift(0.0f),
     _blendshapeCoefficients(QVector<float>(0, 0.0f)),
     _transientBlendshapeCoefficients(QVector<float>(0, 0.0f)),
     _summedBlendshapeCoefficients(QVector<float>(0, 0.0f)),
@@ -63,7 +63,7 @@ public:
     void setBlendshapeCoefficients(const QVector<float>& blendshapeCoefficients) { _blendshapeCoefficients = blendshapeCoefficients; }

     const glm::vec3& getLookAtPosition() const { return _lookAtPosition; }
     void setLookAtPosition(const glm::vec3& lookAtPosition) {
         if (_lookAtPosition != lookAtPosition) {
             _lookAtPositionChanged = usecTimestampNow();
         }
@@ -85,12 +85,12 @@ protected:
     glm::vec3 _lookAtPosition;
     quint64 _lookAtPositionChanged { 0 };

-    bool _isFaceTrackerConnected;
-    bool _isEyeTrackerConnected;
-    float _leftEyeBlink;
-    float _rightEyeBlink;
-    float _averageLoudness;
-    float _browAudioLift;
+    bool _isFaceTrackerConnected { false };
+    bool _isEyeTrackerConnected { false };
+    float _leftEyeBlink { 0.0f };
+    float _rightEyeBlink { 0.0f };
+    float _averageLoudness { 0.0f };
+    float _browAudioLift { 0.0f };

     QVector<float> _blendshapeCoefficients;
     QVector<float> _transientBlendshapeCoefficients;
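
Note: this hunk and the HeadData constructor hunk above are two halves of one cleanup: default values move out of the constructor's initializer list and into in-class member initializers, so every constructor, present or future, gets the same well-defined defaults. A minimal sketch of the pattern with a hypothetical type:

    struct Example {
        bool _connected { false }; // brace initializer supplies the default at the declaration
        float _blink { 0.0f };
        Example() = default;       // no initializer list needed; members are already set
    };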
@@ -126,7 +126,16 @@ QJsonDocument variantMapToJsonDocument(const QSettings::SettingsMap& map) {
         }

         switch (variantType) {
-            case QVariant::Map:
+            case QVariant::Map: {
+                auto varmap = variant.toMap();
+                for (auto mapit = varmap.cbegin(); mapit != varmap.cend(); ++mapit) {
+                    auto& mapkey = mapit.key();
+                    auto& mapvariant = mapit.value();
+                    object.insert(key + "/" + mapkey, QJsonValue::fromVariant(mapvariant));
+                }
+                break;
+            }
+
             case QVariant::List:
             case QVariant::Hash: {
                 qCritical() << "Unsupported variant type" << variant.typeName();
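
Note: the new QVariant::Map case flattens one level of nested map into slash-separated keys on the JSON object, so a map stored under "parent" contributes entries like "parent/child". A sketch of the same flattening as a free function (hypothetical helper; the real code inlines this in the switch, and deeper nesting would still need recursion):

    #include <QJsonObject>
    #include <QJsonValue>
    #include <QVariantMap>

    void flattenInto(QJsonObject& object, const QString& key, const QVariantMap& varmap) {
        for (auto it = varmap.cbegin(); it != varmap.cend(); ++it) {
            // "parent" + "/" + "child" mirrors QSettings' group/key addressing
            object.insert(key + "/" + it.key(), QJsonValue::fromVariant(it.value()));
        }
    }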