diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp
index bd66a96879..ae490c05e7 100644
--- a/interface/src/Application.cpp
+++ b/interface/src/Application.cpp
@@ -3976,7 +3976,7 @@ void Application::updateMyAvatarLookAtPosition() {
         if (faceAngle < MAXIMUM_FACE_ANGLE) {
             // Randomly look back and forth between look targets
             eyeContactTarget target = Menu::getInstance()->isOptionChecked(MenuOption::FixGaze) ?
-            LEFT_EYE : myAvatar->getEyeContactTarget();
+                LEFT_EYE : myAvatar->getEyeContactTarget();
             switch (target) {
                 case LEFT_EYE:
                     lookAtSpot = lookingAtHead->getLeftEyePosition();
diff --git a/interface/src/avatar/MyHead.cpp b/interface/src/avatar/MyHead.cpp
index 793fbb79c4..f02aefec5b 100644
--- a/interface/src/avatar/MyHead.cpp
+++ b/interface/src/avatar/MyHead.cpp
@@ -44,17 +44,14 @@ glm::quat MyHead::getCameraOrientation() const {
 
 void MyHead::simulate(float deltaTime) {
     auto player = DependencyManager::get<recording::Deck>();
     // Only use face trackers when not playing back a recording.
-    if (player->isPlaying()) {
-        Parent::simulate(deltaTime);
-    } else {
-        computeAudioLoudness(deltaTime);
-
+    if (!player->isPlaying()) {
         FaceTracker* faceTracker = qApp->getActiveFaceTracker();
-        _isFaceTrackerConnected = faceTracker && !faceTracker->isMuted();
+        _isFaceTrackerConnected = faceTracker != nullptr && !faceTracker->isMuted();
         if (_isFaceTrackerConnected) {
             _transientBlendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
 
             if (typeid(*faceTracker) == typeid(DdeFaceTracker)) {
+
                 if (Menu::getInstance()->isOptionChecked(MenuOption::UseAudioForMouth)) {
                     calculateMouthShapes(deltaTime);
@@ -71,19 +68,9 @@ void MyHead::simulate(float deltaTime) {
                 }
                 applyEyelidOffset(getFinalOrientationInWorldFrame());
             }
-        } else {
-            computeFaceMovement(deltaTime);
-        }
-
-        auto eyeTracker = DependencyManager::get<EyeTracker>();
-        _isEyeTrackerConnected = eyeTracker && eyeTracker->isTracking();
-        if (_isEyeTrackerConnected) {
-            // TODO? figure out where EyeTracker data harvested. Move it here?
-            _saccade = glm::vec3();
-        } else {
-            computeEyeMovement(deltaTime);
         }
-
+        auto eyeTracker = DependencyManager::get<EyeTracker>();
+        _isEyeTrackerConnected = eyeTracker->isTracking();
     }
-    computeEyePosition();
+    Parent::simulate(deltaTime);
 }
diff --git a/libraries/avatars-renderer/src/avatars-renderer/Head.cpp b/libraries/avatars-renderer/src/avatars-renderer/Head.cpp
index b4b0929c0c..96ecd86ff4 100644
--- a/libraries/avatars-renderer/src/avatars-renderer/Head.cpp
+++ b/libraries/avatars-renderer/src/avatars-renderer/Head.cpp
@@ -23,8 +23,6 @@
 
 #include "Avatar.h"
 
-const float NORMAL_HZ = 60.0f; // the update rate the constant values were tuned for
-
 using namespace std;
 
 static bool disableEyelidAdjustment { false };
@@ -43,7 +41,9 @@ void Head::reset() {
     _baseYaw = _basePitch = _baseRoll = 0.0f;
 }
 
-void Head::computeAudioLoudness(float deltaTime) {
+void Head::simulate(float deltaTime) {
+    const float NORMAL_HZ = 60.0f; // the update rate the constant values were tuned for
+
     // grab the audio loudness from the owning avatar, if we have one
     float audioLoudness = _owningAvatar ? _owningAvatar->getAudioLoudness() : 0.0f;
 
@@ -58,99 +58,102 @@ void Head::computeAudioLoudness(float deltaTime) {
         _longTermAverageLoudness = glm::mix(_longTermAverageLoudness, _averageLoudness, glm::min(deltaTime / AUDIO_LONG_TERM_AVERAGING_SECS, 1.0f));
     }
 
-    float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
-    _audioAttack = audioAttackAveragingRate * _audioAttack +
-        (1.0f - audioAttackAveragingRate) * fabs((audioLoudness - _longTermAverageLoudness) - _lastLoudness);
-    _lastLoudness = (audioLoudness - _longTermAverageLoudness);
-}
+    if (!_isFaceTrackerConnected) {
+        if (!_isEyeTrackerConnected) {
+            // Update eye saccades
+            const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
+            const float AVERAGE_SACCADE_INTERVAL = 6.0f;
+            const float MICROSACCADE_MAGNITUDE = 0.002f;
+            const float SACCADE_MAGNITUDE = 0.04f;
+            const float NOMINAL_FRAME_RATE = 60.0f;
 
-void Head::computeEyeMovement(float deltaTime) {
-    // Update eye saccades
-    const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
-    const float AVERAGE_SACCADE_INTERVAL = 6.0f;
-    const float MICROSACCADE_MAGNITUDE = 0.002f;
-    const float SACCADE_MAGNITUDE = 0.04f;
-    const float NOMINAL_FRAME_RATE = 60.0f;
+            if (randFloat() < deltaTime / AVERAGE_MICROSACCADE_INTERVAL) {
+                _saccadeTarget = MICROSACCADE_MAGNITUDE * randVector();
+            } else if (randFloat() < deltaTime / AVERAGE_SACCADE_INTERVAL) {
+                _saccadeTarget = SACCADE_MAGNITUDE * randVector();
+            }
+            _saccade += (_saccadeTarget - _saccade) * pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
+        } else {
+            _saccade = glm::vec3();
+        }
 
-    if (randFloat() < deltaTime / AVERAGE_MICROSACCADE_INTERVAL) {
-        _saccadeTarget = MICROSACCADE_MAGNITUDE * randVector();
-    } else if (randFloat() < deltaTime / AVERAGE_SACCADE_INTERVAL) {
-        _saccadeTarget = SACCADE_MAGNITUDE * randVector();
-    }
-    _saccade += (_saccadeTarget - _saccade) * pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
+        // Detect transition from talking to not; force blink after that and a delay
+        bool forceBlink = false;
+        const float TALKING_LOUDNESS = 100.0f;
+        const float BLINK_AFTER_TALKING = 0.25f;
+        _timeWithoutTalking += deltaTime;
+        if ((_averageLoudness - _longTermAverageLoudness) > TALKING_LOUDNESS) {
+            _timeWithoutTalking = 0.0f;
+        } else if (_timeWithoutTalking - deltaTime < BLINK_AFTER_TALKING && _timeWithoutTalking >= BLINK_AFTER_TALKING) {
+            forceBlink = true;
+        }
 
-    // Detect transition from talking to not; force blink after that and a delay
-    bool forceBlink = false;
-    const float TALKING_LOUDNESS = 100.0f;
-    const float BLINK_AFTER_TALKING = 0.25f;
-    _timeWithoutTalking += deltaTime;
-    if ((_averageLoudness - _longTermAverageLoudness) > TALKING_LOUDNESS) {
-        _timeWithoutTalking = 0.0f;
-    } else if (_timeWithoutTalking - deltaTime < BLINK_AFTER_TALKING && _timeWithoutTalking >= BLINK_AFTER_TALKING) {
-        forceBlink = true;
-    }
+        // Update audio attack data for facial animation (eyebrows and mouth)
+        float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
+        _audioAttack = audioAttackAveragingRate * _audioAttack +
+            (1.0f - audioAttackAveragingRate) * fabs((audioLoudness - _longTermAverageLoudness) - _lastLoudness);
+        _lastLoudness = (audioLoudness - _longTermAverageLoudness);
 
-    const float BLINK_SPEED = 10.0f;
-    const float BLINK_SPEED_VARIABILITY = 1.0f;
-    const float BLINK_START_VARIABILITY = 0.25f;
-    const float FULLY_OPEN = 0.0f;
-    const float FULLY_CLOSED = 1.0f;
-    if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
-        // no blinking when brows are raised; blink less with increasing loudness
-        const float BASE_BLINK_RATE = 15.0f / 60.0f;
-        const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
-        if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(fabs(_averageLoudness - _longTermAverageLoudness)) *
-            ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
-            _leftEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
-            _rightEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
-            if (randFloat() < 0.5f) {
-                _leftEyeBlink = BLINK_START_VARIABILITY;
-            } else {
-                _rightEyeBlink = BLINK_START_VARIABILITY;
+        const float BROW_LIFT_THRESHOLD = 100.0f;
+        if (_audioAttack > BROW_LIFT_THRESHOLD) {
+            _browAudioLift += sqrtf(_audioAttack) * 0.01f;
+        }
+        _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);
+
+        const float BLINK_SPEED = 10.0f;
+        const float BLINK_SPEED_VARIABILITY = 1.0f;
+        const float BLINK_START_VARIABILITY = 0.25f;
+        const float FULLY_OPEN = 0.0f;
+        const float FULLY_CLOSED = 1.0f;
+        if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
+            // no blinking when brows are raised; blink less with increasing loudness
+            const float BASE_BLINK_RATE = 15.0f / 60.0f;
+            const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
+            if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(fabs(_averageLoudness - _longTermAverageLoudness)) *
+                ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
+                _leftEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
+                _rightEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
+                if (randFloat() < 0.5f) {
+                    _leftEyeBlink = BLINK_START_VARIABILITY;
+                } else {
+                    _rightEyeBlink = BLINK_START_VARIABILITY;
+                }
+            }
+        } else {
+            _leftEyeBlink = glm::clamp(_leftEyeBlink + _leftEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
+            _rightEyeBlink = glm::clamp(_rightEyeBlink + _rightEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
+
+            if (_leftEyeBlink == FULLY_CLOSED) {
+                _leftEyeBlinkVelocity = -BLINK_SPEED;
+
+            } else if (_leftEyeBlink == FULLY_OPEN) {
+                _leftEyeBlinkVelocity = 0.0f;
+            }
+            if (_rightEyeBlink == FULLY_CLOSED) {
+                _rightEyeBlinkVelocity = -BLINK_SPEED;
+
+            } else if (_rightEyeBlink == FULLY_OPEN) {
+                _rightEyeBlinkVelocity = 0.0f;
             }
         }
+
+        // use data to update fake Faceshift blendshape coefficients
+        calculateMouthShapes(deltaTime);
+        FaceTracker::updateFakeCoefficients(_leftEyeBlink,
+                                            _rightEyeBlink,
+                                            _browAudioLift,
+                                            _audioJawOpen,
+                                            _mouth2,
+                                            _mouth3,
+                                            _mouth4,
+                                            _transientBlendshapeCoefficients);
+
+        applyEyelidOffset(getOrientation());
+
     } else {
-        _leftEyeBlink = glm::clamp(_leftEyeBlink + _leftEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
-        _rightEyeBlink = glm::clamp(_rightEyeBlink + _rightEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
-
-        if (_leftEyeBlink == FULLY_CLOSED) {
-            _leftEyeBlinkVelocity = -BLINK_SPEED;
-
-        } else if (_leftEyeBlink == FULLY_OPEN) {
-            _leftEyeBlinkVelocity = 0.0f;
-        }
-        if (_rightEyeBlink == FULLY_CLOSED) {
-            _rightEyeBlinkVelocity = -BLINK_SPEED;
-
-        } else if (_rightEyeBlink == FULLY_OPEN) {
-            _rightEyeBlinkVelocity = 0.0f;
-        }
+        _saccade = glm::vec3();
     }
 
-    applyEyelidOffset(getOrientation());
-}
-
-void Head::computeFaceMovement(float deltaTime) {
-    // Update audio attack data for facial animation (eyebrows and mouth)
-    const float BROW_LIFT_THRESHOLD = 100.0f;
-    if (_audioAttack > BROW_LIFT_THRESHOLD) {
-        _browAudioLift += sqrtf(_audioAttack) * 0.01f;
-    }
-    _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);
-
-    // use data to update fake Faceshift blendshape coefficients
-    calculateMouthShapes(deltaTime);
-    FaceTracker::updateFakeCoefficients(_leftEyeBlink,
-                                        _rightEyeBlink,
-                                        _browAudioLift,
-                                        _audioJawOpen,
-                                        _mouth2,
-                                        _mouth3,
-                                        _mouth4,
-                                        _transientBlendshapeCoefficients);
-}
-
-void Head::computeEyePosition() {
     _leftEyePosition = _rightEyePosition = getPosition();
     if (_owningAvatar) {
         auto skeletonModel = static_cast<Avatar*>(_owningAvatar)->getSkeletonModel();
@@ -161,13 +164,6 @@ void Head::computeEyePosition() {
     _eyePosition = 0.5f * (_leftEyePosition + _rightEyePosition);
 }
 
-void Head::simulate(float deltaTime) {
-    computeAudioLoudness(deltaTime);
-    computeFaceMovement(deltaTime);
-    computeEyeMovement(deltaTime);
-    computeEyePosition();
-}
-
 void Head::calculateMouthShapes(float deltaTime) {
     const float JAW_OPEN_SCALE = 0.015f;
     const float JAW_OPEN_RATE = 0.9f;
diff --git a/libraries/avatars-renderer/src/avatars-renderer/Head.h b/libraries/avatars-renderer/src/avatars-renderer/Head.h
index 39331500b5..c5902285b9 100644
--- a/libraries/avatars-renderer/src/avatars-renderer/Head.h
+++ b/libraries/avatars-renderer/src/avatars-renderer/Head.h
@@ -83,11 +83,6 @@ public:
     float getTimeWithoutTalking() const { return _timeWithoutTalking; }
 
 protected:
-    void computeAudioLoudness(float deltaTime);
-    void computeEyeMovement(float deltaTime);
-    void computeFaceMovement(float deltaTime);
-    void computeEyePosition();
-
     // disallow copies of the Head, copy of owning Avatar is disallowed too
     Head(const Head&);
     Head& operator= (const Head&);
diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp
index 3aa5ab69fa..d82068b8ac 100644
--- a/libraries/avatars/src/AvatarData.cpp
+++ b/libraries/avatars/src/AvatarData.cpp
@@ -2030,17 +2030,6 @@ void AvatarData::fromJson(const QJsonObject& json, bool useFrameSkeleton) {
         version = JSON_AVATAR_JOINT_ROTATIONS_IN_RELATIVE_FRAME_VERSION;
     }
 
-    // The head setOrientation likes to overwrite the avatar orientation,
-    // so lets do the head first
-    // Most head data is relative to the avatar, and needs no basis correction,
-    // but the lookat vector does need correction
-    if (json.contains(JSON_AVATAR_HEAD)) {
-        if (!_headData) {
-            _headData = new HeadData(this);
-        }
-        _headData->fromJson(json[JSON_AVATAR_HEAD].toObject());
-    }
-
     if (json.contains(JSON_AVATAR_BODY_MODEL)) {
         auto bodyModelURL = json[JSON_AVATAR_BODY_MODEL].toString();
         if (useFrameSkeleton && bodyModelURL != getSkeletonModelURL().toString()) {
@@ -2079,6 +2068,14 @@ void AvatarData::fromJson(const QJsonObject& json, bool useFrameSkeleton) {
         setOrientation(currentBasis->getRotation());
     }
 
+    // Do after avatar orientation because head look-at needs avatar orientation.
+    if (json.contains(JSON_AVATAR_HEAD)) {
+        if (!_headData) {
+            _headData = new HeadData(this);
+        }
+        _headData->fromJson(json[JSON_AVATAR_HEAD].toObject());
+    }
+
     if (json.contains(JSON_AVATAR_SCALE)) {
         setTargetScale((float)json[JSON_AVATAR_SCALE].toDouble());
     }
diff --git a/libraries/avatars/src/HeadData.cpp b/libraries/avatars/src/HeadData.cpp
index 2704b6539c..8ae33a1b4f 100644
--- a/libraries/avatars/src/HeadData.cpp
+++ b/libraries/avatars/src/HeadData.cpp
@@ -52,6 +52,13 @@ glm::quat HeadData::getOrientation() const {
     return _owningAvatar->getOrientation() * getRawOrientation();
 }
 
+void HeadData::setHeadOrientation(const glm::quat& orientation) {
+    glm::quat bodyOrientation = _owningAvatar->getOrientation();
+    glm::vec3 eulers = glm::degrees(safeEulerAngles(glm::inverse(bodyOrientation) * orientation));
+    _basePitch = eulers.x;
+    _baseYaw = eulers.y;
+    _baseRoll = eulers.z;
+}
 
 void HeadData::setOrientation(const glm::quat& orientation) {
     // rotate body about vertical axis
@@ -61,10 +68,7 @@ void HeadData::setOrientation(const glm::quat& orientation) {
     _owningAvatar->setOrientation(bodyOrientation);
 
     // the rest goes to the head
-    glm::vec3 eulers = glm::degrees(safeEulerAngles(glm::inverse(bodyOrientation) * orientation));
-    _basePitch = eulers.x;
-    _baseYaw = eulers.y;
-    _baseRoll = eulers.z;
+    setHeadOrientation(orientation);
 }
 
 //Lazily construct a lookup map from the blendshapes
@@ -173,14 +177,14 @@ void HeadData::fromJson(const QJsonObject& json) {
         }
     }
 
-    if (json.contains(JSON_AVATAR_HEAD_ROTATION)) {
-        setOrientation(quatFromJsonValue(json[JSON_AVATAR_HEAD_ROTATION]));
-    }
-
     if (json.contains(JSON_AVATAR_HEAD_LOOKAT)) {
         auto relativeLookAt = vec3FromJsonValue(json[JSON_AVATAR_HEAD_LOOKAT]);
         if (glm::length2(relativeLookAt) > 0.01f) {
             setLookAtPosition((_owningAvatar->getOrientation() * relativeLookAt) + _owningAvatar->getPosition());
         }
     }
+
+    if (json.contains(JSON_AVATAR_HEAD_ROTATION)) {
+        setHeadOrientation(quatFromJsonValue(json[JSON_AVATAR_HEAD_ROTATION]));
+    }
 }
diff --git a/libraries/avatars/src/HeadData.h b/libraries/avatars/src/HeadData.h
index be9d54e93e..0bb38c1dad 100644
--- a/libraries/avatars/src/HeadData.h
+++ b/libraries/avatars/src/HeadData.h
@@ -101,6 +101,8 @@ private:
    // privatize copy ctor and assignment operator so copies of this object cannot be made
    HeadData(const HeadData&);
    HeadData& operator= (const HeadData&);
+
+    void setHeadOrientation(const glm::quat& orientation);
 };
 
 #endif // hifi_HeadData_h
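
Reviewer note (not part of the patch). The fix has two cooperating pieces: AvatarData::fromJson() now applies recorded head data after the avatar orientation, because the serialized look-at vector is relative to the avatar's frame, and HeadData::fromJson() routes JSON_AVATAR_HEAD_ROTATION through the new private setHeadOrientation(), which updates only the head's base pitch/yaw/roll. The public setOrientation(), by contrast, also re-points the avatar body toward the head's forward direction, which is how playback previously clobbered the recorded body orientation. The sketch below is a standalone illustration of the math inside setHeadOrientation(); it assumes GLM, substitutes glm::eulerAngles for the engine's safeEulerAngles helper, and uses invented sample rotations.

// Standalone sketch of the decomposition performed by HeadData::setHeadOrientation().
// Assumptions: GLM is available; glm::eulerAngles stands in for hifi's safeEulerAngles;
// the 90-degree body yaw and 10-degree head pitch are made-up demo values.
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <iostream>

int main() {
    // Body yawed 90 degrees about the vertical (Y) axis.
    glm::quat bodyOrientation = glm::angleAxis(glm::radians(90.0f), glm::vec3(0.0f, 1.0f, 0.0f));

    // World-space head orientation: the body yaw plus 10 degrees of pitch.
    glm::quat headWorld = bodyOrientation * glm::angleAxis(glm::radians(10.0f), glm::vec3(1.0f, 0.0f, 0.0f));

    // Strip the body rotation first, as setHeadOrientation() does, then convert
    // the body-relative remainder to Euler angles for _basePitch/_baseYaw/_baseRoll.
    glm::quat headLocal = glm::inverse(bodyOrientation) * headWorld;
    glm::vec3 eulers = glm::degrees(glm::eulerAngles(headLocal));

    // Prints roughly "pitch=10 yaw=0 roll=0": the body yaw does not leak into the
    // head's base angles, and the body orientation is never written back.
    std::cout << "pitch=" << eulers.x << " yaw=" << eulers.y << " roll=" << eulers.z << "\n";
    return 0;
}

Calling the public setOrientation() with the same quaternion would instead rotate the body to face the head's forward vector first; avoiding that feedback during JSON playback is exactly why fromJson() now uses setHeadOrientation() and why the head block is parsed after the avatar orientation.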