Mirror of https://github.com/lubosz/overte.git, synced 2025-08-04 19:39:44 +02:00

Merge pull request #10492 from ctrlaltdavid/21359

Fix eye behavior in recording playback

Commit 978eb0dfa9, 7 changed files with 119 additions and 138 deletions
@@ -44,17 +44,14 @@ glm::quat MyHead::getCameraOrientation() const {
 void MyHead::simulate(float deltaTime) {
     auto player = DependencyManager::get<recording::Deck>();
     // Only use face trackers when not playing back a recording.
-    if (player->isPlaying()) {
-        Parent::simulate(deltaTime);
-    } else {
-        computeAudioLoudness(deltaTime);
-
+    if (!player->isPlaying()) {
         FaceTracker* faceTracker = qApp->getActiveFaceTracker();
-        _isFaceTrackerConnected = faceTracker && !faceTracker->isMuted();
+        _isFaceTrackerConnected = faceTracker != nullptr && !faceTracker->isMuted();
         if (_isFaceTrackerConnected) {
             _transientBlendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
 
             if (typeid(*faceTracker) == typeid(DdeFaceTracker)) {
 
                 if (Menu::getInstance()->isOptionChecked(MenuOption::UseAudioForMouth)) {
                     calculateMouthShapes(deltaTime);
@@ -71,19 +68,9 @@ void MyHead::simulate(float deltaTime) {
                 }
                 applyEyelidOffset(getFinalOrientationInWorldFrame());
             }
-        } else {
-            computeFaceMovement(deltaTime);
         }
 
         auto eyeTracker = DependencyManager::get<EyeTracker>();
-        _isEyeTrackerConnected = eyeTracker && eyeTracker->isTracking();
-        if (_isEyeTrackerConnected) {
-            // TODO? figure out where EyeTracker data harvested. Move it here?
-            _saccade = glm::vec3();
-        } else {
-            computeEyeMovement(deltaTime);
-        }
-
+        _isEyeTrackerConnected = eyeTracker->isTracking();
     }
-    computeEyePosition();
     Parent::simulate(deltaTime);
 }

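The two hunks above gate live tracker input on recording playback: face and eye trackers are only polled when no recording is playing, so recorded head data is not clobbered every frame. A minimal standalone sketch of that gating pattern; Deck and FaceTracker here are hypothetical stand-ins, not the engine's recording::Deck and tracker classes:

```cpp
// Sketch of the playback gate in MyHead::simulate, with stand-in types.
struct Deck { bool playing = false; bool isPlaying() const { return playing; } };
struct FaceTracker { bool muted = false; bool isMuted() const { return muted; } };

struct HeadSketch {
    bool _isFaceTrackerConnected = false;

    void simulate(float /*deltaTime*/, const Deck& player, FaceTracker* faceTracker) {
        // Only use face trackers when not playing back a recording; during
        // playback the recorded tracker state must be left untouched.
        if (!player.isPlaying()) {
            _isFaceTrackerConnected = faceTracker != nullptr && !faceTracker->isMuted();
        }
        // ...base-class simulation would continue here...
    }
};
```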
@@ -23,8 +23,6 @@
 #include "Avatar.h"
 
-const float NORMAL_HZ = 60.0f; // the update rate the constant values were tuned for
-
 using namespace std;
 
 static bool disableEyelidAdjustment { false };
 
@@ -43,7 +41,9 @@ void Head::reset() {
     _baseYaw = _basePitch = _baseRoll = 0.0f;
 }
 
-void Head::computeAudioLoudness(float deltaTime) {
+void Head::simulate(float deltaTime) {
+    const float NORMAL_HZ = 60.0f; // the update rate the constant values were tuned for
+
     // grab the audio loudness from the owning avatar, if we have one
     float audioLoudness = _owningAvatar ? _owningAvatar->getAudioLoudness() : 0.0f;
 
@@ -58,13 +58,8 @@ void Head::computeAudioLoudness(float deltaTime) {
         _longTermAverageLoudness = glm::mix(_longTermAverageLoudness, _averageLoudness, glm::min(deltaTime / AUDIO_LONG_TERM_AVERAGING_SECS, 1.0f));
     }
 
-    float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
-    _audioAttack = audioAttackAveragingRate * _audioAttack +
-        (1.0f - audioAttackAveragingRate) * fabs((audioLoudness - _longTermAverageLoudness) - _lastLoudness);
-    _lastLoudness = (audioLoudness - _longTermAverageLoudness);
-}
-
-void Head::computeEyeMovement(float deltaTime) {
     if (!_isFaceTrackerConnected) {
         if (!_isEyeTrackerConnected) {
             // Update eye saccades
             const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
             const float AVERAGE_SACCADE_INTERVAL = 6.0f;

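The hunk above drops the tail of computeAudioLoudness(); the attack detector reappears inline later in simulate(). Two smoothing constructs are at work: a dt/tau blend for the long-term loudness average, and a frame-rate-compensated weighted average for the attack detector. A minimal, self-contained sketch; AUDIO_LONG_TERM_AVERAGING_SECS and the sample loudness are assumed values for illustration:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// glm::mix(a, b, t) on floats is plain linear interpolation: a + (b - a) * t.
static float mix(float a, float b, float t) { return a + (b - a) * t; }

int main() {
    const float NORMAL_HZ = 60.0f;                       // rate the constants were tuned for
    const float AUDIO_LONG_TERM_AVERAGING_SECS = 30.0f;  // assumed value for illustration
    float longTermAverageLoudness = 0.0f;
    float audioAttack = 0.0f;
    float lastLoudness = 0.0f;

    float deltaTime = 1.0f / 60.0f;
    float audioLoudness = 500.0f;                        // pretend the avatar is talking

    // Long-term average: a dt / tau blend factor keeps the effective
    // averaging window in seconds, independent of frame rate.
    longTermAverageLoudness = mix(longTermAverageLoudness, audioLoudness,
                                  std::min(deltaTime / AUDIO_LONG_TERM_AVERAGING_SECS, 1.0f));

    // Attack detector: (10 - dt * 60) / 10 is 0.9 at exactly 60 Hz, so
    // audioAttack is an exponentially weighted average of loudness jumps.
    float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f;
    audioAttack = audioAttackAveragingRate * audioAttack +
        (1.0f - audioAttackAveragingRate) *
        std::fabs((audioLoudness - longTermAverageLoudness) - lastLoudness);
    lastLoudness = audioLoudness - longTermAverageLoudness;

    std::printf("average=%.3f attack=%.3f\n", longTermAverageLoudness, audioAttack);
    return 0;
}
```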
@@ -78,6 +73,9 @@ void Head::computeEyeMovement(float deltaTime) {
                 _saccadeTarget = SACCADE_MAGNITUDE * randVector();
             }
             _saccade += (_saccadeTarget - _saccade) * pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
+        } else {
+            _saccade = glm::vec3();
+        }
 
         // Detect transition from talking to not; force blink after that and a delay
         bool forceBlink = false;

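The saccade model above retargets the eyes at random intervals and eases toward the target with an exponential step whose half-life is expressed in nominal frames. A one-axis sketch; randVector() is replaced by a scalar random draw, and the two magnitude constants are assumed values:

```cpp
#include <cstdlib>
#include <cmath>

static float frand() { return (float)std::rand() / (float)RAND_MAX; }

// One-axis sketch of the saccade update; the real code works on glm::vec3.
struct SaccadeSketch {
    float target = 0.0f;
    float value = 0.0f;

    void update(float deltaTime) {
        const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;  // seconds
        const float AVERAGE_SACCADE_INTERVAL = 6.0f;
        const float MICROSACCADE_MAGNITUDE = 0.002f;       // assumed magnitudes
        const float SACCADE_MAGNITUDE = 0.04f;
        const float NOMINAL_FRAME_RATE = 60.0f;

        // Retarget with probability proportional to deltaTime, so the average
        // interval between saccades is frame-rate independent.
        if (frand() < deltaTime / AVERAGE_MICROSACCADE_INTERVAL) {
            target = MICROSACCADE_MAGNITUDE * (2.0f * frand() - 1.0f);
        } else if (frand() < deltaTime / AVERAGE_SACCADE_INTERVAL) {
            target = SACCADE_MAGNITUDE * (2.0f * frand() - 1.0f);
        }
        // pow(0.5, frames) moves half the remaining distance per nominal frame.
        value += (target - value) * std::pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
    }
};
```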
@@ -90,6 +88,18 @@ void Head::computeEyeMovement(float deltaTime) {
             forceBlink = true;
         }
 
+        // Update audio attack data for facial animation (eyebrows and mouth)
+        float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
+        _audioAttack = audioAttackAveragingRate * _audioAttack +
+            (1.0f - audioAttackAveragingRate) * fabs((audioLoudness - _longTermAverageLoudness) - _lastLoudness);
+        _lastLoudness = (audioLoudness - _longTermAverageLoudness);
+
+        const float BROW_LIFT_THRESHOLD = 100.0f;
+        if (_audioAttack > BROW_LIFT_THRESHOLD) {
+            _browAudioLift += sqrtf(_audioAttack) * 0.01f;
+        }
+        _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);
+
         const float BLINK_SPEED = 10.0f;
         const float BLINK_SPEED_VARIABILITY = 1.0f;
         const float BLINK_START_VARIABILITY = 0.25f;

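The brow lift added above decays by a fixed factor each frame (note the assignment embedded in the clamp call: _browAudioLift *= 0.7f), which is only exact at the 60 Hz the constants were tuned for. A hedged sketch of a frame-rate-independent variant; this is an illustration, not what the engine does:

```cpp
#include <algorithm>
#include <cmath>

// Frame-rate-independent decay (assumption, for comparison with the fixed
// per-frame 0.7 factor above): scale the exponent by elapsed nominal frames.
float decayBrowLift(float browAudioLift, float deltaTime) {
    const float NORMAL_HZ = 60.0f;
    browAudioLift *= std::pow(0.7f, deltaTime * NORMAL_HZ);  // == 0.7 when dt == 1/60
    return std::clamp(browAudioLift, 0.0f, 1.0f);            // requires C++17
}
```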
@@ -127,17 +137,6 @@ void Head::computeEyeMovement(float deltaTime) {
             }
         }
 
-    applyEyelidOffset(getOrientation());
-}
-
-void Head::computeFaceMovement(float deltaTime) {
-    // Update audio attack data for facial animation (eyebrows and mouth)
-    const float BROW_LIFT_THRESHOLD = 100.0f;
-    if (_audioAttack > BROW_LIFT_THRESHOLD) {
-        _browAudioLift += sqrtf(_audioAttack) * 0.01f;
-    }
-    _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);
-
         // use data to update fake Faceshift blendshape coefficients
         calculateMouthShapes(deltaTime);
         FaceTracker::updateFakeCoefficients(_leftEyeBlink,
@@ -148,9 +147,13 @@ void Head::computeFaceMovement(float deltaTime) {
             _mouth3,
             _mouth4,
             _transientBlendshapeCoefficients);
-}
-
-void Head::computeEyePosition() {
+        applyEyelidOffset(getOrientation());
+
+    } else {
+        _saccade = glm::vec3();
+    }
+
     _leftEyePosition = _rightEyePosition = getPosition();
     if (_owningAvatar) {
         auto skeletonModel = static_cast<Avatar*>(_owningAvatar)->getSkeletonModel();

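The calculateMouthShapes() plus FaceTracker::updateFakeCoefficients() pair above synthesizes "fake" tracker output: procedural blink, brow, and mouth values are written into the same blendshape slots a live face tracker would fill, so the rest of the pipeline is tracker-agnostic. A sketch of the idea; the indices and signature are illustrative, not the engine's actual blendshape layout:

```cpp
#include <vector>

// Illustrative blendshape slots; the real engine uses the Faceshift layout.
enum BlendshapeIndex { EyeBlink_L = 0, EyeBlink_R, BrowsU_C, JawOpen, NUM_SHAPES };

// Write procedural values into the coefficient vector a tracker would fill.
void updateFakeCoefficients(float leftBlink, float rightBlink, float browUp,
                            float jawOpen, std::vector<float>& coefficients) {
    coefficients.assign(NUM_SHAPES, 0.0f);
    coefficients[EyeBlink_L] = leftBlink;
    coefficients[EyeBlink_R] = rightBlink;
    coefficients[BrowsU_C] = browUp;
    coefficients[JawOpen] = jawOpen;
}
```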
@@ -161,13 +164,6 @@ void Head::computeEyePosition() {
     _eyePosition = 0.5f * (_leftEyePosition + _rightEyePosition);
 }
 
-void Head::simulate(float deltaTime) {
-    computeAudioLoudness(deltaTime);
-    computeFaceMovement(deltaTime);
-    computeEyeMovement(deltaTime);
-    computeEyePosition();
-}
-
 void Head::calculateMouthShapes(float deltaTime) {
     const float JAW_OPEN_SCALE = 0.015f;
     const float JAW_OPEN_RATE = 0.9f;

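The eye-position code in the two hunks above defaults both eyes to the head position, lets the skeleton override them when eye joints exist, and averages the result into _eyePosition. A sketch of that pattern with simplified stand-in types:

```cpp
#include <glm/glm.hpp>

// Stand-in for the engine's skeleton model; returns fixed joint positions.
struct SkeletonModelSketch {
    bool getLeftEyePosition(glm::vec3& out) const { out = {-0.03f, 1.6f, 0.0f}; return true; }
    bool getRightEyePosition(glm::vec3& out) const { out = {0.03f, 1.6f, 0.0f}; return true; }
};

glm::vec3 computeEyeMidpoint(const glm::vec3& headPosition,
                             const SkeletonModelSketch* skeleton,
                             glm::vec3& leftEye, glm::vec3& rightEye) {
    leftEye = rightEye = headPosition;          // default when no skeleton is available
    if (skeleton) {
        skeleton->getLeftEyePosition(leftEye);  // override from joints when present
        skeleton->getRightEyePosition(rightEye);
    }
    return 0.5f * (leftEye + rightEye);         // midpoint drives the gaze logic
}
```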
@@ -83,11 +83,6 @@ public:
     float getTimeWithoutTalking() const { return _timeWithoutTalking; }
 
 protected:
-    void computeAudioLoudness(float deltaTime);
-    void computeEyeMovement(float deltaTime);
-    void computeFaceMovement(float deltaTime);
-    void computeEyePosition();
-
     // disallow copies of the Head, copy of owning Avatar is disallowed too
     Head(const Head&);
     Head& operator= (const Head&);

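With the compute* declarations gone, the header keeps the old C++03 idiom of declaring, but never defining, the copy constructor and assignment operator. For comparison, the modern C++11 spelling of the same intent is = delete, which produces a clearer error at the call site:

```cpp
// Equivalent non-copyable idiom in C++11 and later (a comparison sketch,
// not the project's actual header):
class Head {
public:
    Head() = default;
    Head(const Head&) = delete;             // copying a Head (and implicitly its
    Head& operator=(const Head&) = delete;  // owning Avatar) is disallowed
};
```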
@@ -2030,17 +2030,6 @@ void AvatarData::fromJson(const QJsonObject& json, bool useFrameSkeleton) {
         version = JSON_AVATAR_JOINT_ROTATIONS_IN_RELATIVE_FRAME_VERSION;
     }
 
-    // The head setOrientation likes to overwrite the avatar orientation,
-    // so lets do the head first
-    // Most head data is relative to the avatar, and needs no basis correction,
-    // but the lookat vector does need correction
-    if (json.contains(JSON_AVATAR_HEAD)) {
-        if (!_headData) {
-            _headData = new HeadData(this);
-        }
-        _headData->fromJson(json[JSON_AVATAR_HEAD].toObject());
-    }
-
     if (json.contains(JSON_AVATAR_BODY_MODEL)) {
         auto bodyModelURL = json[JSON_AVATAR_BODY_MODEL].toString();
         if (useFrameSkeleton && bodyModelURL != getSkeletonModelURL().toString()) {
@@ -2079,6 +2068,14 @@ void AvatarData::fromJson(const QJsonObject& json, bool useFrameSkeleton) {
         setOrientation(currentBasis->getRotation());
     }
 
+    // Do after avatar orientation because head look-at needs avatar orientation.
+    if (json.contains(JSON_AVATAR_HEAD)) {
+        if (!_headData) {
+            _headData = new HeadData(this);
+        }
+        _headData->fromJson(json[JSON_AVATAR_HEAD].toObject());
+    }
+
     if (json.contains(JSON_AVATAR_SCALE)) {
         setTargetScale((float)json[JSON_AVATAR_SCALE].toDouble());
     }

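The two hunks above move head restoration to after the avatar's orientation is set. The reason shows up in HeadData::fromJson further down: the recorded look-at point is stored relative to the avatar, so converting it back to world space needs the avatar's final orientation and position. A sketch of that conversion, mirroring the setLookAtPosition() line in the HeadData diff below:

```cpp
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

// Convert an avatar-relative look-at vector to a world-space position.
// This only yields the right answer once the avatar's orientation and
// position have been restored, hence the reordering in fromJson above.
glm::vec3 lookAtToWorld(const glm::quat& avatarOrientation,
                        const glm::vec3& avatarPosition,
                        const glm::vec3& relativeLookAt) {
    return avatarOrientation * relativeLookAt + avatarPosition;
}
```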
@@ -52,6 +52,13 @@ glm::quat HeadData::getOrientation() const {
     return _owningAvatar->getOrientation() * getRawOrientation();
 }
 
+void HeadData::setHeadOrientation(const glm::quat& orientation) {
+    glm::quat bodyOrientation = _owningAvatar->getOrientation();
+    glm::vec3 eulers = glm::degrees(safeEulerAngles(glm::inverse(bodyOrientation) * orientation));
+    _basePitch = eulers.x;
+    _baseYaw = eulers.y;
+    _baseRoll = eulers.z;
+}
 
 void HeadData::setOrientation(const glm::quat& orientation) {
     // rotate body about vertical axis
@@ -61,10 +68,7 @@ void HeadData::setOrientation(const glm::quat& orientation) {
     _owningAvatar->setOrientation(bodyOrientation);
 
     // the rest goes to the head
-    glm::vec3 eulers = glm::degrees(safeEulerAngles(glm::inverse(bodyOrientation) * orientation));
-    _basePitch = eulers.x;
-    _baseYaw = eulers.y;
-    _baseRoll = eulers.z;
+    setHeadOrientation(orientation);
 }
 
 //Lazily construct a lookup map from the blendshapes

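The new setHeadOrientation() factors the Euler decomposition out of setOrientation(): a world-space head rotation is expressed in the body's frame, then stored as pitch/yaw/roll in degrees. A compilable sketch of the same math; safeEulerAngles is the engine's own helper, so glm::eulerAngles stands in for it here:

```cpp
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

// Express a world-space head rotation in the body's frame and report it as
// pitch/yaw/roll degrees (glm::eulerAngles returns radians).
void worldToHeadEulers(const glm::quat& bodyOrientation, const glm::quat& headWorld,
                       float& pitchDeg, float& yawDeg, float& rollDeg) {
    glm::quat headLocal = glm::inverse(bodyOrientation) * headWorld; // body-relative
    glm::vec3 eulers = glm::degrees(glm::eulerAngles(headLocal));
    pitchDeg = eulers.x;
    yawDeg = eulers.y;
    rollDeg = eulers.z;
}
```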
@@ -173,14 +177,14 @@ void HeadData::fromJson(const QJsonObject& json) {
         }
     }
 
-    if (json.contains(JSON_AVATAR_HEAD_ROTATION)) {
-        setOrientation(quatFromJsonValue(json[JSON_AVATAR_HEAD_ROTATION]));
-    }
-
     if (json.contains(JSON_AVATAR_HEAD_LOOKAT)) {
         auto relativeLookAt = vec3FromJsonValue(json[JSON_AVATAR_HEAD_LOOKAT]);
         if (glm::length2(relativeLookAt) > 0.01f) {
             setLookAtPosition((_owningAvatar->getOrientation() * relativeLookAt) + _owningAvatar->getPosition());
         }
     }
+
+    if (json.contains(JSON_AVATAR_HEAD_ROTATION)) {
+        setHeadOrientation(quatFromJsonValue(json[JSON_AVATAR_HEAD_ROTATION]));
+    }
 }

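The relocated JSON_AVATAR_HEAD_ROTATION block now calls setHeadOrientation() instead of setOrientation(), and this is the core of the playback fix: setOrientation() first turns the owning avatar's body about the vertical axis and gives only the remainder to the head, while setHeadOrientation() leaves the body alone. A simplified sketch of the two paths; the body-yaw split is reduced to its essence and the types are stand-ins:

```cpp
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

struct AvatarSketch {
    glm::quat bodyOrientation{1.0f, 0.0f, 0.0f, 0.0f}; // w, x, y, z
    glm::quat headLocal{1.0f, 0.0f, 0.0f, 0.0f};

    // Live path: the body turns to absorb the yaw, the head keeps the rest.
    void setOrientation(const glm::quat& world) {
        glm::vec3 up(0.0f, 1.0f, 0.0f);
        bodyOrientation = glm::angleAxis(glm::yaw(world), up); // body yaws
        headLocal = glm::inverse(bodyOrientation) * world;     // remainder to head
    }

    // Playback path: restore the recorded head pose without turning the body.
    void setHeadOrientation(const glm::quat& world) {
        headLocal = glm::inverse(bodyOrientation) * world;
    }
};
```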
@@ -101,6 +101,8 @@ private:
     // privatize copy ctor and assignment operator so copies of this object cannot be made
     HeadData(const HeadData&);
     HeadData& operator= (const HeadData&);
+
+    void setHeadOrientation(const glm::quat& orientation);
 };
 
 #endif // hifi_HeadData_h