Merge pull request #4669 from ctrlaltdavid/20506

CR for Job #20506 - Use audio for mouth when using DDE
Philip Rosedale 2015-04-20 21:19:46 -07:00
commit a0a22ec94f
5 changed files with 64 additions and 39 deletions
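
This change adds a "Use Audio for Mouth" option to the face-tracking menu: checked by default, but visible only while DDE face regression is the active tracker. When enabled, audio-driven mouth blendshapes (jaw open, smile, "mmmm", and funnel shapes) are added on top of the coefficients reported by the DDE tracker. To support this, the audio mouth-shape calculation is factored out of Head::simulate() into a new private method, Head::calculateMouthShapes(), shared by the DDE path and the existing no-tracker audio path.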

interface/src/Application.cpp

@@ -1745,6 +1745,7 @@ void Application::setActiveFaceTracker() {
 #endif
 #ifdef HAVE_DDE
     bool isUsingDDE = Menu::getInstance()->isOptionChecked(MenuOption::DDEFaceRegression);
+    Menu::getInstance()->getActionForOption(MenuOption::UseAudioForMouth)->setVisible(isUsingDDE);
     Menu::getInstance()->getActionForOption(MenuOption::DDEFiltering)->setVisible(isUsingDDE);
     Menu::getInstance()->getActionForOption(MenuOption::ResetDDETracking)->setVisible(isUsingDDE);
     DependencyManager::get<DdeFaceTracker>()->setEnabled(isUsingDDE);
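
Note: the three setVisible() calls above follow one pattern — every DDE-specific menu item is shown or hidden together when the active face tracker changes. A minimal sketch of the same idea as a loop (helper name hypothetical, not part of the diff):

    // Hypothetical helper: toggle all DDE-specific menu items at once when
    // the active face tracker changes.
    static void setDdeMenuItemsVisible(bool visible) {
        for (const QString& option : { MenuOption::UseAudioForMouth,
                                       MenuOption::DDEFiltering,
                                       MenuOption::ResetDDETracking }) {
            Menu::getInstance()->getActionForOption(option)->setVisible(visible);
        }
    }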

interface/src/Menu.cpp

@@ -379,6 +379,8 @@ Menu::Menu() {
     }
 #ifdef HAVE_DDE
     faceTrackingMenu->addSeparator();
+    QAction* useAudioForMouth = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::UseAudioForMouth, 0, true);
+    useAudioForMouth->setVisible(false);
     QAction* ddeFiltering = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::DDEFiltering, 0, true);
     ddeFiltering->setVisible(false);
     QAction* ddeFaceTrackerReset = addActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::ResetDDETracking,
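
Note: judging from the call sites above, the helper's arguments are (menu, option name, shortcut, checked-by-default); the new action starts checked but hidden, and only becomes visible once setActiveFaceTracker() runs with DDE selected. A rough plain-Qt sketch of what the two added lines amount to (the real helper also registers the action in Menu's action hash):

    // Approximate Qt equivalent of the two added lines, for illustration only.
    QAction* useAudioForMouth = faceTrackingMenu->addAction(MenuOption::UseAudioForMouth);
    useAudioForMouth->setCheckable(true);
    useAudioForMouth->setChecked(true);   // checked by default
    useAudioForMouth->setVisible(false);  // hidden until DDE is the active tracker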

interface/src/Menu.h

@@ -187,6 +187,7 @@ namespace MenuOption {
     const QString OctreeStats = "Entity Statistics";
     const QString OffAxisProjection = "Off-Axis Projection";
     const QString OnlyDisplayTopTen = "Only Display Top Ten";
+    const QString PackageModel = "Package Model...";
     const QString Pair = "Pair";
     const QString PipelineWarnings = "Log Render Pipeline Warnings";
     const QString Preferences = "Preferences...";
@@ -245,7 +246,7 @@ namespace MenuOption {
     const QString ToolWindow = "Tool Window";
     const QString TransmitterDrive = "Transmitter Drive";
     const QString TurnWithHead = "Turn using Head";
-    const QString PackageModel = "Package Model...";
+    const QString UseAudioForMouth = "Use Audio for Mouth";
     const QString VisibleToEveryone = "Everyone";
     const QString VisibleToFriends = "Friends";
     const QString VisibleToNoOne = "No one";

interface/src/avatar/Head.cpp

@@ -76,22 +76,6 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
     if (isMine) {
         MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
-        //  Only use face trackers when not playing back a recording.
-        if (!myAvatar->isPlaying()) {
-            FaceTracker* faceTracker = Application::getInstance()->getActiveFaceTracker();
-            _isFaceTrackerConnected = faceTracker != NULL;
-            if (_isFaceTrackerConnected) {
-                _blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
-            }
-        }
-
-        //  Twist the upper body to follow the rotation of the head, but only do this with my avatar,
-        //  since everyone else will see the full joint rotations for other people.
-        const float BODY_FOLLOW_HEAD_YAW_RATE = 0.1f;
-        const float BODY_FOLLOW_HEAD_FACTOR = 0.66f;
-        float currentTwist = getTorsoTwist();
-        setTorsoTwist(currentTwist + (getFinalYaw() * BODY_FOLLOW_HEAD_FACTOR - currentTwist) * BODY_FOLLOW_HEAD_YAW_RATE);
-    }
 
     //  Update audio trailing average for rendering facial animations
     const float AUDIO_AVERAGING_SECS = 0.05f;
     const float AUDIO_LONG_TERM_AVERAGING_SECS = 30.0f;
@@ -103,6 +87,40 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
         _longTermAverageLoudness = glm::mix(_longTermAverageLoudness, _averageLoudness, glm::min(deltaTime / AUDIO_LONG_TERM_AVERAGING_SECS, 1.0f));
     }
 
+        //  Only use face trackers when not playing back a recording.
+        if (!myAvatar->isPlaying()) {
+            FaceTracker* faceTracker = Application::getInstance()->getActiveFaceTracker();
+            _isFaceTrackerConnected = faceTracker != NULL;
+            if (_isFaceTrackerConnected) {
+                _blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
+
+                if (typeid(*faceTracker) == typeid(DdeFaceTracker)
+                    && Menu::getInstance()->isOptionChecked(MenuOption::UseAudioForMouth)) {
+                    calculateMouthShapes();
+
+                    const int JAW_OPEN_BLENDSHAPE = 21;
+                    const int MMMM_BLENDSHAPE = 34;
+                    const int FUNNEL_BLENDSHAPE = 40;
+                    const int SMILE_LEFT_BLENDSHAPE = 28;
+                    const int SMILE_RIGHT_BLENDSHAPE = 29;
+                    _blendshapeCoefficients[JAW_OPEN_BLENDSHAPE] += _audioJawOpen;
+                    _blendshapeCoefficients[SMILE_LEFT_BLENDSHAPE] += _mouth4;
+                    _blendshapeCoefficients[SMILE_RIGHT_BLENDSHAPE] += _mouth4;
+                    _blendshapeCoefficients[MMMM_BLENDSHAPE] += _mouth2;
+                    _blendshapeCoefficients[FUNNEL_BLENDSHAPE] += _mouth3;
+                }
+            }
+        }
+
+        //  Twist the upper body to follow the rotation of the head, but only do this with my avatar,
+        //  since everyone else will see the full joint rotations for other people.
+        const float BODY_FOLLOW_HEAD_YAW_RATE = 0.1f;
+        const float BODY_FOLLOW_HEAD_FACTOR = 0.66f;
+        float currentTwist = getTorsoTwist();
+        setTorsoTwist(currentTwist + (getFinalYaw() * BODY_FOLLOW_HEAD_FACTOR - currentTwist) * BODY_FOLLOW_HEAD_YAW_RATE);
+    }
+
     if (!(_isFaceTrackerConnected || billboard)) {
         //  Update eye saccades
         const float AVERAGE_MICROSACCADE_INTERVAL = 0.50f;
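
Note: the integer constants in the block above are indices into the Faceshift-style blendshape coefficient list; the audio-derived values from calculateMouthShapes() are simply added on top of what the DDE tracker reports. A self-contained sketch of that additive mix (function and container are illustrative; the diff does this inline on _blendshapeCoefficients):

    #include <vector>

    // Blendshape indices, copied from the hunk above.
    const int JAW_OPEN_BLENDSHAPE    = 21;
    const int SMILE_LEFT_BLENDSHAPE  = 28;
    const int SMILE_RIGHT_BLENDSHAPE = 29;
    const int MMMM_BLENDSHAPE        = 34;
    const int FUNNEL_BLENDSHAPE      = 40;

    // Layer the audio-driven mouth values on top of the tracker's output.
    void addAudioMouthShapes(std::vector<float>& coefficients, float audioJawOpen,
                             float mouth2, float mouth3, float mouth4) {
        coefficients[JAW_OPEN_BLENDSHAPE]    += audioJawOpen;
        coefficients[SMILE_LEFT_BLENDSHAPE]  += mouth4;
        coefficients[SMILE_RIGHT_BLENDSHAPE] += mouth4;
        coefficients[MMMM_BLENDSHAPE]        += mouth2;
        coefficients[FUNNEL_BLENDSHAPE]      += mouth3;
    }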
@@ -177,7 +195,34 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
         }
 
         //  use data to update fake Faceshift blendshape coefficients
+        calculateMouthShapes();
+        DependencyManager::get<Faceshift>()->updateFakeCoefficients(_leftEyeBlink,
+                                                                    _rightEyeBlink,
+                                                                    _browAudioLift,
+                                                                    _audioJawOpen,
+                                                                    _mouth2,
+                                                                    _mouth3,
+                                                                    _mouth4,
+                                                                    _blendshapeCoefficients);
+    } else {
+        _saccade = glm::vec3();
+    }
+
+    if (!isMine) {
+        _faceModel.setLODDistance(static_cast<Avatar*>(_owningAvatar)->getLODDistance());
+    }
+
+    _leftEyePosition = _rightEyePosition = getPosition();
+    if (!billboard) {
+        _faceModel.simulate(deltaTime);
+        if (!_faceModel.getEyePositions(_leftEyePosition, _rightEyePosition)) {
+            static_cast<Avatar*>(_owningAvatar)->getSkeletonModel().getEyePositions(_leftEyePosition, _rightEyePosition);
+        }
+    }
+
+    _eyePosition = calculateAverageEyePosition();
+}
+
+void Head::calculateMouthShapes() {
     const float JAW_OPEN_SCALE = 0.015f;
     const float JAW_OPEN_RATE = 0.9f;
     const float JAW_CLOSE_RATE = 0.90f;
@@ -203,31 +248,6 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
     _mouth3 = glm::mix(_audioJawOpen, _mouth3, FUNNEL_PERIOD + randFloat() * FUNNEL_RANDOM_PERIOD);
     _mouth2 = glm::mix(_audioJawOpen * MMMM_POWER, _mouth2, MMMM_PERIOD + randFloat() * MMMM_RANDOM_PERIOD);
     _mouth4 = glm::mix(_audioJawOpen, _mouth4, SMILE_PERIOD + randFloat() * SMILE_RANDOM_PERIOD);
-    DependencyManager::get<Faceshift>()->updateFakeCoefficients(_leftEyeBlink,
-                                                                _rightEyeBlink,
-                                                                _browAudioLift,
-                                                                _audioJawOpen,
-                                                                _mouth2,
-                                                                _mouth3,
-                                                                _mouth4,
-                                                                _blendshapeCoefficients);
-    } else {
-        _saccade = glm::vec3();
-    }
-
-    if (!isMine) {
-        _faceModel.setLODDistance(static_cast<Avatar*>(_owningAvatar)->getLODDistance());
-    }
-
-    _leftEyePosition = _rightEyePosition = getPosition();
-    if (!billboard) {
-        _faceModel.simulate(deltaTime);
-        if (!_faceModel.getEyePositions(_leftEyePosition, _rightEyePosition)) {
-            static_cast<Avatar*>(_owningAvatar)->getSkeletonModel().getEyePositions(_leftEyePosition, _rightEyePosition);
-        }
-    }
-
-    _eyePosition = calculateAverageEyePosition();
 }
 
 void Head::relaxLean(float deltaTime) {
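
Note: the loudness averages near the top of simulate() and the mouth-shape values above all rely on the same glm::mix() idiom — an exponential moving average that eases a value toward its target instead of jumping, with the blend factor taken either from the frame time or from a fixed period plus random jitter. A standalone sketch of the idiom (names hypothetical):

    #include <algorithm>

    // glm::mix(a, b, t) is a lerp: a * (1.0f - t) + b * t.
    // Mouth shapes: ease `value` toward `target`, keeping `keep` of the old
    // value each step (keep = PERIOD + randFloat() * RANDOM_PERIOD above).
    float easeToward(float target, float value, float keep) {
        return target * (1.0f - keep) + value * keep;
    }

    // Loudness averages: scale the blend by frame time so the smoothing is
    // roughly frame-rate independent.
    float trailingAverage(float average, float sample, float deltaTime, float averagingSecs) {
        float t = std::min(deltaTime / averagingSecs, 1.0f);
        return average + (sample - average) * t;  // == glm::mix(average, sample, t)
    }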

interface/src/avatar/Head.h

@@ -154,6 +154,7 @@ private:
     // private methods
     void renderLookatVectors(glm::vec3 leftEyePosition, glm::vec3 rightEyePosition, glm::vec3 lookatPosition);
+    void calculateMouthShapes();
 
     friend class FaceModel;
 };