mirror of https://github.com/lubosz/overte.git, synced 2025-04-07 03:22:09 +02:00

Merge pull request #16436 from highfidelity/master

Merge master into instancing

Commit: 05ac16536e
66 changed files with 1483 additions and 1991 deletions
@@ -376,7 +376,6 @@ void Agent::executeScript() {
    // setup an Avatar for the script to use
    auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
    scriptedAvatar->setID(getSessionUUID());
-    scriptedAvatar->setForceFaceTrackerConnected(true);

    // call model URL setters with empty URLs so our avatar, if user, will have the default models
    scriptedAvatar->setSkeletonModelURL(QUrl());
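Throughout the hunks below, `DependencyManager::set<T>()` / `get<T>()` is the engine's type-indexed singleton registry. A minimal sketch of the idea, with illustrative names (this is not the engine's actual implementation):

    #include <memory>
    #include <typeindex>
    #include <unordered_map>

    // Hypothetical, simplified analogue of the engine's DependencyManager.
    class Registry {
    public:
        // Construct and store one instance per type.
        template <typename T, typename... Args>
        static std::shared_ptr<T> set(Args&&... args) {
            auto instance = std::make_shared<T>(std::forward<Args>(args)...);
            instances()[std::type_index(typeid(T))] = instance;
            return instance;
        }
        // Look the instance back up by type; null if never set.
        template <typename T>
        static std::shared_ptr<T> get() {
            auto it = instances().find(std::type_index(typeid(T)));
            return it == instances().end() ? nullptr
                                           : std::static_pointer_cast<T>(it->second);
        }
    private:
        static std::unordered_map<std::type_index, std::shared_ptr<void>>& instances() {
            static std::unordered_map<std::type_index, std::shared_ptr<void>> map;
            return map;
        }
    };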
@@ -279,18 +279,6 @@ void ScriptableAvatar::setJointMappingsFromNetworkReply() {
    networkReply->deleteLater();
}
-
-void ScriptableAvatar::setHasProceduralBlinkFaceMovement(bool hasProceduralBlinkFaceMovement) {
-    _headData->setHasProceduralBlinkFaceMovement(hasProceduralBlinkFaceMovement);
-}
-
-void ScriptableAvatar::setHasProceduralEyeFaceMovement(bool hasProceduralEyeFaceMovement) {
-    _headData->setHasProceduralEyeFaceMovement(hasProceduralEyeFaceMovement);
-}
-
-void ScriptableAvatar::setHasAudioEnabledFaceMovement(bool hasAudioEnabledFaceMovement) {
-    _headData->setHasAudioEnabledFaceMovement(hasAudioEnabledFaceMovement);
-}

AvatarEntityMap ScriptableAvatar::getAvatarEntityData() const {
    // DANGER: Now that we store the AvatarEntityData in packed format this call is potentially Very Expensive!
    // Avoid calling this method if possible.
@@ -153,13 +153,6 @@ public:

    virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking = false) override;
-
-    void setHasProceduralBlinkFaceMovement(bool hasProceduralBlinkFaceMovement);
-    bool getHasProceduralBlinkFaceMovement() const override { return _headData->getHasProceduralBlinkFaceMovement(); }
-    void setHasProceduralEyeFaceMovement(bool hasProceduralEyeFaceMovement);
-    bool getHasProceduralEyeFaceMovement() const override { return _headData->getHasProceduralEyeFaceMovement(); }
-    void setHasAudioEnabledFaceMovement(bool hasAudioEnabledFaceMovement);
-    bool getHasAudioEnabledFaceMovement() const override { return _headData->getHasAudioEnabledFaceMovement(); }

    /**jsdoc
     * Gets details of all avatar entities.
     * <p><strong>Warning:</strong> Potentially an expensive call. Do not use if possible.</p>
@@ -211,10 +211,10 @@ endif()
link_hifi_libraries(
    shared workload task octree ktx gpu gl procedural graphics graphics-scripting render
    pointers recording hfm fbx networking material-networking
-    model-networking model-baker entities avatars trackers
+    model-networking model-baker entities avatars
    audio audio-client animation script-engine physics
    render-utils entities-renderer avatars-renderer ui qml auto-updater midi
-    controllers plugins image trackers platform
+    controllers plugins image platform
    ui-plugins display-plugins input-plugins
    # Platform specific GL libraries
    ${PLATFORM_GL_BACKEND}
@@ -166,9 +166,70 @@
    { "from": "Standard.LeftEye", "to": "Actions.LeftEye" },
    { "from": "Standard.RightEye", "to": "Actions.RightEye" },

+    { "from": "Standard.LeftEyeBlink", "to": "Actions.LeftEyeBlink" },
+    { "from": "Standard.RightEyeBlink", "to": "Actions.RightEyeBlink" },
+
+    { "from": "Standard.EyeBlink_L", "to": "Actions.EyeBlink_L" },
+    { "from": "Standard.EyeBlink_R", "to": "Actions.EyeBlink_R" },
+    { "from": "Standard.EyeSquint_L", "to": "Actions.EyeSquint_L" },
+    { "from": "Standard.EyeSquint_R", "to": "Actions.EyeSquint_R" },
+    { "from": "Standard.EyeDown_L", "to": "Actions.EyeDown_L" },
+    { "from": "Standard.EyeDown_R", "to": "Actions.EyeDown_R" },
+    { "from": "Standard.EyeIn_L", "to": "Actions.EyeIn_L" },
+    { "from": "Standard.EyeIn_R", "to": "Actions.EyeIn_R" },
+    { "from": "Standard.EyeOpen_L", "to": "Actions.EyeOpen_L" },
+    { "from": "Standard.EyeOpen_R", "to": "Actions.EyeOpen_R" },
+    { "from": "Standard.EyeOut_L", "to": "Actions.EyeOut_L" },
+    { "from": "Standard.EyeOut_R", "to": "Actions.EyeOut_R" },
+    { "from": "Standard.EyeUp_L", "to": "Actions.EyeUp_L" },
+    { "from": "Standard.EyeUp_R", "to": "Actions.EyeUp_R" },
+    { "from": "Standard.BrowsD_L", "to": "Actions.BrowsD_L" },
+    { "from": "Standard.BrowsD_R", "to": "Actions.BrowsD_R" },
+    { "from": "Standard.BrowsU_C", "to": "Actions.BrowsU_C" },
+    { "from": "Standard.BrowsU_L", "to": "Actions.BrowsU_L" },
+    { "from": "Standard.BrowsU_R", "to": "Actions.BrowsU_R" },
+    { "from": "Standard.JawFwd", "to": "Actions.JawFwd" },
+    { "from": "Standard.JawLeft", "to": "Actions.JawLeft" },
+    { "from": "Standard.JawOpen", "to": "Actions.JawOpen" },
+    { "from": "Standard.JawRight", "to": "Actions.JawRight" },
+    { "from": "Standard.MouthLeft", "to": "Actions.MouthLeft" },
+    { "from": "Standard.MouthRight", "to": "Actions.MouthRight" },
+    { "from": "Standard.MouthFrown_L", "to": "Actions.MouthFrown_L" },
+    { "from": "Standard.MouthFrown_R", "to": "Actions.MouthFrown_R" },
+    { "from": "Standard.MouthSmile_L", "to": "Actions.MouthSmile_L" },
+    { "from": "Standard.MouthSmile_R", "to": "Actions.MouthSmile_R" },
+    { "from": "Standard.MouthDimple_L", "to": "Actions.MouthDimple_L" },
+    { "from": "Standard.MouthDimple_R", "to": "Actions.MouthDimple_R" },
+    { "from": "Standard.LipsStretch_L", "to": "Actions.LipsStretch_L" },
+    { "from": "Standard.LipsStretch_R", "to": "Actions.LipsStretch_R" },
+    { "from": "Standard.LipsUpperClose", "to": "Actions.LipsUpperClose" },
+    { "from": "Standard.LipsLowerClose", "to": "Actions.LipsLowerClose" },
+    { "from": "Standard.LipsUpperOpen", "to": "Actions.LipsUpperOpen" },
+    { "from": "Standard.LipsLowerOpen", "to": "Actions.LipsLowerOpen" },
+    { "from": "Standard.LipsFunnel", "to": "Actions.LipsFunnel" },
+    { "from": "Standard.LipsPucker", "to": "Actions.LipsPucker" },
+    { "from": "Standard.Puff", "to": "Actions.Puff" },
+    { "from": "Standard.CheekSquint_L", "to": "Actions.CheekSquint_L" },
+    { "from": "Standard.CheekSquint_R", "to": "Actions.CheekSquint_R" },
+    { "from": "Standard.MouthClose", "to": "Actions.MouthClose" },
+    { "from": "Standard.MouthUpperUp_L", "to": "Actions.MouthUpperUp_L" },
+    { "from": "Standard.MouthUpperUp_R", "to": "Actions.MouthUpperUp_R" },
+    { "from": "Standard.MouthLowerDown_L", "to": "Actions.MouthLowerDown_L" },
+    { "from": "Standard.MouthLowerDown_R", "to": "Actions.MouthLowerDown_R" },
+    { "from": "Standard.MouthPress_L", "to": "Actions.MouthPress_L" },
+    { "from": "Standard.MouthPress_R", "to": "Actions.MouthPress_R" },
+    { "from": "Standard.MouthShrugLower", "to": "Actions.MouthShrugLower" },
+    { "from": "Standard.MouthShrugUpper", "to": "Actions.MouthShrugUpper" },
+    { "from": "Standard.NoseSneer_L", "to": "Actions.NoseSneer_L" },
+    { "from": "Standard.NoseSneer_R", "to": "Actions.NoseSneer_R" },
+    { "from": "Standard.TongueOut", "to": "Actions.TongueOut" },
+    { "from": "Standard.UserBlendshape0", "to": "Actions.UserBlendshape0" },
+    { "from": "Standard.UserBlendshape1", "to": "Actions.UserBlendshape1" },
+    { "from": "Standard.UserBlendshape2", "to": "Actions.UserBlendshape2" },
+    { "from": "Standard.UserBlendshape3", "to": "Actions.UserBlendshape3" },
+    { "from": "Standard.UserBlendshape4", "to": "Actions.UserBlendshape4" },
+    { "from": "Standard.UserBlendshape5", "to": "Actions.UserBlendshape5" },
+    { "from": "Standard.UserBlendshape6", "to": "Actions.UserBlendshape6" },
+    { "from": "Standard.UserBlendshape7", "to": "Actions.UserBlendshape7" },
+    { "from": "Standard.UserBlendshape8", "to": "Actions.UserBlendshape8" },
+    { "from": "Standard.UserBlendshape9", "to": "Actions.UserBlendshape9" },
+
    { "from": "Standard.TrackedObject00", "to" : "Actions.TrackedObject00" },
    { "from": "Standard.TrackedObject01", "to" : "Actions.TrackedObject01" },
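Each route above wires a device-neutral `Standard.*` input to an application-side `Actions.*` endpoint. A hedged sketch of reading such a mapping file with Qt's JSON classes; the `Route` struct, the `loadRoutes` helper, and the assumption that routes sit in a top-level "channels" array are illustrative, not the engine's actual loader:

    #include <QFile>
    #include <QJsonArray>
    #include <QJsonDocument>
    #include <QJsonObject>
    #include <QString>
    #include <QVector>

    struct Route {          // illustrative type, not the engine's real route class
        QString from;
        QString to;
    };

    QVector<Route> loadRoutes(const QString& path) {
        QVector<Route> routes;
        QFile file(path);
        if (!file.open(QIODevice::ReadOnly)) {
            return routes;
        }
        const QJsonObject root = QJsonDocument::fromJson(file.readAll()).object();
        // Assumption: routes live in a "channels" array of {from, to} objects;
        // the real schema carries additional fields (filters, conditions, ...).
        for (const QJsonValue& value : root.value("channels").toArray()) {
            const QJsonObject entry = value.toObject();
            routes.append(Route{ entry.value("from").toString(), entry.value("to").toString() });
        }
        return routes;
    }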
@@ -61,8 +61,70 @@
    { "from": "Standard.LeftEye", "to": "Actions.LeftEye" },
    { "from": "Standard.RightEye", "to": "Actions.RightEye" },

+    { "from": "Standard.LeftEyeBlink", "to": "Actions.LeftEyeBlink" },
+    { "from": "Standard.RightEyeBlink", "to": "Actions.RightEyeBlink" },
+    { "from": "Standard.EyeBlink_L", "to": "Actions.EyeBlink_L" },
+    { "from": "Standard.EyeBlink_R", "to": "Actions.EyeBlink_R" },
+    { "from": "Standard.EyeSquint_L", "to": "Actions.EyeSquint_L" },
+    { "from": "Standard.EyeSquint_R", "to": "Actions.EyeSquint_R" },
+    { "from": "Standard.EyeDown_L", "to": "Actions.EyeDown_L" },
+    { "from": "Standard.EyeDown_R", "to": "Actions.EyeDown_R" },
+    { "from": "Standard.EyeIn_L", "to": "Actions.EyeIn_L" },
+    { "from": "Standard.EyeIn_R", "to": "Actions.EyeIn_R" },
+    { "from": "Standard.EyeOpen_L", "to": "Actions.EyeOpen_L" },
+    { "from": "Standard.EyeOpen_R", "to": "Actions.EyeOpen_R" },
+    { "from": "Standard.EyeOut_L", "to": "Actions.EyeOut_L" },
+    { "from": "Standard.EyeOut_R", "to": "Actions.EyeOut_R" },
+    { "from": "Standard.EyeUp_L", "to": "Actions.EyeUp_L" },
+    { "from": "Standard.EyeUp_R", "to": "Actions.EyeUp_R" },
+    { "from": "Standard.BrowsD_L", "to": "Actions.BrowsD_L" },
+    { "from": "Standard.BrowsD_R", "to": "Actions.BrowsD_R" },
+    { "from": "Standard.BrowsU_C", "to": "Actions.BrowsU_C" },
+    { "from": "Standard.BrowsU_L", "to": "Actions.BrowsU_L" },
+    { "from": "Standard.BrowsU_R", "to": "Actions.BrowsU_R" },
+    { "from": "Standard.JawFwd", "to": "Actions.JawFwd" },
+    { "from": "Standard.JawLeft", "to": "Actions.JawLeft" },
+    { "from": "Standard.JawOpen", "to": "Actions.JawOpen" },
+    { "from": "Standard.JawRight", "to": "Actions.JawRight" },
+    { "from": "Standard.MouthLeft", "to": "Actions.MouthLeft" },
+    { "from": "Standard.MouthRight", "to": "Actions.MouthRight" },
+    { "from": "Standard.MouthFrown_L", "to": "Actions.MouthFrown_L" },
+    { "from": "Standard.MouthFrown_R", "to": "Actions.MouthFrown_R" },
+    { "from": "Standard.MouthSmile_L", "to": "Actions.MouthSmile_L" },
+    { "from": "Standard.MouthSmile_R", "to": "Actions.MouthSmile_R" },
+    { "from": "Standard.MouthDimple_L", "to": "Actions.MouthDimple_L" },
+    { "from": "Standard.MouthDimple_R", "to": "Actions.MouthDimple_R" },
+    { "from": "Standard.LipsStretch_L", "to": "Actions.LipsStretch_L" },
+    { "from": "Standard.LipsStretch_R", "to": "Actions.LipsStretch_R" },
+    { "from": "Standard.LipsUpperClose", "to": "Actions.LipsUpperClose" },
+    { "from": "Standard.LipsLowerClose", "to": "Actions.LipsLowerClose" },
+    { "from": "Standard.LipsUpperOpen", "to": "Actions.LipsUpperOpen" },
+    { "from": "Standard.LipsLowerOpen", "to": "Actions.LipsLowerOpen" },
+    { "from": "Standard.LipsFunnel", "to": "Actions.LipsFunnel" },
+    { "from": "Standard.LipsPucker", "to": "Actions.LipsPucker" },
+    { "from": "Standard.Puff", "to": "Actions.Puff" },
+    { "from": "Standard.CheekSquint_L", "to": "Actions.CheekSquint_L" },
+    { "from": "Standard.CheekSquint_R", "to": "Actions.CheekSquint_R" },
+    { "from": "Standard.MouthClose", "to": "Actions.MouthClose" },
+    { "from": "Standard.MouthUpperUp_L", "to": "Actions.MouthUpperUp_L" },
+    { "from": "Standard.MouthUpperUp_R", "to": "Actions.MouthUpperUp_R" },
+    { "from": "Standard.MouthLowerDown_L", "to": "Actions.MouthLowerDown_L" },
+    { "from": "Standard.MouthLowerDown_R", "to": "Actions.MouthLowerDown_R" },
+    { "from": "Standard.MouthPress_L", "to": "Actions.MouthPress_L" },
+    { "from": "Standard.MouthPress_R", "to": "Actions.MouthPress_R" },
+    { "from": "Standard.MouthShrugLower", "to": "Actions.MouthShrugLower" },
+    { "from": "Standard.MouthShrugUpper", "to": "Actions.MouthShrugUpper" },
+    { "from": "Standard.NoseSneer_L", "to": "Actions.NoseSneer_L" },
+    { "from": "Standard.NoseSneer_R", "to": "Actions.NoseSneer_R" },
+    { "from": "Standard.TongueOut", "to": "Actions.TongueOut" },
+    { "from": "Standard.UserBlendshape0", "to": "Actions.UserBlendshape0" },
+    { "from": "Standard.UserBlendshape1", "to": "Actions.UserBlendshape1" },
+    { "from": "Standard.UserBlendshape2", "to": "Actions.UserBlendshape2" },
+    { "from": "Standard.UserBlendshape3", "to": "Actions.UserBlendshape3" },
+    { "from": "Standard.UserBlendshape4", "to": "Actions.UserBlendshape4" },
+    { "from": "Standard.UserBlendshape5", "to": "Actions.UserBlendshape5" },
+    { "from": "Standard.UserBlendshape6", "to": "Actions.UserBlendshape6" },
+    { "from": "Standard.UserBlendshape7", "to": "Actions.UserBlendshape7" },
+    { "from": "Standard.UserBlendshape8", "to": "Actions.UserBlendshape8" },
+    { "from": "Standard.UserBlendshape9", "to": "Actions.UserBlendshape9" },
+
    { "from": "Standard.TrackedObject00", "to" : "Actions.TrackedObject00" },
    { "from": "Standard.TrackedObject01", "to" : "Actions.TrackedObject01" },
@@ -98,8 +98,9 @@
    { "from": "Vive.Head", "to" : "Standard.Head" },
    { "from": "Vive.LeftEye", "to" : "Standard.LeftEye" },
    { "from": "Vive.RightEye", "to" : "Standard.RightEye" },
-    { "from": "Vive.LeftEyeBlink", "to" : "Standard.LeftEyeBlink" },
-    { "from": "Vive.RightEyeBlink", "to" : "Standard.RightEyeBlink" },
+
+    { "from": "Vive.EyeBlink_L", "to" : "Standard.EyeBlink_L" },
+    { "from": "Vive.EyeBlink_R", "to" : "Standard.EyeBlink_R" },

    {
        "from": "Vive.LeftFoot", "to" : "Standard.LeftFoot",
@@ -375,14 +375,14 @@ Rectangle {
    x: margins.paddings
    interactive: false;
    height: contentHeight;
    spacing: 4;
    clip: true;
    model: AudioScriptingInterface.devices.input;
    delegate: Item {
        width: rightMostInputLevelPos - margins.paddings*2
-        height: margins.sizeCheckBox > checkBoxInput.implicitHeight ?
-                    margins.sizeCheckBox : checkBoxInput.implicitHeight
-
+        height: ((type != "hmd" && bar.currentIndex === 0) || (type != "desktop" && bar.currentIndex === 1)) ?
+                    (margins.sizeCheckBox > checkBoxInput.implicitHeight ? margins.sizeCheckBox + 4 : checkBoxInput.implicitHeight + 4) : 0
+        visible: (type != "hmd" && bar.currentIndex === 0) || (type != "desktop" && bar.currentIndex === 1)
        AudioControls.CheckBox {
            id: checkBoxInput
            anchors.left: parent.left
@@ -470,13 +470,13 @@ Rectangle {
    height: contentHeight;
    anchors.top: outputDeviceHeader.bottom;
    anchors.topMargin: 10;
    spacing: 4;
    clip: true;
    model: AudioScriptingInterface.devices.output;
    delegate: Item {
        width: rightMostInputLevelPos
-        height: margins.sizeCheckBox > checkBoxOutput.implicitHeight ?
-                    margins.sizeCheckBox : checkBoxOutput.implicitHeight
+        height: ((type != "hmd" && bar.currentIndex === 0) || (type != "desktop" && bar.currentIndex === 1)) ?
+                    (margins.sizeCheckBox > checkBoxOutput.implicitHeight ? margins.sizeCheckBox + 4 : checkBoxOutput.implicitHeight + 4) : 0
+        visible: (type != "hmd" && bar.currentIndex === 0) || (type != "desktop" && bar.currentIndex === 1)

        AudioControls.CheckBox {
            id: checkBoxOutput
@@ -170,7 +170,6 @@
#include "avatar/MyCharacterController.h"
#include "CrashRecoveryHandler.h"
#include "CrashHandler.h"
-#include "devices/DdeFaceTracker.h"
#include "DiscoverabilityManager.h"
#include "GLCanvas.h"
#include "InterfaceDynamicFactory.h"
@@ -888,11 +887,6 @@ bool setupEssentials(int& argc, char** argv, bool runningMarkerExisted) {
    DependencyManager::set<ScriptCache>();
    DependencyManager::set<SoundCache>();
    DependencyManager::set<SoundCacheScriptingInterface>();
-
-#ifdef HAVE_DDE
-    DependencyManager::set<DdeFaceTracker>();
-#endif
-
    DependencyManager::set<AudioClient>();
    DependencyManager::set<AudioScope>();
    DependencyManager::set<DeferredLightingEffect>();
@@ -1069,7 +1063,6 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
    _lastSendDownstreamAudioStats(usecTimestampNow()),
    _notifiedPacketVersionMismatchThisDomain(false),
    _maxOctreePPS(maxOctreePacketsPerSecond.get()),
-    _lastFaceTrackerUpdate(0),
    _snapshotSound(nullptr),
    _sampleSound(nullptr)
{
@@ -2019,13 +2012,6 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
    this->installEventFilter(this);

-
-#ifdef HAVE_DDE
-    auto ddeTracker = DependencyManager::get<DdeFaceTracker>();
-    ddeTracker->init();
-    connect(ddeTracker.data(), &FaceTracker::muteToggled, this, &Application::faceTrackerMuteToggled);
-#endif

    // If launched from Steam, let it handle updates
    const QString HIFI_NO_UPDATER_COMMAND_LINE_KEY = "--no-updater";
    bool noUpdater = arguments().indexOf(HIFI_NO_UPDATER_COMMAND_LINE_KEY) != -1;
@@ -2767,9 +2753,6 @@ void Application::cleanupBeforeQuit() {
    }

    // Stop third party processes so that they're not left running in the event of a subsequent shutdown crash.
-#ifdef HAVE_DDE
-    DependencyManager::get<DdeFaceTracker>()->setEnabled(false);
-#endif
    AnimDebugDraw::getInstance().shutdown();

    // FIXME: once we move to shared pointer for the INputDevice we shoud remove this naked delete:
@@ -2840,10 +2823,6 @@ void Application::cleanupBeforeQuit() {
    _window->saveGeometry();

-    // Destroy third party processes after scripts have finished using them.
-#ifdef HAVE_DDE
-    DependencyManager::destroy<DdeFaceTracker>();
-#endif

    DependencyManager::destroy<ContextOverlayInterface>(); // Must be destroyed before TabletScriptingInterface

    // stop QML
@@ -3478,9 +3457,6 @@ void Application::onDesktopRootContextCreated(QQmlContext* surfaceContext) {
    surfaceContext->setContextProperty("AccountServices", AccountServicesScriptingInterface::getInstance());

    surfaceContext->setContextProperty("DialogsManager", _dialogsManagerScriptingInterface);
-#ifdef HAVE_DDE
-    surfaceContext->setContextProperty("FaceTracker", DependencyManager::get<DdeFaceTracker>().data());
-#endif
    surfaceContext->setContextProperty("AvatarManager", DependencyManager::get<AvatarManager>().data());
    surfaceContext->setContextProperty("LODManager", DependencyManager::get<LODManager>().data());
    surfaceContext->setContextProperty("HMD", DependencyManager::get<HMDScriptingInterface>().data());
@@ -3747,16 +3723,6 @@ void Application::runTests() {
    runUnitTests();
}

-void Application::faceTrackerMuteToggled() {
-    QAction* muteAction = Menu::getInstance()->getActionForOption(MenuOption::MuteFaceTracking);
-    Q_CHECK_PTR(muteAction);
-    bool isMuted = getSelectedFaceTracker()->isMuted();
-    muteAction->setChecked(isMuted);
-    getSelectedFaceTracker()->setEnabled(!isMuted);
-    Menu::getInstance()->getActionForOption(MenuOption::CalibrateCamera)->setEnabled(!isMuted);
-}
-
void Application::setFieldOfView(float fov) {
    if (fov != _fieldOfView.get()) {
        _fieldOfView.set(fov);
@@ -5331,43 +5297,6 @@ ivec2 Application::getMouse() const {
    return getApplicationCompositor().getReticlePosition();
}

-FaceTracker* Application::getActiveFaceTracker() {
-#ifdef HAVE_DDE
-    auto dde = DependencyManager::get<DdeFaceTracker>();
-
-    if (dde && dde->isActive()) {
-        return static_cast<FaceTracker*>(dde.data());
-    }
-#endif
-
-    return nullptr;
-}
-
-FaceTracker* Application::getSelectedFaceTracker() {
-    FaceTracker* faceTracker = nullptr;
-#ifdef HAVE_DDE
-    if (Menu::getInstance()->isOptionChecked(MenuOption::UseCamera)) {
-        faceTracker = DependencyManager::get<DdeFaceTracker>().data();
-    }
-#endif
-    return faceTracker;
-}
-
-void Application::setActiveFaceTracker() const {
-#ifdef HAVE_DDE
-    bool isMuted = Menu::getInstance()->isOptionChecked(MenuOption::MuteFaceTracking);
-    bool isUsingDDE = Menu::getInstance()->isOptionChecked(MenuOption::UseCamera);
-    Menu::getInstance()->getActionForOption(MenuOption::BinaryEyelidControl)->setVisible(isUsingDDE);
-    Menu::getInstance()->getActionForOption(MenuOption::CoupleEyelids)->setVisible(isUsingDDE);
-    Menu::getInstance()->getActionForOption(MenuOption::UseAudioForMouth)->setVisible(isUsingDDE);
-    Menu::getInstance()->getActionForOption(MenuOption::VelocityFilter)->setVisible(isUsingDDE);
-    Menu::getInstance()->getActionForOption(MenuOption::CalibrateCamera)->setVisible(isUsingDDE);
-    auto ddeTracker = DependencyManager::get<DdeFaceTracker>();
-    ddeTracker->setIsMuted(isMuted);
-    ddeTracker->setEnabled(isUsingDDE && !isMuted);
-#endif
-}
-
bool Application::exportEntities(const QString& filename,
                                 const QVector<QUuid>& entityIDs,
                                 const glm::vec3* givenOffset) {
@@ -5851,8 +5780,7 @@ void Application::updateMyAvatarLookAtPosition(float deltaTime) {
    PerformanceWarning warn(showWarnings, "Application::updateMyAvatarLookAtPosition()");

    auto myAvatar = getMyAvatar();
-    FaceTracker* faceTracker = getActiveFaceTracker();
-    myAvatar->updateEyesLookAtPosition(faceTracker, _myCamera, deltaTime);
+    myAvatar->updateEyesLookAtPosition(deltaTime);
}

void Application::updateThreads(float deltaTime) {
@@ -6278,37 +6206,6 @@ void Application::update(float deltaTime) {
    auto myAvatar = getMyAvatar();
-    {
-        PerformanceTimer perfTimer("devices");
-
-        FaceTracker* tracker = getSelectedFaceTracker();
-        if (tracker && Menu::getInstance()->isOptionChecked(MenuOption::MuteFaceTracking) != tracker->isMuted()) {
-            tracker->toggleMute();
-        }
-
-        tracker = getActiveFaceTracker();
-        if (tracker && !tracker->isMuted()) {
-            tracker->update(deltaTime);
-
-            // Auto-mute microphone after losing face tracking?
-            if (tracker->isTracking()) {
-                _lastFaceTrackerUpdate = usecTimestampNow();
-            } else {
-                const quint64 MUTE_MICROPHONE_AFTER_USECS = 5000000; //5 secs
-                Menu* menu = Menu::getInstance();
-                auto audioClient = DependencyManager::get<AudioClient>();
-                if (menu->isOptionChecked(MenuOption::AutoMuteAudio) && !audioClient->isMuted()) {
-                    if (_lastFaceTrackerUpdate > 0
-                        && ((usecTimestampNow() - _lastFaceTrackerUpdate) > MUTE_MICROPHONE_AFTER_USECS)) {
-                        audioClient->setMuted(true);
-                        _lastFaceTrackerUpdate = 0;
-                    }
-                } else {
-                    _lastFaceTrackerUpdate = 0;
-                }
-            }
-        } else {
-            _lastFaceTrackerUpdate = 0;
-        }
-    }
-
    auto userInputMapper = DependencyManager::get<UserInputMapper>();

    controller::HmdAvatarAlignmentType hmdAvatarAlignmentType;
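The block removed above implemented a small watchdog: refresh a timestamp while tracking is healthy, and mute the microphone once the timestamp has been stale for five seconds. The same pattern in isolation (a hedged sketch, using std::chrono in place of the engine's usecTimestampNow()):

    #include <chrono>
    #include <cstdint>

    // Hedged sketch of the removed auto-mute watchdog, in isolation.
    class TrackingWatchdog {
    public:
        static constexpr uint64_t MUTE_AFTER_USECS = 5000000; // 5 seconds

        // Returns true exactly once, when tracking has been lost for 5 seconds.
        bool shouldMute(bool isTracking) {
            if (isTracking) {
                _lastUpdate = nowUsecs();  // tracking healthy: refresh the stamp
                return false;
            }
            if (_lastUpdate > 0 && (nowUsecs() - _lastUpdate) > MUTE_AFTER_USECS) {
                _lastUpdate = 0;           // fire once, then disarm until tracking returns
                return true;
            }
            return false;
        }

    private:
        static uint64_t nowUsecs() {
            using namespace std::chrono;
            return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
        }
        uint64_t _lastUpdate { 0 };
    };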
@@ -7104,10 +7001,6 @@ void Application::copyDisplayViewFrustum(ViewFrustum& viewOut) const {
// feature. However, we still use this to reset face trackers, eye trackers, audio and to optionally re-load the avatar
// rig and animations from scratch.
void Application::resetSensors(bool andReload) {
-#ifdef HAVE_DDE
-    DependencyManager::get<DdeFaceTracker>()->reset();
-#endif
-
    _overlayConductor.centerUI();
    getActiveDisplayPlugin()->resetSensors();
    getMyAvatar()->reset(true, andReload);
@@ -7509,10 +7402,6 @@ void Application::registerScriptEngineWithApplicationServices(const ScriptEngine
    scriptEngine->registerGlobalObject("AccountServices", AccountServicesScriptingInterface::getInstance());
    qScriptRegisterMetaType(scriptEngine.data(), DownloadInfoResultToScriptValue, DownloadInfoResultFromScriptValue);

-#ifdef HAVE_DDE
-    scriptEngine->registerGlobalObject("FaceTracker", DependencyManager::get<DdeFaceTracker>().data());
-#endif
-
    scriptEngine->registerGlobalObject("AvatarManager", DependencyManager::get<AvatarManager>().data());

    scriptEngine->registerGlobalObject("LODManager", DependencyManager::get<LODManager>().data());
@@ -81,7 +81,6 @@
#include "VisionSqueeze.h"

class GLCanvas;
-class FaceTracker;
class MainWindow;
class AssetUpload;
class CompositorHelper;
@@ -191,9 +190,6 @@ public:

    ivec2 getMouse() const;

-    FaceTracker* getActiveFaceTracker();
-    FaceTracker* getSelectedFaceTracker();
-
    ApplicationOverlay& getApplicationOverlay() { return _applicationOverlay; }
    const ApplicationOverlay& getApplicationOverlay() const { return _applicationOverlay; }
    CompositorHelper& getApplicationCompositor() const;
@@ -423,7 +419,6 @@ public slots:
    static void packageModel();

    void resetSensors(bool andReload = false);
-    void setActiveFaceTracker() const;

    void hmdVisibleChanged(bool visible);
@@ -497,8 +492,6 @@ private slots:

    void resettingDomain();

-    void faceTrackerMuteToggled();
-
    void activeChanged(Qt::ApplicationState state);
    void windowMinimizedChanged(bool minimized);
@@ -736,8 +729,6 @@ private:
    PerformanceManager _performanceManager;
    RefreshRateManager _refreshRateManager;

-    quint64 _lastFaceTrackerUpdate;
-
    GameWorkload _gameWorkload;

    GraphicsEngine _graphicsEngine;
@@ -212,22 +212,36 @@ void AvatarBookmarks::loadBookmark(const QString& bookmarkName) {
        auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
        auto treeRenderer = DependencyManager::get<EntityTreeRenderer>();
        EntityTreePointer entityTree = treeRenderer ? treeRenderer->getTree() : nullptr;
-        myAvatar->clearWornAvatarEntities();

+        // Once the skeleton URL has been loaded, add the Avatar Entities.
+        // We have to wait, because otherwise the avatar entities will try to get attached to the joints
+        // of the *current* avatar at first. But the current avatar might have a different joints scheme
+        // from the new avatar, and that would cause the entities to be attached to the wrong joints.
+
+        std::shared_ptr<QMetaObject::Connection> connection1 = std::make_shared<QMetaObject::Connection>();
+        *connection1 = connect(myAvatar.get(), &MyAvatar::onLoadComplete, [this, bookmark, bookmarkName, myAvatar, connection1]() {
+            qCDebug(interfaceapp) << "Finish loading avatar bookmark" << bookmarkName;
+            QObject::disconnect(*connection1);
+            myAvatar->clearWornAvatarEntities();
+            const float& qScale = bookmark.value(ENTRY_AVATAR_SCALE, 1.0f).toFloat();
+            myAvatar->setAvatarScale(qScale);
+            QList<QVariant> attachments = bookmark.value(ENTRY_AVATAR_ATTACHMENTS, QList<QVariant>()).toList();
+            myAvatar->setAttachmentsVariant(attachments);
+            QVariantList avatarEntities = bookmark.value(ENTRY_AVATAR_ENTITIES, QVariantList()).toList();
+            addAvatarEntities(avatarEntities);
+            emit bookmarkLoaded(bookmarkName);
+        });
+
+        std::shared_ptr<QMetaObject::Connection> connection2 = std::make_shared<QMetaObject::Connection>();
+        *connection2 = connect(myAvatar.get(), &MyAvatar::onLoadFailed, [this, bookmarkName, connection2]() {
+            qCDebug(interfaceapp) << "Failed to load avatar bookmark" << bookmarkName;
+            QObject::disconnect(*connection2);
+        });
+
+        qCDebug(interfaceapp) << "Start loading avatar bookmark" << bookmarkName;
+
        const QString& avatarUrl = bookmark.value(ENTRY_AVATAR_URL, "").toString();
        myAvatar->useFullAvatarURL(avatarUrl);
-        qCDebug(interfaceapp) << "Avatar On";
-        const QList<QVariant>& attachments = bookmark.value(ENTRY_AVATAR_ATTACHMENTS, QList<QVariant>()).toList();
-
-        qCDebug(interfaceapp) << "Attach " << attachments;
-        myAvatar->setAttachmentsVariant(attachments);
-
-        const float& qScale = bookmark.value(ENTRY_AVATAR_SCALE, 1.0f).toFloat();
-        myAvatar->setAvatarScale(qScale);
-
-        const QVariantList& avatarEntities = bookmark.value(ENTRY_AVATAR_ENTITIES, QVariantList()).toList();
-        addAvatarEntities(avatarEntities);
-
-        emit bookmarkLoaded(bookmarkName);
-    }
    }
}
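The rewritten loadBookmark uses a common Qt idiom for a one-shot connection: the QMetaObject::Connection handle lives in a std::shared_ptr captured by the lambda, so the handler can disconnect itself after the first emission. The idiom in isolation (a minimal sketch; QTimer stands in for the avatar's signal source):

    #include <QObject>
    #include <QTimer>
    #include <memory>

    // One-shot connection: the lambda owns its own connection handle (via the
    // shared_ptr it captures) and severs it after the first signal delivery.
    void connectOnce(QTimer* timer) {
        auto connection = std::make_shared<QMetaObject::Connection>();
        *connection = QObject::connect(timer, &QTimer::timeout, [connection]() {
            QObject::disconnect(*connection);
            // ... handle the event exactly once ...
        });
    }

Newer Qt versions add Qt::SingleShotConnection for this purpose; the shared-handle form predates it and also works when the lambda needs the handle for other reasons.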
@@ -37,7 +37,6 @@
#include "avatar/AvatarManager.h"
#include "avatar/AvatarPackager.h"
#include "AvatarBookmarks.h"
-#include "devices/DdeFaceTracker.h"
#include "MainWindow.h"
#include "render/DrawStatus.h"
#include "scripting/MenuScriptingInterface.h"
@@ -493,47 +492,6 @@ Menu::Menu() {
    // Developer > Avatar >>>
    MenuWrapper* avatarDebugMenu = developerMenu->addMenu("Avatar");

-    // Developer > Avatar > Face Tracking
-    MenuWrapper* faceTrackingMenu = avatarDebugMenu->addMenu("Face Tracking");
-    {
-        QActionGroup* faceTrackerGroup = new QActionGroup(avatarDebugMenu);
-
-        bool defaultNoFaceTracking = true;
-#ifdef HAVE_DDE
-        defaultNoFaceTracking = false;
-#endif
-        QAction* noFaceTracker = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::NoFaceTracking,
-            0, defaultNoFaceTracking,
-            qApp, SLOT(setActiveFaceTracker()));
-        faceTrackerGroup->addAction(noFaceTracker);
-
-#ifdef HAVE_DDE
-        QAction* ddeFaceTracker = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::UseCamera,
-            0, true,
-            qApp, SLOT(setActiveFaceTracker()));
-        faceTrackerGroup->addAction(ddeFaceTracker);
-#endif
-    }
-#ifdef HAVE_DDE
-    faceTrackingMenu->addSeparator();
-    QAction* binaryEyelidControl = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::BinaryEyelidControl, 0, true);
-    binaryEyelidControl->setVisible(true); // DDE face tracking is on by default
-    QAction* coupleEyelids = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::CoupleEyelids, 0, true);
-    coupleEyelids->setVisible(true); // DDE face tracking is on by default
-    QAction* useAudioForMouth = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::UseAudioForMouth, 0, true);
-    useAudioForMouth->setVisible(true); // DDE face tracking is on by default
-    QAction* ddeFiltering = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::VelocityFilter, 0, true);
-    ddeFiltering->setVisible(true); // DDE face tracking is on by default
-    QAction* ddeCalibrate = addActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::CalibrateCamera, 0,
-        DependencyManager::get<DdeFaceTracker>().data(), SLOT(calibrate()));
-    ddeCalibrate->setVisible(true); // DDE face tracking is on by default
-    faceTrackingMenu->addSeparator();
-    addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::MuteFaceTracking,
-        [](bool mute) { FaceTracker::setIsMuted(mute); },
-        Qt::CTRL | Qt::SHIFT | Qt::Key_F, FaceTracker::isMuted());
-    addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::AutoMuteAudio, 0, false);
-#endif

    action = addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::AvatarReceiveStats, 0, false);
    connect(action, &QAction::triggered, [this]{ Avatar::setShowReceiveStats(isOptionChecked(MenuOption::AvatarReceiveStats)); });
    action = addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::ShowBoundingCollisionShapes, 0, false);
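The removed menu code relied on QActionGroup to make "No face tracking" and "Use camera" mutually exclusive. The underlying Qt mechanism, shown in isolation (names are illustrative):

    #include <QAction>
    #include <QActionGroup>
    #include <QMenu>

    // Checkable actions added to an exclusive QActionGroup behave like radio
    // buttons: checking one unchecks the others. Groups are exclusive by default.
    void buildTrackerMenu(QMenu* menu) {
        auto* group = new QActionGroup(menu);

        QAction* none = menu->addAction("No face tracking");
        none->setCheckable(true);
        none->setChecked(true);   // default selection
        group->addAction(none);

        QAction* camera = menu->addAction("Use camera");
        camera->setCheckable(true);
        group->addAction(camera);
    }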
@@ -26,19 +26,22 @@ class AudioScope : public QObject, public Dependency {
    SINGLETON_DEPENDENCY

    /**jsdoc
-     * The AudioScope API helps control the Audio Scope features in Interface
+     * The <code>AudioScope</code> API provides facilities for an audio scope.
     *
     * @namespace AudioScope
     *
+     * @deprecated This API doesn't work properly. It is deprecated and will be removed.
+     *
     * @hifi-interface
     * @hifi-client-entity
     * @hifi-avatar
     *
-     * @property {number} scopeInput <em>Read-only.</em>
-     * @property {number} scopeOutputLeft <em>Read-only.</em>
-     * @property {number} scopeOutputRight <em>Read-only.</em>
-     * @property {number} triggerInput <em>Read-only.</em>
-     * @property {number} triggerOutputLeft <em>Read-only.</em>
-     * @property {number} triggerOutputRight <em>Read-only.</em>
+     * @property {number[]} scopeInput - Scope input. <em>Read-only.</em>
+     * @property {number[]} scopeOutputLeft - Scope left output. <em>Read-only.</em>
+     * @property {number[]} scopeOutputRight - Scope right output. <em>Read-only.</em>
+     * @property {number[]} triggerInput - Trigger input. <em>Read-only.</em>
+     * @property {number[]} triggerOutputLeft - Trigger left output. <em>Read-only.</em>
+     * @property {number[]} triggerOutputRight - Trigger right output. <em>Read-only.</em>
     */

    Q_PROPERTY(QVector<int> scopeInput READ getScopeInput)
@@ -58,159 +61,186 @@ public:
public slots:

    /**jsdoc
+     * Toggle.
     * @function AudioScope.toggle
     */
    void toggle() { setVisible(!_isEnabled); }

    /**jsdoc
+     * Set visible.
     * @function AudioScope.setVisible
-     * @param {boolean} visible
+     * @param {boolean} visible - Visible.
     */
    void setVisible(bool visible);

    /**jsdoc
+     * Get visible.
     * @function AudioScope.getVisible
-     * @returns {boolean}
+     * @returns {boolean} Visible.
     */
    bool getVisible() const { return _isEnabled; }

    /**jsdoc
+     * Toggle pause.
     * @function AudioScope.togglePause
     */
    void togglePause() { setPause(!_isPaused); }

    /**jsdoc
+     * Set pause.
     * @function AudioScope.setPause
-     * @param {boolean} paused
+     * @param {boolean} pause - Pause.
     */
    void setPause(bool paused) { _isPaused = paused; emit pauseChanged(); }

    /**jsdoc
+     * Get pause.
     * @function AudioScope.getPause
-     * @returns {boolean}
+     * @returns {boolean} Pause.
     */
    bool getPause() { return _isPaused; }

    /**jsdoc
+     * Toggle trigger.
     * @function AudioScope.toggleTrigger
     */
    void toggleTrigger() { _autoTrigger = !_autoTrigger; }

    /**jsdoc
+     * Get auto trigger.
     * @function AudioScope.getAutoTrigger
-     * @returns {boolean}
+     * @returns {boolean} Auto trigger.
     */
    bool getAutoTrigger() { return _autoTrigger; }

    /**jsdoc
+     * Set auto trigger.
     * @function AudioScope.setAutoTrigger
-     * @param {boolean} autoTrigger
+     * @param {boolean} autoTrigger - Auto trigger.
     */
    void setAutoTrigger(bool autoTrigger) { _isTriggered = false; _autoTrigger = autoTrigger; }

    /**jsdoc
+     * Set trigger values.
     * @function AudioScope.setTriggerValues
-     * @param {number} x
-     * @param {number} y
+     * @param {number} x - X.
+     * @param {number} y - Y.
     */
    void setTriggerValues(int x, int y) { _triggerValues.x = x; _triggerValues.y = y; }

    /**jsdoc
+     * Set triggered.
     * @function AudioScope.setTriggered
-     * @param {boolean} triggered
+     * @param {boolean} triggered - Triggered.
     */
    void setTriggered(bool triggered) { _isTriggered = triggered; }

    /**jsdoc
+     * Get triggered.
     * @function AudioScope.getTriggered
-     * @returns {boolean}
+     * @returns {boolean} Triggered.
     */
    bool getTriggered() { return _isTriggered; }

    /**jsdoc
+     * Get frames per second.
     * @function AudioScope.getFramesPerSecond
-     * @returns {number}
+     * @returns {number} Frames per second.
     */
    float getFramesPerSecond();

    /**jsdoc
+     * Get frames per scope.
     * @function AudioScope.getFramesPerScope
-     * @returns {number}
+     * @returns {number} Frames per scope.
     */
    int getFramesPerScope() { return _framesPerScope; }

    /**jsdoc
+     * Select five frames audio scope.
     * @function AudioScope.selectAudioScopeFiveFrames
     */
    void selectAudioScopeFiveFrames();

    /**jsdoc
+     * Select twenty frames audio scope.
     * @function AudioScope.selectAudioScopeTwentyFrames
     */
    void selectAudioScopeTwentyFrames();

    /**jsdoc
+     * Select fifty frames audio scope.
     * @function AudioScope.selectAudioScopeFiftyFrames
     */
    void selectAudioScopeFiftyFrames();

    /**jsdoc
+     * Get scope input.
     * @function AudioScope.getScopeInput
-     * @returns {number[]}
+     * @returns {number[]} Scope input.
     */
    QVector<int> getScopeInput() { return _scopeInputData; };

    /**jsdoc
+     * Get scope left output.
     * @function AudioScope.getScopeOutputLeft
-     * @returns {number[]}
+     * @returns {number[]} Scope left output.
     */
    QVector<int> getScopeOutputLeft() { return _scopeOutputLeftData; };

    /**jsdoc
+     * Get scope right output.
     * @function AudioScope.getScopeOutputRight
-     * @returns {number[]}
+     * @returns {number[]} Scope right output.
     */
    QVector<int> getScopeOutputRight() { return _scopeOutputRightData; };

    /**jsdoc
+     * Get trigger input.
     * @function AudioScope.getTriggerInput
-     * @returns {number[]}
+     * @returns {number[]} Trigger input.
     */
    QVector<int> getTriggerInput() { return _triggerInputData; };

    /**jsdoc
+     * Get left trigger output.
     * @function AudioScope.getTriggerOutputLeft
-     * @returns {number[]}
+     * @returns {number[]} Left trigger output.
     */
    QVector<int> getTriggerOutputLeft() { return _triggerOutputLeftData; };

    /**jsdoc
+     * Get right trigger output.
     * @function AudioScope.getTriggerOutputRight
-     * @returns {number[]}
+     * @returns {number[]} Right trigger output.
     */
    QVector<int> getTriggerOutputRight() { return _triggerOutputRightData; };

    /**jsdoc
+     * Set local echo.
     * @function AudioScope.setLocalEcho
-     * @parm {boolean} localEcho
+     * @parm {boolean} localEcho - Local echo.
     */
    void setLocalEcho(bool localEcho);

    /**jsdoc
+     * Set server echo.
     * @function AudioScope.setServerEcho
-     * @parm {boolean} serverEcho
+     * @parm {boolean} serverEcho - Server echo.
     */
    void setServerEcho(bool serverEcho);

signals:

    /**jsdoc
+     * Triggered when pause changes.
     * @function AudioScope.pauseChanged
     * @returns {Signal}
     */
    void pauseChanged();

    /**jsdoc
+     * Triggered when scope is triggered.
     * @function AudioScope.triggered
     * @returns {Signal}
     */
@@ -48,7 +48,6 @@
#include <recording/Clip.h>
#include <recording/Frame.h>
#include <RecordingScriptingInterface.h>
-#include <trackers/FaceTracker.h>
#include <RenderableModelEntityItem.h>
#include <VariantMapToScriptValue.h>

@@ -349,7 +348,8 @@ MyAvatar::MyAvatar(QThread* thread) :
        }
    });

-    connect(&(_skeletonModel->getRig()), SIGNAL(onLoadComplete()), this, SIGNAL(onLoadComplete()));
+    connect(&(_skeletonModel->getRig()), &Rig::onLoadComplete, this, &MyAvatar::onLoadComplete);
+    connect(&(_skeletonModel->getRig()), &Rig::onLoadFailed, this, &MyAvatar::onLoadFailed);

    _characterController.setDensity(_density);
}
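Note the syntax change: the string-based SIGNAL() macros are validated only at runtime (a typo fails silently with a console warning), while pointer-to-member connects are verified by the compiler, which is what makes the added Rig::onLoadFailed forwarding safe to extend. A self-contained comparison with illustrative classes:

    #include <QObject>

    class Sender : public QObject {
        Q_OBJECT
    signals:
        void done();
    };

    class Receiver : public QObject {
        Q_OBJECT
    signals:
        void forwarded();
    public:
        void wire(Sender* sender) {
            // Old style: string-based, checked only when connect() runs.
            connect(sender, SIGNAL(done()), this, SIGNAL(forwarded()));
            // New style: pointer-to-member, checked by the compiler,
            // as used in the MyAvatar constructor above.
            connect(sender, &Sender::done, this, &Receiver::forwarded);
        }
    };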
@@ -749,7 +749,6 @@ void MyAvatar::update(float deltaTime) {

    Head* head = getHead();
    head->relax(deltaTime);
-    updateFromTrackers(deltaTime);

    if (getIsInWalkingState() && glm::length(getControllerPoseInAvatarFrame(controller::Action::HEAD).getVelocity()) < DEFAULT_AVATAR_WALK_SPEED_THRESHOLD) {
        setIsInWalkingState(false);
@@ -782,18 +781,6 @@ void MyAvatar::update(float deltaTime) {
    emit energyChanged(currentEnergy);

    updateEyeContactTarget(deltaTime);
-
-    // if we're getting eye rotations from a tracker, disable observer-side procedural eye motions
-    auto userInputMapper = DependencyManager::get<UserInputMapper>();
-    bool eyesTracked =
-        userInputMapper->getPoseState(controller::Action::LEFT_EYE).valid &&
-        userInputMapper->getPoseState(controller::Action::RIGHT_EYE).valid;
-
-    int leftEyeJointIndex = getJointIndex("LeftEye");
-    int rightEyeJointIndex = getJointIndex("RightEye");
-    bool eyesAreOverridden = getIsJointOverridden(leftEyeJointIndex) || getIsJointOverridden(rightEyeJointIndex);
-
-    _headData->setHasProceduralEyeMovement(!(eyesTracked || eyesAreOverridden));
}

void MyAvatar::updateEyeContactTarget(float deltaTime) {
@@ -1148,60 +1135,6 @@ void MyAvatar::updateSensorToWorldMatrix() {

}

-// Update avatar head rotation with sensor data
-void MyAvatar::updateFromTrackers(float deltaTime) {
-    glm::vec3 estimatedRotation;
-
-    bool hasHead = getControllerPoseInAvatarFrame(controller::Action::HEAD).isValid();
-    bool playing = DependencyManager::get<recording::Deck>()->isPlaying();
-    if (hasHead && playing) {
-        return;
-    }
-
-    FaceTracker* tracker = qApp->getActiveFaceTracker();
-    bool inFacetracker = tracker && !FaceTracker::isMuted();
-
-    if (inFacetracker) {
-        estimatedRotation = glm::degrees(safeEulerAngles(tracker->getHeadRotation()));
-    }
-
-    // Rotate the body if the head is turned beyond the screen
-    if (Menu::getInstance()->isOptionChecked(MenuOption::TurnWithHead)) {
-        const float TRACKER_YAW_TURN_SENSITIVITY = 0.5f;
-        const float TRACKER_MIN_YAW_TURN = 15.0f;
-        const float TRACKER_MAX_YAW_TURN = 50.0f;
-        if ( (fabs(estimatedRotation.y) > TRACKER_MIN_YAW_TURN) &&
-             (fabs(estimatedRotation.y) < TRACKER_MAX_YAW_TURN) ) {
-            if (estimatedRotation.y > 0.0f) {
-                _bodyYawDelta += (estimatedRotation.y - TRACKER_MIN_YAW_TURN) * TRACKER_YAW_TURN_SENSITIVITY;
-            } else {
-                _bodyYawDelta += (estimatedRotation.y + TRACKER_MIN_YAW_TURN) * TRACKER_YAW_TURN_SENSITIVITY;
-            }
-        }
-    }
-
-    // Set the rotation of the avatar's head (as seen by others, not affecting view frustum)
-    // to be scaled such that when the user's physical head is pointing at edge of screen, the
-    // avatar head is at the edge of the in-world view frustum. So while a real person may move
-    // their head only 30 degrees or so, this may correspond to a 90 degree field of view.
-    // Note that roll is magnified by a constant because it is not related to field of view.
-
-    Head* head = getHead();
-    if (hasHead || playing) {
-        head->setDeltaPitch(estimatedRotation.x);
-        head->setDeltaYaw(estimatedRotation.y);
-        head->setDeltaRoll(estimatedRotation.z);
-    } else {
-        ViewFrustum viewFrustum;
-        qApp->copyViewFrustum(viewFrustum);
-        float magnifyFieldOfView = viewFrustum.getFieldOfView() / _realWorldFieldOfView.get();
-        head->setDeltaPitch(estimatedRotation.x * magnifyFieldOfView);
-        head->setDeltaYaw(estimatedRotation.y * magnifyFieldOfView);
-        head->setDeltaRoll(estimatedRotation.z);
-    }
-}

glm::vec3 MyAvatar::getLeftHandPosition() const {
    auto pose = getControllerPoseInAvatarFrame(controller::Action::LEFT_HAND);
    return pose.isValid() ? pose.getTranslation() : glm::vec3(0.0f);
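The scaling in the removed updateFromTrackers is worth making concrete: head deltas were multiplied by the ratio of the in-world field of view to the user's estimated real-world field of view, so a small physical head turn sweeps the whole virtual frustum. A worked example with illustrative numbers:

    #include <cstdio>

    int main() {
        // Illustrative values: a 90-degree in-world frustum viewed on a monitor
        // that covers only about 30 degrees of the user's real field of view.
        float viewFrustumFov = 90.0f;
        float realWorldFov = 30.0f;
        float magnifyFieldOfView = viewFrustumFov / realWorldFov;    // 3.0

        float physicalHeadYaw = 20.0f;                               // degrees
        float avatarDeltaYaw = physicalHeadYaw * magnifyFieldOfView; // 60 degrees

        std::printf("avatar head yaw delta: %.1f degrees\n", avatarDeltaYaw);
        return 0;
    }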
@@ -2635,6 +2568,8 @@ void MyAvatar::useFullAvatarURL(const QUrl& fullAvatarURL, const QString& modelN
    if (urlString.isEmpty() || (fullAvatarURL != getSkeletonModelURL())) {
        setSkeletonModelURL(fullAvatarURL);
        UserActivityLogger::getInstance().changedModel("skeleton", urlString);
+    } else {
+        emit onLoadComplete();
    }
}
@@ -3416,31 +3351,6 @@ bool MyAvatar::shouldRenderHead(const RenderArgs* renderArgs) const {
    return !defaultMode || (!firstPerson && !insideHead) || (overrideAnim && !insideHead);
}

-void MyAvatar::setHasScriptedBlendshapes(bool hasScriptedBlendshapes) {
-    if (hasScriptedBlendshapes == _hasScriptedBlendShapes) {
-        return;
-    }
-    if (!hasScriptedBlendshapes) {
-        // send a forced avatarData update to make sure the script can send neutal blendshapes on unload
-        // without having to wait for the update loop, make sure _hasScriptedBlendShapes is still true
-        // before sending the update, or else it won't send the neutal blendshapes to the receiving clients
-        sendAvatarDataPacket(true);
-    }
-    _hasScriptedBlendShapes = hasScriptedBlendshapes;
-}
-
-void MyAvatar::setHasProceduralBlinkFaceMovement(bool hasProceduralBlinkFaceMovement) {
-    _headData->setHasProceduralBlinkFaceMovement(hasProceduralBlinkFaceMovement);
-}
-
-void MyAvatar::setHasProceduralEyeFaceMovement(bool hasProceduralEyeFaceMovement) {
-    _headData->setHasProceduralEyeFaceMovement(hasProceduralEyeFaceMovement);
-}
-
-void MyAvatar::setHasAudioEnabledFaceMovement(bool hasAudioEnabledFaceMovement) {
-    _headData->setHasAudioEnabledFaceMovement(hasAudioEnabledFaceMovement);
-}
-
void MyAvatar::setRotationRecenterFilterLength(float length) {
    const float MINIMUM_ROTATION_RECENTER_FILTER_LENGTH = 0.01f;
    _rotationRecenterFilterLength = std::max(MINIMUM_ROTATION_RECENTER_FILTER_LENGTH, length);
@@ -6620,11 +6530,10 @@ bool MyAvatar::getIsJointOverridden(int jointIndex) const {
    return _skeletonModel->getIsJointOverridden(jointIndex);
}

-void MyAvatar::updateEyesLookAtPosition(FaceTracker* faceTracker, Camera& myCamera, float deltaTime) {
-
+void MyAvatar::updateEyesLookAtPosition(float deltaTime) {
    updateLookAtTargetAvatar();

    bool isLookingAtSomeone = false;
    glm::vec3 lookAtSpot;

    const MyHead* myHead = getMyHead();
@@ -6685,7 +6594,6 @@ void MyAvatar::updateEyesLookAtPosition(FaceTracker* faceTracker, Camera& myCame
            avatar && avatar->getLookAtSnappingEnabled() && getLookAtSnappingEnabled();
        if (haveLookAtCandidate && mutualLookAtSnappingEnabled) {
            // If I am looking at someone else, look directly at one of their eyes
-            isLookingAtSomeone = true;
            auto lookingAtHead = avatar->getHead();

            const float MAXIMUM_FACE_ANGLE = 65.0f * RADIANS_PER_DEGREE;
@@ -6724,21 +6632,6 @@ void MyAvatar::updateEyesLookAtPosition(FaceTracker* faceTracker, Camera& myCame
                    myHead->getEyePosition() + getHeadJointFrontVector() * (float)TREE_SCALE;
            }
        }
-
-        // Deflect the eyes a bit to match the detected gaze from the face tracker if active.
-        if (faceTracker && !faceTracker->isMuted()) {
-            float eyePitch = faceTracker->getEstimatedEyePitch();
-            float eyeYaw = faceTracker->getEstimatedEyeYaw();
-            const float GAZE_DEFLECTION_REDUCTION_DURING_EYE_CONTACT = 0.1f;
-            glm::vec3 origin = myHead->getEyePosition();
-            float deflection = faceTracker->getEyeDeflection();
-            if (isLookingAtSomeone) {
-                deflection *= GAZE_DEFLECTION_REDUCTION_DURING_EYE_CONTACT;
-            }
-            lookAtSpot = origin + myCamera.getOrientation() * glm::quat(glm::radians(glm::vec3(
-                eyePitch * deflection, eyeYaw * deflection, 0.0f))) *
-                glm::inverse(myCamera.getOrientation()) * (lookAtSpot - origin);
-        }
    }
    _eyesLookAtTarget.set(lookAtSpot);
@@ -34,7 +34,6 @@
#include "AtRestDetector.h"
#include "MyCharacterController.h"
#include "RingBufferHistory.h"
-#include "devices/DdeFaceTracker.h"

class AvatarActionHold;
class ModelItemID;
@@ -184,12 +183,6 @@ class MyAvatar : public Avatar {
 *     property value is <code>audioListenerModeCustom</code>.
 * @property {Quat} customListenOrientation=Quat.IDENTITY - The listening orientation used when the
 *     <code>audioListenerMode</code> property value is <code>audioListenerModeCustom</code>.
- * @property {boolean} hasScriptedBlendshapes=false - <code>true</code> to transmit blendshapes over the network.
- *     <p><strong>Note:</strong> Currently doesn't work. Use {@link MyAvatar.setForceFaceTrackerConnected} instead.</p>
- * @property {boolean} hasProceduralBlinkFaceMovement=true - <code>true</code> if procedural blinking is turned on.
- * @property {boolean} hasProceduralEyeFaceMovement=true - <code>true</code> if procedural eye movement is turned on.
- * @property {boolean} hasAudioEnabledFaceMovement=true - <code>true</code> to move the mouth blendshapes with voice audio
- *     when <code>MyAvatar.hasScriptedBlendshapes</code> is enabled.
 * @property {number} rotationRecenterFilterLength - Configures how quickly the avatar root rotates to recenter its facing
 *     direction to match that of the user's torso based on head and hands orientation. A smaller value makes the
 *     recentering happen more quickly. The minimum value is <code>0.01</code>.
@@ -275,7 +268,7 @@ class MyAvatar : public Avatar {
 * @property {number} analogPlusSprintSpeed - The sprint (run) speed of your avatar for the "AnalogPlus" control scheme.
 * @property {MyAvatar.SitStandModelType} userRecenterModel - Controls avatar leaning and recentering behavior.
 * @property {number} isInSittingState - <code>true</code> if the user wearing the HMD is determined to be sitting
- *     (avatar leaning is disabled, recenntering is enabled), <code>false</code> if the user wearing the HMD is
+ *     (avatar leaning is disabled, recentering is enabled), <code>false</code> if the user wearing the HMD is
 *     determined to be standing (avatar leaning is enabled, and avatar recenters if it leans too far).
 *     If <code>userRecenterModel == 2</code> (i.e., auto) the property value automatically updates as the user sits
 *     or stands, unless <code>isSitStandStateLocked == true</code>. Setting the property value overrides the current
@@ -312,7 +305,10 @@ class MyAvatar : public Avatar {
 * @borrows Avatar.setAttachmentsVariant as setAttachmentsVariant
 * @borrows Avatar.updateAvatarEntity as updateAvatarEntity
 * @borrows Avatar.clearAvatarEntity as clearAvatarEntity
- * @borrows Avatar.setForceFaceTrackerConnected as setForceFaceTrackerConnected
+ * @borrows Avatar.hasScriptedBlendshapes as hasScriptedBlendshapes
+ * @borrows Avatar.hasProceduralBlinkFaceMovement as hasProceduralBlinkFaceMovement
+ * @borrows Avatar.hasProceduralEyeFaceMovement as hasProceduralEyeFaceMovement
+ * @borrows Avatar.hasAudioEnabledFaceMovement as hasAudioEnabledFaceMovement
 * @borrows Avatar.setSkeletonModelURL as setSkeletonModelURL
 * @borrows Avatar.getAttachmentData as getAttachmentData
 * @borrows Avatar.setAttachmentData as setAttachmentData
@@ -359,10 +355,6 @@ class MyAvatar : public Avatar {
    Q_PROPERTY(AudioListenerMode audioListenerModeCustom READ getAudioListenerModeCustom)
    Q_PROPERTY(glm::vec3 customListenPosition READ getCustomListenPosition WRITE setCustomListenPosition)
    Q_PROPERTY(glm::quat customListenOrientation READ getCustomListenOrientation WRITE setCustomListenOrientation)
-    Q_PROPERTY(bool hasScriptedBlendshapes READ getHasScriptedBlendshapes WRITE setHasScriptedBlendshapes)
-    Q_PROPERTY(bool hasProceduralBlinkFaceMovement READ getHasProceduralBlinkFaceMovement WRITE setHasProceduralBlinkFaceMovement)
-    Q_PROPERTY(bool hasProceduralEyeFaceMovement READ getHasProceduralEyeFaceMovement WRITE setHasProceduralEyeFaceMovement)
-    Q_PROPERTY(bool hasAudioEnabledFaceMovement READ getHasAudioEnabledFaceMovement WRITE setHasAudioEnabledFaceMovement)
    Q_PROPERTY(float rotationRecenterFilterLength READ getRotationRecenterFilterLength WRITE setRotationRecenterFilterLength)
    Q_PROPERTY(float rotationThreshold READ getRotationThreshold WRITE setRotationThreshold)
    Q_PROPERTY(bool enableStepResetRotation READ getEnableStepResetRotation WRITE setEnableStepResetRotation)
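For context, Q_PROPERTY is what exposes these accessors to the script layer; deleting the four lines removes the properties from MyAvatar's own meta-object (per the earlier @borrows doc change, they are now documented as inherited from Avatar). The mechanism in miniature (illustrative class):

    #include <QObject>

    class Example : public QObject {
        Q_OBJECT
        // Scripts can read and assign `example.enabled`; the meta-object system
        // routes the access through the READ/WRITE member functions named here.
        Q_PROPERTY(bool enabled READ getEnabled WRITE setEnabled)
    public:
        bool getEnabled() const { return _enabled; }
        void setEnabled(bool enabled) { _enabled = enabled; }
    private:
        bool _enabled { false };
    };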
@@ -1934,7 +1926,7 @@ public:
    bool getFlowActive() const;
    bool getNetworkGraphActive() const;

-    void updateEyesLookAtPosition(FaceTracker* faceTracker, Camera& myCamera, float deltaTime);
+    void updateEyesLookAtPosition(float deltaTime);

    // sets the reaction enabled and triggered parameters of the passed in params
    // also clears internal reaction triggers
@@ -2463,10 +2455,17 @@ signals:
    /**jsdoc
     * Triggered when the avatar's model finishes loading.
     * @function MyAvatar.onLoadComplete
-     * @returns {Signal} 
+     * @returns {Signal}
     */
    void onLoadComplete();

+    /**jsdoc
+     * Triggered when the avatar's model has failed to load.
+     * @function MyAvatar.onLoadFailed
+     * @returns {Signal}
+     */
+    void onLoadFailed();
+
    /**jsdoc
     * Triggered when your avatar changes from being active to being away.
     * @function MyAvatar.wentAway
@@ -2579,20 +2578,11 @@ private:
    virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking) override;

    void simulate(float deltaTime, bool inView) override;
-    void updateFromTrackers(float deltaTime);
    void saveAvatarUrl();
    virtual void render(RenderArgs* renderArgs) override;
    virtual bool shouldRenderHead(const RenderArgs* renderArgs) const override;
    void setShouldRenderLocally(bool shouldRender) { _shouldRender = shouldRender; setEnableMeshVisible(shouldRender); }
    bool getShouldRenderLocally() const { return _shouldRender; }
-    void setHasScriptedBlendshapes(bool hasScriptedBlendshapes);
-    bool getHasScriptedBlendshapes() const override { return _hasScriptedBlendShapes; }
-    void setHasProceduralBlinkFaceMovement(bool hasProceduralBlinkFaceMovement);
-    bool getHasProceduralBlinkFaceMovement() const override { return _headData->getHasProceduralBlinkFaceMovement(); }
-    void setHasProceduralEyeFaceMovement(bool hasProceduralEyeFaceMovement);
-    bool getHasProceduralEyeFaceMovement() const override { return _headData->getHasProceduralEyeFaceMovement(); }
-    void setHasAudioEnabledFaceMovement(bool hasAudioEnabledFaceMovement);
-    bool getHasAudioEnabledFaceMovement() const override { return _headData->getHasAudioEnabledFaceMovement(); }
    void setRotationRecenterFilterLength(float length);
    float getRotationRecenterFilterLength() const { return _rotationRecenterFilterLength; }
    void setRotationThreshold(float angleRadians);
@@ -14,15 +14,80 @@
#include <NodeList.h>
#include <recording/Deck.h>
#include <Rig.h>
#include <trackers/FaceTracker.h>
#include <FaceshiftConstants.h>
#include <BlendshapeConstants.h>

#include "devices/DdeFaceTracker.h"
#include "Application.h"
#include "MyAvatar.h"

using namespace std;

static controller::Action blendshapeActions[] = {
    controller::Action::EYEBLINK_L,
    controller::Action::EYEBLINK_R,
    controller::Action::EYESQUINT_L,
    controller::Action::EYESQUINT_R,
    controller::Action::EYEDOWN_L,
    controller::Action::EYEDOWN_R,
    controller::Action::EYEIN_L,
    controller::Action::EYEIN_R,
    controller::Action::EYEOPEN_L,
    controller::Action::EYEOPEN_R,
    controller::Action::EYEOUT_L,
    controller::Action::EYEOUT_R,
    controller::Action::EYEUP_L,
    controller::Action::EYEUP_R,
    controller::Action::BROWSD_L,
    controller::Action::BROWSD_R,
    controller::Action::BROWSU_C,
    controller::Action::BROWSU_L,
    controller::Action::BROWSU_R,
    controller::Action::JAWFWD,
    controller::Action::JAWLEFT,
    controller::Action::JAWOPEN,
    controller::Action::JAWRIGHT,
    controller::Action::MOUTHLEFT,
    controller::Action::MOUTHRIGHT,
    controller::Action::MOUTHFROWN_L,
    controller::Action::MOUTHFROWN_R,
    controller::Action::MOUTHSMILE_L,
    controller::Action::MOUTHSMILE_R,
    controller::Action::MOUTHDIMPLE_L,
    controller::Action::MOUTHDIMPLE_R,
    controller::Action::LIPSSTRETCH_L,
    controller::Action::LIPSSTRETCH_R,
    controller::Action::LIPSUPPERCLOSE,
    controller::Action::LIPSLOWERCLOSE,
    controller::Action::LIPSUPPEROPEN,
    controller::Action::LIPSLOWEROPEN,
    controller::Action::LIPSFUNNEL,
    controller::Action::LIPSPUCKER,
    controller::Action::PUFF,
    controller::Action::CHEEKSQUINT_L,
    controller::Action::CHEEKSQUINT_R,
    controller::Action::MOUTHCLOSE,
    controller::Action::MOUTHUPPERUP_L,
    controller::Action::MOUTHUPPERUP_R,
    controller::Action::MOUTHLOWERDOWN_L,
    controller::Action::MOUTHLOWERDOWN_R,
    controller::Action::MOUTHPRESS_L,
    controller::Action::MOUTHPRESS_R,
    controller::Action::MOUTHSHRUGLOWER,
    controller::Action::MOUTHSHRUGUPPER,
    controller::Action::NOSESNEER_L,
    controller::Action::NOSESNEER_R,
    controller::Action::TONGUEOUT,
    controller::Action::USERBLENDSHAPE0,
    controller::Action::USERBLENDSHAPE1,
    controller::Action::USERBLENDSHAPE2,
    controller::Action::USERBLENDSHAPE3,
    controller::Action::USERBLENDSHAPE4,
    controller::Action::USERBLENDSHAPE5,
    controller::Action::USERBLENDSHAPE6,
    controller::Action::USERBLENDSHAPE7,
    controller::Action::USERBLENDSHAPE8,
    controller::Action::USERBLENDSHAPE9
};
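
This table pairs one controller action with each avatar blendshape coefficient, in the same order as the Blendshapes enum; the simulate() hunk below indexes both with the same loop counter, so the ordering is load-bearing. Condensed from that hunk (a sketch of the existing behavior, not new code):

    // Any valid action state means an input plugin or script is feeding that channel.
    bool anyInputTracked = false;
    for (int i = 0; i < (int)Blendshapes::BlendshapeCount; i++) {
        anyInputTracked = anyInputTracked || userInputMapper->getActionStateValid(blendshapeActions[i]);
    }
    // When anything is driven, every coefficient is copied straight from its action.
    if (anyInputTracked) {
        for (int i = 0; i < (int)Blendshapes::BlendshapeCount; i++) {
            _blendshapeCoefficients[i] = userInputMapper->getActionState(blendshapeActions[i]);
        }
    }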
MyHead::MyHead(MyAvatar* owningAvatar) : Head(owningAvatar) {
}

@@ -46,36 +111,57 @@ void MyHead::simulate(float deltaTime) {
    auto player = DependencyManager::get<recording::Deck>();
    // Only use face trackers when not playing back a recording.
    if (!player->isPlaying()) {
        // TODO -- finish removing face-tracker specific code. To do this, add input channels for
        // each blendshape-coefficient and update the various json files to relay them in a useful way.
        // After that, input plugins can be used to drive the avatar's face, and the various "DDE" files
        // can be ported into the plugin and removed.
        //
        // auto faceTracker = qApp->getActiveFaceTracker();
        // const bool hasActualFaceTrackerConnected = faceTracker && !faceTracker->isMuted();
        // _isFaceTrackerConnected = hasActualFaceTrackerConnected || _owningAvatar->getHasScriptedBlendshapes();
        // if (_isFaceTrackerConnected) {
        //     if (hasActualFaceTrackerConnected) {
        //         _blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
        //     }
        // }

        auto userInputMapper = DependencyManager::get<UserInputMapper>();

        // if input system has control over blink blendshapes
        bool eyeLidsTracked =
            userInputMapper->getActionStateValid(controller::Action::LEFT_EYE_BLINK) &&
            userInputMapper->getActionStateValid(controller::Action::RIGHT_EYE_BLINK);
        setFaceTrackerConnected(eyeLidsTracked);
        if (eyeLidsTracked) {
            float leftEyeBlink = userInputMapper->getActionState(controller::Action::LEFT_EYE_BLINK);
            float rightEyeBlink = userInputMapper->getActionState(controller::Action::RIGHT_EYE_BLINK);
            _blendshapeCoefficients.resize(std::max(_blendshapeCoefficients.size(), 2));
            _blendshapeCoefficients[EYE_BLINK_INDICES[0]] = leftEyeBlink;
            _blendshapeCoefficients[EYE_BLINK_INDICES[1]] = rightEyeBlink;
        } else {
            const float FULLY_OPEN = 0.0f;
            _blendshapeCoefficients.resize(std::max(_blendshapeCoefficients.size(), 2));
            _blendshapeCoefficients[EYE_BLINK_INDICES[0]] = FULLY_OPEN;
            _blendshapeCoefficients[EYE_BLINK_INDICES[1]] = FULLY_OPEN;
            userInputMapper->getActionStateValid(controller::Action::EYEBLINK_L) ||
            userInputMapper->getActionStateValid(controller::Action::EYEBLINK_R);

        // if input system has control over the brows.
        bool browsTracked =
            userInputMapper->getActionStateValid(controller::Action::BROWSD_L) ||
            userInputMapper->getActionStateValid(controller::Action::BROWSD_R) ||
            userInputMapper->getActionStateValid(controller::Action::BROWSU_L) ||
            userInputMapper->getActionStateValid(controller::Action::BROWSU_R) ||
            userInputMapper->getActionStateValid(controller::Action::BROWSU_C);

        // if input system has control of mouth
        bool mouthTracked =
            userInputMapper->getActionStateValid(controller::Action::JAWOPEN) ||
            userInputMapper->getActionStateValid(controller::Action::LIPSUPPERCLOSE) ||
            userInputMapper->getActionStateValid(controller::Action::LIPSLOWERCLOSE) ||
            userInputMapper->getActionStateValid(controller::Action::LIPSFUNNEL) ||
            userInputMapper->getActionStateValid(controller::Action::MOUTHSMILE_L) ||
            userInputMapper->getActionStateValid(controller::Action::MOUTHSMILE_R);

        bool eyesTracked =
            userInputMapper->getPoseState(controller::Action::LEFT_EYE).valid &&
            userInputMapper->getPoseState(controller::Action::RIGHT_EYE).valid;

        MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
        int leftEyeJointIndex = myAvatar->getJointIndex("LeftEye");
        int rightEyeJointIndex = myAvatar->getJointIndex("RightEye");
        bool eyeJointsOverridden = myAvatar->getIsJointOverridden(leftEyeJointIndex) || myAvatar->getIsJointOverridden(rightEyeJointIndex);

        bool anyInputTracked = false;
        for (int i = 0; i < (int)Blendshapes::BlendshapeCount; i++) {
            anyInputTracked = anyInputTracked || userInputMapper->getActionStateValid(blendshapeActions[i]);
        }

        setHasInputDrivenBlendshapes(anyInputTracked);

        // suppress any procedural blendshape animations if they overlap with driven input.
        setSuppressProceduralAnimationFlag(HeadData::BlinkProceduralBlendshapeAnimation, eyeLidsTracked);
        setSuppressProceduralAnimationFlag(HeadData::LidAdjustmentProceduralBlendshapeAnimation, eyeLidsTracked || browsTracked);
        setSuppressProceduralAnimationFlag(HeadData::AudioProceduralBlendshapeAnimation, mouthTracked);
        setSuppressProceduralAnimationFlag(HeadData::SaccadeProceduralEyeJointAnimation, eyesTracked || eyeJointsOverridden);

        if (anyInputTracked) {
            for (int i = 0; i < (int)Blendshapes::BlendshapeCount; i++) {
                _blendshapeCoefficients[i] = userInputMapper->getActionState(blendshapeActions[i]);
            }
        }
    }
    Parent::simulate(deltaTime);
@@ -112,9 +112,13 @@ static AnimPose computeHipsInSensorFrame(MyAvatar* myAvatar, bool isFlying) {
void MySkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
    const HFMModel& hfmModel = getHFMModel();

    MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
    assert(myAvatar);

    Head* head = _owningAvatar->getHead();

    bool eyePosesValid = !head->getHasProceduralEyeMovement();
    bool eyePosesValid = (myAvatar->getControllerPoseInSensorFrame(controller::Action::LEFT_EYE).isValid() ||
                          myAvatar->getControllerPoseInSensorFrame(controller::Action::RIGHT_EYE).isValid());
    glm::vec3 lookAt;
    if (eyePosesValid) {
        lookAt = head->getLookAtPosition(); // don't apply no-crosseyes code when eyes are being tracked

@@ -122,9 +126,6 @@ void MySkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
        lookAt = avoidCrossedEyes(head->getLookAtPosition());
    }

    MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
    assert(myAvatar);

    Rig::ControllerParameters params;

    AnimPose avatarToRigPose(glm::vec3(1.0f), Quaternions::Y_180, glm::vec3(0.0f));
@@ -267,6 +267,7 @@ void OtherAvatar::simulate(float deltaTime, bool inView) {
            _skeletonModel->getRig().computeExternalPoses(rootTransform);
            _jointDataSimulationRate.increment();

            head->simulate(deltaTime);
            _skeletonModel->simulate(deltaTime, true);

            locationChanged(); // joints changed, so if there are any children, update them.

@@ -277,9 +278,11 @@ void OtherAvatar::simulate(float deltaTime, bool inView) {
                headPosition = getWorldPosition();
            }
            head->setPosition(headPosition);
        } else {
            head->simulate(deltaTime);
            _skeletonModel->simulate(deltaTime, false);
        }
        head->setScale(getModelScale());
        head->simulate(deltaTime);
        relayJointDataToChildren();
    } else {
        // a non-full update is still required so that the position, rotation, scale and bounds of the skeletonModel are updated.
@@ -1,686 +0,0 @@
//
//  DdeFaceTracker.cpp
//
//
//  Created by Clement on 8/2/14.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "DdeFaceTracker.h"

#include <SharedUtil.h>

#include <QtCore/QCoreApplication>
#include <QtCore/QJsonDocument>
#include <QtCore/QJsonArray>
#include <QtCore/QJsonObject>
#include <QtCore/QTimer>

#include <GLMHelpers.h>
#include <NumericalConstants.h>
#include <FaceshiftConstants.h>

#include "Application.h"
#include "InterfaceLogging.h"
#include "Menu.h"


static const QHostAddress DDE_SERVER_ADDR("127.0.0.1");
static const quint16 DDE_SERVER_PORT = 64204;
static const quint16 DDE_CONTROL_PORT = 64205;
#if defined(Q_OS_WIN)
static const QString DDE_PROGRAM_PATH = "/dde/dde.exe";
#elif defined(Q_OS_MAC)
static const QString DDE_PROGRAM_PATH = "/dde.app/Contents/MacOS/dde";
#endif
static const QStringList DDE_ARGUMENTS = QStringList()
    << "--udp=" + DDE_SERVER_ADDR.toString() + ":" + QString::number(DDE_SERVER_PORT)
    << "--receiver=" + QString::number(DDE_CONTROL_PORT)
    << "--facedet_interval=500" // ms
    << "--headless";

static const int NUM_EXPRESSIONS = 46;
static const int MIN_PACKET_SIZE = (8 + NUM_EXPRESSIONS) * sizeof(float) + sizeof(int);
static const int MAX_NAME_SIZE = 31;

// There's almost but not quite a 1-1 correspondence between DDE's 46 and Faceshift 1.3's 48 blendshape coefficients.
// The best guess at mapping is to:
// - Swap L and R values
// - Skip two Faceshift values: JawChew (22) and LipsLowerDown (37)
static const int DDE_TO_FACESHIFT_MAPPING[] = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    16,
    18, 17,
    19,
    23,
    21,
    // Skip JawChew
    20,
    25, 24, 27, 26, 29, 28, 31, 30, 33, 32,
    34, 35, 36,
    // Skip LipsLowerDown
    38, 39, 40, 41, 42, 43, 44, 45,
    47, 46
};
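
decodePacket() much further down applies this table index-for-index when translating an incoming packet; condensed from that function (same names as in this file, no new behavior):

    // DDE expression i lands in the Faceshift-ordered coefficient slot named by the table.
    for (int i = 0; i < NUM_EXPRESSIONS; i++) {
        _coefficients[DDE_TO_FACESHIFT_MAPPING[i]] = packet.expressions[i];
    }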
// The DDE coefficients, overall, range from -0.2 to 1.5 or so. However, individual coefficients typically vary much
// less than this.
static const float DDE_COEFFICIENT_SCALES[] = {
    1.0f, // EyeBlink_L
    1.0f, // EyeBlink_R
    1.0f, // EyeSquint_L
    1.0f, // EyeSquint_R
    1.0f, // EyeDown_L
    1.0f, // EyeDown_R
    1.0f, // EyeIn_L
    1.0f, // EyeIn_R
    1.0f, // EyeOpen_L
    1.0f, // EyeOpen_R
    1.0f, // EyeOut_L
    1.0f, // EyeOut_R
    1.0f, // EyeUp_L
    1.0f, // EyeUp_R
    3.0f, // BrowsD_L
    3.0f, // BrowsD_R
    3.0f, // BrowsU_C
    3.0f, // BrowsU_L
    3.0f, // BrowsU_R
    1.0f, // JawFwd
    2.0f, // JawLeft
    1.8f, // JawOpen
    1.0f, // JawChew
    2.0f, // JawRight
    1.5f, // MouthLeft
    1.5f, // MouthRight
    1.5f, // MouthFrown_L
    1.5f, // MouthFrown_R
    2.5f, // MouthSmile_L
    2.5f, // MouthSmile_R
    1.0f, // MouthDimple_L
    1.0f, // MouthDimple_R
    1.0f, // LipsStretch_L
    1.0f, // LipsStretch_R
    1.0f, // LipsUpperClose
    1.0f, // LipsLowerClose
    1.0f, // LipsUpperUp
    1.0f, // LipsLowerDown
    1.0f, // LipsUpperOpen
    1.0f, // LipsLowerOpen
    1.5f, // LipsFunnel
    2.5f, // LipsPucker
    1.5f, // ChinLowerRaise
    1.5f, // ChinUpperRaise
    1.0f, // Sneer
    3.0f, // Puff
    1.0f, // CheekSquint_L
    1.0f  // CheekSquint_R
};

struct DDEPacket {
    // roughly in mm
    float focal_length[1];
    float translation[3];

    // quaternion
    float rotation[4];

    // The DDE coefficients, overall, range from -0.2 to 1.5 or so. However, individual coefficients typically vary much
    // less than this.
    float expressions[NUM_EXPRESSIONS];

    // avatar id selected on the UI
    int avatar_id;

    // client name, arbitrary length
    char name[MAX_NAME_SIZE + 1];
};
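
MIN_PACKET_SIZE above counts only the fixed-size fields: 1 focal-length float + 3 translation + 4 rotation + 46 expression floats, plus the avatar_id int, excluding the variable-length name. A sanity-check sketch (the static_assert is not in the original file; it assumes the struct carries no padding, which holds here since every member is 4-byte aligned):

    #include <cstddef> // offsetof (assumption: not among the original includes)

    static_assert((8 + NUM_EXPRESSIONS) * sizeof(float) + sizeof(int) == offsetof(DDEPacket, name),
                  "MIN_PACKET_SIZE covers everything up to the variable-length name");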
static const float STARTING_DDE_MESSAGE_TIME = 0.033f;
static const float DEFAULT_DDE_EYE_CLOSING_THRESHOLD = 0.8f;
static const int CALIBRATION_SAMPLES = 150;

DdeFaceTracker::DdeFaceTracker() :
    DdeFaceTracker(QHostAddress::Any, DDE_SERVER_PORT, DDE_CONTROL_PORT)
{

}

DdeFaceTracker::DdeFaceTracker(const QHostAddress& host, quint16 serverPort, quint16 controlPort) :
    _ddeProcess(NULL),
    _ddeStopping(false),
    _host(host),
    _serverPort(serverPort),
    _controlPort(controlPort),
    _lastReceiveTimestamp(0),
    _reset(false),
    _leftBlinkIndex(0), // see http://support.faceshift.com/support/articles/35129-export-of-blendshapes
    _rightBlinkIndex(1),
    _leftEyeDownIndex(4),
    _rightEyeDownIndex(5),
    _leftEyeInIndex(6),
    _rightEyeInIndex(7),
    _leftEyeOpenIndex(8),
    _rightEyeOpenIndex(9),
    _browDownLeftIndex(14),
    _browDownRightIndex(15),
    _browUpCenterIndex(16),
    _browUpLeftIndex(17),
    _browUpRightIndex(18),
    _mouthSmileLeftIndex(28),
    _mouthSmileRightIndex(29),
    _jawOpenIndex(21),
    _lastMessageReceived(0),
    _averageMessageTime(STARTING_DDE_MESSAGE_TIME),
    _lastHeadTranslation(glm::vec3(0.0f)),
    _filteredHeadTranslation(glm::vec3(0.0f)),
    _lastBrowUp(0.0f),
    _filteredBrowUp(0.0f),
    _eyePitch(0.0f),
    _eyeYaw(0.0f),
    _lastEyePitch(0.0f),
    _lastEyeYaw(0.0f),
    _filteredEyePitch(0.0f),
    _filteredEyeYaw(0.0f),
    _longTermAverageEyePitch(0.0f),
    _longTermAverageEyeYaw(0.0f),
    _lastEyeBlinks(),
    _filteredEyeBlinks(),
    _lastEyeCoefficients(),
    _eyeClosingThreshold("ddeEyeClosingThreshold", DEFAULT_DDE_EYE_CLOSING_THRESHOLD),
    _isCalibrating(false),
    _calibrationCount(0),
    _calibrationValues(),
    _calibrationBillboard(NULL),
    _calibrationMessage(QString()),
    _isCalibrated(false)
{
    _coefficients.resize(NUM_FACESHIFT_BLENDSHAPES);
    _blendshapeCoefficients.resize(NUM_FACESHIFT_BLENDSHAPES);
    _coefficientAverages.resize(NUM_FACESHIFT_BLENDSHAPES);
    _calibrationValues.resize(NUM_FACESHIFT_BLENDSHAPES);

    _eyeStates[0] = EYE_UNCONTROLLED;
    _eyeStates[1] = EYE_UNCONTROLLED;

    connect(&_udpSocket, SIGNAL(readyRead()), SLOT(readPendingDatagrams()));
    connect(&_udpSocket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(socketErrorOccurred(QAbstractSocket::SocketError)));
    connect(&_udpSocket, SIGNAL(stateChanged(QAbstractSocket::SocketState)),
            SLOT(socketStateChanged(QAbstractSocket::SocketState)));
}

DdeFaceTracker::~DdeFaceTracker() {
    setEnabled(false);

    if (_isCalibrating) {
        cancelCalibration();
    }
}

void DdeFaceTracker::init() {
    FaceTracker::init();
    setEnabled(Menu::getInstance()->isOptionChecked(MenuOption::UseCamera) && !_isMuted);
    Menu::getInstance()->getActionForOption(MenuOption::CalibrateCamera)->setEnabled(!_isMuted);
}

void DdeFaceTracker::setEnabled(bool enabled) {
    if (!_isInitialized) {
        // Don't enable until we have been explicitly initialized.
        return;
    }
#ifdef HAVE_DDE

    if (_isCalibrating) {
        cancelCalibration();
    }

    // isOpen() does not work as one might expect on QUdpSocket; don't test isOpen() before closing socket.
    _udpSocket.close();

    // Terminate any existing DDE process, perhaps left running after an Interface crash.
    // Do this even if !enabled in case user reset their settings after crash.
    const char* DDE_EXIT_COMMAND = "exit";
    _udpSocket.bind(_host, _serverPort);
    _udpSocket.writeDatagram(DDE_EXIT_COMMAND, DDE_SERVER_ADDR, _controlPort);

    if (enabled && !_ddeProcess) {
        _ddeStopping = false;
        qCDebug(interfaceapp) << "DDE Face Tracker: Starting";
        _ddeProcess = new QProcess(qApp);
        connect(_ddeProcess, SIGNAL(finished(int, QProcess::ExitStatus)), SLOT(processFinished(int, QProcess::ExitStatus)));
        _ddeProcess->start(QCoreApplication::applicationDirPath() + DDE_PROGRAM_PATH, DDE_ARGUMENTS);
    }

    if (!enabled && _ddeProcess) {
        _ddeStopping = true;
        qCDebug(interfaceapp) << "DDE Face Tracker: Stopping";
    }
#endif
}

void DdeFaceTracker::processFinished(int exitCode, QProcess::ExitStatus exitStatus) {
    if (_ddeProcess) {
        if (_ddeStopping) {
            qCDebug(interfaceapp) << "DDE Face Tracker: Stopped";

        } else {
            qCWarning(interfaceapp) << "DDE Face Tracker: Stopped unexpectedly";
            Menu::getInstance()->setIsOptionChecked(MenuOption::NoFaceTracking, true);
        }
        _udpSocket.close();
        delete _ddeProcess;
        _ddeProcess = NULL;
    }
}

void DdeFaceTracker::reset() {
    if (_udpSocket.state() == QAbstractSocket::BoundState) {
        _reset = true;

        qCDebug(interfaceapp) << "DDE Face Tracker: Reset";

        const char* DDE_RESET_COMMAND = "reset";
        _udpSocket.writeDatagram(DDE_RESET_COMMAND, DDE_SERVER_ADDR, _controlPort);

        FaceTracker::reset();

        _reset = true;
    }
}

void DdeFaceTracker::update(float deltaTime) {
    if (!isActive()) {
        return;
    }
    FaceTracker::update(deltaTime);

    glm::vec3 headEulers = glm::degrees(glm::eulerAngles(_headRotation));
    _estimatedEyePitch = _eyePitch - headEulers.x;
    _estimatedEyeYaw = _eyeYaw - headEulers.y;
}

bool DdeFaceTracker::isActive() const {
    return (_ddeProcess != NULL);
}

bool DdeFaceTracker::isTracking() const {
    static const quint64 ACTIVE_TIMEOUT_USECS = 3000000; // 3 secs
    return (usecTimestampNow() - _lastReceiveTimestamp < ACTIVE_TIMEOUT_USECS);
}

// private slots and methods
void DdeFaceTracker::socketErrorOccurred(QAbstractSocket::SocketError socketError) {
    qCWarning(interfaceapp) << "DDE Face Tracker: Socket error: " << _udpSocket.errorString();
}

void DdeFaceTracker::socketStateChanged(QAbstractSocket::SocketState socketState) {
    QString state;
    switch(socketState) {
        case QAbstractSocket::BoundState:
            state = "Bound";
            break;
        case QAbstractSocket::ClosingState:
            state = "Closing";
            break;
        case QAbstractSocket::ConnectedState:
            state = "Connected";
            break;
        case QAbstractSocket::ConnectingState:
            state = "Connecting";
            break;
        case QAbstractSocket::HostLookupState:
            state = "Host Lookup";
            break;
        case QAbstractSocket::ListeningState:
            state = "Listening";
            break;
        case QAbstractSocket::UnconnectedState:
            state = "Unconnected";
            break;
    }
    qCDebug(interfaceapp) << "DDE Face Tracker: Socket: " << state;
}

void DdeFaceTracker::readPendingDatagrams() {
    QByteArray buffer;
    while (_udpSocket.hasPendingDatagrams()) {
        buffer.resize(_udpSocket.pendingDatagramSize());
        _udpSocket.readDatagram(buffer.data(), buffer.size());
    }
    decodePacket(buffer);
}
float DdeFaceTracker::getBlendshapeCoefficient(int index) const {
    return (index >= 0 && index < (int)_blendshapeCoefficients.size()) ? _blendshapeCoefficients[index] : 0.0f;
}

void DdeFaceTracker::decodePacket(const QByteArray& buffer) {
    _lastReceiveTimestamp = usecTimestampNow();

    if (buffer.size() > MIN_PACKET_SIZE) {
        if (!_isCalibrated) {
            calibrate();
        }

        bool isFiltering = Menu::getInstance()->isOptionChecked(MenuOption::VelocityFilter);

        DDEPacket packet;
        int bytesToCopy = glm::min((int)sizeof(packet), buffer.size());
        memset(&packet.name, '\n', MAX_NAME_SIZE + 1);
        memcpy(&packet, buffer.data(), bytesToCopy);

        glm::vec3 translation;
        memcpy(&translation, packet.translation, sizeof(packet.translation));
        glm::quat rotation;
        memcpy(&rotation, &packet.rotation, sizeof(packet.rotation));
        if (_reset || (_lastMessageReceived == 0)) {
            memcpy(&_referenceTranslation, &translation, sizeof(glm::vec3));
            memcpy(&_referenceRotation, &rotation, sizeof(glm::quat));
            _reset = false;
        }

        // Compute relative translation
        float LEAN_DAMPING_FACTOR = 75.0f;
        translation -= _referenceTranslation;
        translation /= LEAN_DAMPING_FACTOR;
        translation.x *= -1;
        if (isFiltering) {
            glm::vec3 linearVelocity = (translation - _lastHeadTranslation) / _averageMessageTime;
            const float LINEAR_VELOCITY_FILTER_STRENGTH = 0.3f;
            float velocityFilter = glm::clamp(1.0f - glm::length(linearVelocity) *
                LINEAR_VELOCITY_FILTER_STRENGTH, 0.0f, 1.0f);
            _filteredHeadTranslation = velocityFilter * _filteredHeadTranslation + (1.0f - velocityFilter) * translation;
            _lastHeadTranslation = translation;
            _headTranslation = _filteredHeadTranslation;
        } else {
            _headTranslation = translation;
        }

        // Compute relative rotation
        rotation = glm::inverse(_referenceRotation) * rotation;
        if (isFiltering) {
            glm::quat r = glm::normalize(rotation * glm::inverse(_headRotation));
            float theta = 2 * acos(r.w);
            glm::vec3 angularVelocity;
            if (theta > EPSILON) {
                float rMag = glm::length(glm::vec3(r.x, r.y, r.z));
                angularVelocity = theta / _averageMessageTime * glm::vec3(r.x, r.y, r.z) / rMag;
            } else {
                angularVelocity = glm::vec3(0, 0, 0);
            }
            const float ANGULAR_VELOCITY_FILTER_STRENGTH = 0.3f;
            _headRotation = safeMix(_headRotation, rotation, glm::clamp(glm::length(angularVelocity) *
                ANGULAR_VELOCITY_FILTER_STRENGTH, 0.0f, 1.0f));
        } else {
            _headRotation = rotation;
        }

        // Translate DDE coefficients to Faceshift compatible coefficients
        for (int i = 0; i < NUM_EXPRESSIONS; i++) {
            _coefficients[DDE_TO_FACESHIFT_MAPPING[i]] = packet.expressions[i];
        }

        // Calibration
        if (_isCalibrating) {
            addCalibrationDatum();
        }
        for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
            _coefficients[i] -= _coefficientAverages[i];
        }

        // Use BrowsU_C to control both brows' up and down
        float browUp = _coefficients[_browUpCenterIndex];
        if (isFiltering) {
            const float BROW_VELOCITY_FILTER_STRENGTH = 0.5f;
            float velocity = fabsf(browUp - _lastBrowUp) / _averageMessageTime;
            float velocityFilter = glm::clamp(velocity * BROW_VELOCITY_FILTER_STRENGTH, 0.0f, 1.0f);
            _filteredBrowUp = velocityFilter * browUp + (1.0f - velocityFilter) * _filteredBrowUp;
            _lastBrowUp = browUp;
            browUp = _filteredBrowUp;
            _coefficients[_browUpCenterIndex] = browUp;
        }
        _coefficients[_browUpLeftIndex] = browUp;
        _coefficients[_browUpRightIndex] = browUp;
        _coefficients[_browDownLeftIndex] = -browUp;
        _coefficients[_browDownRightIndex] = -browUp;

        // Offset jaw open coefficient
        static const float JAW_OPEN_THRESHOLD = 0.1f;
        _coefficients[_jawOpenIndex] = _coefficients[_jawOpenIndex] - JAW_OPEN_THRESHOLD;

        // Offset smile coefficients
        static const float SMILE_THRESHOLD = 0.5f;
        _coefficients[_mouthSmileLeftIndex] = _coefficients[_mouthSmileLeftIndex] - SMILE_THRESHOLD;
        _coefficients[_mouthSmileRightIndex] = _coefficients[_mouthSmileRightIndex] - SMILE_THRESHOLD;

        // Eye pitch and yaw
        // EyeDown coefficients work better over both +ve and -ve values than EyeUp values.
        // EyeIn coefficients work better over both +ve and -ve values than EyeOut values.
        // Pitch and yaw values are relative to the screen.
        const float EYE_PITCH_SCALE = -1500.0f; // Sign, scale, and average to be similar to Faceshift values.
        _eyePitch = EYE_PITCH_SCALE * (_coefficients[_leftEyeDownIndex] + _coefficients[_rightEyeDownIndex]);
        const float EYE_YAW_SCALE = 2000.0f; // Scale and average to be similar to Faceshift values.
        _eyeYaw = EYE_YAW_SCALE * (_coefficients[_leftEyeInIndex] + _coefficients[_rightEyeInIndex]);
        if (isFiltering) {
            const float EYE_VELOCITY_FILTER_STRENGTH = 0.005f;
            float pitchVelocity = fabsf(_eyePitch - _lastEyePitch) / _averageMessageTime;
            float pitchVelocityFilter = glm::clamp(pitchVelocity * EYE_VELOCITY_FILTER_STRENGTH, 0.0f, 1.0f);
            _filteredEyePitch = pitchVelocityFilter * _eyePitch + (1.0f - pitchVelocityFilter) * _filteredEyePitch;
            _lastEyePitch = _eyePitch;
            _eyePitch = _filteredEyePitch;
            float yawVelocity = fabsf(_eyeYaw - _lastEyeYaw) / _averageMessageTime;
            float yawVelocityFilter = glm::clamp(yawVelocity * EYE_VELOCITY_FILTER_STRENGTH, 0.0f, 1.0f);
            _filteredEyeYaw = yawVelocityFilter * _eyeYaw + (1.0f - yawVelocityFilter) * _filteredEyeYaw;
            _lastEyeYaw = _eyeYaw;
            _eyeYaw = _filteredEyeYaw;
        }

        // Velocity filter EyeBlink values
        const float DDE_EYEBLINK_SCALE = 3.0f;
        float eyeBlinks[] = { DDE_EYEBLINK_SCALE * _coefficients[_leftBlinkIndex],
                              DDE_EYEBLINK_SCALE * _coefficients[_rightBlinkIndex] };
        if (isFiltering) {
            const float BLINK_VELOCITY_FILTER_STRENGTH = 0.3f;
            for (int i = 0; i < 2; i++) {
                float velocity = fabsf(eyeBlinks[i] - _lastEyeBlinks[i]) / _averageMessageTime;
                float velocityFilter = glm::clamp(velocity * BLINK_VELOCITY_FILTER_STRENGTH, 0.0f, 1.0f);
                _filteredEyeBlinks[i] = velocityFilter * eyeBlinks[i] + (1.0f - velocityFilter) * _filteredEyeBlinks[i];
                _lastEyeBlinks[i] = eyeBlinks[i];
            }
        }

        // Finesse EyeBlink values
        float eyeCoefficients[2];
        if (Menu::getInstance()->isOptionChecked(MenuOption::BinaryEyelidControl)) {
            if (_eyeStates[0] == EYE_UNCONTROLLED) {
                _eyeStates[0] = EYE_OPEN;
                _eyeStates[1] = EYE_OPEN;
            }

            for (int i = 0; i < 2; i++) {
                // Scale EyeBlink values so that they can be used to control both EyeBlink and EyeOpen
                // -ve values control EyeOpen; +ve values control EyeBlink
                static const float EYE_CONTROL_THRESHOLD = 0.5f; // Resting eye value
                eyeCoefficients[i] = (_filteredEyeBlinks[i] - EYE_CONTROL_THRESHOLD) / (1.0f - EYE_CONTROL_THRESHOLD);

                // Change to closing or opening states
                const float EYE_CONTROL_HYSTERESIS = 0.25f;
                float eyeClosingThreshold = getEyeClosingThreshold();
                float eyeOpeningThreshold = eyeClosingThreshold - EYE_CONTROL_HYSTERESIS;
                if ((_eyeStates[i] == EYE_OPEN || _eyeStates[i] == EYE_OPENING) && eyeCoefficients[i] > eyeClosingThreshold) {
                    _eyeStates[i] = EYE_CLOSING;
                } else if ((_eyeStates[i] == EYE_CLOSED || _eyeStates[i] == EYE_CLOSING)
                        && eyeCoefficients[i] < eyeOpeningThreshold) {
                    _eyeStates[i] = EYE_OPENING;
                }

                const float EYELID_MOVEMENT_RATE = 10.0f; // units/second
                const float EYE_OPEN_SCALE = 0.2f;
                if (_eyeStates[i] == EYE_CLOSING) {
                    // Close eyelid until it's fully closed
                    float closingValue = _lastEyeCoefficients[i] + EYELID_MOVEMENT_RATE * _averageMessageTime;
                    if (closingValue >= 1.0f) {
                        _eyeStates[i] = EYE_CLOSED;
                        eyeCoefficients[i] = 1.0f;
                    } else {
                        eyeCoefficients[i] = closingValue;
                    }
                } else if (_eyeStates[i] == EYE_OPENING) {
                    // Open eyelid until it meets the current adjusted value
                    float openingValue = _lastEyeCoefficients[i] - EYELID_MOVEMENT_RATE * _averageMessageTime;
                    if (openingValue < eyeCoefficients[i] * EYE_OPEN_SCALE) {
                        _eyeStates[i] = EYE_OPEN;
                        eyeCoefficients[i] = eyeCoefficients[i] * EYE_OPEN_SCALE;
                    } else {
                        eyeCoefficients[i] = openingValue;
                    }
                } else if (_eyeStates[i] == EYE_OPEN) {
                    // Reduce eyelid movement
                    eyeCoefficients[i] = eyeCoefficients[i] * EYE_OPEN_SCALE;
                } else if (_eyeStates[i] == EYE_CLOSED) {
                    // Keep eyelid fully closed
                    eyeCoefficients[i] = 1.0;
                }
            }

            if (_eyeStates[0] == EYE_OPEN && _eyeStates[1] == EYE_OPEN) {
                // Couple eyelids: average the two values
                eyeCoefficients[0] = eyeCoefficients[1] = (eyeCoefficients[0] + eyeCoefficients[1]) / 2.0f;
            }

            _lastEyeCoefficients[0] = eyeCoefficients[0];
            _lastEyeCoefficients[1] = eyeCoefficients[1];
        } else {
            _eyeStates[0] = EYE_UNCONTROLLED;
            _eyeStates[1] = EYE_UNCONTROLLED;

            eyeCoefficients[0] = _filteredEyeBlinks[0];
            eyeCoefficients[1] = _filteredEyeBlinks[1];
        }

        // Couple eyelid values if configured - use the most "open" value for both
        if (Menu::getInstance()->isOptionChecked(MenuOption::CoupleEyelids)) {
            float eyeCoefficient = std::min(eyeCoefficients[0], eyeCoefficients[1]);
            eyeCoefficients[0] = eyeCoefficient;
            eyeCoefficients[1] = eyeCoefficient;
        }

        // Use EyeBlink values to control both EyeBlink and EyeOpen
        if (eyeCoefficients[0] > 0) {
            _coefficients[_leftBlinkIndex] = eyeCoefficients[0];
            _coefficients[_leftEyeOpenIndex] = 0.0f;
        } else {
            _coefficients[_leftBlinkIndex] = 0.0f;
            _coefficients[_leftEyeOpenIndex] = -eyeCoefficients[0];
        }
        if (eyeCoefficients[1] > 0) {
            _coefficients[_rightBlinkIndex] = eyeCoefficients[1];
            _coefficients[_rightEyeOpenIndex] = 0.0f;
        } else {
            _coefficients[_rightBlinkIndex] = 0.0f;
            _coefficients[_rightEyeOpenIndex] = -eyeCoefficients[1];
        }

        // Scale all coefficients
        for (int i = 0; i < NUM_EXPRESSIONS; i++) {
            _blendshapeCoefficients[i]
                = glm::clamp(DDE_COEFFICIENT_SCALES[i] * _coefficients[i], 0.0f, 1.0f);
        }

        // Calculate average frame time
        const float FRAME_AVERAGING_FACTOR = 0.99f;
        quint64 usecsNow = usecTimestampNow();
        if (_lastMessageReceived != 0) {
            _averageMessageTime = FRAME_AVERAGING_FACTOR * _averageMessageTime
                + (1.0f - FRAME_AVERAGING_FACTOR) * (float)(usecsNow - _lastMessageReceived) / 1000000.0f;
        }
        _lastMessageReceived = usecsNow;

        FaceTracker::countFrame();

    } else {
        qCWarning(interfaceapp) << "DDE Face Tracker: Decode error";
    }

    if (_isCalibrating && _calibrationCount > CALIBRATION_SAMPLES) {
        finishCalibration();
    }
}
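
decodePacket() applies the same velocity-weighted smoothing to head translation, rotation, brows, eye angles, and blinks; only the strength constant differs (the head-translation variant inverts the blend weight, which is algebraically the same idea). The recurring shape, as a sketch — the helper name is ours, not the codebase's:

    // Hypothetical helper: fast-moving signals take more of the new sample
    // (responsive), slow-moving signals keep more of the filtered history (smooth).
    static float velocityFiltered(float current, float previous, float filtered,
                                  float averageMessageTime, float filterStrength) {
        float velocity = fabsf(current - previous) / averageMessageTime;
        float k = glm::clamp(velocity * filterStrength, 0.0f, 1.0f);
        return k * current + (1.0f - k) * filtered;
    }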
void DdeFaceTracker::setEyeClosingThreshold(float eyeClosingThreshold) {
    _eyeClosingThreshold.set(eyeClosingThreshold);
}

static const int CALIBRATION_BILLBOARD_WIDTH = 300;
static const int CALIBRATION_BILLBOARD_HEIGHT = 120;
static QString CALIBRATION_INSTRUCTION_MESSAGE = "Hold still to calibrate camera";

void DdeFaceTracker::calibrate() {
    if (!Menu::getInstance()->isOptionChecked(MenuOption::UseCamera) || _isMuted) {
        return;
    }

    if (!_isCalibrating) {
        qCDebug(interfaceapp) << "DDE Face Tracker: Calibration started";

        _isCalibrating = true;
        _calibrationCount = 0;
        _calibrationMessage = CALIBRATION_INSTRUCTION_MESSAGE + "\n\n";

        // FIXME: this overlay probably doesn't work anymore
        _calibrationBillboard = new TextOverlay();
        glm::vec2 viewport = qApp->getCanvasSize();
        _calibrationBillboard->setX((viewport.x - CALIBRATION_BILLBOARD_WIDTH) / 2);
        _calibrationBillboard->setY((viewport.y - CALIBRATION_BILLBOARD_HEIGHT) / 2);
        _calibrationBillboard->setWidth(CALIBRATION_BILLBOARD_WIDTH);
        _calibrationBillboard->setHeight(CALIBRATION_BILLBOARD_HEIGHT);
        _calibrationBillboardID = qApp->getOverlays().addOverlay(_calibrationBillboard);

        for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
            _calibrationValues[i] = 0.0f;
        }
    }
}

void DdeFaceTracker::addCalibrationDatum() {
    const int LARGE_TICK_INTERVAL = 30;
    const int SMALL_TICK_INTERVAL = 6;
    int samplesLeft = CALIBRATION_SAMPLES - _calibrationCount;
    if (samplesLeft % LARGE_TICK_INTERVAL == 0) {
        _calibrationMessage += QString::number(samplesLeft / LARGE_TICK_INTERVAL);
        // FIXME: set overlay text
    } else if (samplesLeft % SMALL_TICK_INTERVAL == 0) {
        _calibrationMessage += ".";
        // FIXME: set overlay text
    }

    for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
        _calibrationValues[i] += _coefficients[i];
    }

    _calibrationCount += 1;
}

void DdeFaceTracker::cancelCalibration() {
    qApp->getOverlays().deleteOverlay(_calibrationBillboardID);
    _calibrationBillboard = NULL;
    _isCalibrating = false;
    qCDebug(interfaceapp) << "DDE Face Tracker: Calibration cancelled";
}

void DdeFaceTracker::finishCalibration() {
    qApp->getOverlays().deleteOverlay(_calibrationBillboardID);
    _calibrationBillboard = NULL;
    _isCalibrating = false;
    _isCalibrated = true;

    for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
        _coefficientAverages[i] = _calibrationValues[i] / (float)CALIBRATION_SAMPLES;
    }

    reset();

    qCDebug(interfaceapp) << "DDE Face Tracker: Calibration finished";
}
@@ -1,181 +0,0 @@
//
//  DdeFaceTracker.h
//
//
//  Created by Clement on 8/2/14.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_DdeFaceTracker_h
#define hifi_DdeFaceTracker_h

#include <QtCore/QtGlobal>

// Disabling DDE due to random crashes when closing the socket on macOS. All the accompanying code is wrapped
// with the ifdef HAVE_DDE. Uncomment the define below to enable it.
#if defined(Q_OS_WIN) || defined(Q_OS_OSX)
//#define HAVE_DDE
#endif

#include <QProcess>
#include <QUdpSocket>

#include <DependencyManager.h>
#include <ui/overlays/TextOverlay.h>

#include <trackers/FaceTracker.h>

/**jsdoc
 * The FaceTracker API helps manage facial tracking hardware.
 * @namespace FaceTracker
 *
 * @hifi-interface
 * @hifi-client-entity
 * @hifi-avatar
 */

class DdeFaceTracker : public FaceTracker, public Dependency {
    Q_OBJECT
    SINGLETON_DEPENDENCY

public:
    virtual void init() override;
    virtual void reset() override;
    virtual void update(float deltaTime) override;

    virtual bool isActive() const override;
    virtual bool isTracking() const override;

    float getLeftBlink() const { return getBlendshapeCoefficient(_leftBlinkIndex); }
    float getRightBlink() const { return getBlendshapeCoefficient(_rightBlinkIndex); }
    float getLeftEyeOpen() const { return getBlendshapeCoefficient(_leftEyeOpenIndex); }
    float getRightEyeOpen() const { return getBlendshapeCoefficient(_rightEyeOpenIndex); }

    float getBrowDownLeft() const { return getBlendshapeCoefficient(_browDownLeftIndex); }
    float getBrowDownRight() const { return getBlendshapeCoefficient(_browDownRightIndex); }
    float getBrowUpCenter() const { return getBlendshapeCoefficient(_browUpCenterIndex); }
    float getBrowUpLeft() const { return getBlendshapeCoefficient(_browUpLeftIndex); }
    float getBrowUpRight() const { return getBlendshapeCoefficient(_browUpRightIndex); }

    float getMouthSize() const { return getBlendshapeCoefficient(_jawOpenIndex); }
    float getMouthSmileLeft() const { return getBlendshapeCoefficient(_mouthSmileLeftIndex); }
    float getMouthSmileRight() const { return getBlendshapeCoefficient(_mouthSmileRightIndex); }

    float getEyeClosingThreshold() { return _eyeClosingThreshold.get(); }
    void setEyeClosingThreshold(float eyeClosingThreshold);

public slots:

    /**jsdoc
     * @function FaceTracker.setEnabled
     * @param {boolean} enabled
     */
    void setEnabled(bool enabled) override;

    /**jsdoc
     * @function FaceTracker.calibrate
     */
    void calibrate();

private slots:
    void processFinished(int exitCode, QProcess::ExitStatus exitStatus);

    // sockets
    void socketErrorOccurred(QAbstractSocket::SocketError socketError);
    void readPendingDatagrams();
    void socketStateChanged(QAbstractSocket::SocketState socketState);

private:
    DdeFaceTracker();
    DdeFaceTracker(const QHostAddress& host, quint16 serverPort, quint16 controlPort);
    virtual ~DdeFaceTracker();

    QProcess* _ddeProcess;
    bool _ddeStopping;

    QHostAddress _host;
    quint16 _serverPort;
    quint16 _controlPort;

    float getBlendshapeCoefficient(int index) const;
    void decodePacket(const QByteArray& buffer);

    // sockets
    QUdpSocket _udpSocket;
    quint64 _lastReceiveTimestamp;

    bool _reset;
    glm::vec3 _referenceTranslation;
    glm::quat _referenceRotation;

    int _leftBlinkIndex;
    int _rightBlinkIndex;
    int _leftEyeDownIndex;
    int _rightEyeDownIndex;
    int _leftEyeInIndex;
    int _rightEyeInIndex;
    int _leftEyeOpenIndex;
    int _rightEyeOpenIndex;

    int _browDownLeftIndex;
    int _browDownRightIndex;
    int _browUpCenterIndex;
    int _browUpLeftIndex;
    int _browUpRightIndex;

    int _mouthSmileLeftIndex;
    int _mouthSmileRightIndex;

    int _jawOpenIndex;

    QVector<float> _coefficients;

    quint64 _lastMessageReceived;
    float _averageMessageTime;

    glm::vec3 _lastHeadTranslation;
    glm::vec3 _filteredHeadTranslation;

    float _lastBrowUp;
    float _filteredBrowUp;

    float _eyePitch; // Degrees, relative to screen
    float _eyeYaw;
    float _lastEyePitch;
    float _lastEyeYaw;
    float _filteredEyePitch;
    float _filteredEyeYaw;
    float _longTermAverageEyePitch = 0.0f;
    float _longTermAverageEyeYaw = 0.0f;
    bool _longTermAverageInitialized = false;

    enum EyeState {
        EYE_UNCONTROLLED,
        EYE_OPEN,
        EYE_CLOSING,
        EYE_CLOSED,
        EYE_OPENING
    };
    EyeState _eyeStates[2];
    float _lastEyeBlinks[2];
    float _filteredEyeBlinks[2];
    float _lastEyeCoefficients[2];
    Setting::Handle<float> _eyeClosingThreshold;

    QVector<float> _coefficientAverages;

    bool _isCalibrating;
    int _calibrationCount;
    QVector<float> _calibrationValues;
    TextOverlay* _calibrationBillboard;
    QUuid _calibrationBillboardID;
    QString _calibrationMessage;
    bool _isCalibrated;
    void addCalibrationDatum();
    void cancelCalibration();
    void finishCalibration();
};

#endif // hifi_DdeFaceTracker_h
@@ -25,10 +25,10 @@ class LaserPointerScriptingInterface : public QObject, public Dependency {
 * represent objects for repeatedly calculating ray intersections with avatars, entities, and overlays. Ray pointers can also
 * be configured to generate events on entities and overlays intersected.
 *
 * <p class="important">Deprecated: This API is deprecated. Use {@link Pointers} instead.
 *
 * @namespace LaserPointers
 *
 * @deprecated This API is deprecated and will be removed. Use {@link Pointers} instead.
 *
 * @hifi-interface
 * @hifi-client-entity
 * @hifi-avatar
@@ -15,6 +15,7 @@
#include <algorithm>

#include <shared/QtHelpers.h>
#include <plugins/PluginManager.h>
#include <plugins/DisplayPlugin.h>

#include "Application.h"

@@ -68,6 +69,27 @@ static QString getTargetDevice(bool hmd, QAudio::Mode mode) {
    return deviceName;
}

static void checkHmdDefaultsChange(QAudio::Mode mode) {
    QString name;
    foreach(DisplayPluginPointer displayPlugin, PluginManager::getInstance()->getAllDisplayPlugins()) {
        if (displayPlugin && displayPlugin->isHmd()) {
            if (mode == QAudio::AudioInput) {
                name = displayPlugin->getPreferredAudioInDevice();
            } else {
                name = displayPlugin->getPreferredAudioOutDevice();
            }
            break;
        }
    }

    if (!name.isEmpty()) {
        auto client = DependencyManager::get<AudioClient>().data();
        QMetaObject::invokeMethod(client, "setHmdAudioName",
                                  Q_ARG(QAudio::Mode, mode),
                                  Q_ARG(const QString&, name));
    }
}

Qt::ItemFlags AudioDeviceList::_flags { Qt::ItemIsSelectable | Qt::ItemIsEnabled };

AudioDeviceList::AudioDeviceList(QAudio::Mode mode) : _mode(mode) {

@@ -256,13 +278,15 @@ std::shared_ptr<scripting::AudioDevice> getSimilarDevice(const QString& deviceNa
    return devices[minDistanceIndex];
}

void AudioDeviceList::onDevicesChanged(const QList<HifiAudioDeviceInfo>& devices) {
void AudioDeviceList::onDevicesChanged(QAudio::Mode mode, const QList<HifiAudioDeviceInfo>& devices) {
    beginResetModel();

    QList<std::shared_ptr<AudioDevice>> newDevices;
    bool hmdIsSelected = false;
    bool desktopIsSelected = false;

    checkHmdDefaultsChange(mode);
    if (!_backupSelectedDesktopDeviceName.isEmpty() && !_backupSelectedHMDDeviceName.isEmpty()) {
        foreach(const HifiAudioDeviceInfo& deviceInfo, devices) {
            for (bool isHMD : {false, true}) {

@@ -275,7 +299,6 @@ void AudioDeviceList::onDevicesChanged(const QList<HifiAudioDeviceInfo>& devices
                    _selectedDesktopDevice = deviceInfo;
                    backupSelectedDeviceName.clear();
                }

            }
        }
    }

@@ -417,6 +440,9 @@ AudioDevices::AudioDevices(bool& contextIsHMD) : _contextIsHMD(contextIsHMD) {
    connect(client, &AudioClient::deviceChanged, this, &AudioDevices::onDeviceChanged, Qt::QueuedConnection);
    connect(client, &AudioClient::devicesChanged, this, &AudioDevices::onDevicesChanged, Qt::QueuedConnection);
    connect(client, &AudioClient::peakValueListChanged, &_inputs, &AudioInputDeviceList::onPeakValueListChanged, Qt::QueuedConnection);

    checkHmdDefaultsChange(QAudio::AudioInput);
    checkHmdDefaultsChange(QAudio::AudioOutput);

    _inputs.onDeviceChanged(client->getActiveAudioDevice(QAudio::AudioInput), contextIsHMD);
    _outputs.onDeviceChanged(client->getActiveAudioDevice(QAudio::AudioOutput), contextIsHMD);

@@ -425,9 +451,11 @@ AudioDevices::AudioDevices(bool& contextIsHMD) : _contextIsHMD(contextIsHMD) {
    const QList<HifiAudioDeviceInfo>& devicesInput = client->getAudioDevices(QAudio::AudioInput);
    const QList<HifiAudioDeviceInfo>& devicesOutput = client->getAudioDevices(QAudio::AudioOutput);

    // setup devices
    _inputs.onDevicesChanged(devicesInput);
    _outputs.onDevicesChanged(devicesOutput);
    if (devicesInput.size() > 0 && devicesOutput.size() > 0) {
        // setup devices
        _inputs.onDevicesChanged(QAudio::AudioInput, devicesInput);
        _outputs.onDevicesChanged(QAudio::AudioOutput, devicesOutput);
    }
}

AudioDevices::~AudioDevices() {}

@@ -526,14 +554,14 @@ void AudioDevices::onDevicesChanged(QAudio::Mode mode, const QList<HifiAudioDevi

    // set devices for both contexts
    if (mode == QAudio::AudioInput) {
        _inputs.onDevicesChanged(devices);
        _inputs.onDevicesChanged(mode, devices);

        static std::once_flag onceAfterInputDevicesChanged;
        std::call_once(onceAfterInputDevicesChanged, [&] { // we only want 'selectedDevicePlugged' signal to be handled after initial list of input devices was populated
            connect(&_inputs, &AudioDeviceList::selectedDevicePlugged, this, &AudioDevices::chooseInputDevice);
        });
    } else { // if (mode == QAudio::AudioOutput)
        _outputs.onDevicesChanged(devices);
        _outputs.onDevicesChanged(mode, devices);

        static std::once_flag onceAfterOutputDevicesChanged;
        std::call_once(onceAfterOutputDevicesChanged, [&] { // we only want 'selectedDevicePlugged' signal to be handled after initial list of output devices was populated
@@ -58,7 +58,7 @@ signals:

protected slots:
    void onDeviceChanged(const HifiAudioDeviceInfo& device, bool isHMD);
    void onDevicesChanged(const QList<HifiAudioDeviceInfo>& devices);
    void onDevicesChanged(QAudio::Mode mode, const QList<HifiAudioDeviceInfo>& devices);

protected:
    friend class AudioDevices;
@@ -32,106 +32,187 @@ void MenuScriptingInterface::menuItemTriggered() {
}

void MenuScriptingInterface::addMenu(const QString& menu, const QString& grouping) {
    QMetaObject::invokeMethod(Menu::getInstance(), "addMenu", Q_ARG(const QString&, menu), Q_ARG(const QString&, grouping));
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    QMetaObject::invokeMethod(menuInstance, "addMenu", Q_ARG(const QString&, menu), Q_ARG(const QString&, grouping));
}

void MenuScriptingInterface::removeMenu(const QString& menu) {
    QMetaObject::invokeMethod(Menu::getInstance(), "removeMenu", Q_ARG(const QString&, menu));
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    QMetaObject::invokeMethod(menuInstance, "removeMenu", Q_ARG(const QString&, menu));
}

bool MenuScriptingInterface::menuExists(const QString& menu) {
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return false;
    }

    if (QThread::currentThread() == qApp->thread()) {
        Menu* menuInstance = Menu::getInstance();
        return menuInstance && menuInstance->menuExists(menu);
    }

    bool result { false };
    BLOCKING_INVOKE_METHOD(Menu::getInstance(), "menuExists",
    BLOCKING_INVOKE_METHOD(menuInstance, "menuExists",
                           Q_RETURN_ARG(bool, result),
                           Q_ARG(const QString&, menu));

    return result;
}

void MenuScriptingInterface::addSeparator(const QString& menuName, const QString& separatorName) {
    QMetaObject::invokeMethod(Menu::getInstance(), "addSeparator",
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    QMetaObject::invokeMethod(menuInstance, "addSeparator",
                              Q_ARG(const QString&, menuName),
                              Q_ARG(const QString&, separatorName));
}

void MenuScriptingInterface::removeSeparator(const QString& menuName, const QString& separatorName) {
    QMetaObject::invokeMethod(Menu::getInstance(), "removeSeparator",
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    QMetaObject::invokeMethod(menuInstance, "removeSeparator",
                              Q_ARG(const QString&, menuName),
                              Q_ARG(const QString&, separatorName));
}

void MenuScriptingInterface::addMenuItem(const MenuItemProperties& properties) {
    QMetaObject::invokeMethod(Menu::getInstance(), "addMenuItem", Q_ARG(const MenuItemProperties&, properties));
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    QMetaObject::invokeMethod(menuInstance, "addMenuItem", Q_ARG(const MenuItemProperties&, properties));
}

void MenuScriptingInterface::addMenuItem(const QString& menu, const QString& menuitem, const QString& shortcutKey) {
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    MenuItemProperties properties(menu, menuitem, shortcutKey);
    QMetaObject::invokeMethod(Menu::getInstance(), "addMenuItem", Q_ARG(const MenuItemProperties&, properties));
    QMetaObject::invokeMethod(menuInstance, "addMenuItem", Q_ARG(const MenuItemProperties&, properties));
}

void MenuScriptingInterface::addMenuItem(const QString& menu, const QString& menuitem) {
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    MenuItemProperties properties(menu, menuitem);
    QMetaObject::invokeMethod(Menu::getInstance(), "addMenuItem", Q_ARG(const MenuItemProperties&, properties));
    QMetaObject::invokeMethod(menuInstance, "addMenuItem", Q_ARG(const MenuItemProperties&, properties));
}

void MenuScriptingInterface::removeMenuItem(const QString& menu, const QString& menuitem) {
    QMetaObject::invokeMethod(Menu::getInstance(), "removeMenuItem",
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }
    QMetaObject::invokeMethod(menuInstance, "removeMenuItem",
                              Q_ARG(const QString&, menu),
                              Q_ARG(const QString&, menuitem));
};

bool MenuScriptingInterface::menuItemExists(const QString& menu, const QString& menuitem) {
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return false;
    }

    if (QThread::currentThread() == qApp->thread()) {
        Menu* menuInstance = Menu::getInstance();
        return menuInstance && menuInstance->menuItemExists(menu, menuitem);
    }

    bool result { false };
    BLOCKING_INVOKE_METHOD(Menu::getInstance(), "menuItemExists",
    BLOCKING_INVOKE_METHOD(menuInstance, "menuItemExists",
                           Q_RETURN_ARG(bool, result),
                           Q_ARG(const QString&, menu),
                           Q_ARG(const QString&, menuitem));

    return result;
}

bool MenuScriptingInterface::isOptionChecked(const QString& menuOption) {
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return false;
    }

    if (QThread::currentThread() == qApp->thread()) {
        Menu* menuInstance = Menu::getInstance();
        return menuInstance && menuInstance->isOptionChecked(menuOption);
    }

    bool result { false };
    BLOCKING_INVOKE_METHOD(Menu::getInstance(), "isOptionChecked",
    BLOCKING_INVOKE_METHOD(menuInstance, "isOptionChecked",
                           Q_RETURN_ARG(bool, result),
                           Q_ARG(const QString&, menuOption));
    return result;
}

void MenuScriptingInterface::setIsOptionChecked(const QString& menuOption, bool isChecked) {
    QMetaObject::invokeMethod(Menu::getInstance(), "setIsOptionChecked",
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    QMetaObject::invokeMethod(menuInstance, "setIsOptionChecked",
                              Q_ARG(const QString&, menuOption),
                              Q_ARG(bool, isChecked));
}

bool MenuScriptingInterface::isMenuEnabled(const QString& menuOption) {
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return false;
    }

    if (QThread::currentThread() == qApp->thread()) {
        Menu* menuInstance = Menu::getInstance();
        return menuInstance && menuInstance->isMenuEnabled(menuOption);
    }

    bool result { false };
    BLOCKING_INVOKE_METHOD(Menu::getInstance(), "isMenuEnabled",
    BLOCKING_INVOKE_METHOD(menuInstance, "isMenuEnabled",
                           Q_RETURN_ARG(bool, result),
                           Q_ARG(const QString&, menuOption));

    return result;
}

void MenuScriptingInterface::setMenuEnabled(const QString& menuOption, bool isChecked) {
    QMetaObject::invokeMethod(Menu::getInstance(), "setMenuEnabled",
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    QMetaObject::invokeMethod(menuInstance, "setMenuEnabled",
                              Q_ARG(const QString&, menuOption),
                              Q_ARG(bool, isChecked));
}

void MenuScriptingInterface::triggerOption(const QString& menuOption) {
    QMetaObject::invokeMethod(Menu::getInstance(), "triggerOption", Q_ARG(const QString&, menuOption));
    Menu* menuInstance = Menu::getInstance();
    if (!menuInstance) {
        return;
    }

    QMetaObject::invokeMethod(menuInstance, "triggerOption", Q_ARG(const QString&, menuOption));
}
@@ -11,7 +11,6 @@

#include <AudioClient.h>
#include <SettingHandle.h>
#include <trackers/FaceTracker.h>
#include <UsersScriptingInterface.h>

#include "Application.h"

@@ -76,8 +75,6 @@ void AvatarInputs::update() {
        return;
    }

    AI_UPDATE(cameraEnabled, !Menu::getInstance()->isOptionChecked(MenuOption::NoFaceTracking));
    AI_UPDATE(cameraMuted, Menu::getInstance()->isOptionChecked(MenuOption::MuteFaceTracking));
    AI_UPDATE(isHMD, qApp->isHMDMode());
}

@@ -103,13 +100,6 @@ bool AvatarInputs::getIgnoreRadiusEnabled() const {
    return DependencyManager::get<NodeList>()->getIgnoreRadiusEnabled();
}

void AvatarInputs::toggleCameraMute() {
    FaceTracker* faceTracker = qApp->getSelectedFaceTracker();
    if (faceTracker) {
        faceTracker->toggleMute();
    }
}

void AvatarInputs::resetSensors() {
    qApp->resetSensors();
}
@@ -35,11 +35,11 @@ class AvatarInputs : public QObject {
 * @property {boolean} cameraEnabled - <code>true</code> if webcam face tracking is enabled, <code>false</code> if it is
 *     disabled.
 *     <em>Read-only.</em>
 *     <p class="important">Deprecated: This property is deprecated and will be removed.</p>
 *     <p class="important">Deprecated: This property is deprecated and has been removed.</p>
 * @property {boolean} cameraMuted - <code>true</code> if webcam face tracking is muted (temporarily disabled),
 *     <code>false</code> if it isn't.
 *     <em>Read-only.</em>
 *     <p class="important">Deprecated: This property is deprecated and will be removed.</p>
 *     <p class="important">Deprecated: This property is deprecated and has been removed.</p>
 * @property {boolean} ignoreRadiusEnabled - <code>true</code> if the privacy shield is enabled, <code>false</code> if it
 *     is disabled.
 *     <em>Read-only.</em>

@@ -51,8 +51,6 @@ class AvatarInputs : public QObject {
 *     it is hidden.
 */

    AI_PROPERTY(bool, cameraEnabled, false)
    AI_PROPERTY(bool, cameraMuted, false)
    AI_PROPERTY(bool, isHMD, false)

    Q_PROPERTY(bool showAudioTools READ showAudioTools WRITE setShowAudioTools NOTIFY showAudioToolsChanged)

@@ -99,19 +97,17 @@ signals:

    /**jsdoc
     * Triggered when webcam face tracking is enabled or disabled.
     * @deprecated This signal is deprecated and will be removed.
     * @deprecated This signal is deprecated and has been removed.
     * @function AvatarInputs.cameraEnabledChanged
     * @returns {Signal}
     */
    void cameraEnabledChanged();

    /**jsdoc
     * Triggered when webcam face tracking is muted (temporarily disabled) or unmuted.
     * @deprecated This signal is deprecated and will be removed.
     * @deprecated This signal is deprecated and has been removed.
     * @function AvatarInputs.cameraMutedChanged
     * @returns {Signal}
     */
    void cameraMutedChanged();

    /**jsdoc
     * Triggered when the display mode changes between desktop and HMD.

@@ -185,10 +181,9 @@ protected:

    /**jsdoc
     * Toggles the muting (temporary disablement) of webcam face tracking on/off.
     * <p class="important">Deprecated: This function is deprecated and will be removed.</p>
     * <p class="important">Deprecated: This function is deprecated and has been removed.</p>
     * @function AvatarInputs.toggleCameraMute
     */
    Q_INVOKABLE void toggleCameraMute();

private:
    void onAvatarEnteredIgnoreRadius();
@@ -10,7 +10,6 @@

#include <AudioClient.h>
#include <avatar/AvatarManager.h>
#include <devices/DdeFaceTracker.h>
#include <ScriptEngines.h>
#include <OffscreenUi.h>
#include <Preferences.h>

@@ -286,22 +285,6 @@ void setupPreferences() {
        preferences->addPreference(preference);
    }

    static const QString FACE_TRACKING{ "Face Tracking" };
    {
#ifdef HAVE_DDE
        auto getter = []()->float { return DependencyManager::get<DdeFaceTracker>()->getEyeClosingThreshold(); };
        auto setter = [](float value) { DependencyManager::get<DdeFaceTracker>()->setEyeClosingThreshold(value); };
        preferences->addPreference(new SliderPreference(FACE_TRACKING, "Eye Closing Threshold", getter, setter));
#endif
    }

    {
        auto getter = []()->float { return FaceTracker::getEyeDeflection(); };
        auto setter = [](float value) { FaceTracker::setEyeDeflection(value); };
        preferences->addPreference(new SliderPreference(FACE_TRACKING, "Eye Deflection", getter, setter));
    }

    static const QString VR_MOVEMENT{ "VR Movement" };
    {
        auto getter = [myAvatar]()->bool { return myAvatar->getAllowTeleporting(); };
@@ -8,7 +8,7 @@ import HQLauncher 1.0
Item {
    id: root
    anchors.centerIn: parent
    property string titleText: "Sign in and pick a password"
    property string titleText: "Create Your Username and Password"
    property string usernamePlaceholder: "Username"
    property string passwordPlaceholder: "Set a password (must be at least 6 characters)"
    property int marginLeft: root.width * 0.15

@@ -28,6 +28,7 @@ Item {
    HFTextHeader {
        id: title
        width: 481
        wrapMode: Text.WordWrap
        lineHeight: 35
        lineHeightMode: Text.FixedHeight
        text: LauncherState.lastSignupErrorMessage.length == 0 ? root.titleText : "Uh oh"

@@ -39,21 +40,6 @@ Item {
        }
    }

    HFTextRegular {
        id: instruction
        width: 425

        text: "Use the email address you applied for access with"
        visible: LauncherState.lastSignupErrorMessage.length == 0

        anchors {
            left: root.left
            leftMargin: root.marginLeft
            top: title.bottom
            topMargin: 18
        }
    }

    HFTextError {
        id: error

@@ -88,10 +74,10 @@ Item {

        enabled: root.enabled

        placeholderText: "Email Address"
        placeholderText: "Verify Your Email"
        seperatorColor: Qt.rgba(1, 1, 1, 0.3)
        anchors {
            top: instruction.bottom
            top: error.visible ? error.bottom : title.bottom
            left: root.left
            leftMargin: root.marginLeft
            topMargin: 18
@@ -45,7 +45,7 @@ Item {
    HFTextRegular {
        id: description

        text: "We seem to have a problem.\n Please restart Launcher."
        text: "We seem to have a problem."

        anchors {
            top: header.bottom

@@ -54,6 +54,16 @@ Item {
        }
    }

    HFTextRegular {
        text: "Please restart."

        anchors {
            top: description.bottom
            topMargin: 1
            horizontalCenter: header.horizontalCenter
        }
    }

    HFButton {
        id: button
@@ -77,7 +77,7 @@ Item {
        width: 430

        text: LauncherState.lastUsedUsername
        placeholderText: "Username"
        placeholderText: "Username or Email address"

        seperatorColor: Qt.rgba(1, 1, 1, 0.3)
        anchors {
@@ -2353,6 +2353,7 @@ void Rig::initAnimGraph(const QUrl& url) {
            // abort load if the previous skeleton was deleted.
            auto sharedSkeletonPtr = weakSkeletonPtr.lock();
            if (!sharedSkeletonPtr) {
                emit onLoadFailed();
                return;
            }

@@ -2386,8 +2387,9 @@ void Rig::initAnimGraph(const QUrl& url) {
            }
            emit onLoadComplete();
        });
        connect(_animLoader.get(), &AnimNodeLoader::error, [url](int error, QString str) {
        connect(_animLoader.get(), &AnimNodeLoader::error, [this, url](int error, QString str) {
            qCritical(animation) << "Error loading: code = " << error << "str =" << str;
            emit onLoadFailed();
        });

        connect(_networkLoader.get(), &AnimNodeLoader::success, [this, weakSkeletonPtr, networkUrl](AnimNode::Pointer nodeIn) {

@@ -2415,6 +2417,8 @@ void Rig::initAnimGraph(const QUrl& url) {
        connect(_networkLoader.get(), &AnimNodeLoader::error, [networkUrl](int error, QString str) {
            qCritical(animation) << "Error loading: code = " << error << "str =" << str;
        });
    } else {
        emit onLoadComplete();
    }
}

@@ -260,6 +260,7 @@ public:

signals:
    void onLoadComplete();
    void onLoadFailed();

protected:
    bool isIndexValid(int index) const { return _animSkeleton && index >= 0 && index < _animSkeleton->getNumJoints(); }
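The Rig hunks above add an onLoadFailed signal and make every load path resolve exactly once: the deleted-skeleton early-out and the error lambda now emit failure, and the no-network-graph else-branch emits completion. A plain-C++ sketch of that resolve-once contract, with std::function callbacks standing in for the Qt signals (names and conditions are illustrative, not the Overte ones):

#include <functional>
#include <iostream>
#include <string>

// Sketch: a loader that always reports exactly one outcome per load.
struct AnimGraphLoader {
    std::function<void()> onLoadComplete;
    std::function<void(int, const std::string&)> onLoadFailed;

    void load(bool skeletonAlive, bool parseOk) {
        if (!skeletonAlive) {            // skeleton deleted mid-load
            onLoadFailed(0, "skeleton was deleted before load finished");
            return;
        }
        if (!parseOk) {                  // the AnimNodeLoader::error path
            onLoadFailed(1, "could not parse anim graph");
            return;
        }
        onLoadComplete();                // success path
    }
};

int main() {
    AnimGraphLoader loader;
    loader.onLoadComplete = [] { std::cout << "anim graph ready\n"; };
    loader.onLoadFailed = [](int code, const std::string& why) {
        std::cout << "load failed (" << code << "): " << why << "\n";
    };
    loader.load(/*skeletonAlive=*/true, /*parseOk=*/false);
}

Guaranteeing one terminal signal per load is what lets callers safely tie cleanup or retry logic to these signals.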
@@ -6,7 +6,6 @@ setup_hifi_library(Network Multimedia ${PLATFORM_QT_COMPONENTS})
link_hifi_libraries(audio plugins)
include_hifi_library_headers(shared)
include_hifi_library_headers(networking)
include_hifi_library_headers(gpu)

if (ANDROID)
else ()
@@ -48,7 +48,6 @@
#include <SettingHandle.h>
#include <SharedUtil.h>
#include <Transform.h>
#include <plugins/DisplayPlugin.h>

#include "AudioClientLogging.h"
#include "AudioLogging.h"

@@ -83,24 +82,32 @@ Mutex _recordMutex;

QString defaultAudioDeviceName(QAudio::Mode mode);

void AudioClient::setHmdAudioName(QAudio::Mode mode, const QString& name) {
    QWriteLocker lock(&_hmdNameLock);
    if (mode == QAudio::AudioInput) {
        _hmdInputName = name;
    } else {
        _hmdOutputName = name;
    }
}

// thread-safe
QList<HifiAudioDeviceInfo> getAvailableDevices(QAudio::Mode mode) {
QList<HifiAudioDeviceInfo> getAvailableDevices(QAudio::Mode mode, const QString& hmdName) {
    // get hmd device name prior to locking device mutex. in case of shutdown, this thread will be locked and audio client
    // cannot properly shut down.
    QString hmdDeviceName = QString();
    QString defDeviceName = defaultAudioDeviceName(mode);

    // NOTE: availableDevices() clobbers the Qt internal device list
    Lock lock(_deviceMutex);
    auto devices = QAudioDeviceInfo::availableDevices(mode);

    HifiAudioDeviceInfo defaultDesktopDevice;
    QList<HifiAudioDeviceInfo> newDevices;
    for (auto& device : devices) {
        newDevices.push_back(HifiAudioDeviceInfo(device, false, mode));
        if (device.deviceName() == defDeviceName.trimmed()) {
            defaultDesktopDevice = HifiAudioDeviceInfo(device, true, mode, HifiAudioDeviceInfo::desktop);
        }
    }
    }

    if (defaultDesktopDevice.getDevice().isNull()) {

@@ -109,11 +116,11 @@ QList<HifiAudioDeviceInfo> getAvailableDevices(QAudio::Mode mode) {
        defaultDesktopDevice = HifiAudioDeviceInfo(devices.first(), true, mode, HifiAudioDeviceInfo::desktop);
    }
    newDevices.push_front(defaultDesktopDevice);

    if (!hmdDeviceName.isNull() && !hmdDeviceName.isEmpty()) {

    if (!hmdName.isNull()) {
        HifiAudioDeviceInfo hmdDevice;
        foreach(auto device, newDevices) {
            if (device.getDevice().deviceName() == hmdDeviceName) {
            if (device.getDevice().deviceName() == hmdName) {
                hmdDevice = HifiAudioDeviceInfo(device.getDevice(), true, mode, HifiAudioDeviceInfo::hmd);
                break;
            }

@@ -135,9 +142,17 @@ void AudioClient::checkDevices() {
        return;
    }

    auto inputDevices = getAvailableDevices(QAudio::AudioInput);
    auto outputDevices = getAvailableDevices(QAudio::AudioOutput);

    QString hmdInputName;
    QString hmdOutputName;
    {
        QReadLocker readLock(&_hmdNameLock);
        hmdInputName = _hmdInputName;
        hmdOutputName = _hmdOutputName;
    }

    auto inputDevices = getAvailableDevices(QAudio::AudioInput, hmdInputName);
    auto outputDevices = getAvailableDevices(QAudio::AudioOutput, hmdOutputName);

    checkDefaultChanges(inputDevices);
    checkDefaultChanges(outputDevices);

@@ -320,10 +335,12 @@ AudioClient::AudioClient() {

    connect(&_receivedAudioStream, &InboundAudioStream::mismatchedAudioCodec, this, &AudioClient::handleMismatchAudioFormat);

    // initialize wasapi; if getAvailableDevices is called from the CheckDevicesThread before this, it will crash
    getAvailableDevices(QAudio::AudioInput);
    getAvailableDevices(QAudio::AudioOutput);

    {
        QReadLocker readLock(&_hmdNameLock);
        // initialize wasapi; if getAvailableDevices is called from the CheckDevicesThread before this, it will crash
        getAvailableDevices(QAudio::AudioInput, _hmdInputName);
        getAvailableDevices(QAudio::AudioOutput, _hmdOutputName);
    }
    // start a thread to detect any device changes
    _checkDevicesTimer = new QTimer(this);
    const unsigned long DEVICE_CHECK_INTERVAL_MSECS = 2 * 1000;

@@ -422,9 +439,9 @@ void AudioClient::setAudioPaused(bool pause) {
    }
}

HifiAudioDeviceInfo getNamedAudioDeviceForMode(QAudio::Mode mode, const QString& deviceName, bool isHmd=false) {
HifiAudioDeviceInfo getNamedAudioDeviceForMode(QAudio::Mode mode, const QString& deviceName, const QString& hmdName, bool isHmd=false) {
    HifiAudioDeviceInfo result;
    foreach (HifiAudioDeviceInfo audioDevice, getAvailableDevices(mode)) {
    foreach (HifiAudioDeviceInfo audioDevice, getAvailableDevices(mode, hmdName)) {
        if (audioDevice.deviceName().trimmed() == deviceName.trimmed()) {
            if ((!isHmd && audioDevice.getDeviceType() != HifiAudioDeviceInfo::hmd) || (isHmd && audioDevice.getDeviceType() != HifiAudioDeviceInfo::desktop)) {
                result = audioDevice;

@@ -479,7 +496,7 @@ QString AudioClient::getWinDeviceName(wchar_t* guid) {

#endif

HifiAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
HifiAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode, const QString& hmdName) {
    QString deviceName = defaultAudioDeviceName(mode);
#if defined (Q_OS_ANDROID)
    if (mode == QAudio::AudioInput) {

@@ -495,7 +512,7 @@ HifiAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
        }
    }
#endif
    return getNamedAudioDeviceForMode(mode, deviceName);
    return getNamedAudioDeviceForMode(mode, deviceName, hmdName);
}

QString defaultAudioDeviceName(QAudio::Mode mode) {

@@ -601,7 +618,9 @@ QString defaultAudioDeviceName(QAudio::Mode mode) {
}

bool AudioClient::getNamedAudioDeviceForModeExists(QAudio::Mode mode, const QString& deviceName) {
    return (getNamedAudioDeviceForMode(mode, deviceName).deviceName() == deviceName);
    QReadLocker readLock(&_hmdNameLock);
    QString hmdName = mode == QAudio::AudioInput ? _hmdInputName : _hmdOutputName;
    return (getNamedAudioDeviceForMode(mode, deviceName, hmdName).deviceName() == deviceName);
}

@@ -763,24 +782,16 @@ void AudioClient::start() {

    _desiredOutputFormat = _desiredInputFormat;
    _desiredOutputFormat.setChannelCount(OUTPUT_CHANNEL_COUNT);

    HifiAudioDeviceInfo inputDeviceInfo = defaultAudioDeviceForMode(QAudio::AudioInput);
    qCDebug(audioclient) << "The default audio input device is" << inputDeviceInfo.deviceName();
    bool inputFormatSupported = switchInputToAudioDevice(inputDeviceInfo);

    HifiAudioDeviceInfo outputDeviceInfo = defaultAudioDeviceForMode(QAudio::AudioOutput);
    qCDebug(audioclient) << "The default audio output device is" << outputDeviceInfo.deviceName();
    bool outputFormatSupported = switchOutputToAudioDevice(outputDeviceInfo);

    if (!inputFormatSupported) {
        qCDebug(audioclient) << "Unable to set up audio input because of a problem with input format.";
        qCDebug(audioclient) << "The closest format available is" << inputDeviceInfo.getDevice().nearestFormat(_desiredInputFormat);

    QString inputName;
    QString outputName;
    {
        QReadLocker readLock(&_hmdNameLock);
        inputName = _hmdInputName;
        outputName = _hmdOutputName;
    }

    if (!outputFormatSupported) {
        qCDebug(audioclient) << "Unable to set up audio output because of a problem with output format.";
        qCDebug(audioclient) << "The closest format available is" << outputDeviceInfo.getDevice().nearestFormat(_desiredOutputFormat);
    }

#if defined(Q_OS_ANDROID)
    connect(&_checkInputTimer, &QTimer::timeout, this, &AudioClient::checkInputTimeout);
    _checkInputTimer.start(CHECK_INPUT_READS_MSECS);

@@ -1007,7 +1018,12 @@ bool AudioClient::switchAudioDevice(QAudio::Mode mode, const HifiAudioDeviceInfo
}

bool AudioClient::switchAudioDevice(QAudio::Mode mode, const QString& deviceName, bool isHmd) {
    return switchAudioDevice(mode, getNamedAudioDeviceForMode(mode, deviceName, isHmd));
    QString hmdName;
    {
        QReadLocker readLock(&_hmdNameLock);
        hmdName = mode == QAudio::AudioInput ? _hmdInputName : _hmdOutputName;
    }
    return switchAudioDevice(mode, getNamedAudioDeviceForMode(mode, deviceName, hmdName, isHmd));
}

void AudioClient::configureReverb() {
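Every AudioClient call site above follows the same locking discipline: copy the HMD device name under a short QReadLocker, release the lock, then run the slow device enumeration. A sketch of that discipline in standard C++, with std::shared_mutex standing in for QReadWriteLock:

#include <mutex>
#include <shared_mutex>
#include <string>

// Sketch of the copy-then-release pattern used above.
class HmdAudioNames {
public:
    void set(bool input, const std::string& name) {
        std::unique_lock<std::shared_mutex> lock(_mutex);   // writer lock
        (input ? _inputName : _outputName) = name;
    }

    // Copy under a short read lock; the caller then runs slow device
    // enumeration WITHOUT holding the lock.
    std::string get(bool input) const {
        std::shared_lock<std::shared_mutex> lock(_mutex);   // reader lock
        return input ? _inputName : _outputName;
    }

private:
    mutable std::shared_mutex _mutex;
    std::string _inputName;
    std::string _outputName;
};

Holding the lock only for the copy is what keeps the periodic device check from blocking shutdown, the hazard the comment in getAvailableDevices() warns about.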
@@ -243,6 +243,7 @@ public slots:
    // calling with a null QAudioDevice will use the system default
    bool switchAudioDevice(QAudio::Mode mode, const HifiAudioDeviceInfo& deviceInfo = HifiAudioDeviceInfo());
    bool switchAudioDevice(QAudio::Mode mode, const QString& deviceName, bool isHmd);
    void setHmdAudioName(QAudio::Mode mode, const QString& name);
    // Qt opensles plugin is not able to detect when the headset is plugged in
    void setHeadsetPluggedIn(bool pluggedIn);

@@ -481,6 +482,9 @@ private:
    QList<HifiAudioDeviceInfo> _inputDevices;
    QList<HifiAudioDeviceInfo> _outputDevices;

    QString _hmdInputName { QString() };
    QString _hmdOutputName { QString() };

    AudioFileWav _audioFileWav;

    bool _hasReceivedFirstPacket { false };

@@ -505,6 +509,7 @@ private:

    AudioSolo _solo;

    QReadWriteLock _hmdNameLock;
    Mutex _checkDevicesMutex;
    QTimer* _checkDevicesTimer { nullptr };
    Mutex _checkPeakValuesMutex;
@@ -1,6 +1,6 @@
set(TARGET_NAME avatars-renderer)
setup_hifi_library(Network Script)
link_hifi_libraries(shared shaders gpu graphics animation material-networking model-networking script-engine render render-utils image trackers entities-renderer)
link_hifi_libraries(shared shaders gpu graphics animation material-networking model-networking script-engine render render-utils image entities-renderer)
include_hifi_library_headers(avatars)
include_hifi_library_headers(networking)
include_hifi_library_headers(hfm)
@@ -16,7 +16,6 @@
#include <NodeList.h>
#include <DependencyManager.h>
#include <GeometryUtil.h>
#include <trackers/FaceTracker.h>
#include <Rig.h>
#include "Logging.h"

@@ -26,6 +25,22 @@ using namespace std;

static bool disableEyelidAdjustment { false };

static void updateFakeCoefficients(float leftBlink, float rightBlink, float browUp,
        float jawOpen, float mouth2, float mouth3, float mouth4, QVector<float>& coefficients) {

    coefficients.resize(std::max((int)coefficients.size(), (int)Blendshapes::BlendshapeCount));
    qFill(coefficients.begin(), coefficients.end(), 0.0f);
    coefficients[(int)Blendshapes::EyeBlink_L] = leftBlink;
    coefficients[(int)Blendshapes::EyeBlink_R] = rightBlink;
    coefficients[(int)Blendshapes::BrowsU_C] = browUp;
    coefficients[(int)Blendshapes::BrowsU_L] = browUp;
    coefficients[(int)Blendshapes::BrowsU_R] = browUp;
    coefficients[(int)Blendshapes::JawOpen] = jawOpen;
    coefficients[(int)Blendshapes::MouthSmile_L] = coefficients[(int)Blendshapes::MouthSmile_R] = mouth4;
    coefficients[(int)Blendshapes::LipsUpperClose] = mouth2;
    coefficients[(int)Blendshapes::LipsFunnel] = mouth3;
}

Head::Head(Avatar* owningAvatar) :
    HeadData(owningAvatar),
    _leftEyeLookAtID(DependencyManager::get<GeometryCache>()->allocateID()),

@@ -57,7 +72,8 @@ void Head::simulate(float deltaTime) {
        _longTermAverageLoudness = glm::mix(_longTermAverageLoudness, _averageLoudness, glm::min(deltaTime / AUDIO_LONG_TERM_AVERAGING_SECS, 1.0f));
    }

    if (getHasProceduralEyeMovement()) {
    if (getProceduralAnimationFlag(HeadData::SaccadeProceduralEyeJointAnimation) &&
        !getSuppressProceduralAnimationFlag(HeadData::SaccadeProceduralEyeJointAnimation)) {
        // Update eye saccades
        const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
        const float AVERAGE_SACCADE_INTERVAL = 6.0f;

@@ -80,7 +96,8 @@ void Head::simulate(float deltaTime) {
    const float BLINK_START_VARIABILITY = 0.25f;
    const float FULLY_OPEN = 0.0f;
    const float FULLY_CLOSED = 1.0f;
    if (getHasProceduralBlinkFaceMovement()) {
    if (getProceduralAnimationFlag(HeadData::BlinkProceduralBlendshapeAnimation) &&
        !getSuppressProceduralAnimationFlag(HeadData::BlinkProceduralBlendshapeAnimation)) {
        // handle automatic blinks
        // Detect transition from talking to not; force blink after that and a delay
        bool forceBlink = false;

@@ -136,7 +153,8 @@ void Head::simulate(float deltaTime) {
    }

    // use data to update fake Faceshift blendshape coefficients
    if (getHasAudioEnabledFaceMovement()) {
    if (getProceduralAnimationFlag(HeadData::AudioProceduralBlendshapeAnimation) &&
        !getSuppressProceduralAnimationFlag(HeadData::AudioProceduralBlendshapeAnimation)) {
        // Update audio attack data for facial animation (eyebrows and mouth)
        float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
        _audioAttack = audioAttackAveragingRate * _audioAttack +

@@ -158,7 +176,7 @@ void Head::simulate(float deltaTime) {
            _mouthTime = 0.0f;
        }

        FaceTracker::updateFakeCoefficients(
        updateFakeCoefficients(
            _leftEyeBlink,
            _rightEyeBlink,
            _browAudioLift,

@@ -168,7 +186,8 @@ void Head::simulate(float deltaTime) {
            _mouth4,
            _transientBlendshapeCoefficients);

    if (getHasProceduralEyeFaceMovement()) {
    if (getProceduralAnimationFlag(HeadData::LidAdjustmentProceduralBlendshapeAnimation) &&
        !getSuppressProceduralAnimationFlag(HeadData::LidAdjustmentProceduralBlendshapeAnimation)) {
        // This controls two things, the eye brow and the upper eye lid, it is driven by the vertical up/down angle of the
        // eyes relative to the head. This is to try to help prevent sleepy eyes/crazy eyes.
        applyEyelidOffset(getOrientation());

@@ -252,26 +271,26 @@ void Head::applyEyelidOffset(glm::quat headOrientation) {

    float blinkUpCoefficient = -eyelidOffset;
    float blinkDownCoefficient = BLINK_DOWN_MULTIPLIER * eyelidOffset;

    float openUpCoefficient = eyelidOffset;
    float openDownCoefficient = OPEN_DOWN_MULTIPLIER * eyelidOffset;

    float browsUpCoefficient = BROW_UP_MULTIPLIER * eyelidOffset;
    float browsDownCoefficient = 0.0f;

    bool isLookingUp = (eyePitch > 0);

    if (isLookingUp) {
        for (int i = 0; i < 2; i++) {
            _transientBlendshapeCoefficients[EYE_BLINK_INDICES[i]] = blinkUpCoefficient;
            _transientBlendshapeCoefficients[EYE_OPEN_INDICES[i]] = openUpCoefficient;
            _transientBlendshapeCoefficients[BROWS_U_INDICES[i]] = browsUpCoefficient;
            _transientBlendshapeCoefficients[(int)Blendshapes::EyeBlink_L + i] = blinkUpCoefficient;
            _transientBlendshapeCoefficients[(int)Blendshapes::EyeOpen_L + i] = openUpCoefficient;
            _transientBlendshapeCoefficients[(int)Blendshapes::BrowsU_L + i] = browsUpCoefficient;
        }
    } else {
        for (int i = 0; i < 2; i++) {
            _transientBlendshapeCoefficients[EYE_BLINK_INDICES[i]] = blinkDownCoefficient;
            _transientBlendshapeCoefficients[EYE_OPEN_INDICES[i]] = openDownCoefficient;
            _transientBlendshapeCoefficients[BROWS_U_INDICES[i]] = browsDownCoefficient;
            _transientBlendshapeCoefficients[(int)Blendshapes::EyeBlink_L + i] = blinkDownCoefficient;
            _transientBlendshapeCoefficients[(int)Blendshapes::EyeOpen_L + i] = openDownCoefficient;
            _transientBlendshapeCoefficients[(int)Blendshapes::BrowsU_L + i] = browsDownCoefficient;
        }
    }
}
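Head::simulate now gates each procedural animation on two flags: a script-facing user flag (setProceduralAnimationFlag) and an engine-side suppress flag (setSuppressProceduralAnimationFlag). A compact sketch of that two-level gate, with illustrative enum names rather than the HeadData ones:

#include <array>
#include <cstddef>

enum ProceduralAnimationType : std::size_t {
    AudioBlendshapes = 0,
    Blink,
    LidAdjustment,
    Saccade,
    TypeCount
};

struct ProceduralAnimationFlags {
    std::array<bool, TypeCount> user;       // script-facing enables, on by default
    std::array<bool, TypeCount> suppress;   // engine-side overrides, off by default

    ProceduralAnimationFlags() {
        user.fill(true);
        suppress.fill(false);
    }

    // An animation runs only when the script flag is set AND nothing
    // suppresses it, matching the paired checks in Head::simulate above.
    bool shouldRun(ProceduralAnimationType type) const {
        return user[type] && !suppress[type];
    }
};

Splitting the two concerns lets the engine temporarily silence an animation (say, while tracked input drives the face) without clobbering the user's script-set preference.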
@@ -110,14 +110,7 @@ void SkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
    assert(!_owningAvatar->isMyAvatar());

    Head* head = _owningAvatar->getHead();

    bool eyePosesValid = !head->getHasProceduralEyeMovement();
    glm::vec3 lookAt;
    if (eyePosesValid) {
        lookAt = head->getLookAtPosition(); // don't apply no-crosseyes code etc when eyes are being tracked
    } else {
        lookAt = avoidCrossedEyes(head->getCorrectedLookAtPosition());
    }
    glm::vec3 lookAt = avoidCrossedEyes(head->getCorrectedLookAtPosition());

    // no need to call Model::updateRig() because otherAvatars get their joint state
    // copied directly from AvatarData::_jointData (there are no Rig animations to blend)

@@ -161,8 +154,9 @@ void SkeletonModel::updateAttitude(const glm::quat& orientation) {
// but just before head has been simulated.
void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
    updateAttitude(_owningAvatar->getWorldOrientation());
    setBlendshapeCoefficients(_owningAvatar->getHead()->getSummedBlendshapeCoefficients());

    if (fullUpdate) {
        setBlendshapeCoefficients(_owningAvatar->getHead()->getSummedBlendshapeCoefficients());

        Parent::simulate(deltaTime, fullUpdate);
@@ -110,7 +110,6 @@ AvatarData::AvatarData() :
    _targetScale(1.0f),
    _handState(0),
    _keyState(NO_KEY_DOWN),
    _forceFaceTrackerConnected(false),
    _headData(NULL),
    _errorLogExpiry(0),
    _owningAvatarMixer(),

@@ -154,6 +153,48 @@ float AvatarData::getDomainLimitedScale() const {
    }
}

void AvatarData::setHasScriptedBlendshapes(bool hasScriptedBlendshapes) {
    if (hasScriptedBlendshapes == _headData->getHasScriptedBlendshapes()) {
        return;
    }
    if (!hasScriptedBlendshapes) {
        // send a forced avatarData update to make sure the script can send neutral blendshapes on unload
        // without having to wait for the update loop. make sure _hasScriptedBlendShapes is still true
        // before sending the update, or else it won't send the neutral blendshapes to the receiving clients
        sendAvatarDataPacket(true);
    }
    _headData->setHasScriptedBlendshapes(hasScriptedBlendshapes);
}

bool AvatarData::getHasScriptedBlendshapes() const {
    return _headData->getHasScriptedBlendshapes();
}

void AvatarData::setHasProceduralBlinkFaceMovement(bool value) {
    _headData->setProceduralAnimationFlag(HeadData::BlinkProceduralBlendshapeAnimation, value);
}

bool AvatarData::getHasProceduralBlinkFaceMovement() const {
    return _headData->getProceduralAnimationFlag(HeadData::BlinkProceduralBlendshapeAnimation);
}

void AvatarData::setHasProceduralEyeFaceMovement(bool value) {
    _headData->setProceduralAnimationFlag(HeadData::LidAdjustmentProceduralBlendshapeAnimation, value);
}

bool AvatarData::getHasProceduralEyeFaceMovement() const {
    return _headData->getProceduralAnimationFlag(HeadData::LidAdjustmentProceduralBlendshapeAnimation);
}

void AvatarData::setHasAudioEnabledFaceMovement(bool value) {
    _headData->setProceduralAnimationFlag(HeadData::AudioProceduralBlendshapeAnimation, value);
}

bool AvatarData::getHasAudioEnabledFaceMovement() const {
    return _headData->getProceduralAnimationFlag(HeadData::AudioProceduralBlendshapeAnimation);
}

void AvatarData::setDomainMinimumHeight(float domainMinimumHeight) {
    _domainMinimumHeight = glm::clamp(domainMinimumHeight, MIN_AVATAR_HEIGHT, MAX_AVATAR_HEIGHT);
}

@@ -206,9 +247,6 @@ void AvatarData::lazyInitHeadData() const {
    if (!_headData) {
        _headData = new HeadData(const_cast<AvatarData*>(this));
    }
    if (_forceFaceTrackerConnected) {
        _headData->_isFaceTrackerConnected = true;
    }
}

@@ -338,7 +376,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
            tranlationChangedSince(lastSentTime) ||
            parentInfoChangedSince(lastSentTime));
        hasHandControllers = _controllerLeftHandMatrixCache.isValid() || _controllerRightHandMatrixCache.isValid();
        hasFaceTrackerInfo = !dropFaceTracking && (hasFaceTracker() || getHasScriptedBlendshapes()) &&
        hasFaceTrackerInfo = !dropFaceTracking && getHasScriptedBlendshapes() &&
            (sendAll || faceTrackerInfoChangedSince(lastSentTime));
        hasJointData = !sendMinimum;
        hasJointDefaultPoseFlags = hasJointData;

@@ -529,27 +567,31 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
            setAtBit16(flags, HAND_STATE_FINGER_POINTING_BIT);
        }
        // face tracker state
        if (_headData->_isFaceTrackerConnected) {
            setAtBit16(flags, IS_FACE_TRACKER_CONNECTED);
        if (_headData->_hasScriptedBlendshapes || _headData->_hasInputDrivenBlendshapes) {
            setAtBit16(flags, HAS_SCRIPTED_BLENDSHAPES);
        }
        // eye tracker state
        if (!_headData->_hasProceduralEyeMovement) {
            setAtBit16(flags, IS_EYE_TRACKER_CONNECTED);
        if (_headData->getProceduralAnimationFlag(HeadData::SaccadeProceduralEyeJointAnimation) &&
            !_headData->getSuppressProceduralAnimationFlag(HeadData::SaccadeProceduralEyeJointAnimation)) {
            setAtBit16(flags, HAS_PROCEDURAL_EYE_MOVEMENT);
        }
        // referential state
        if (!parentID.isNull()) {
            setAtBit16(flags, HAS_REFERENTIAL);
        }
        // audio face movement
        if (_headData->getHasAudioEnabledFaceMovement()) {
        if (_headData->getProceduralAnimationFlag(HeadData::AudioProceduralBlendshapeAnimation) &&
            !_headData->getSuppressProceduralAnimationFlag(HeadData::AudioProceduralBlendshapeAnimation)) {
            setAtBit16(flags, AUDIO_ENABLED_FACE_MOVEMENT);
        }
        // procedural eye face movement
        if (_headData->getHasProceduralEyeFaceMovement()) {
        if (_headData->getProceduralAnimationFlag(HeadData::LidAdjustmentProceduralBlendshapeAnimation) &&
            !_headData->getSuppressProceduralAnimationFlag(HeadData::LidAdjustmentProceduralBlendshapeAnimation)) {
            setAtBit16(flags, PROCEDURAL_EYE_FACE_MOVEMENT);
        }
        // procedural blink face movement
        if (_headData->getHasProceduralBlinkFaceMovement()) {
        if (_headData->getProceduralAnimationFlag(HeadData::BlinkProceduralBlendshapeAnimation) &&
            !_headData->getSuppressProceduralAnimationFlag(HeadData::BlinkProceduralBlendshapeAnimation)) {
            setAtBit16(flags, PROCEDURAL_BLINK_FACE_MOVEMENT);
        }
        // avatar collisions enabled

@@ -1150,22 +1192,23 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
        auto newHandState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT)
            + (oneAtBit16(bitItems, HAND_STATE_FINGER_POINTING_BIT) ? IS_FINGER_POINTING_FLAG : 0);

        auto newFaceTrackerConnected = oneAtBit16(bitItems, IS_FACE_TRACKER_CONNECTED);
        auto newHasntProceduralEyeMovement = oneAtBit16(bitItems, IS_EYE_TRACKER_CONNECTED);

        auto newHasScriptedBlendshapes = oneAtBit16(bitItems, HAS_SCRIPTED_BLENDSHAPES);
        auto newHasProceduralEyeMovement = oneAtBit16(bitItems, HAS_PROCEDURAL_EYE_MOVEMENT);
        auto newHasAudioEnabledFaceMovement = oneAtBit16(bitItems, AUDIO_ENABLED_FACE_MOVEMENT);
        auto newHasProceduralEyeFaceMovement = oneAtBit16(bitItems, PROCEDURAL_EYE_FACE_MOVEMENT);
        auto newHasProceduralBlinkFaceMovement = oneAtBit16(bitItems, PROCEDURAL_BLINK_FACE_MOVEMENT);

        auto newCollideWithOtherAvatars = oneAtBit16(bitItems, COLLIDE_WITH_OTHER_AVATARS);
        auto newHasPriority = oneAtBit16(bitItems, HAS_HERO_PRIORITY);

        bool keyStateChanged = (_keyState != newKeyState);
        bool handStateChanged = (_handState != newHandState);
        bool faceStateChanged = (_headData->_isFaceTrackerConnected != newFaceTrackerConnected);
        bool eyeStateChanged = (_headData->_hasProceduralEyeMovement == newHasntProceduralEyeMovement);
        bool audioEnableFaceMovementChanged = (_headData->getHasAudioEnabledFaceMovement() != newHasAudioEnabledFaceMovement);
        bool proceduralEyeFaceMovementChanged = (_headData->getHasProceduralEyeFaceMovement() != newHasProceduralEyeFaceMovement);
        bool proceduralBlinkFaceMovementChanged = (_headData->getHasProceduralBlinkFaceMovement() != newHasProceduralBlinkFaceMovement);
        bool faceStateChanged = (_headData->getHasScriptedBlendshapes() != newHasScriptedBlendshapes);

        bool eyeStateChanged = (_headData->getProceduralAnimationFlag(HeadData::SaccadeProceduralEyeJointAnimation) != newHasProceduralEyeMovement);
        bool audioEnableFaceMovementChanged = (_headData->getProceduralAnimationFlag(HeadData::AudioProceduralBlendshapeAnimation) != newHasAudioEnabledFaceMovement);
        bool proceduralEyeFaceMovementChanged = (_headData->getProceduralAnimationFlag(HeadData::LidAdjustmentProceduralBlendshapeAnimation) != newHasProceduralEyeFaceMovement);
        bool proceduralBlinkFaceMovementChanged = (_headData->getProceduralAnimationFlag(HeadData::BlinkProceduralBlendshapeAnimation) != newHasProceduralBlinkFaceMovement);
        bool collideWithOtherAvatarsChanged = (_collideWithOtherAvatars != newCollideWithOtherAvatars);
        bool hasPriorityChanged = (getHasPriority() != newHasPriority);
        bool somethingChanged = keyStateChanged || handStateChanged || faceStateChanged || eyeStateChanged || audioEnableFaceMovementChanged ||

@@ -1174,11 +1217,15 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        _keyState = newKeyState;
        _handState = newHandState;
        _headData->_isFaceTrackerConnected = newFaceTrackerConnected;
        _headData->setHasProceduralEyeMovement(!newHasntProceduralEyeMovement);
        _headData->setHasAudioEnabledFaceMovement(newHasAudioEnabledFaceMovement);
        _headData->setHasProceduralEyeFaceMovement(newHasProceduralEyeFaceMovement);
        _headData->setHasProceduralBlinkFaceMovement(newHasProceduralBlinkFaceMovement);
        if (!newHasScriptedBlendshapes && getHasScriptedBlendshapes()) {
            // if scripted blendshapes have just been turned off, slam blendshapes back to zero.
            _headData->clearBlendshapeCoefficients();
        }
        _headData->setHasScriptedBlendshapes(newHasScriptedBlendshapes);
        _headData->setProceduralAnimationFlag(HeadData::SaccadeProceduralEyeJointAnimation, newHasProceduralEyeMovement);
        _headData->setProceduralAnimationFlag(HeadData::AudioProceduralBlendshapeAnimation, newHasAudioEnabledFaceMovement);
        _headData->setProceduralAnimationFlag(HeadData::LidAdjustmentProceduralBlendshapeAnimation, newHasProceduralEyeFaceMovement);
        _headData->setProceduralAnimationFlag(HeadData::BlinkProceduralBlendshapeAnimation, newHasProceduralBlinkFaceMovement);
        _collideWithOtherAvatars = newCollideWithOtherAvatars;
        setHasPriorityWithoutTimestampReset(newHasPriority);

@@ -1263,7 +1310,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
        sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo);

        PACKET_READ_CHECK(FaceTrackerCoefficients, coefficientsSize);
        _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy!
        _headData->_blendshapeCoefficients.resize(std::min(numCoefficients, (int)Blendshapes::BlendshapeCount)); // make sure there's room for the copy!
        // only copy the blendshapes to headData, not the procedural face info
        memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize);
        sourceBuffer += coefficientsSize;

@@ -2590,6 +2637,7 @@ enum class JsonAvatarFrameVersion : int {
    JointRotationsInAbsoluteFrame,
    JointDefaultPoseBits,
    JointUnscaledTranslations,
    ARKitBlendshapes
};

QJsonValue toJsonValue(const JointData& joint) {

@@ -2634,7 +2682,7 @@ void AvatarData::avatarEntityDataToJson(QJsonObject& root) const {
QJsonObject AvatarData::toJson() const {
    QJsonObject root;

    root[JSON_AVATAR_VERSION] = (int)JsonAvatarFrameVersion::JointUnscaledTranslations;
    root[JSON_AVATAR_VERSION] = (int)JsonAvatarFrameVersion::ARKitBlendshapes;

    if (!getSkeletonModelURL().isEmpty()) {
        root[JSON_AVATAR_BODY_MODEL] = getSkeletonModelURL().toString();
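toByteArray and parseDataFromBuffer pack all of these booleans into one 16-bit flags word via setAtBit16 and oneAtBit16. Plausible equivalents of those helpers, inferred from how they are used above rather than copied from the Overte source:

#include <cstdint>

// Set the given bit in a 16-bit flags word.
inline void setAtBit16(uint16_t& flags, int bit) {
    flags |= static_cast<uint16_t>(1u << bit);
}

// Test whether the given bit is set.
inline bool oneAtBit16(uint16_t flags, int bit) {
    return (flags & (1u << bit)) != 0;
}

// Example: pack and recover the HAS_SCRIPTED_BLENDSHAPES bit (bit 4).
// uint16_t flags = 0;
// setAtBit16(flags, 4);
// bool hasScriptedBlendshapes = oneAtBit16(flags, 4);  // true

Because the flags word is part of the wire protocol, the PR keeps the retired bit positions reserved (marked UNUSED below) instead of renumbering them.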
@@ -104,12 +104,12 @@ const quint32 AVATAR_MOTION_SCRIPTABLE_BITS =
// Procedural Collide with other avatars is enabled 12th bit
// Procedural Has Hero Priority is enabled 13th bit

const int KEY_STATE_START_BIT = 0; // 1st and 2nd bits
const int HAND_STATE_START_BIT = 2; // 3rd and 4th bits
const int IS_FACE_TRACKER_CONNECTED = 4; // 5th bit
const int IS_EYE_TRACKER_CONNECTED = 5; // 6th bit (was CHAT_CIRCLING)
const int KEY_STATE_START_BIT = 0; // 1st and 2nd bits (UNUSED)
const int HAND_STATE_START_BIT = 2; // 3rd and 4th bits (UNUSED)
const int HAS_SCRIPTED_BLENDSHAPES = 4; // 5th bit
const int HAS_PROCEDURAL_EYE_MOVEMENT = 5; // 6th bit
const int HAS_REFERENTIAL = 6; // 7th bit
const int HAND_STATE_FINGER_POINTING_BIT = 7; // 8th bit
const int HAND_STATE_FINGER_POINTING_BIT = 7; // 8th bit (UNUSED)
const int AUDIO_ENABLED_FACE_MOVEMENT = 8; // 9th bit
const int PROCEDURAL_EYE_FACE_MOVEMENT = 9; // 10th bit
const int PROCEDURAL_BLINK_FACE_MOVEMENT = 10; // 11th bit

@@ -325,7 +325,7 @@ namespace AvatarDataPacket {

    // variable length structure follows

    // only present if IS_FACE_TRACKER_CONNECTED flag is set in AvatarInfo.flags
    // only present if HAS_SCRIPTED_BLENDSHAPES flag is set in AvatarInfo.flags
    PACKED_BEGIN struct FaceTrackerInfo {
        float leftEyeBlink;
        float rightEyeBlink;

@@ -534,6 +534,19 @@ class AvatarData : public QObject, public SpatiallyNestable {
 *     size in the virtual world. <em>Read-only.</em>
 * @property {boolean} hasPriority - <code>true</code> if the avatar is in a "hero" zone, <code>false</code> if it isn't.
 *     <em>Read-only.</em>
 * @property {boolean} hasScriptedBlendshapes=false - Set this to <code>true</code> before using the
 *     {@link MyAvatar.setBlendshape} method, and set it back to <code>false</code> when you no longer want scripted
 *     control over the blendshapes.<br /> NOTE: this property will automatically become <code>true</code> if the
 *     Controller system has valid facial blendshape actions.
 * @property {boolean} hasProceduralBlinkFaceMovement=true - By default, avatars blink automatically by animating facial
 *     blendshapes. Set this property to <code>false</code> to disable this automatic blinking. This can be useful if you
 *     wish to fully control the blink facial blendshapes via the {@link MyAvatar.setBlendshape} method.
 * @property {boolean} hasProceduralEyeFaceMovement=true - By default, the avatar eye facial blendshapes are adjusted
 *     automatically as the eyes move, which prevents the iris from being obscured by the upper or lower lids. Set this
 *     property to <code>false</code> to disable this automatic movement. This can be useful if you wish to fully control
 *     the eye blendshapes via the {@link MyAvatar.setBlendshape} method.
 * @property {boolean} hasAudioEnabledFaceMovement=true - By default, the avatar mouth blendshapes animate based on the
 *     microphone audio. Set this property to <code>false</code> to disable that animation. This can be useful if you
 *     wish to fully control the mouth facial blendshapes via the {@link MyAvatar.setBlendshape} method.
 */
    Q_PROPERTY(glm::vec3 position READ getWorldPosition WRITE setPositionViaScript)
    Q_PROPERTY(float scale READ getDomainLimitedScale WRITE setTargetScale)

@@ -575,6 +588,11 @@ class AvatarData : public QObject, public SpatiallyNestable {

    Q_PROPERTY(bool hasPriority READ getHasPriority)

    Q_PROPERTY(bool hasScriptedBlendshapes READ getHasScriptedBlendshapes WRITE setHasScriptedBlendshapes)
    Q_PROPERTY(bool hasProceduralBlinkFaceMovement READ getHasProceduralBlinkFaceMovement WRITE setHasProceduralBlinkFaceMovement)
    Q_PROPERTY(bool hasProceduralEyeFaceMovement READ getHasProceduralEyeFaceMovement WRITE setHasProceduralEyeFaceMovement)
    Q_PROPERTY(bool hasAudioEnabledFaceMovement READ getHasAudioEnabledFaceMovement WRITE setHasAudioEnabledFaceMovement)

public:
    virtual QString getName() const override { return QString("Avatar:") + _displayName; }

@@ -684,10 +702,14 @@ public:

    float getDomainLimitedScale() const;

    virtual bool getHasScriptedBlendshapes() const { return false; }
    virtual bool getHasProceduralBlinkFaceMovement() const { return true; }
    virtual bool getHasProceduralEyeFaceMovement() const { return true; }
    virtual bool getHasAudioEnabledFaceMovement() const { return false; }
    void setHasScriptedBlendshapes(bool hasScriptedBlendshapes);
    bool getHasScriptedBlendshapes() const;
    void setHasProceduralBlinkFaceMovement(bool hasProceduralBlinkFaceMovement);
    bool getHasProceduralBlinkFaceMovement() const;
    void setHasProceduralEyeFaceMovement(bool hasProceduralEyeFaceMovement);
    bool getHasProceduralEyeFaceMovement() const;
    void setHasAudioEnabledFaceMovement(bool hasAudioEnabledFaceMovement);
    bool getHasAudioEnabledFaceMovement() const;

    /**jsdoc
     * Gets the minimum scale allowed for this avatar in the current domain.

@@ -1111,13 +1133,14 @@ public:

    /**jsdoc
     * Sets the value of a blendshape to animate your avatar's face. To enable other users to see the resulting animation of
     * your avatar's face, use {@link Avatar.setForceFaceTrackerConnected} or {@link MyAvatar.setForceFaceTrackerConnected}.
     * your avatar's face, set {@link Avatar.hasScriptedBlendshapes} to <code>true</code> while using this API and back to
     * <code>false</code> when your animation is complete.
     * @function Avatar.setBlendshape
     * @param {string} name - The name of the blendshape, per the
     *     {@link https://docs.highfidelity.com/create/avatars/avatar-standards.html#blendshapes Avatar Standards}.
     * @param {number} value - A value between <code>0.0</code> and <code>1.0</code>.
     * @example <caption>Open your avatar's mouth wide.</caption>
     * MyAvatar.setForceFaceTrackerConnected(true);
     * MyAvatar.hasScriptedBlendshapes = true;
     * MyAvatar.setBlendshape("JawOpen", 1.0);
     *
     * // Note: If using from the Avatar API, replace "MyAvatar" with "Avatar".

@@ -1163,15 +1186,16 @@ public:
     */
    Q_INVOKABLE virtual void clearAvatarEntity(const QUuid& entityID, bool requiresRemovalFromTree = true);

    /**jsdoc
     * <p class="important">Deprecated: This method is deprecated and will be removed.
     * Use the {@link Avatar.hasScriptedBlendshapes} property instead.</p>
     * Enables blendshapes set using {@link Avatar.setBlendshape} or {@link MyAvatar.setBlendshape} to be transmitted to other
     * users so that they can see the animation of your avatar's face.
     * @function Avatar.setForceFaceTrackerConnected
     * @param {boolean} connected - <code>true</code> to enable blendshape changes to be transmitted to other users,
     *     <code>false</code> to disable.
     */
    Q_INVOKABLE void setForceFaceTrackerConnected(bool connected) { _forceFaceTrackerConnected = connected; }
    Q_INVOKABLE void setForceFaceTrackerConnected(bool connected) { setHasScriptedBlendshapes(connected); }

    // key state
    void setKeyState(KeyState s) { _keyState = s; }

@@ -1661,7 +1685,6 @@ protected:
    bool faceTrackerInfoChangedSince(quint64 time) const { return true; } // FIXME

    bool hasParent() const { return !getParentID().isNull(); }
    bool hasFaceTracker() const { return _headData ? _headData->_isFaceTrackerConnected : false; }

    QByteArray packSkeletonData() const;
    QByteArray packSkeletonModelURL() const;

@@ -1694,7 +1717,6 @@ protected:
    // key state
    KeyState _keyState;

    bool _forceFaceTrackerConnected;
    bool _hasNewJointData { true }; // set in AvatarData, cleared in Avatar

    mutable HeadData* _headData { nullptr };
@@ -27,11 +27,13 @@ HeadData::HeadData(AvatarData* owningAvatar) :
    _basePitch(0.0f),
    _baseRoll(0.0f),
    _lookAtPosition(0.0f, 0.0f, 0.0f),
    _blendshapeCoefficients(QVector<float>(0, 0.0f)),
    _transientBlendshapeCoefficients(QVector<float>(0, 0.0f)),
    _summedBlendshapeCoefficients(QVector<float>(0, 0.0f)),
    _blendshapeCoefficients((int)Blendshapes::BlendshapeCount, 0.0f),
    _transientBlendshapeCoefficients((int)Blendshapes::BlendshapeCount, 0.0f),
    _summedBlendshapeCoefficients((int)Blendshapes::BlendshapeCount, 0.0f),
    _owningAvatar(owningAvatar)
{
    _userProceduralAnimationFlags.assign((size_t)ProceduralAnimaitonTypeCount, true);
    _suppressProceduralAnimationFlags.assign((size_t)ProceduralAnimaitonTypeCount, false);
    computeBlendshapesLookupMap();
}

@@ -71,7 +73,7 @@ void HeadData::setOrientation(const glm::quat& orientation) {
}

void HeadData::computeBlendshapesLookupMap(){
    for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
    for (int i = 0; i < (int)Blendshapes::BlendshapeCount; i++) {
        _blendshapeLookupMap[FACESHIFT_BLENDSHAPES[i]] = i;
    }
}

@@ -81,6 +83,10 @@ int HeadData::getNumSummedBlendshapeCoefficients() const {
    return maxSize;
}

void HeadData::clearBlendshapeCoefficients() {
    _blendshapeCoefficients.fill(0.0f, (int)_blendshapeCoefficients.size());
}

const QVector<float>& HeadData::getSummedBlendshapeCoefficients() {
    int maxSize = std::max(_blendshapeCoefficients.size(), _transientBlendshapeCoefficients.size());
    if (_summedBlendshapeCoefficients.size() != maxSize) {

@@ -102,7 +108,7 @@ const QVector<float>& HeadData::getSummedBlendshapeCoefficients() {

void HeadData::setBlendshape(QString name, float val) {

    //Check to see if the named blendshape exists, and then set its value if it does
    // Check to see if the named blendshape exists, and then set its value if it does
    auto it = _blendshapeLookupMap.find(name);
    if (it != _blendshapeLookupMap.end()) {
        if (_blendshapeCoefficients.size() <= it.value()) {

@@ -112,6 +118,19 @@ void HeadData::setBlendshape(QString name, float val) {
            _transientBlendshapeCoefficients.resize(it.value() + 1);
        }
        _blendshapeCoefficients[it.value()] = val;
    } else {
        // check to see if this is a legacy blendshape that is present in
        // ARKit blendshapes but is split, i.e. has left and right halves.
        if (name == "LipsUpperUp") {
            _blendshapeCoefficients[(int)Blendshapes::MouthUpperUp_L] = val;
            _blendshapeCoefficients[(int)Blendshapes::MouthUpperUp_R] = val;
        } else if (name == "LipsLowerDown") {
            _blendshapeCoefficients[(int)Blendshapes::MouthLowerDown_L] = val;
            _blendshapeCoefficients[(int)Blendshapes::MouthLowerDown_R] = val;
        } else if (name == "Sneer") {
            _blendshapeCoefficients[(int)Blendshapes::NoseSneer_L] = val;
            _blendshapeCoefficients[(int)Blendshapes::NoseSneer_R] = val;
        }
    }
}

@@ -167,14 +186,7 @@ QJsonObject HeadData::toJson() const {
void HeadData::fromJson(const QJsonObject& json) {
    if (json.contains(JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS)) {
        auto jsonValue = json[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS];
        if (jsonValue.isArray()) {
            QVector<float> blendshapeCoefficients;
            QJsonArray blendshapeCoefficientsJson = jsonValue.toArray();
            for (const auto& blendshapeCoefficient : blendshapeCoefficientsJson) {
                blendshapeCoefficients.push_back((float)blendshapeCoefficient.toDouble());
            }
            setBlendshapeCoefficients(blendshapeCoefficients);
        } else if (jsonValue.isObject()) {
        if (jsonValue.isObject()) {
            QJsonObject blendshapeCoefficientsJson = jsonValue.toObject();
            for (const QString& name : blendshapeCoefficientsJson.keys()) {
                float value = (float)blendshapeCoefficientsJson[name].toDouble();

@@ -197,39 +209,34 @@ void HeadData::fromJson(const QJsonObject& json) {
    }
}

bool HeadData::getHasProceduralEyeFaceMovement() const {
    return _hasProceduralEyeFaceMovement;
bool HeadData::getProceduralAnimationFlag(ProceduralAnimationType type) const {
    return _userProceduralAnimationFlags[(int)type];
}

void HeadData::setHasProceduralEyeFaceMovement(bool hasProceduralEyeFaceMovement) {
    _hasProceduralEyeFaceMovement = hasProceduralEyeFaceMovement;
void HeadData::setProceduralAnimationFlag(ProceduralAnimationType type, bool value) {
    _userProceduralAnimationFlags[(int)type] = value;
}

bool HeadData::getHasProceduralBlinkFaceMovement() const {
    // return _hasProceduralBlinkFaceMovement;
    return _hasProceduralBlinkFaceMovement && !_isFaceTrackerConnected;
bool HeadData::getSuppressProceduralAnimationFlag(ProceduralAnimationType type) const {
    return _suppressProceduralAnimationFlags[(int)type];
}

void HeadData::setHasProceduralBlinkFaceMovement(bool hasProceduralBlinkFaceMovement) {
    _hasProceduralBlinkFaceMovement = hasProceduralBlinkFaceMovement;
void HeadData::setSuppressProceduralAnimationFlag(ProceduralAnimationType type, bool value) {
    _suppressProceduralAnimationFlags[(int)type] = value;
}

bool HeadData::getHasAudioEnabledFaceMovement() const {
    return _hasAudioEnabledFaceMovement;
bool HeadData::getHasScriptedBlendshapes() const {
    return _hasScriptedBlendshapes;
}

void HeadData::setHasAudioEnabledFaceMovement(bool hasAudioEnabledFaceMovement) {
    _hasAudioEnabledFaceMovement = hasAudioEnabledFaceMovement;
void HeadData::setHasScriptedBlendshapes(bool value) {
    _hasScriptedBlendshapes = value;
}

bool HeadData::getHasProceduralEyeMovement() const {
    return _hasProceduralEyeMovement;
bool HeadData::getHasInputDrivenBlendshapes() const {
    return _hasInputDrivenBlendshapes;
}

void HeadData::setHasProceduralEyeMovement(bool hasProceduralEyeMovement) {
    _hasProceduralEyeMovement = hasProceduralEyeMovement;
}

void HeadData::setFaceTrackerConnected(bool value) {
    _isFaceTrackerConnected = value;
void HeadData::setHasInputDrivenBlendshapes(bool value) {
    _hasInputDrivenBlendshapes = value;
}
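The new else-branch in HeadData::setBlendshape fans a legacy combined name out to the two ARKit halves. A sketch of that fallback using standard containers; the index pairs here are hypothetical placeholders for the Blendshapes enum values:

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Sketch: write a named coefficient, falling back to legacy combined
// names that map onto split left/right ARKit blendshape slots.
void setBlendshapeWithLegacyFallback(
        const std::unordered_map<std::string, int>& lookup,
        std::vector<float>& coefficients,
        const std::string& name, float value) {
    auto it = lookup.find(name);
    if (it != lookup.end()) {
        coefficients[it->second] = value;      // modern name, direct hit
        return;
    }
    // Hypothetical index pairs for illustration only; the real indices
    // come from the Blendshapes enum (MouthUpperUp_L/_R, etc.).
    static const std::unordered_map<std::string, std::pair<int, int>> legacy {
        { "LipsUpperUp",   { 10, 11 } },
        { "LipsLowerDown", { 12, 13 } },
        { "Sneer",         { 14, 15 } },
    };
    auto legacyIt = legacy.find(name);
    if (legacyIt != legacy.end()) {
        coefficients[legacyIt->second.first] = value;   // left half
        coefficients[legacyIt->second.second] = value;  // right half
    }
}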
@@ -20,7 +20,7 @@
#include <glm/gtc/quaternion.hpp>

#include <SharedUtil.h>
#include <FaceshiftConstants.h>
#include <BlendshapeConstants.h>

// degrees
const float MIN_HEAD_YAW = -180.0f;

@@ -62,6 +62,7 @@ public:
    const QVector<float>& getSummedBlendshapeCoefficients();
    int getNumSummedBlendshapeCoefficients() const;
    void setBlendshapeCoefficients(const QVector<float>& blendshapeCoefficients) { _blendshapeCoefficients = blendshapeCoefficients; }
    void clearBlendshapeCoefficients();

    const glm::vec3& getLookAtPosition() const { return _lookAtPosition; }
    virtual void setLookAtPosition(const glm::vec3& lookAtPosition) {

@@ -72,17 +73,29 @@ public:
    }
    bool lookAtPositionChangedSince(quint64 time) { return _lookAtPositionChanged >= time; }

    bool getHasProceduralEyeFaceMovement() const;
    void setHasProceduralEyeFaceMovement(bool hasProceduralEyeFaceMovement);
    bool getHasProceduralBlinkFaceMovement() const;
    void setHasProceduralBlinkFaceMovement(bool hasProceduralBlinkFaceMovement);
    bool getHasAudioEnabledFaceMovement() const;
    void setHasAudioEnabledFaceMovement(bool hasAudioEnabledFaceMovement);
    bool getHasProceduralEyeMovement() const;
    void setHasProceduralEyeMovement(bool hasProceduralEyeMovement);
    enum ProceduralAnimationType {
        AudioProceduralBlendshapeAnimation = 0,
        BlinkProceduralBlendshapeAnimation,
        LidAdjustmentProceduralBlendshapeAnimation,
        SaccadeProceduralEyeJointAnimation,
        ProceduralAnimaitonTypeCount,
    };

    void setFaceTrackerConnected(bool value);
    bool getFaceTrackerConnected() const { return _isFaceTrackerConnected; }
    // called by scripts to enable or disable procedural blendshape or eye joint animations.
    bool getProceduralAnimationFlag(ProceduralAnimationType type) const;
    void setProceduralAnimationFlag(ProceduralAnimationType type, bool value);

    // called by C++ to suppress, i.e. temporarily disable, a procedural animation.
    bool getSuppressProceduralAnimationFlag(ProceduralAnimationType flag) const;
    void setSuppressProceduralAnimationFlag(ProceduralAnimationType flag, bool value);

    // called by scripts to enable/disable manual adjustment of blendshapes
    void setHasScriptedBlendshapes(bool value);
    bool getHasScriptedBlendshapes() const;

    // called by C++ code to denote the presence of manually driven blendshapes.
    void setHasInputDrivenBlendshapes(bool value);
    bool getHasInputDrivenBlendshapes() const;

    friend class AvatarData;

@@ -98,12 +111,11 @@ protected:
    glm::vec3 _lookAtPosition;
    quint64 _lookAtPositionChanged { 0 };

    bool _hasAudioEnabledFaceMovement { true };
    bool _hasProceduralBlinkFaceMovement { true };
    bool _hasProceduralEyeFaceMovement { true };
    bool _hasProceduralEyeMovement { true };
    std::vector<bool> _userProceduralAnimationFlags;
    std::vector<bool> _suppressProceduralAnimationFlags;

    bool _isFaceTrackerConnected { false };
    bool _hasScriptedBlendshapes { false };
    bool _hasInputDrivenBlendshapes { false };

    float _leftEyeBlink { 0.0f };
    float _rightEyeBlink { 0.0f };
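With both _blendshapeCoefficients (script-set) and _transientBlendshapeCoefficients (procedural) now pre-sized to BlendshapeCount, the summed vector that SkeletonModel consumes can be rebuilt by element-wise addition. A sketch under that assumption; the real getSummedBlendshapeCoefficients also caches its result in _summedBlendshapeCoefficients:

#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch: combine script-set and procedural coefficients, tolerating
// vectors of different lengths by treating missing entries as zero.
std::vector<float> sumBlendshapes(const std::vector<float>& base,
                                  const std::vector<float>& transient) {
    std::vector<float> summed(std::max(base.size(), transient.size()), 0.0f);
    for (std::size_t i = 0; i < summed.size(); ++i) {
        const float b = i < base.size() ? base[i] : 0.0f;
        const float t = i < transient.size() ? transient[i] : 0.0f;
        summed[i] = b + t;   // procedural offsets stack on scripted values
    }
    return summed;
}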
@@ -349,8 +349,72 @@ namespace controller {
    makePosePair(Action::HEAD, "Head"),
    makePosePair(Action::LEFT_EYE, "LeftEye"),
    makePosePair(Action::RIGHT_EYE, "RightEye"),
    makeAxisPair(Action::LEFT_EYE_BLINK, "LeftEyeBlink"),
    makeAxisPair(Action::RIGHT_EYE_BLINK, "RightEyeBlink"),

    // blendshapes
    makeAxisPair(Action::EYEBLINK_L, "EyeBlink_L"),
    makeAxisPair(Action::EYEBLINK_R, "EyeBlink_R"),
    makeAxisPair(Action::EYESQUINT_L, "EyeSquint_L"),
    makeAxisPair(Action::EYESQUINT_R, "EyeSquint_R"),
    makeAxisPair(Action::EYEDOWN_L, "EyeDown_L"),
    makeAxisPair(Action::EYEDOWN_R, "EyeDown_R"),
    makeAxisPair(Action::EYEIN_L, "EyeIn_L"),
    makeAxisPair(Action::EYEIN_R, "EyeIn_R"),
    makeAxisPair(Action::EYEOPEN_L, "EyeOpen_L"),
    makeAxisPair(Action::EYEOPEN_R, "EyeOpen_R"),
    makeAxisPair(Action::EYEOUT_L, "EyeOut_L"),
    makeAxisPair(Action::EYEOUT_R, "EyeOut_R"),
    makeAxisPair(Action::EYEUP_L, "EyeUp_L"),
    makeAxisPair(Action::EYEUP_R, "EyeUp_R"),
    makeAxisPair(Action::BROWSD_L, "BrowsD_L"),
    makeAxisPair(Action::BROWSD_R, "BrowsD_R"),
    makeAxisPair(Action::BROWSU_C, "BrowsU_C"),
    makeAxisPair(Action::BROWSU_L, "BrowsU_L"),
    makeAxisPair(Action::BROWSU_R, "BrowsU_R"),
    makeAxisPair(Action::JAWFWD, "JawFwd"),
    makeAxisPair(Action::JAWLEFT, "JawLeft"),
    makeAxisPair(Action::JAWOPEN, "JawOpen"),
    makeAxisPair(Action::JAWRIGHT, "JawRight"),
    makeAxisPair(Action::MOUTHLEFT, "MouthLeft"),
    makeAxisPair(Action::MOUTHRIGHT, "MouthRight"),
    makeAxisPair(Action::MOUTHFROWN_L, "MouthFrown_L"),
    makeAxisPair(Action::MOUTHFROWN_R, "MouthFrown_R"),
    makeAxisPair(Action::MOUTHSMILE_L, "MouthSmile_L"),
    makeAxisPair(Action::MOUTHSMILE_R, "MouthSmile_R"),
    makeAxisPair(Action::MOUTHDIMPLE_L, "MouthDimple_L"),
    makeAxisPair(Action::MOUTHDIMPLE_R, "MouthDimple_R"),
    makeAxisPair(Action::LIPSSTRETCH_L, "LipsStretch_L"),
    makeAxisPair(Action::LIPSSTRETCH_R, "LipsStretch_R"),
    makeAxisPair(Action::LIPSUPPERCLOSE, "LipsUpperClose"),
    makeAxisPair(Action::LIPSLOWERCLOSE, "LipsLowerClose"),
    makeAxisPair(Action::LIPSUPPEROPEN, "LipsUpperOpen"),
    makeAxisPair(Action::LIPSLOWEROPEN, "LipsLowerOpen"),
    makeAxisPair(Action::LIPSFUNNEL, "LipsFunnel"),
    makeAxisPair(Action::LIPSPUCKER, "LipsPucker"),
    makeAxisPair(Action::PUFF, "Puff"),
    makeAxisPair(Action::CHEEKSQUINT_L, "CheekSquint_L"),
    makeAxisPair(Action::CHEEKSQUINT_R, "CheekSquint_R"),
    makeAxisPair(Action::MOUTHCLOSE, "MouthClose"),
    makeAxisPair(Action::MOUTHUPPERUP_L, "MouthUpperUp_L"),
    makeAxisPair(Action::MOUTHUPPERUP_R, "MouthUpperUp_R"),
    makeAxisPair(Action::MOUTHLOWERDOWN_L, "MouthLowerDown_L"),
    makeAxisPair(Action::MOUTHLOWERDOWN_R, "MouthLowerDown_R"),
    makeAxisPair(Action::MOUTHPRESS_L, "MouthPress_L"),
    makeAxisPair(Action::MOUTHPRESS_R, "MouthPress_R"),
    makeAxisPair(Action::MOUTHSHRUGLOWER, "MouthShrugLower"),
    makeAxisPair(Action::MOUTHSHRUGUPPER, "MouthShrugUpper"),
    makeAxisPair(Action::NOSESNEER_L, "NoseSneer_L"),
    makeAxisPair(Action::NOSESNEER_R, "NoseSneer_R"),
    makeAxisPair(Action::TONGUEOUT, "TongueOut"),
    makeAxisPair(Action::USERBLENDSHAPE0, "UserBlendshape0"),
    makeAxisPair(Action::USERBLENDSHAPE1, "UserBlendshape1"),
    makeAxisPair(Action::USERBLENDSHAPE2, "UserBlendshape2"),
    makeAxisPair(Action::USERBLENDSHAPE3, "UserBlendshape3"),
    makeAxisPair(Action::USERBLENDSHAPE4, "UserBlendshape4"),
    makeAxisPair(Action::USERBLENDSHAPE5, "UserBlendshape5"),
    makeAxisPair(Action::USERBLENDSHAPE6, "UserBlendshape6"),
    makeAxisPair(Action::USERBLENDSHAPE7, "UserBlendshape7"),
    makeAxisPair(Action::USERBLENDSHAPE8, "UserBlendshape8"),
    makeAxisPair(Action::USERBLENDSHAPE9, "UserBlendshape9"),

    makePosePair(Action::LEFT_HAND_THUMB1, "LeftHandThumb1"),
    makePosePair(Action::LEFT_HAND_THUMB2, "LeftHandThumb2"),
@@ -183,8 +183,72 @@ enum class Action {

LEFT_EYE,
RIGHT_EYE,
LEFT_EYE_BLINK,
RIGHT_EYE_BLINK,

// blendshapes
EYEBLINK_L,
EYEBLINK_R,
EYESQUINT_L,
EYESQUINT_R,
EYEDOWN_L,
EYEDOWN_R,
EYEIN_L,
EYEIN_R,
EYEOPEN_L,
EYEOPEN_R,
EYEOUT_L,
EYEOUT_R,
EYEUP_L,
EYEUP_R,
BROWSD_L,
BROWSD_R,
BROWSU_C,
BROWSU_L,
BROWSU_R,
JAWFWD,
JAWLEFT,
JAWOPEN,
JAWRIGHT,
MOUTHLEFT,
MOUTHRIGHT,
MOUTHFROWN_L,
MOUTHFROWN_R,
MOUTHSMILE_L,
MOUTHSMILE_R,
MOUTHDIMPLE_L,
MOUTHDIMPLE_R,
LIPSSTRETCH_L,
LIPSSTRETCH_R,
LIPSUPPERCLOSE,
LIPSLOWERCLOSE,
LIPSUPPEROPEN,
LIPSLOWEROPEN,
LIPSFUNNEL,
LIPSPUCKER,
PUFF,
CHEEKSQUINT_L,
CHEEKSQUINT_R,
MOUTHCLOSE,
MOUTHUPPERUP_L,
MOUTHUPPERUP_R,
MOUTHLOWERDOWN_L,
MOUTHLOWERDOWN_R,
MOUTHPRESS_L,
MOUTHPRESS_R,
MOUTHSHRUGLOWER,
MOUTHSHRUGUPPER,
NOSESNEER_L,
NOSESNEER_R,
TONGUEOUT,
USERBLENDSHAPE0,
USERBLENDSHAPE1,
USERBLENDSHAPE2,
USERBLENDSHAPE3,
USERBLENDSHAPE4,
USERBLENDSHAPE5,
USERBLENDSHAPE6,
USERBLENDSHAPE7,
USERBLENDSHAPE8,
USERBLENDSHAPE9,

NUM_ACTIONS
};

@@ -355,8 +355,72 @@ Input::NamedVector StandardController::getAvailableInputs() const {
makePair(HEAD, "Head"),
makePair(LEFT_EYE, "LeftEye"),
makePair(RIGHT_EYE, "RightEye"),
makePair(LEFT_EYE_BLINK, "LeftEyeBlink"),
makePair(RIGHT_EYE_BLINK, "RightEyeBlink"),

// blendshapes
makePair(EYEBLINK_L, "EyeBlink_L"),
makePair(EYEBLINK_R, "EyeBlink_R"),
makePair(EYESQUINT_L, "EyeSquint_L"),
makePair(EYESQUINT_R, "EyeSquint_R"),
makePair(EYEDOWN_L, "EyeDown_L"),
makePair(EYEDOWN_R, "EyeDown_R"),
makePair(EYEIN_L, "EyeIn_L"),
makePair(EYEIN_R, "EyeIn_R"),
makePair(EYEOPEN_L, "EyeOpen_L"),
makePair(EYEOPEN_R, "EyeOpen_R"),
makePair(EYEOUT_L, "EyeOut_L"),
makePair(EYEOUT_R, "EyeOut_R"),
makePair(EYEUP_L, "EyeUp_L"),
makePair(EYEUP_R, "EyeUp_R"),
makePair(BROWSD_L, "BrowsD_L"),
makePair(BROWSD_R, "BrowsD_R"),
makePair(BROWSU_C, "BrowsU_C"),
makePair(BROWSU_L, "BrowsU_L"),
makePair(BROWSU_R, "BrowsU_R"),
makePair(JAWFWD, "JawFwd"),
makePair(JAWLEFT, "JawLeft"),
makePair(JAWOPEN, "JawOpen"),
makePair(JAWRIGHT, "JawRight"),
makePair(MOUTHLEFT, "MouthLeft"),
makePair(MOUTHRIGHT, "MouthRight"),
makePair(MOUTHFROWN_L, "MouthFrown_L"),
makePair(MOUTHFROWN_R, "MouthFrown_R"),
makePair(MOUTHSMILE_L, "MouthSmile_L"),
makePair(MOUTHSMILE_R, "MouthSmile_R"),
makePair(MOUTHDIMPLE_L, "MouthDimple_L"),
makePair(MOUTHDIMPLE_R, "MouthDimple_R"),
makePair(LIPSSTRETCH_L, "LipsStretch_L"),
makePair(LIPSSTRETCH_R, "LipsStretch_R"),
makePair(LIPSUPPERCLOSE, "LipsUpperClose"),
makePair(LIPSLOWERCLOSE, "LipsLowerClose"),
makePair(LIPSUPPEROPEN, "LipsUpperOpen"),
makePair(LIPSLOWEROPEN, "LipsLowerOpen"),
makePair(LIPSFUNNEL, "LipsFunnel"),
makePair(LIPSPUCKER, "LipsPucker"),
makePair(PUFF, "Puff"),
makePair(CHEEKSQUINT_L, "CheekSquint_L"),
makePair(CHEEKSQUINT_R, "CheekSquint_R"),
makePair(MOUTHCLOSE, "MouthClose"),
makePair(MOUTHUPPERUP_L, "MouthUpperUp_L"),
makePair(MOUTHUPPERUP_R, "MouthUpperUp_R"),
makePair(MOUTHLOWERDOWN_L, "MouthLowerDown_L"),
makePair(MOUTHLOWERDOWN_R, "MouthLowerDown_R"),
makePair(MOUTHPRESS_L, "MouthPress_L"),
makePair(MOUTHPRESS_R, "MouthPress_R"),
makePair(MOUTHSHRUGLOWER, "MouthShrugLower"),
makePair(MOUTHSHRUGUPPER, "MouthShrugUpper"),
makePair(NOSESNEER_L, "NoseSneer_L"),
makePair(NOSESNEER_R, "NoseSneer_R"),
makePair(TONGUEOUT, "TongueOut"),
makePair(USERBLENDSHAPE0, "UserBlendshape0"),
makePair(USERBLENDSHAPE1, "UserBlendshape1"),
makePair(USERBLENDSHAPE2, "UserBlendshape2"),
makePair(USERBLENDSHAPE3, "UserBlendshape3"),
makePair(USERBLENDSHAPE4, "UserBlendshape4"),
makePair(USERBLENDSHAPE5, "UserBlendshape5"),
makePair(USERBLENDSHAPE6, "UserBlendshape6"),
makePair(USERBLENDSHAPE7, "UserBlendshape7"),
makePair(USERBLENDSHAPE8, "UserBlendshape8"),
makePair(USERBLENDSHAPE9, "UserBlendshape9"),

// Aliases, PlayStation style names
makePair(LB, "L1"),

@@ -90,8 +90,73 @@ namespace controller {
// Grips
LEFT_GRIP,
RIGHT_GRIP,
LEFT_EYE_BLINK,
RIGHT_EYE_BLINK,

// blendshapes
EYEBLINK_L,
EYEBLINK_R,
EYESQUINT_L,
EYESQUINT_R,
EYEDOWN_L,
EYEDOWN_R,
EYEIN_L,
EYEIN_R,
EYEOPEN_L,
EYEOPEN_R,
EYEOUT_L,
EYEOUT_R,
EYEUP_L,
EYEUP_R,
BROWSD_L,
BROWSD_R,
BROWSU_C,
BROWSU_L,
BROWSU_R,
JAWFWD,
JAWLEFT,
JAWOPEN,
JAWRIGHT,
MOUTHLEFT,
MOUTHRIGHT,
MOUTHFROWN_L,
MOUTHFROWN_R,
MOUTHSMILE_L,
MOUTHSMILE_R,
MOUTHDIMPLE_L,
MOUTHDIMPLE_R,
LIPSSTRETCH_L,
LIPSSTRETCH_R,
LIPSUPPERCLOSE,
LIPSLOWERCLOSE,
LIPSUPPEROPEN,
LIPSLOWEROPEN,
LIPSFUNNEL,
LIPSPUCKER,
PUFF,
CHEEKSQUINT_L,
CHEEKSQUINT_R,
MOUTHCLOSE,
MOUTHUPPERUP_L,
MOUTHUPPERUP_R,
MOUTHLOWERDOWN_L,
MOUTHLOWERDOWN_R,
MOUTHPRESS_L,
MOUTHPRESS_R,
MOUTHSHRUGLOWER,
MOUTHSHRUGUPPER,
NOSESNEER_L,
NOSESNEER_R,
TONGUEOUT,
USERBLENDSHAPE0,
USERBLENDSHAPE1,
USERBLENDSHAPE2,
USERBLENDSHAPE3,
USERBLENDSHAPE4,
USERBLENDSHAPE5,
USERBLENDSHAPE6,
USERBLENDSHAPE7,
USERBLENDSHAPE8,
USERBLENDSHAPE9,

NUM_STANDARD_AXES,
LZ = LT,
RZ = RT

@@ -17,7 +17,7 @@
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/transform.hpp>

#include <FaceshiftConstants.h>
#include <BlendshapeConstants.h>

#include <hfm/ModelFormatLogging.h>
#include <hfm/HFMModelMath.h>

@@ -21,7 +21,7 @@

QVariantHash FSTReader::parseMapping(QIODevice* device) {
QVariantHash properties;

QByteArray line;
while (!(line = device->readLine()).isEmpty()) {
if ((line = line.trimmed()).startsWith('#')) {
@@ -34,12 +34,10 @@ QVariantHash FSTReader::parseMapping(QIODevice* device) {
QByteArray name = sections.at(0).trimmed();
if (sections.size() == 2) {
properties.insertMulti(name, sections.at(1).trimmed());

} else if (sections.size() == 3) {
QVariantHash heading = properties.value(name).toHash();
heading.insertMulti(sections.at(1).trimmed(), sections.at(2).trimmed());
properties.insert(name, heading);

} else if (sections.size() >= 4) {
QVariantHash heading = properties.value(name).toHash();
QVariantList contents;
@@ -50,14 +48,56 @@ QVariantHash FSTReader::parseMapping(QIODevice* device) {
properties.insert(name, heading);
}
}

return properties;
}
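
For orientation, FST mapping files feed this parser one `key = value` line at a time: two sections form a simple property, three form a nested hash entry, and four or more form a list entry. The lines below are an illustrative sketch of that shape, not content from this commit:

name = MyAvatar
joint = jointNeck = Neck
bs = EyeBlink_L = blink_left_mesh = 1.0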

static void removeBlendshape(QVariantHash& bs, const QString& key) {
if (bs.contains(key)) {
bs.remove(key);
}
}

static void splitBlendshapes(QVariantHash& bs, const QString& key, const QString& leftKey, const QString& rightKey) {
if (bs.contains(key) && !(bs.contains(leftKey) || bs.contains(rightKey))) {
// split the key blendshape into leftKey and rightKey, each with half the original weight
QVariantList origShapes = bs.values(key);
QVariantList halfShapes;
for (int i = 0; i < origShapes.size(); i++) {
QVariantList origShape = origShapes[i].toList();
QVariantList halfShape;
halfShape.append(origShape[0]);
halfShape.append(QVariant(0.5f * origShape[1].toFloat()));
bs.insertMulti(leftKey, halfShape);
bs.insertMulti(rightKey, halfShape);
}
}
}

// convert legacy blendshapes to ARKit blendshapes
static void fixUpLegacyBlendshapes(QVariantHash& properties) {
QVariantHash bs = properties.value("bs").toHash();

// These blendshapes have no ARKit equivalent, so we remove them.
removeBlendshape(bs, "JawChew");
removeBlendshape(bs, "ChinLowerRaise");
removeBlendshape(bs, "ChinUpperRaise");

// These blendshapes are split in ARKit; we replace them with their left and right sides, each with a weight of 1/2.
splitBlendshapes(bs, "LipsUpperUp", "MouthUpperUp_L", "MouthUpperUp_R");
splitBlendshapes(bs, "LipsLowerDown", "MouthLowerDown_L", "MouthLowerDown_R");
splitBlendshapes(bs, "Sneer", "NoseSneer_L", "NoseSneer_R");

// re-insert the mutated bs hash into the mapping properties.
properties.insert("bs", bs);
}
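
As a hedged before/after sketch of this fix-up (the mesh name and weight are invented for illustration), a legacy entry such as

bs = Sneer = sneer_mesh = 0.8

would be replaced by two half-weight ARKit entries

bs = NoseSneer_L = sneer_mesh = 0.4
bs = NoseSneer_R = sneer_mesh = 0.4

while JawChew, ChinLowerRaise, and ChinUpperRaise entries are dropped outright.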

QVariantHash FSTReader::readMapping(const QByteArray& data) {
QBuffer buffer(const_cast<QByteArray*>(&data));
buffer.open(QIODevice::ReadOnly);
return FSTReader::parseMapping(&buffer);
QVariantHash mapping = FSTReader::parseMapping(&buffer);
fixUpLegacyBlendshapes(mapping);
return mapping;
}

void FSTReader::writeVariant(QBuffer& buffer, QVariantHash::const_iterator& it) {

@@ -37,7 +37,7 @@
#include <ResourceManager.h>
#include <PathUtils.h>
#include <image/ColorChannel.h>
#include <FaceshiftConstants.h>
#include <BlendshapeConstants.h>

#include "FBXSerializer.h"

@@ -38,10 +38,10 @@ PacketVersion versionForPacketType(PacketType packetType) {
return static_cast<PacketVersion>(EntityQueryPacketVersion::ConicalFrustums);
case PacketType::AvatarIdentity:
case PacketType::AvatarData:
return static_cast<PacketVersion>(AvatarMixerPacketVersion::SendVerificationFailed);
return static_cast<PacketVersion>(AvatarMixerPacketVersion::ARKitBlendshapes);
case PacketType::BulkAvatarData:
case PacketType::KillAvatar:
return static_cast<PacketVersion>(AvatarMixerPacketVersion::SendVerificationFailed);
return static_cast<PacketVersion>(AvatarMixerPacketVersion::ARKitBlendshapes);
case PacketType::MessagesData:
return static_cast<PacketVersion>(MessageDataVersion::TextOrBinaryData);
// ICE packets

@@ -339,7 +339,8 @@ enum class AvatarMixerPacketVersion : PacketVersion {
SendMaxTranslationDimension,
FBXJointOrderChange,
HandControllerSection,
SendVerificationFailed
SendVerificationFailed,
ARKitBlendshapes
};
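
New values are appended to the end of the enum so that existing values keep their numeric wire meaning. A minimal sketch of the capability check this enables, assuming a senderVersion obtained from the packet header (the actual call sites are outside this hunk):

// Hedged sketch: only parse the extended ARKit blendshape data from new-enough senders.
if (senderVersion >= static_cast<PacketVersion>(AvatarMixerPacketVersion::ARKitBlendshapes)) {
    // read the ARKit blendshape coefficients
} else {
    // fall back to the legacy blendshape layout
}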

enum class DomainConnectRequestVersion : PacketVersion {

@@ -224,13 +224,7 @@ const OculusPlatformPluginPointer PluginManager::getOculusPlatformPlugin() {
}

DisplayPluginList PluginManager::getAllDisplayPlugins() {
if (thread() != QThread::currentThread()) {
DisplayPluginList list;
QMetaObject::invokeMethod(this, "getAllDisplayPlugins", Qt::BlockingQueuedConnection, Q_RETURN_ARG(DisplayPluginList, list));
return list;
} else {
return _displayPlugins;
}
return _displayPlugins;
}

const DisplayPluginList& PluginManager::getDisplayPlugins() {

@@ -39,6 +39,8 @@
#include "RenderUtilsLogging.h"
#include <Trace.h>

#include <BlendshapeConstants.h>

using namespace std;

int nakedModelPointerTypeId = qRegisterMetaType<ModelPointer>();

@@ -30,8 +30,8 @@
* format: <code>atp:/path/filename</code>. The assets may optionally be baked, in which case a request for the original
* unbaked version of the asset is automatically redirected to the baked version. The asset data may optionally be stored as
* compressed.</p>
* <p>The client cache can be access directly, using <code>"atp:"</code> or <code>"cache:"</code> URLs. Interface, avatar, and
* assignment client scripts can write to the cache. All script types can read from the cache.</p>
* <p>The client cache can be accessed directly, using <code>"atp:"</code> or <code>"cache:"</code> URLs. Interface, avatar,
* and assignment client scripts can write to the cache. All script types can read from the cache.</p>
*
* @namespace Assets
*

@@ -24,6 +24,9 @@ class QScriptEngine;
class QScriptValue;

/**jsdoc
* The <code>Recording</code> API makes and plays back recordings of voice and avatar movements. Playback may be done on a
* user's avatar or an assignment client agent (see the {@link Agent} API).
*
* @namespace Recording
*
* @hifi-interface
@@ -40,56 +43,79 @@ public:
public slots:

/**jsdoc
* @function Recording.loadRecording
* @param {string} url
* @param {Recording~loadRecordingCallback} [callback=null]
* Called when a {@link Recording.loadRecording} call is complete.
* @callback Recording~loadRecordingCallback
* @param {boolean} success - <code>true</code> if the recording has successfully been loaded, <code>false</code> if it
* hasn't.
* @param {string} url - The URL of the recording that was requested to be loaded.
*/
/**jsdoc
* Called when {@link Recording.loadRecording} is complete.
* @callback Recording~loadRecordingCallback
* @param {boolean} success
* @param {string} url
* Loads a recording so that it is ready for playing.
* @function Recording.loadRecording
* @param {string} url - The ATP, HTTP, or file system URL of the recording to load.
* @param {Recording~loadRecordingCallback} [callback=null] - The function to call upon completion.
* @example <caption>Load and play back a recording from the asset server.</caption>
* var assetPath = Window.browseAssets();
* print("Asset path: " + assetPath);
*
* if (assetPath.slice(-4) === ".hfr") {
* Recording.loadRecording("atp:" + assetPath, function (success, url) {
* if (!success) {
* print("Error loading recording.");
* return;
* }
* Recording.startPlaying();
* });
* }
*/
void loadRecording(const QString& url, QScriptValue callback = QScriptValue());


/**jsdoc
* Starts playing the recording currently loaded or paused.
* @function Recording.startPlaying
*/
void startPlaying();

/**jsdoc
* Pauses playback of the recording currently playing. Use {@link Recording.startPlaying|startPlaying} to resume playback
* or {@link Recording.stopPlaying|stopPlaying} to stop playback.
* @function Recording.pausePlayer
*/
void pausePlayer();

/**jsdoc
* Stops playing the recording currently playing or paused.
* @function Recording.stopPlaying
*/
void stopPlaying();

/**jsdoc
* Gets whether a recording is currently playing.
* @function Recording.isPlaying
* @returns {boolean}
* @returns {boolean} <code>true</code> if a recording is being played, <code>false</code> if one isn't.
*/
bool isPlaying() const;

/**jsdoc
* Gets whether recording playback is currently paused.
* @function Recording.isPaused
* @returns {boolean}
* @returns {boolean} <code>true</code> if recording playback is currently paused, <code>false</code> if it isn't.
*/
bool isPaused() const;


/**jsdoc
* Gets the current playback time in the loaded recording, in seconds.
* @function Recording.playerElapsed
* @returns {number}
* @returns {number} The current playback time in the loaded recording, in seconds.
*/
float playerElapsed() const;

/**jsdoc
* Gets the length of the loaded recording, in seconds.
* @function Recording.playerLength
* @returns {number}
* @returns {number} The length of the recording currently loaded, in seconds.
*/
float playerLength() const;

@@ -102,132 +128,222 @@ public slots:
void setPlayerVolume(float volume);

/**jsdoc
* <p class="important">Not implemented: This method is not implemented yet.</p>
* @function Recording.setPlayerAudioOffset
* @param {number} audioOffset
* @param {number} audioOffset - Audio offset.
*/
void setPlayerAudioOffset(float audioOffset);

/**jsdoc
* Sets the current playback time in the loaded recording.
* @function Recording.setPlayerTime
* @param {number} time
* @param {number} time - The current playback time, in seconds.
*/
void setPlayerTime(float time);

/**jsdoc
* Sets whether playback should repeat in a loop.
* @function Recording.setPlayerLoop
* @param {boolean} loop
* @param {boolean} loop - <code>true</code> if playback should repeat, <code>false</code> if it shouldn't.
*/
void setPlayerLoop(bool loop);


/**jsdoc
* Sets whether recording playback will use the display name that the recording was made with.
* @function Recording.setPlayerUseDisplayName
* @param {boolean} useDisplayName
* @param {boolean} useDisplayName - <code>true</code> to have recording playback use the display name that the recording
* was made with, <code>false</code> to have recording playback keep the current display name.
*/
void setPlayerUseDisplayName(bool useDisplayName);

/**jsdoc
* <p><em>Not used.</em></p>
* @function Recording.setPlayerUseAttachments
* @param {boolean} useAttachments
* @param {boolean} useAttachments - Use attachments.
* @deprecated This method is deprecated and will be removed.
*/
void setPlayerUseAttachments(bool useAttachments);

/**jsdoc
* <p><em>Not used.</em></p>
* @function Recording.setPlayerUseHeadModel
* @param {boolean} useHeadModel
* @todo <strong>Note:</strong> This function currently has no effect.
* @param {boolean} useHeadModel - Use head model.
* @deprecated This method is deprecated and will be removed.
*/
void setPlayerUseHeadModel(bool useHeadModel);

/**jsdoc
* Sets whether recording playback will use the avatar model that the recording was made with.
* @function Recording.setPlayerUseSkeletonModel
* @param {boolean} useSkeletonModel
* @todo <strong>Note:</strong> This function currently doesn't work.
* @param {boolean} useSkeletonModel - <code>true</code> to have recording playback use the avatar model that the recording
* was made with, <code>false</code> to have playback use the current avatar model.
*/
void setPlayerUseSkeletonModel(bool useSkeletonModel);

/**jsdoc
* Sets whether recordings are played at the current avatar location or the recorded location.
* @function Recording.setPlayFromCurrentLocation
* @param {boolean} playFromCurrentLocation
* @param {boolean} playFromCurrentLocation - <code>true</code> to play recordings at the current avatar location,
* <code>false</code> to play recordings at the recorded location.
*/
void setPlayFromCurrentLocation(bool playFromCurrentLocation);


/**jsdoc
* Gets whether recording playback will use the display name that the recording was made with.
* @function Recording.getPlayerUseDisplayName
* @returns {boolean}
* @returns {boolean} <code>true</code> if recording playback will use the display name that the recording was made with,
* <code>false</code> if playback will keep the current display name.
*/
bool getPlayerUseDisplayName() { return _useDisplayName; }

/**jsdoc
* <p><em>Not used.</em></p>
* @function Recording.getPlayerUseAttachments
* @returns {boolean}
* @returns {boolean} Use attachments.
* @deprecated This method is deprecated and will be removed.
*/
bool getPlayerUseAttachments() { return _useAttachments; }

/**jsdoc
* <p><em>Not used.</em></p>
* @function Recording.getPlayerUseHeadModel
* @returns {boolean}
* @returns {boolean} Use head model.
* @deprecated This method is deprecated and will be removed.
*/
bool getPlayerUseHeadModel() { return _useHeadModel; }

/**jsdoc
* Gets whether recording playback will use the avatar model that the recording was made with.
* @function Recording.getPlayerUseSkeletonModel
* @returns {boolean}
* @returns {boolean} <code>true</code> if recording playback will use the avatar model that the recording was made with,
* <code>false</code> if playback will use the current avatar model.
*/
bool getPlayerUseSkeletonModel() { return _useSkeletonModel; }

/**jsdoc
* Gets whether recordings are played at the current avatar location or the recorded location.
* @function Recording.getPlayFromCurrentLocation
* @returns {boolean}
* @returns {boolean} <code>true</code> if recordings are played at the current avatar location, <code>false</code> if
* played at the recorded location.
*/
bool getPlayFromCurrentLocation() { return _playFromCurrentLocation; }


/**jsdoc
* Starts making a recording.
* @function Recording.startRecording
*/
void startRecording();

/**jsdoc
* Stops making a recording. The recording may be saved using {@link Recording.saveRecording|saveRecording} or
* {@link Recording.saveRecordingToAsset|saveRecordingToAsset}, or immediately played back with
* {@link Recording.loadLastRecording|loadLastRecording}.
* @function Recording.stopRecording
*/
void stopRecording();

/**jsdoc
* Gets whether a recording is currently being made.
* @function Recording.isRecording
* @returns {boolean}
* @returns {boolean} <code>true</code> if a recording is currently being made, <code>false</code> if one isn't.
*/
bool isRecording() const;


/**jsdoc
* Gets the duration of the recording currently being made or recently made, in seconds.
* @function Recording.recorderElapsed
* @returns {number}
* @returns {number} The duration of the recording currently being made or recently made, in seconds.
*/
float recorderElapsed() const;


/**jsdoc
* Gets the default directory that recordings are saved in.
* @function Recording.getDefaultRecordingSaveDirectory
* @returns {string}
* @returns {string} The default recording save directory.
* @example <caption>Report the default save directory.</caption>
* print("Default save directory: " + Recording.getDefaultRecordingSaveDirectory());
*/
QString getDefaultRecordingSaveDirectory();

/**jsdoc
* Saves the most recently made recording to a file.
* @function Recording.saveRecording
* @param {string} filename
* @param {string} filename - The path and name of the file to save the recording to.
* @example <caption>Save a 5 second recording to a file.</caption>
* Recording.startRecording();
*
* Script.setTimeout(function () {
* Recording.stopRecording();
* var filename = (new Date()).toISOString(); // yyyy-mm-ddThh:mm:ss.sssZ
* filename = filename.slice(0, -5).replace(/:/g, "").replace("T", "-")
* + ".hfr"; // yyyymmdd-hhmmss.hfr
* filename = Recording.getDefaultRecordingSaveDirectory() + filename;
* Recording.saveRecording(filename);
* print("Saved recording: " + filename);
* }, 5000);
*/
void saveRecording(const QString& filename);

/**jsdoc
* Called when a {@link Recording.saveRecordingToAsset} call is complete.
* @callback Recording~saveRecordingToAssetCallback
* @param {string} url - The URL of the recording stored in the asset server if successful, <code>""</code> if
* unsuccessful. The URL has <code>atp:</code> as the scheme and the SHA256 hash as the filename (with no extension).
*/
/**jsdoc
* Saves the most recently made recording to the domain's asset server.
* @function Recording.saveRecordingToAsset
* @param {function} getClipAtpUrl
* @param {Recording~saveRecordingToAssetCallback} callback - The function to call upon completion.
* @returns {boolean} <code>true</code> if the recording is successfully being saved, <code>false</code> if not.
* @example <caption>Save a 5 second recording to the asset server.</caption>
* function onSavedRecordingToAsset(url) {
* if (url === "") {
* print("Couldn't save recording.");
* return;
* }
*
* print("Saved recording: " + url); // atp:SHA256
*
* var filename = (new Date()).toISOString(); // yyyy-mm-ddThh:mm:ss.sssZ
* filename = filename.slice(0, -5).replace(/:/g, "").replace("T", "-")
* + ".hfr"; // yyyymmdd-hhmmss.hfr
* var hash = url.slice(4); // Remove leading "atp:" from url.
* var mappingPath = "/recordings/" + filename;
* Assets.setMapping(mappingPath, hash, function (error) {
* if (error) {
* print("Mapping error: " + error);
* }
* });
* print("Mapped recording: " + mappingPath); // /recordings/filename
* }
*
* Recording.startRecording();
*
* Script.setTimeout(function () {
* Recording.stopRecording();
* var success = Recording.saveRecordingToAsset(onSavedRecordingToAsset);
* if (!success) {
* print("Couldn't save recording.");
* }
* }, 5000);
*/
bool saveRecordingToAsset(QScriptValue getClipAtpUrl);

/**jsdoc
* Loads the most recently made recording and plays it back on your avatar.
* @function Recording.loadLastRecording
* @example <caption>Make a 5 second recording and immediately play it back on your avatar.</caption>
* Recording.startRecording();
*
* Script.setTimeout(function () {
* Recording.stopRecording();
* Recording.loadLastRecording();
* }, 5000);
*/
void loadLastRecording();

@@ -1,5 +1,5 @@
//
// FaceshiftConstants.cpp
// BlendshapeConstants.cpp
//
//
// Created by Clement on 1/23/15.
@@ -9,7 +9,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "FaceshiftConstants.h"
#include "BlendshapeConstants.h"

const char* FACESHIFT_BLENDSHAPES[] = {
"EyeBlink_L",
@@ -34,7 +34,6 @@ const char* FACESHIFT_BLENDSHAPES[] = {
"JawFwd",
"JawLeft",
"JawOpen",
"JawChew",
"JawRight",
"MouthLeft",
"MouthRight",
@@ -48,34 +47,34 @@ const char* FACESHIFT_BLENDSHAPES[] = {
"LipsStretch_R",
"LipsUpperClose",
"LipsLowerClose",
"LipsUpperUp",
"LipsLowerDown",
"LipsUpperOpen",
"LipsLowerOpen",
"LipsFunnel",
"LipsPucker",
"ChinLowerRaise",
"ChinUpperRaise",
"Sneer",
"Puff",
"CheekSquint_L",
"CheekSquint_R",
"MouthClose",
"MouthUpperUp_L",
"MouthUpperUp_R",
"MouthLowerDown_L",
"MouthLowerDown_R",
"MouthPress_L",
"MouthPress_R",
"MouthShrugLower",
"MouthShrugUpper",
"NoseSneer_L",
"NoseSneer_R",
"TongueOut",
"UserBlendshape0",
"UserBlendshape1",
"UserBlendshape2",
"UserBlendshape3",
"UserBlendshape4",
"UserBlendshape5",
"UserBlendshape6",
"UserBlendshape7",
"UserBlendshape8",
"UserBlendshape9",
""
};

const int NUM_FACESHIFT_BLENDSHAPES = sizeof(FACESHIFT_BLENDSHAPES) / sizeof(char*);

const int EYE_BLINK_L_INDEX = 0;
const int EYE_BLINK_R_INDEX = 1;
const int EYE_SQUINT_L_INDEX = 2;
const int EYE_SQUINT_R_INDEX = 3;
const int EYE_OPEN_L_INDEX = 8;
const int EYE_OPEN_R_INDEX = 9;
const int BROWS_U_L_INDEX = 17;
const int BROWS_U_R_INDEX = 18;


const int EYE_BLINK_INDICES[] = { EYE_BLINK_L_INDEX, EYE_BLINK_R_INDEX };
const int EYE_SQUINT_INDICES[] = { EYE_SQUINT_L_INDEX, EYE_SQUINT_R_INDEX };
const int EYE_OPEN_INDICES[] = { EYE_OPEN_L_INDEX, EYE_OPEN_R_INDEX };
const int BROWS_U_INDICES[] = { BROWS_U_L_INDEX, BROWS_U_R_INDEX };
libraries/shared/src/BlendshapeConstants.h (new file, 118 lines)

@@ -0,0 +1,118 @@
//
// BlendshapeConstants.h
//
//
// Created by Clement on 1/23/15.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_BlendshapeConstants_h
#define hifi_BlendshapeConstants_h

/// The names of the blendshapes expected by Faceshift, terminated with an empty string.
extern const char* FACESHIFT_BLENDSHAPES[];

enum class Blendshapes : int {
EyeBlink_L = 0,
EyeBlink_R,
EyeSquint_L,
EyeSquint_R,
EyeDown_L,
EyeDown_R,
EyeIn_L,
EyeIn_R,
EyeOpen_L,
EyeOpen_R,
EyeOut_L,
EyeOut_R,
EyeUp_L,
EyeUp_R,
BrowsD_L,
BrowsD_R,
BrowsU_C,
BrowsU_L,
BrowsU_R,
JawFwd,
JawLeft,
JawOpen,
JawRight,
MouthLeft,
MouthRight,
MouthFrown_L,
MouthFrown_R,
MouthSmile_L,
MouthSmile_R,
MouthDimple_L,
MouthDimple_R,
LipsStretch_L,
LipsStretch_R,
LipsUpperClose,
LipsLowerClose,
LipsUpperOpen,
LipsLowerOpen,
LipsFunnel,
LipsPucker,
Puff,
CheekSquint_L,
CheekSquint_R,
MouthClose,
MouthUpperUp_L,
MouthUpperUp_R,
MouthLowerDown_L,
MouthLowerDown_R,
MouthPress_L,
MouthPress_R,
MouthShrugLower,
MouthShrugUpper,
NoseSneer_L,
NoseSneer_R,
TongueOut,
UserBlendshape0,
UserBlendshape1,
UserBlendshape2,
UserBlendshape3,
UserBlendshape4,
UserBlendshape5,
UserBlendshape6,
UserBlendshape7,
UserBlendshape8,
UserBlendshape9,
BlendshapeCount
};
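
After this commit the enum appears to mirror the order of the FACESHIFT_BLENDSHAPES name table, so one plausible lookup, assuming both declarations are visible, is:

// Minimal sketch, not taken from this commit: map an enum value to its canonical name.
// Relies on the enum and the name table staying declared in the same order.
inline const char* blendshapeName(Blendshapes shape) {
    return FACESHIFT_BLENDSHAPES[static_cast<int>(shape)];
}
// e.g. blendshapeName(Blendshapes::JawOpen) yields "JawOpen"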

enum class LegacyBlendshapes : int {
JawChew, // not in ARKit
LipsUpperUp, // split in ARKit
LipsLowerDown, // split in ARKit
ChinLowerRaise, // not in ARKit
ChinUpperRaise, // not in ARKit
Sneer, // split in ARKit
LegacyBlendshapeCount
};

// NEW in ARKit
// * MouthClose
// * MouthUpperUp_L
// * MouthUpperUp_R
// * MouthLowerDown_L
// * MouthLowerDown_R
// * MouthPress_L
// * MouthPress_R
// * MouthShrugLower
// * MouthShrugUpper
// * NoseSneer_L
// * NoseSneer_R
// * TongueOut

// Legacy shapes
// * JawChew (not in ARKit)
// * LipsUpperUp (split in ARKit)
// * LipsLowerDown (split in ARKit)
// * Sneer (split in ARKit)
// * ChinLowerRaise (not in ARKit)
// * ChinUpperRaise (not in ARKit)

#endif // hifi_BlendshapeConstants_h
@@ -1,25 +0,0 @@
//
// FaceshiftConstants.h
//
//
// Created by Clement on 1/23/15.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_FaceshiftConstants_h
#define hifi_FaceshiftConstants_h

/// The names of the blendshapes expected by Faceshift, terminated with an empty string.
extern const char* FACESHIFT_BLENDSHAPES[];
/// The size of FACESHIFT_BLENDSHAPES
extern const int NUM_FACESHIFT_BLENDSHAPES;
// Eyes and Brows indices
extern const int EYE_BLINK_INDICES[];
extern const int EYE_OPEN_INDICES[];
extern const int BROWS_U_INDICES[];
extern const int EYE_SQUINT_INDICES[];

#endif // hifi_FaceshiftConstants_h
@@ -1,7 +0,0 @@
set(TARGET_NAME trackers)
setup_hifi_library()
GroupSources("src")
link_hifi_libraries(shared)
include_hifi_library_headers(octree)

target_bullet()
@@ -1,133 +0,0 @@
//
// Created by Andrzej Kapolka on 4/9/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "FaceTracker.h"

#include <QTimer>
#include <GLMHelpers.h>
#include "Logging.h"
//#include "Menu.h"

const int FPS_TIMER_DELAY = 2000; // ms
const int FPS_TIMER_DURATION = 2000; // ms

const float DEFAULT_EYE_DEFLECTION = 0.25f;
Setting::Handle<float> FaceTracker::_eyeDeflection("faceshiftEyeDeflection", DEFAULT_EYE_DEFLECTION);
bool FaceTracker::_isMuted { true };

void FaceTracker::init() {
_isInitialized = true; // FaceTracker can be used now
}

inline float FaceTracker::getBlendshapeCoefficient(int index) const {
return isValidBlendshapeIndex(index) ? glm::mix(0.0f, _blendshapeCoefficients[index], getFadeCoefficient())
: 0.0f;
}

const QVector<float>& FaceTracker::getBlendshapeCoefficients() const {
static QVector<float> blendshapes;
float fadeCoefficient = getFadeCoefficient();
if (fadeCoefficient == 1.0f) {
return _blendshapeCoefficients;
} else {
blendshapes.resize(_blendshapeCoefficients.size());
for (int i = 0; i < _blendshapeCoefficients.size(); i++) {
blendshapes[i] = glm::mix(0.0f, _blendshapeCoefficients[i], fadeCoefficient);
}
return blendshapes;
}
}

float FaceTracker::getFadeCoefficient() const {
return _fadeCoefficient;
}

const glm::vec3 FaceTracker::getHeadTranslation() const {
return glm::mix(glm::vec3(0.0f), _headTranslation, getFadeCoefficient());
}

const glm::quat FaceTracker::getHeadRotation() const {
return safeMix(glm::quat(), _headRotation, getFadeCoefficient());
}

void FaceTracker::update(float deltaTime) {
// Based on exponential distributions: http://en.wikipedia.org/wiki/Exponential_distribution
static const float EPSILON = 0.02f; // MUST BE < 1.0f
static const float INVERSE_AT_EPSILON = -std::log(EPSILON); // So that f(1.0f) = EPSILON ~ 0.0f
static const float RELAXATION_TIME = 0.8f; // sec

if (isTracking()) {
if (_relaxationStatus == 1.0f) {
_fadeCoefficient = 1.0f;
return;
}
_relaxationStatus = glm::clamp(_relaxationStatus + deltaTime / RELAXATION_TIME, 0.0f, 1.0f);
_fadeCoefficient = 1.0f - std::exp(-_relaxationStatus * INVERSE_AT_EPSILON);
} else {
if (_relaxationStatus == 0.0f) {
_fadeCoefficient = 0.0f;
return;
}
_relaxationStatus = glm::clamp(_relaxationStatus - deltaTime / RELAXATION_TIME, 0.0f, 1.0f);
_fadeCoefficient = std::exp(-(1.0f - _relaxationStatus) * INVERSE_AT_EPSILON);
}
}
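
For reference (this note is a reading of the removed code above, not text from the diff): with relaxation status $s \in [0, 1]$ and $K = -\ln(\epsilon) = -\ln(0.02) \approx 3.91$, the fade-in branch computes $f(s) = 1 - e^{-sK}$, which saturates at $f(1) = 1 - \epsilon \approx 1$, while the fade-out branch computes $f(s) = e^{-(1-s)K}$, which decays to $f(0) = \epsilon \approx 0$.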

void FaceTracker::reset() {
if (isActive() && !_isCalculatingFPS) {
QTimer::singleShot(FPS_TIMER_DELAY, this, SLOT(startFPSTimer()));
_isCalculatingFPS = true;
}
}

void FaceTracker::startFPSTimer() {
_frameCount = 0;
QTimer::singleShot(FPS_TIMER_DURATION, this, SLOT(finishFPSTimer()));
}

void FaceTracker::countFrame() {
if (_isCalculatingFPS) {
_frameCount++;
}
}

void FaceTracker::finishFPSTimer() {
qCDebug(trackers) << "Face tracker FPS =" << (float)_frameCount / ((float)FPS_TIMER_DURATION / 1000.0f);
_isCalculatingFPS = false;
}

void FaceTracker::toggleMute() {
_isMuted = !_isMuted;
emit muteToggled();
}

void FaceTracker::setEyeDeflection(float eyeDeflection) {
_eyeDeflection.set(eyeDeflection);
}

void FaceTracker::updateFakeCoefficients(float leftBlink, float rightBlink, float browUp,
float jawOpen, float mouth2, float mouth3, float mouth4, QVector<float>& coefficients) {
const int MMMM_BLENDSHAPE = 34;
const int FUNNEL_BLENDSHAPE = 40;
const int SMILE_LEFT_BLENDSHAPE = 28;
const int SMILE_RIGHT_BLENDSHAPE = 29;
const int MAX_FAKE_BLENDSHAPE = 40; // Largest modified blendshape from above and below

coefficients.resize(std::max((int)coefficients.size(), MAX_FAKE_BLENDSHAPE + 1));
qFill(coefficients.begin(), coefficients.end(), 0.0f);
coefficients[_leftBlinkIndex] = leftBlink;
coefficients[_rightBlinkIndex] = rightBlink;
coefficients[_browUpCenterIndex] = browUp;
coefficients[_browUpLeftIndex] = browUp;
coefficients[_browUpRightIndex] = browUp;
coefficients[_jawOpenIndex] = jawOpen;
coefficients[SMILE_LEFT_BLENDSHAPE] = coefficients[SMILE_RIGHT_BLENDSHAPE] = mouth4;
coefficients[MMMM_BLENDSHAPE] = mouth2;
coefficients[FUNNEL_BLENDSHAPE] = mouth3;
}

@@ -1,131 +0,0 @@
//
// Created by Andrzej Kapolka on 4/9/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_FaceTracker_h
#define hifi_FaceTracker_h

#include <QObject>
#include <QVector>

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

#include <SettingHandle.h>

/// Base class for face trackers (DDE, BinaryVR).

class FaceTracker : public QObject {
Q_OBJECT

public:
virtual bool isActive() const { return false; }
virtual bool isTracking() const { return false; }

virtual void init();
virtual void update(float deltaTime);
virtual void reset();

float getFadeCoefficient() const;

const glm::vec3 getHeadTranslation() const;
const glm::quat getHeadRotation() const;

float getEstimatedEyePitch() const { return _estimatedEyePitch; }
float getEstimatedEyeYaw() const { return _estimatedEyeYaw; }

int getNumBlendshapes() const { return _blendshapeCoefficients.size(); }
bool isValidBlendshapeIndex(int index) const { return index >= 0 && index < getNumBlendshapes(); }
const QVector<float>& getBlendshapeCoefficients() const;
float getBlendshapeCoefficient(int index) const;

static bool isMuted() { return _isMuted; }
static void setIsMuted(bool isMuted) { _isMuted = isMuted; }

static float getEyeDeflection() { return _eyeDeflection.get(); }
static void setEyeDeflection(float eyeDeflection);

static void updateFakeCoefficients(float leftBlink,
float rightBlink,
float browUp,
float jawOpen,
float mouth2,
float mouth3,
float mouth4,
QVector<float>& coefficients);

signals:

/**jsdoc
* @function FaceTracker.muteToggled
* @returns {Signal}
*/
void muteToggled();

public slots:

// No JSDoc here because it's overridden in DdeFaceTracker.
virtual void setEnabled(bool enabled) = 0;

/**jsdoc
* @function FaceTracker.toggleMute
*/
void toggleMute();

/**jsdoc
* @function FaceTracker.getMuted
* @returns {boolean}
*/
bool getMuted() { return _isMuted; }

protected:
virtual ~FaceTracker() {};

bool _isInitialized = false;
static bool _isMuted;

glm::vec3 _headTranslation = glm::vec3(0.0f);
glm::quat _headRotation = glm::quat();
float _estimatedEyePitch = 0.0f;
float _estimatedEyeYaw = 0.0f;
QVector<float> _blendshapeCoefficients;

float _relaxationStatus = 0.0f; // Between 0.0f and 1.0f
float _fadeCoefficient = 0.0f; // Between 0.0f and 1.0f

void countFrame();

private slots:
void startFPSTimer();
void finishFPSTimer();

private:
bool _isCalculatingFPS = false;
int _frameCount = 0;

// see http://support.faceshift.com/support/articles/35129-export-of-blendshapes
static const int _leftBlinkIndex = 0;
static const int _rightBlinkIndex = 1;
static const int _leftEyeOpenIndex = 8;
static const int _rightEyeOpenIndex = 9;

// Brows
static const int _browDownLeftIndex = 14;
static const int _browDownRightIndex = 15;
static const int _browUpCenterIndex = 16;
static const int _browUpLeftIndex = 17;
static const int _browUpRightIndex = 18;

static const int _mouthSmileLeftIndex = 28;
static const int _mouthSmileRightIndex = 29;

static const int _jawOpenIndex = 21;

static Setting::Handle<float> _eyeDeflection;
};

#endif // hifi_FaceTracker_h
@@ -1,11 +0,0 @@
//
// Created by Bradley Austin Davis on 2017/04/25
// Copyright 2013-2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "Logging.h"

Q_LOGGING_CATEGORY(trackers, "hifi.trackers")
@@ -1,16 +0,0 @@
//
// Created by Bradley Austin Davis on 2017/04/25
// Copyright 2013-2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_TrackersLogging_h
#define hifi_TrackersLogging_h

#include <QtCore/QLoggingCategory>

Q_DECLARE_LOGGING_CATEGORY(trackers)

#endif // hifi_TrackersLogging_h
@@ -39,9 +39,10 @@ class QmlWindowClass;
class OffscreenQmlSurface;

/**jsdoc
* The <code>Tablet</code> API provides the facilities to work with the system or other tablet. In toolbar mode (Developer >
* UI > Tablet Becomes Toolbar), the tablet's menu buttons are displayed in a toolbar and other tablet content is displayed
* in a dialog.
* The <code>Tablet</code> API provides the facilities to work with the system or other tablet. In toolbar mode (see Developer
* > UI options), the tablet's menu buttons are displayed in a toolbar and other tablet content is displayed in a dialog.
*
* <p>See also the {@link Toolbars} API for working with toolbars.</p>
*
* @namespace Tablet
*
@@ -98,7 +99,7 @@ public:
void setToolbarScriptingInterface(ToolbarScriptingInterface* toolbarScriptingInterface) { _toolbarScriptingInterface = toolbarScriptingInterface; }

/**jsdoc
* Gets an instance of a tablet. A new tablet is created if one with the specified ID doesn't already exist.
* Gets an instance of a tablet. A new tablet is created if one with the specified name doesn't already exist.
* @function Tablet.getTablet
* @param {string} name - A unique name that identifies the tablet.
* @returns {TabletProxy} The tablet instance.
@@ -210,11 +211,10 @@ private:
Q_DECLARE_METATYPE(TabletButtonsProxyModel*);

/**jsdoc
* An instance of a tablet. In toolbar mode (Developer >
* UI > Tablet Becomes Toolbar), the tablet's menu buttons are displayed in a toolbar and other tablet content is displayed
* in a dialog.
* An instance of a tablet. In toolbar mode (see Developer > UI options), the tablet's menu buttons are displayed in a
* toolbar and other tablet content is displayed in a dialog.
*
* <p>Create a new tablet or retrieve an existing tablet using {@link Tablet.getTablet}.</p>
* <p>Retrieve an existing tablet or create a new tablet using {@link Tablet.getTablet}.</p>
*
* @class TabletProxy
*
@@ -317,7 +317,7 @@ public:
Q_INVOKABLE void returnToPreviousAppImpl(bool localSafeContext);

/**jsdoc
*@function TabletProxy#loadQMLOnTopImpl
* @function TabletProxy#loadQMLOnTopImpl
* @deprecated This function is deprecated and will be removed.
*/
// Internal function, do not call from scripts.

@@ -19,57 +19,16 @@

class QQuickItem;

/**jsdoc
* @class ToolbarButtonProxy
*
* @hifi-interface
* @hifi-client-entity
* @hifi-avatar
*/
// No JSDoc for ToolbarButtonProxy because ToolbarProxy#addButton() doesn't work.
class ToolbarButtonProxy : public QmlWrapper {
Q_OBJECT

public:
ToolbarButtonProxy(QObject* qmlObject, QObject* parent = nullptr);

/**jsdoc
* @function ToolbarButtonProxy#editProperties
* @param {object} properties
*/
Q_INVOKABLE void editProperties(const QVariantMap& properties);


// QmlWrapper methods.

/**jsdoc
* @function ToolbarButtonProxy#writeProperty
* @parm {string} propertyName
* @param {object} propertyValue
*/

/**jsdoc
* @function ToolbarButtonProxy#writeProperties
* @param {object} properties
*/

/**jsdoc
* @function ToolbarButtonProxy#readProperty
* @param {string} propertyName
* @returns {object}
*/

/**jsdoc
* @function ToolbarButtonProxy#readProperties
* @param {string[]} propertyList
* @returns {object}
*/

signals:

/**jsdoc
* @function ToolbarButtonProxy#clicked
* @returns {Signal}
*/
void clicked();

protected:

@@ -80,7 +39,12 @@ protected:
Q_DECLARE_METATYPE(ToolbarButtonProxy*);

/**jsdoc
* An instance of a toolbar.
*
* <p>Retrieve an existing toolbar or create a new toolbar using {@link Toolbars.getToolbar}.</p>
*
* @class ToolbarProxy
* @hideconstructor
*
* @hifi-interface
* @hifi-client-entity
@@ -112,32 +76,46 @@ public:
// QmlWrapper methods.

/**jsdoc
* Sets the value of a toolbar property. A property is added to the toolbar if the named property doesn't already
* exist.
* @function ToolbarProxy#writeProperty
* @parm {string} propertyName
* @param {object} propertyValue
* @param {string} propertyName - The name of the property. Toolbar properties are those in the QML implementation of the
* toolbar.
* @param {object} propertyValue - The value of the property.
*/

/**jsdoc
* Sets the values of toolbar properties. A property is added to the toolbar if a named property doesn't already
* exist.
* @function ToolbarProxy#writeProperties
* @param {object} properties
* @param {object} properties - The names and values of the properties to set. Toolbar properties are those in the QML
* implementation of the toolbar.
*/

/**jsdoc
* Gets the value of a toolbar property.
* @function ToolbarProxy#readProperty
* @param {string} propertyName
* @returns {object}
* @param {string} propertyName - The property name. Toolbar properties are those in the QML implementation of the toolbar.
* @returns {object} The value of the property if the property name is valid, otherwise <code>undefined</code>.
*/

/**jsdoc
* Gets the values of toolbar properties.
* @function ToolbarProxy#readProperties
* @param {string[]} propertyList
* @returns {object}
* @param {string[]} propertyList - The names of the properties to get the values of. Toolbar properties are those in the
* QML implementation of the toolbar.
* @returns {object} The names and values of the specified properties. If the toolbar doesn't have a particular property
* then the result doesn't include that property.
*/
};

Q_DECLARE_METATYPE(ToolbarProxy*);

/**jsdoc
* The <code>Toolbars</code> API provides facilities to work with the system or other toolbar.
*
* <p>See also the {@link Tablet} API for use of the system tablet and toolbar in desktop and HMD modes.</p>
*
* @namespace Toolbars
*
* @hifi-interface
@@ -149,13 +127,33 @@ class ToolbarScriptingInterface : public QObject, public Dependency {
public:

/**jsdoc
* Gets an instance of a toolbar. A new toolbar is created if one with the specified name doesn't already exist.
* @function Toolbars.getToolbar
* @param {string} toolbarID
* @returns {ToolbarProxy}
* @param {string} name - A unique name that identifies the toolbar.
* @returns {ToolbarProxy} The toolbar instance.
*/
Q_INVOKABLE ToolbarProxy* getToolbar(const QString& toolbarId);

signals:
/**jsdoc
* Triggered when the visibility of a toolbar changes.
* @function Toolbars.toolbarVisibleChanged
* @param {boolean} isVisible - <code>true</code> if the toolbar is visible, <code>false</code> if it is hidden.
* @param {string} toolbarName - The name of the toolbar.
* @returns {Signal}
* @example <caption>Briefly hide the system toolbar.</caption>
* Toolbars.toolbarVisibleChanged.connect(function(visible, name) {
* print("Toolbar " + name + " visible changed to " + visible);
* });
*
* var toolbar = Toolbars.getToolbar("com.highfidelity.interface.toolbar.system");
* if (toolbar) {
* toolbar.writeProperty("visible", false);
* Script.setTimeout(function () {
* toolbar.writeProperty("visible", true);
* }, 2000);
* }
*/
void toolbarVisibleChanged(bool isVisible, QString toolbarName);
};

@@ -26,10 +26,9 @@ exports.handlers = {
'../../assignment-client/src/octree',
'../../interface/src',
'../../interface/src/assets',
'../../interface/src/audio',
//'../../interface/src/audio', Exclude the AudioScope API from the output.
'../../interface/src/avatar',
'../../interface/src/commerce',
'../../interface/src/devices',
'../../interface/src/java',
'../../interface/src/networking',
'../../interface/src/raypick',
@@ -64,7 +63,6 @@ exports.handlers = {
'../../libraries/shared/src',
'../../libraries/shared/src/shared',
'../../libraries/task/src/task',
'../../libraries/trackers/src/trackers',
'../../libraries/ui/src',
'../../libraries/ui/src/ui',
'../../plugins/oculus/src',