Merge pull request #2636 from ey6es/faceplus
Basic Faceplus integration, cleanup for handling the various face trackers.
Commit: de6b55644b
18 changed files with 495 additions and 97 deletions
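The core of the cleanup is the new FaceTracker base class (interface/src/devices/FaceTracker.h below), which holds the head pose, eye angles, and blendshape coefficients shared by Faceshift, Faceplus, and Visage, plus a single Application::getActiveFaceTracker() accessor that returns whichever tracker is currently active, preferring Faceshift, then Faceplus, then Visage. The following is a minimal stand-alone sketch of that selection pattern; the class bodies are simplified stand-ins, and isActive() is made virtual here only for brevity (in the actual diff each concrete tracker declares its own isActive()).

// Simplified sketch of the tracker-selection pattern this PR introduces.
// The real trackers are QObject-based classes in interface/src/devices/.
#include <iostream>

class FaceTracker {
public:
    virtual ~FaceTracker() {}
    virtual bool isActive() const = 0; // virtual only in this sketch
};

class Faceshift : public FaceTracker {
public:
    bool isActive() const { return false; } // e.g. no faceshift connection
};

class Faceplus : public FaceTracker {
public:
    bool isActive() const { return true; }  // e.g. Faceplus is tracking
};

class Visage : public FaceTracker {
public:
    bool isActive() const { return false; }
};

// Mirrors Application::getActiveFaceTracker(): the first active tracker wins,
// in the fixed priority order Faceshift, then Faceplus, then Visage.
FaceTracker* getActiveFaceTracker(Faceshift& faceshift, Faceplus& faceplus, Visage& visage) {
    if (faceshift.isActive()) {
        return &faceshift;
    }
    if (faceplus.isActive()) {
        return &faceplus;
    }
    if (visage.isActive()) {
        return &visage;
    }
    return nullptr; // the 2014 code returns NULL when no tracker is active
}

int main() {
    Faceshift faceshift;
    Faceplus faceplus;
    Visage visage;
    FaceTracker* tracker = getActiveFaceTracker(faceshift, faceplus, visage);
    std::cout << "active tracker present: " << (tracker != nullptr) << std::endl;
    return 0;
}

Callers in the diff below always check the result for NULL before using it, since every tracker can be inactive at once.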
.gitignore (vendored): 4 changes

@@ -42,5 +42,9 @@ interface/external/visage/*
interface/resources/visage/*
!interface/resources/visage/tracker.cfg

# Ignore Faceplus
interface/external/faceplus/*
!interface/external/faceplus/readme.txt

# Ignore interfaceCache for Linux users
interface/interfaceCache/
cmake/modules/FindFaceplus.cmake (new file): 42 lines

@@ -0,0 +1,42 @@
# Try to find the Faceplus library
#
# You must provide a FACEPLUS_ROOT_DIR which contains lib and include directories
#
# Once done this will define
#
# FACEPLUS_FOUND - system found Faceplus
# FACEPLUS_INCLUDE_DIRS - the Faceplus include directory
# FACEPLUS_LIBRARIES - Link this to use Faceplus
#
# Created on 4/8/2014 by Andrzej Kapolka
# Copyright (c) 2014 High Fidelity
#

if (FACEPLUS_LIBRARIES AND FACEPLUS_INCLUDE_DIRS)
  # in cache already
  set(FACEPLUS_FOUND TRUE)
else (FACEPLUS_LIBRARIES AND FACEPLUS_INCLUDE_DIRS)
  find_path(FACEPLUS_INCLUDE_DIRS faceplus.h ${FACEPLUS_ROOT_DIR}/include)

  if (WIN32)
    find_library(FACEPLUS_LIBRARIES faceplus.lib ${FACEPLUS_ROOT_DIR}/win32/)
  endif (WIN32)

  if (FACEPLUS_INCLUDE_DIRS AND FACEPLUS_LIBRARIES)
    set(FACEPLUS_FOUND TRUE)
  endif (FACEPLUS_INCLUDE_DIRS AND FACEPLUS_LIBRARIES)

  if (FACEPLUS_FOUND)
    if (NOT FACEPLUS_FIND_QUIETLY)
      message(STATUS "Found Faceplus... ${FACEPLUS_LIBRARIES}")
    endif (NOT FACEPLUS_FIND_QUIETLY)
  else ()
    if (FACEPLUS_FIND_REQUIRED)
      message(FATAL_ERROR "Could not find Faceplus")
    endif (FACEPLUS_FIND_REQUIRED)
  endif ()

  # show the FACEPLUS_INCLUDE_DIRS and FACEPLUS_LIBRARIES variables only in the advanced view
  mark_as_advanced(FACEPLUS_INCLUDE_DIRS FACEPLUS_LIBRARIES)

endif (FACEPLUS_LIBRARIES AND FACEPLUS_INCLUDE_DIRS)
interface/CMakeLists.txt

@@ -12,6 +12,7 @@ project(${TARGET_NAME})

# setup for find modules
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../cmake/modules/")
set(FACEPLUS_ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/external/faceplus")
set(FACESHIFT_ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/external/faceshift")
set(LIBOVR_ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/external/oculus")
set(SIXENSE_ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/external/Sixense")

@@ -130,6 +131,7 @@ link_hifi_library(audio ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(script-engine ${TARGET_NAME} "${ROOT_DIR}")

# find any optional libraries
find_package(Faceplus)
find_package(Faceshift)
find_package(LibOVR)
find_package(Sixense)

@@ -163,6 +165,13 @@ if (VISAGE_FOUND AND NOT DISABLE_VISAGE)
    target_link_libraries(${TARGET_NAME} "${VISAGE_LIBRARIES}")
endif (VISAGE_FOUND AND NOT DISABLE_VISAGE)

# and with Faceplus library, also for webcam feature tracking
if (FACEPLUS_FOUND AND NOT DISABLE_FACEPLUS)
    add_definitions(-DHAVE_FACEPLUS)
    include_directories(SYSTEM "${FACEPLUS_INCLUDE_DIRS}")
    target_link_libraries(${TARGET_NAME} "${FACEPLUS_LIBRARIES}")
endif (FACEPLUS_FOUND AND NOT DISABLE_FACEPLUS)

# and with LibOVR for Oculus Rift
if (LIBOVR_FOUND AND NOT DISABLE_LIBOVR)
    add_definitions(-DHAVE_LIBOVR)
interface/external/faceplus/readme.txt (vendored, new file): 11 lines

@@ -0,0 +1,11 @@

Instructions for adding the Faceplus driver to Interface
Andrzej Kapolka, April 8, 2014

1. Copy the Faceplus sdk folders (include, win32) into the interface/external/faceplus folder.
   This readme.txt should be there as well.

2. Copy the Faceplus DLLs from the win32 folder into your path.

3. Delete your build directory, run cmake and build, and you should be all set.
interface/src/Application.cpp

@@ -1341,6 +1341,12 @@ glm::vec3 Application::getMouseVoxelWorldCoordinates(const VoxelDetail& mouseVox
        (mouseVoxel.z + mouseVoxel.s / 2.f) * TREE_SCALE);
}

FaceTracker* Application::getActiveFaceTracker() {
    return _faceshift.isActive() ? static_cast<FaceTracker*>(&_faceshift) :
        (_faceplus.isActive() ? static_cast<FaceTracker*>(&_faceplus) :
            (_visage.isActive() ? static_cast<FaceTracker*>(&_visage) : NULL));
}

struct SendVoxelsOperationArgs {
    const unsigned char* newBaseOctCode;
};

@@ -1560,8 +1566,9 @@ void Application::init() {
    }
    qDebug("Loaded settings");

    // initialize Visage and Faceshift after loading the menu settings
    // initialize our face trackers after loading the menu settings
    _faceshift.init();
    _faceplus.init();
    _visage.init();

    // fire off an immediate domain-server check in now that settings are loaded

@@ -1725,19 +1732,11 @@ void Application::updateMyAvatarLookAtPosition() {
            glm::distance(_mouseRayOrigin, _myAvatar->getHead()->calculateAverageEyePosition()));
        lookAtSpot = _mouseRayOrigin + _mouseRayDirection * qMax(minEyeDistance, distance);
    }
    bool trackerActive = false;
    float eyePitch, eyeYaw;
    if (_faceshift.isActive()) {
        eyePitch = _faceshift.getEstimatedEyePitch();
        eyeYaw = _faceshift.getEstimatedEyeYaw();
        trackerActive = true;

    } else if (_visage.isActive()) {
        eyePitch = _visage.getEstimatedEyePitch();
        eyeYaw = _visage.getEstimatedEyeYaw();
        trackerActive = true;
    }
    if (trackerActive) {
    FaceTracker* tracker = getActiveFaceTracker();
    if (tracker) {
        float eyePitch = tracker->getEstimatedEyePitch();
        float eyeYaw = tracker->getEstimatedEyeYaw();

        // deflect using Faceshift gaze data
        glm::vec3 origin = _myAvatar->getHead()->calculateAverageEyePosition();
        float pitchSign = (_myCamera.getMode() == CAMERA_MODE_MIRROR) ? -1.0f : 1.0f;

@@ -1823,15 +1822,15 @@ void Application::updateCamera(float deltaTime) {
    bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
    PerformanceWarning warn(showWarnings, "Application::updateCamera()");

    if (!OculusManager::isConnected() && !TV3DManager::isConnected()) {
        if (Menu::getInstance()->isOptionChecked(MenuOption::OffAxisProjection)) {
            float xSign = _myCamera.getMode() == CAMERA_MODE_MIRROR ? 1.0f : -1.0f;
            if (_faceshift.isActive()) {
                const float EYE_OFFSET_SCALE = 0.025f;
                glm::vec3 position = _faceshift.getHeadTranslation() * EYE_OFFSET_SCALE;
                _myCamera.setEyeOffsetPosition(glm::vec3(position.x * xSign, position.y, -position.z));
                updateProjectionMatrix();
            }
    if (!OculusManager::isConnected() && !TV3DManager::isConnected() &&
            Menu::getInstance()->isOptionChecked(MenuOption::OffAxisProjection)) {
        FaceTracker* tracker = getActiveFaceTracker();
        if (tracker) {
            const float EYE_OFFSET_SCALE = 0.025f;
            glm::vec3 position = tracker->getHeadTranslation() * EYE_OFFSET_SCALE;
            float xSign = (_myCamera.getMode() == CAMERA_MODE_MIRROR) ? 1.0f : -1.0f;
            _myCamera.setEyeOffsetPosition(glm::vec3(position.x * xSign, position.y, -position.z));
            updateProjectionMatrix();
        }
    }
}
interface/src/Application.h

@@ -52,6 +52,7 @@
#include "avatar/Avatar.h"
#include "avatar/AvatarManager.h"
#include "avatar/MyAvatar.h"
#include "devices/Faceplus.h"
#include "devices/Faceshift.h"
#include "devices/SixenseManager.h"
#include "devices/Visage.h"

@@ -176,8 +177,10 @@ public:
    bool isMouseHidden() const { return _mouseHidden; }
    const glm::vec3& getMouseRayOrigin() const { return _mouseRayOrigin; }
    const glm::vec3& getMouseRayDirection() const { return _mouseRayDirection; }
    Faceplus* getFaceplus() { return &_faceplus; }
    Faceshift* getFaceshift() { return &_faceshift; }
    Visage* getVisage() { return &_visage; }
    FaceTracker* getActiveFaceTracker();
    SixenseManager* getSixenseManager() { return &_sixenseManager; }
    BandwidthMeter* getBandwidthMeter() { return &_bandwidthMeter; }
    QUndoStack* getUndoStack() { return &_undoStack; }

@@ -316,6 +319,7 @@ private:
    // Various helper functions called during update()
    void updateLOD();
    void updateMouseRay();
    void updateFaceplus();
    void updateFaceshift();
    void updateVisage();
    void updateMyAvatarLookAtPosition();

@@ -414,9 +418,10 @@ private:
    AvatarManager _avatarManager;
    MyAvatar* _myAvatar; // TODO: move this and relevant code to AvatarManager (or MyAvatar as the case may be)

    Faceplus _faceplus;
    Faceshift _faceshift;
    Visage _visage;

    SixenseManager _sixenseManager;

    Camera _myCamera; // My view onto the world
interface/src/Menu.cpp

@@ -303,6 +303,11 @@ Menu::Menu() :
        true,
        appInstance->getFaceshift(),
        SLOT(setTCPEnabled(bool)));
#ifdef HAVE_FACEPLUS
    addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::Faceplus, 0, true,
        appInstance->getFaceplus(), SLOT(updateEnabled()));
#endif

#ifdef HAVE_VISAGE
    addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::Visage, 0, true,
        appInstance->getVisage(), SLOT(updateEnabled()));
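The checkable entries above are created with the Menu class's addCheckableActionToQMenuAndActionHash() helper, which takes the target menu, the option name, a keyboard shortcut (0 here), the initial checked state, a receiver object, and the slot to call when the option is toggled. A rough, hypothetical simplification of what such a helper does with plain Qt calls is sketched below; this is not the actual hifi/overte implementation (which, as its name suggests, also records the action in a hash for later lookup by Menu::isOptionChecked()).

// Hypothetical simplification of a "checkable menu option" helper.
// The function name and behavior here are illustrative only.
#include <QAction>
#include <QMenu>
#include <QObject>

QAction* addCheckableAction(QMenu* menu, const QString& name, bool checked,
        const QObject* receiver, const char* slot) {
    QAction* action = menu->addAction(name); // create the entry in the menu
    action->setCheckable(true);
    action->setChecked(checked);
    if (receiver && slot) {
        // e.g. SLOT(updateEnabled()) on a Faceplus or Visage instance
        QObject::connect(action, SIGNAL(toggled(bool)), receiver, slot);
    }
    return action;
}

With a wiring like this, toggling the "Faceplus" or "Visage" menu option calls the corresponding tracker's updateEnabled() slot, which is how the options below enable and disable the trackers at runtime.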
interface/src/Menu.h

@@ -253,6 +253,7 @@ namespace MenuOption {
    const QString EchoLocalAudio = "Echo Local Audio";
    const QString EchoServerAudio = "Echo Server Audio";
    const QString Enable3DTVMode = "Enable 3DTV Mode";
    const QString Faceplus = "Faceplus";
    const QString Faceshift = "Faceshift";
    const QString FilterSixense = "Smooth Sixense Movement";
    const QString FirstPerson = "First Person";
interface/src/avatar/Head.cpp

@@ -64,17 +64,11 @@ void Head::reset() {

void Head::simulate(float deltaTime, bool isMine, bool billboard) {
    // Update audio trailing average for rendering facial animations
    Faceshift* faceshift = Application::getInstance()->getFaceshift();
    Visage* visage = Application::getInstance()->getVisage();
    if (isMine) {
        _isFaceshiftConnected = false;
        if (faceshift->isActive()) {
            _blendshapeCoefficients = faceshift->getBlendshapeCoefficients();
            _isFaceshiftConnected = true;

        } else if (visage->isActive()) {
            _blendshapeCoefficients = visage->getBlendshapeCoefficients();
            _isFaceshiftConnected = true;
        FaceTracker* faceTracker = Application::getInstance()->getActiveFaceTracker();
        if ((_isFaceshiftConnected = faceTracker)) {
            _blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
            _isFaceshiftConnected = true;
        }
    }

@@ -156,8 +150,9 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
        const float BROW_LIFT_SCALE = 500.0f;
        const float JAW_OPEN_SCALE = 0.01f;
        const float JAW_OPEN_DEAD_ZONE = 0.75f;
        faceshift->updateFakeCoefficients(_leftEyeBlink, _rightEyeBlink, min(1.0f, _browAudioLift * BROW_LIFT_SCALE),
            glm::clamp(sqrt(_averageLoudness * JAW_OPEN_SCALE) - JAW_OPEN_DEAD_ZONE, 0.0f, 1.0f), _blendshapeCoefficients);
        Application::getInstance()->getFaceshift()->updateFakeCoefficients(_leftEyeBlink, _rightEyeBlink,
            min(1.0f, _browAudioLift * BROW_LIFT_SCALE), glm::clamp(sqrt(_averageLoudness * JAW_OPEN_SCALE) -
            JAW_OPEN_DEAD_ZONE, 0.0f, 1.0f), _blendshapeCoefficients);
    }

    if (!isMine) {
interface/src/avatar/MyAvatar.cpp

@@ -352,24 +352,13 @@ void MyAvatar::simulate(float deltaTime) {

// Update avatar head rotation with sensor data
void MyAvatar::updateFromGyros(float deltaTime) {
    Faceshift* faceshift = Application::getInstance()->getFaceshift();
    Visage* visage = Application::getInstance()->getVisage();
    glm::vec3 estimatedPosition, estimatedRotation;

    bool trackerActive = false;
    if (faceshift->isActive()) {
        estimatedPosition = faceshift->getHeadTranslation();
        estimatedRotation = glm::degrees(safeEulerAngles(faceshift->getHeadRotation()));
        trackerActive = true;

    } else if (visage->isActive()) {
        estimatedPosition = visage->getHeadTranslation();
        estimatedRotation = glm::degrees(safeEulerAngles(visage->getHeadRotation()));
        trackerActive = true;
    }

    Head* head = getHead();
    if (trackerActive) {
    FaceTracker* tracker = Application::getInstance()->getActiveFaceTracker();
    if (tracker) {
        estimatedPosition = tracker->getHeadTranslation();
        estimatedRotation = glm::degrees(safeEulerAngles(tracker->getHeadRotation()));

        // Rotate the body if the head is turned beyond the screen
        if (Menu::getInstance()->isOptionChecked(MenuOption::TurnWithHead)) {
            const float TRACKER_YAW_TURN_SENSITIVITY = 0.5f;

@@ -384,13 +373,14 @@ void MyAvatar::updateFromGyros(float deltaTime) {
            }
        }
    }
    }
    }

    // Set the rotation of the avatar's head (as seen by others, not affecting view frustum)
    // to be scaled. Pitch is greater to emphasize nodding behavior / synchrony.
    const float AVATAR_HEAD_PITCH_MAGNIFY = 1.0f;
    const float AVATAR_HEAD_YAW_MAGNIFY = 1.0f;
    const float AVATAR_HEAD_ROLL_MAGNIFY = 1.0f;
    Head* head = getHead();
    head->setDeltaPitch(estimatedRotation.x * AVATAR_HEAD_PITCH_MAGNIFY);
    head->setDeltaYaw(estimatedRotation.y * AVATAR_HEAD_YAW_MAGNIFY);
    head->setDeltaRoll(estimatedRotation.z * AVATAR_HEAD_ROLL_MAGNIFY);
interface/src/devices/FaceTracker.cpp (new file): 17 lines

@@ -0,0 +1,17 @@
//
// FaceTracker.cpp
// interface/src/devices
//
// Created by Andrzej Kapolka on 4/9/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "FaceTracker.h"

FaceTracker::FaceTracker() :
    _estimatedEyePitch(0.0f),
    _estimatedEyeYaw(0.0f) {
}
interface/src/devices/FaceTracker.h (new file): 46 lines

@@ -0,0 +1,46 @@
//
// FaceTracker.h
// interface/src/devices
//
// Created by Andrzej Kapolka on 4/9/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_FaceTracker_h
#define hifi_FaceTracker_h

#include <QObject>
#include <QVector>

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

/// Base class for face trackers (Faceshift, Visage, Faceplus).
class FaceTracker : public QObject {
    Q_OBJECT

public:

    FaceTracker();

    const glm::vec3& getHeadTranslation() const { return _headTranslation; }
    const glm::quat& getHeadRotation() const { return _headRotation; }

    float getEstimatedEyePitch() const { return _estimatedEyePitch; }
    float getEstimatedEyeYaw() const { return _estimatedEyeYaw; }

    const QVector<float>& getBlendshapeCoefficients() const { return _blendshapeCoefficients; }

protected:

    glm::vec3 _headTranslation;
    glm::quat _headRotation;
    float _estimatedEyePitch;
    float _estimatedEyeYaw;
    QVector<float> _blendshapeCoefficients;
};

#endif // hifi_FaceTracker_h
interface/src/devices/Faceplus.cpp (new file): 230 lines

@@ -0,0 +1,230 @@
//
// Faceplus.cpp
// interface/src/devices
//
// Created by Andrzej Kapolka on 4/9/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <QThread>

#ifdef HAVE_FACEPLUS
#include <faceplus.h>
#endif

#include "Application.h"
#include "Faceplus.h"
#include "renderer/FBXReader.h"

static int floatVectorMetaTypeId = qRegisterMetaType<QVector<float> >();

Faceplus::Faceplus() :
    _enabled(false),
    _active(false) {

#ifdef HAVE_FACEPLUS
    // these are ignored--any values will do
    faceplus_log_in("username", "password");
#endif
}

Faceplus::~Faceplus() {
    setEnabled(false);
}

void Faceplus::init() {
    connect(Application::getInstance()->getFaceshift(), SIGNAL(connectionStateChanged()), SLOT(updateEnabled()));
    updateEnabled();
}

void Faceplus::setState(const glm::quat& headRotation, float estimatedEyePitch, float estimatedEyeYaw,
        const QVector<float>& blendshapeCoefficients) {
    _headRotation = headRotation;
    _estimatedEyePitch = estimatedEyePitch;
    _estimatedEyeYaw = estimatedEyeYaw;
    _blendshapeCoefficients = blendshapeCoefficients;
    _active = true;
}

void Faceplus::updateEnabled() {
    setEnabled(Menu::getInstance()->isOptionChecked(MenuOption::Faceplus) &&
        !(Menu::getInstance()->isOptionChecked(MenuOption::Faceshift) &&
            Application::getInstance()->getFaceshift()->isConnectedOrConnecting()));
}

void Faceplus::setEnabled(bool enabled) {
    if (_enabled == enabled) {
        return;
    }
    if ((_enabled = enabled)) {
        _reader = new FaceplusReader();
        QThread* readerThread = new QThread(this);
        _reader->moveToThread(readerThread);
        readerThread->start();
        QMetaObject::invokeMethod(_reader, "init");

    } else {
        QThread* readerThread = _reader->thread();
        QMetaObject::invokeMethod(_reader, "shutdown");
        readerThread->wait();
        delete readerThread;
        _active = false;
    }
}

#ifdef HAVE_FACEPLUS
static QMultiHash<QByteArray, QPair<int, float> > createChannelNameMap() {
    QMultiHash<QByteArray, QPair<QByteArray, float> > blendshapeMap;
    blendshapeMap.insert("EyeBlink_L", QPair<QByteArray, float>("Mix::Blink_Left", 1.0f));
    blendshapeMap.insert("EyeBlink_R", QPair<QByteArray, float>("Mix::Blink_Right", 1.0f));
    blendshapeMap.insert("BrowsD_L", QPair<QByteArray, float>("Mix::BrowsDown_Left", 1.0f));
    blendshapeMap.insert("BrowsD_R", QPair<QByteArray, float>("Mix::BrowsDown_Right", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::BrowsIn_Left", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::BrowsIn_Right", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::BrowsOuterLower_Left", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::BrowsOuterLower_Right", 1.0f));
    blendshapeMap.insert("BrowsU_L", QPair<QByteArray, float>("Mix::BrowsUp_Left", 10.0f));
    blendshapeMap.insert("BrowsU_R", QPair<QByteArray, float>("Mix::BrowsUp_Right", 10.0f));
    blendshapeMap.insert("EyeOpen_L", QPair<QByteArray, float>("Mix::EyesWide_Left", 1.0f));
    blendshapeMap.insert("EyeOpen_R", QPair<QByteArray, float>("Mix::EyesWide_Right", 1.0f));
    blendshapeMap.insert("MouthFrown_L", QPair<QByteArray, float>("Mix::Frown_Left", 1.0f));
    blendshapeMap.insert("MouthFrown_R", QPair<QByteArray, float>("Mix::Frown_Right", 1.0f));
    blendshapeMap.insert("JawLeft", QPair<QByteArray, float>("Mix::Jaw_RotateY_Left", 1.0f));
    blendshapeMap.insert("JawRight", QPair<QByteArray, float>("Mix::Jaw_RotateY_Right", 1.0f));
    blendshapeMap.insert("LipsLowerDown", QPair<QByteArray, float>("Mix::LowerLipDown_Left", 0.5f));
    blendshapeMap.insert("LipsLowerDown", QPair<QByteArray, float>("Mix::LowerLipDown_Right", 0.5f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::LowerLipIn", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::LowerLipOut", 1.0f));
    blendshapeMap.insert("MouthLeft", QPair<QByteArray, float>("Mix::Midmouth_Left", 1.0f));
    blendshapeMap.insert("MouthRight", QPair<QByteArray, float>("Mix::Midmouth_Right", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::MouthDown", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::MouthNarrow_Left", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::MouthNarrow_Right", 1.0f));
    blendshapeMap.insert("JawOpen", QPair<QByteArray, float>("Mix::MouthOpen", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::MouthUp", 1.0f));
    blendshapeMap.insert("LipsPucker", QPair<QByteArray, float>("Mix::MouthWhistle_NarrowAdjust_Left", 0.5f));
    blendshapeMap.insert("LipsPucker", QPair<QByteArray, float>("Mix::MouthWhistle_NarrowAdjust_Right", 0.5f));
    blendshapeMap.insert("Sneer", QPair<QByteArray, float>("Mix::NoseScrunch_Left", 0.5f));
    blendshapeMap.insert("Sneer", QPair<QByteArray, float>("Mix::NoseScrunch_Right", 0.5f));
    blendshapeMap.insert("MouthSmile_L", QPair<QByteArray, float>("Mix::Smile_Left", 1.0f));
    blendshapeMap.insert("MouthSmile_R", QPair<QByteArray, float>("Mix::Smile_Right", 1.0f));
    blendshapeMap.insert("EyeSquint_L", QPair<QByteArray, float>("Mix::Squint_Left", 1.0f));
    blendshapeMap.insert("EyeSquint_R", QPair<QByteArray, float>("Mix::Squint_Right", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::UpperLipIn", 1.0f));
    blendshapeMap.insert("...", QPair<QByteArray, float>("Mix::UpperLipOut", 1.0f));
    blendshapeMap.insert("LipsUpperUp", QPair<QByteArray, float>("Mix::UpperLipUp_Left", 0.5f));
    blendshapeMap.insert("LipsUpperUp", QPair<QByteArray, float>("Mix::UpperLipUp_Right", 0.5f));

    QMultiHash<QByteArray, QPair<int, float> > channelNameMap;
    for (int i = 0;; i++) {
        QByteArray blendshape = FACESHIFT_BLENDSHAPES[i];
        if (blendshape.isEmpty()) {
            break;
        }
        for (QMultiHash<QByteArray, QPair<QByteArray, float> >::const_iterator it = blendshapeMap.constFind(blendshape);
                it != blendshapeMap.constEnd() && it.key() == blendshape; it++) {
            channelNameMap.insert(it.value().first, QPair<int, float>(i, it.value().second));
        }
    }

    return channelNameMap;
}

static const QMultiHash<QByteArray, QPair<int, float> >& getChannelNameMap() {
    static QMultiHash<QByteArray, QPair<int, float> > channelNameMap = createChannelNameMap();
    return channelNameMap;
}
#endif

FaceplusReader::~FaceplusReader() {
#ifdef HAVE_FACEPLUS
    if (faceplus_teardown()) {
        qDebug() << "Faceplus torn down.";
    }
#endif
}

void FaceplusReader::init() {
#ifdef HAVE_FACEPLUS
    if (!faceplus_init("VGA")) {
        qDebug() << "Failed to initialized Faceplus.";
        return;
    }
    qDebug() << "Faceplus initialized.";

    int channelCount = faceplus_output_channels_count();
    _outputVector.resize(channelCount);

    int maxIndex = -1;
    _channelIndexMap.clear();
    for (int i = 0; i < channelCount; i++) {
        QByteArray name = faceplus_output_channel_name(i);
        if (name == "Head_Joint::Rotation_X") {
            _headRotationIndices[0] = i;

        } else if (name == "Head_Joint::Rotation_Y") {
            _headRotationIndices[1] = i;

        } else if (name == "Head_Joint::Rotation_Z") {
            _headRotationIndices[2] = i;

        } else if (name == "Left_Eye_Joint::Rotation_X") {
            _leftEyeRotationIndices[0] = i;

        } else if (name == "Left_Eye_Joint::Rotation_Y") {
            _leftEyeRotationIndices[1] = i;

        } else if (name == "Right_Eye_Joint::Rotation_X") {
            _rightEyeRotationIndices[0] = i;

        } else if (name == "Right_Eye_Joint::Rotation_Y") {
            _rightEyeRotationIndices[1] = i;
        }
        for (QMultiHash<QByteArray, QPair<int, float> >::const_iterator it = getChannelNameMap().constFind(name);
                it != getChannelNameMap().constEnd() && it.key() == name; it++) {
            _channelIndexMap.insert(i, it.value());
            maxIndex = qMax(maxIndex, it.value().first);
        }
    }
    _blendshapeCoefficients.resize(maxIndex + 1);

    QMetaObject::invokeMethod(this, "update", Qt::QueuedConnection);
#endif
}

void FaceplusReader::shutdown() {
    deleteLater();
    thread()->quit();
}

void FaceplusReader::update() {
#ifdef HAVE_FACEPLUS
    if (!(faceplus_synchronous_track() && faceplus_current_output_vector(_outputVector.data()))) {
        QMetaObject::invokeMethod(this, "update", Qt::QueuedConnection);
        return;
    }
    glm::quat headRotation(glm::radians(glm::vec3(-_outputVector.at(_headRotationIndices[0]),
        _outputVector.at(_headRotationIndices[1]), -_outputVector.at(_headRotationIndices[2]))));
    float estimatedEyePitch = (_outputVector.at(_leftEyeRotationIndices[0]) +
        _outputVector.at(_rightEyeRotationIndices[0])) * -0.5f;
    float estimatedEyeYaw = (_outputVector.at(_leftEyeRotationIndices[1]) +
        _outputVector.at(_rightEyeRotationIndices[1])) * 0.5f;

    qFill(_blendshapeCoefficients.begin(), _blendshapeCoefficients.end(), 0.0f);
    for (int i = 0; i < _outputVector.size(); i++) {
        for (QMultiHash<int, QPair<int, float> >::const_iterator it = _channelIndexMap.constFind(i);
                it != _channelIndexMap.constEnd() && it.key() == i; it++) {
            _blendshapeCoefficients[it.value().first] += _outputVector.at(i) * it.value().second;
        }
    }

    QMetaObject::invokeMethod(Application::getInstance()->getFaceplus(), "setState", Q_ARG(const glm::quat&, headRotation),
        Q_ARG(float, estimatedEyePitch), Q_ARG(float, estimatedEyeYaw), Q_ARG(const QVector<float>&, _blendshapeCoefficients));

    QMetaObject::invokeMethod(this, "update", Qt::QueuedConnection);
#endif
}
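As the file above shows, the blocking Faceplus SDK calls (faceplus_init(), faceplus_synchronous_track()) never run on the main thread: Faceplus::setEnabled() moves a FaceplusReader onto its own QThread and talks to it only through queued QMetaObject::invokeMethod() calls, while the reader keeps re-queuing its own update() and pushes results back to the Faceplus object the same way. Below is a minimal, self-contained sketch of that worker-object pattern. Worker and the file name main.cpp are stand-ins for illustration, not part of the actual change, and the example assumes a Qt 5 build with moc/automoc enabled.

// Minimal sketch of the worker-object-in-a-thread pattern used by
// Faceplus/FaceplusReader. "Worker" is a generic stand-in class.
#include <QCoreApplication>
#include <QDebug>
#include <QThread>

class Worker : public QObject {
    Q_OBJECT
public:
    Q_INVOKABLE void init() {
        qDebug() << "init on" << QThread::currentThread();
        // the real FaceplusReader queues its first update() here
        QMetaObject::invokeMethod(this, "poll", Qt::QueuedConnection);
    }
    Q_INVOKABLE void poll() {
        qDebug() << "poll on" << QThread::currentThread();
        // the real reader re-queues itself indefinitely; stop after one pass here
        QMetaObject::invokeMethod(this, "shutdown", Qt::QueuedConnection);
    }
    Q_INVOKABLE void shutdown() {
        deleteLater();      // dispose of the worker on its own thread
        thread()->quit();   // stop the worker thread's event loop
    }
};

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);
    Worker* worker = new Worker();
    QThread* readerThread = new QThread();
    worker->moveToThread(readerThread);         // invokable methods now run on readerThread
    readerThread->start();
    QMetaObject::invokeMethod(worker, "init");  // queued automatically: crosses threads
    QObject::connect(readerThread, SIGNAL(finished()), &app, SLOT(quit()));
    int result = app.exec();
    readerThread->wait();
    delete readerThread;
    return result;
}

#include "main.moc"  // single-file example; assumes this file is main.cpp and moc processes it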
interface/src/devices/Faceplus.h (new file): 79 lines

@@ -0,0 +1,79 @@
//
// Faceplus.h
// interface/src/devices
//
// Created by Andrzej Kapolka on 4/9/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_Faceplus_h
#define hifi_Faceplus_h

#include <QMultiHash>
#include <QPair>
#include <QVector>

#include "FaceTracker.h"

class FaceplusReader;

/// Interface for Mixamo FacePlus.
class Faceplus : public FaceTracker {
    Q_OBJECT

public:

    Faceplus();
    virtual ~Faceplus();

    void init();

    bool isActive() const { return _active; }

    Q_INVOKABLE void setState(const glm::quat& headRotation, float estimatedEyePitch, float estimatedEyeYaw,
        const QVector<float>& blendshapeCoefficients);

public slots:

    void updateEnabled();

private:

    void setEnabled(bool enabled);

    bool _enabled;
    bool _active;

    FaceplusReader* _reader;
};

Q_DECLARE_METATYPE(QVector<float>)

/// The reader object that lives in its own thread.
class FaceplusReader : public QObject {
    Q_OBJECT

public:

    virtual ~FaceplusReader();

    Q_INVOKABLE void init();
    Q_INVOKABLE void shutdown();
    Q_INVOKABLE void update();

private:

#ifdef HAVE_FACEPLUS
    QMultiHash<int, QPair<int, float> > _channelIndexMap;
    QVector<float> _outputVector;
    int _headRotationIndices[3];
    int _leftEyeRotationIndices[2];
    int _rightEyeRotationIndices[2];
    QVector<float> _blendshapeCoefficients;
#endif
};

#endif // hifi_Faceplus_h
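FaceplusReader::update() converts the raw Faceplus output vector into faceshift-style blendshape coefficients through the _channelIndexMap multihash built from createChannelNameMap(): each Faceplus output channel can feed one or more (blendshape index, weight) pairs, and the weighted contributions are summed, so for example the two LowerLipDown channels each contribute half of the single LipsLowerDown coefficient. A small stand-alone sketch of that accumulation using plain STL containers follows; the channel indices and values here are made up for illustration.

// Sketch of the weighted channel -> blendshape accumulation done in
// FaceplusReader::update(). All indices and values below are hypothetical.
#include <iostream>
#include <map>
#include <utility>
#include <vector>

int main() {
    // channel index -> (blendshape index, weight)
    std::multimap<int, std::pair<int, float> > channelIndexMap;
    channelIndexMap.insert(std::make_pair(0, std::make_pair(7, 0.5f))); // e.g. LowerLipDown_Left  feeds LipsLowerDown
    channelIndexMap.insert(std::make_pair(1, std::make_pair(7, 0.5f))); // e.g. LowerLipDown_Right feeds LipsLowerDown
    channelIndexMap.insert(std::make_pair(2, std::make_pair(3, 1.0f))); // e.g. MouthOpen          feeds JawOpen

    std::vector<float> outputVector;   // per-channel values from the tracker (made up)
    outputVector.push_back(0.8f);
    outputVector.push_back(0.4f);
    outputVector.push_back(0.9f);

    std::vector<float> blendshapeCoefficients(8, 0.0f);
    for (int i = 0; i < (int)outputVector.size(); i++) {
        // accumulate every (blendshape, weight) pair registered for channel i
        std::pair<std::multimap<int, std::pair<int, float> >::const_iterator,
                std::multimap<int, std::pair<int, float> >::const_iterator> range = channelIndexMap.equal_range(i);
        for (std::multimap<int, std::pair<int, float> >::const_iterator it = range.first; it != range.second; ++it) {
            blendshapeCoefficients[it->second.first] += outputVector[i] * it->second.second;
        }
    }

    // blendshape 7 ends up as 0.5 * 0.8 + 0.5 * 0.4 = 0.6; blendshape 3 as 0.9
    std::cout << blendshapeCoefficients[7] << " " << blendshapeCoefficients[3] << std::endl;
    return 0;
}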
interface/src/devices/Faceshift.cpp

@@ -45,9 +45,7 @@ Faceshift::Faceshift() :
    _jawOpenIndex(21),
    _longTermAverageEyePitch(0.0f),
    _longTermAverageEyeYaw(0.0f),
    _longTermAverageInitialized(false),
    _estimatedEyePitch(0.0f),
    _estimatedEyeYaw(0.0f)
    _longTermAverageInitialized(false)
{
    connect(&_tcpSocket, SIGNAL(connected()), SLOT(noteConnected()));
    connect(&_tcpSocket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(noteError(QAbstractSocket::SocketError)));
interface/src/devices/Faceshift.h

@@ -15,13 +15,12 @@
#include <QTcpSocket>
#include <QUdpSocket>

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

#include <fsbinarystream.h>

#include "FaceTracker.h"

/// Handles interaction with the Faceshift software, which provides head position/orientation and facial features.
class Faceshift : public QObject {
class Faceshift : public FaceTracker {
    Q_OBJECT

public:

@@ -34,9 +33,7 @@ public:

    bool isActive() const;

    const glm::quat& getHeadRotation() const { return _headRotation; }
    const glm::vec3& getHeadAngularVelocity() const { return _headAngularVelocity; }
    const glm::vec3& getHeadTranslation() const { return _headTranslation; }

    // these pitch/yaw angles are in degrees
    float getEyeGazeLeftPitch() const { return _eyeGazeLeftPitch; }

@@ -45,11 +42,6 @@ public:
    float getEyeGazeRightPitch() const { return _eyeGazeRightPitch; }
    float getEyeGazeRightYaw() const { return _eyeGazeRightYaw; }

    float getEstimatedEyePitch() const { return _estimatedEyePitch; }
    float getEstimatedEyeYaw() const { return _estimatedEyeYaw; }

    const QVector<float>& getBlendshapeCoefficients() const { return _blendshapeCoefficients; }

    float getLeftBlink() const { return getBlendshapeCoefficient(_leftBlinkIndex); }
    float getRightBlink() const { return getBlendshapeCoefficient(_rightBlinkIndex); }
    float getLeftEyeOpen() const { return getBlendshapeCoefficient(_leftEyeOpenIndex); }

@@ -102,9 +94,7 @@ private:
    bool _tracking;
    quint64 _lastTrackingStateReceived;

    glm::quat _headRotation;
    glm::vec3 _headAngularVelocity;
    glm::vec3 _headTranslation;

    // degrees
    float _eyeGazeLeftPitch;

@@ -112,8 +102,6 @@ private:
    float _eyeGazeRightPitch;
    float _eyeGazeRightYaw;

    QVector<float> _blendshapeCoefficients;

    int _leftBlinkIndex;
    int _rightBlinkIndex;
    int _leftEyeOpenIndex;

@@ -135,10 +123,6 @@ private:
    float _longTermAverageEyePitch;
    float _longTermAverageEyeYaw;
    bool _longTermAverageInitialized;

    // degrees
    float _estimatedEyePitch;
    float _estimatedEyeYaw;
};

#endif // hifi_Faceshift_h
interface/src/devices/Visage.cpp

@@ -37,9 +37,7 @@ const glm::vec3 DEFAULT_HEAD_ORIGIN(0.0f, 0.0f, 0.7f);
Visage::Visage() :
    _enabled(false),
    _active(false),
    _headOrigin(DEFAULT_HEAD_ORIGIN),
    _estimatedEyePitch(0.0f),
    _estimatedEyeYaw(0.0f) {
    _headOrigin(DEFAULT_HEAD_ORIGIN) {

#ifdef HAVE_VISAGE
    QByteArray licensePath = Application::resourcesPath().toLatin1() + "visage/license.vlc";

@@ -164,6 +162,7 @@ void Visage::reset() {

void Visage::updateEnabled() {
    setEnabled(Menu::getInstance()->isOptionChecked(MenuOption::Visage) &&
        !Menu::getInstance()->isOptionChecked(MenuOption::Faceplus) &&
        !(Menu::getInstance()->isOptionChecked(MenuOption::Faceshift) &&
            Application::getInstance()->getFaceshift()->isConnectedOrConnecting()));
}
interface/src/devices/Visage.h

@@ -16,8 +16,7 @@
#include <QPair>
#include <QVector>

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include "FaceTracker.h"

namespace VisageSDK {
class VisageTracker2;

@@ -25,7 +24,7 @@ namespace VisageSDK {
}

/// Handles input from the Visage webcam feature tracking software.
class Visage : public QObject {
class Visage : public FaceTracker {
    Q_OBJECT

public:

@@ -37,14 +36,6 @@ public:

    bool isActive() const { return _active; }

    const glm::quat& getHeadRotation() const { return _headRotation; }
    const glm::vec3& getHeadTranslation() const { return _headTranslation; }

    float getEstimatedEyePitch() const { return _estimatedEyePitch; }
    float getEstimatedEyeYaw() const { return _estimatedEyeYaw; }

    const QVector<float>& getBlendshapeCoefficients() const { return _blendshapeCoefficients; }

    void update();
    void reset();

@@ -64,15 +55,8 @@ private:

    bool _enabled;
    bool _active;
    glm::quat _headRotation;
    glm::vec3 _headTranslation;

    glm::vec3 _headOrigin;

    float _estimatedEyePitch;
    float _estimatedEyeYaw;

    QVector<float> _blendshapeCoefficients;
};

#endif // hifi_Visage_h