diff --git a/examples/sit.js b/examples/sit.js
index c157d4854d..0f4b199855 100644
--- a/examples/sit.js
+++ b/examples/sit.js
@@ -255,13 +255,24 @@ function update(deltaTime){
         }
         frame++;
     }
+
+    var locationChanged = false;
+    if (location.hostname != oldHost) {
+        print("Changed domain");
+        for (model in models) {
+            removeIndicators(models[model]);
+        }
+        oldHost = location.hostname;
+        locationChanged = true;
+    }
 
-    if (MyAvatar.position.x != avatarOldPosition.x &&
-        MyAvatar.position.y != avatarOldPosition.y &&
-        MyAvatar.position.z != avatarOldPosition.z) {
+    if (MyAvatar.position.x != avatarOldPosition.x ||
+        MyAvatar.position.y != avatarOldPosition.y ||
+        MyAvatar.position.z != avatarOldPosition.z ||
+        locationChanged) {
         avatarOldPosition = MyAvatar.position;
 
-        var SEARCH_RADIUS = 10;
+        var SEARCH_RADIUS = 50;
         var foundModels = Models.findModels(MyAvatar.position, SEARCH_RADIUS);
         // Let's remove indicators that got out of radius
         for (model in models) {
@@ -274,7 +285,10 @@ function update(deltaTime){
         for (var i = 0; i < foundModels.length; ++i) {
             var model = foundModels[i];
             if (typeof(models[model.id]) == "undefined") {
-                addIndicators(model);
+                model.properties = Models.getModelProperties(model);
+                if (Vec3.distance(model.properties.position, MyAvatar.position) < SEARCH_RADIUS) {
+                    addIndicators(model);
+                }
             }
         }
 
@@ -283,9 +297,9 @@ function update(deltaTime){
         }
     }
 }
+var oldHost = location.hostname;
 
 function addIndicators(modelID) {
-    modelID.properties = Models.getModelProperties(modelID);
     if (modelID.properties.sittingPoints.length > 0) {
         for (var i = 0; i < modelID.properties.sittingPoints.length; ++i) {
             modelID.properties.sittingPoints[i].indicator = new SeatIndicator(modelID.properties, i);
diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp
index a5565506ca..9b54b84351 100644
--- a/interface/src/Application.cpp
+++ b/interface/src/Application.cpp
@@ -1494,9 +1494,10 @@ glm::vec3 Application::getMouseVoxelWorldCoordinates(const VoxelDetail& mouseVox
 }
 
 FaceTracker* Application::getActiveFaceTracker() {
-    return _faceshift.isActive() ? static_cast<FaceTracker*>(&_faceshift) :
+    return _cara.isActive() ? static_cast<FaceTracker*>(&_cara) :
+        (_faceshift.isActive() ? static_cast<FaceTracker*>(&_faceshift) :
             (_faceplus.isActive() ? static_cast<FaceTracker*>(&_faceplus) :
-                (_visage.isActive() ? static_cast<FaceTracker*>(&_visage) : NULL));
+                (_visage.isActive() ? static_cast<FaceTracker*>(&_visage) : NULL)));
 }
 
 struct SendVoxelsOperationArgs {
@@ -1878,6 +1879,19 @@ void Application::updateVisage() {
     _visage.update();
 }
 
+void Application::updateCara() {
+    bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
+    PerformanceWarning warn(showWarnings, "Application::updateCara()");
+
+    // Update Cara
+    _cara.update();
+
+    // Copy the head angular velocity measured by Cara to the avatar's head
+    if (_cara.isActive()) {
+        _myAvatar->getHead()->setAngularVelocity(_cara.getHeadAngularVelocity());
+    }
+}
+
 void Application::updateMyAvatarLookAtPosition() {
     PerformanceTimer perfTimer("lookAt");
     bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
diff --git a/interface/src/Application.h b/interface/src/Application.h
index e20e4e90d3..a356b26725 100644
--- a/interface/src/Application.h
+++ b/interface/src/Application.h
@@ -63,6 +63,7 @@
 #include "devices/PrioVR.h"
 #include "devices/SixenseManager.h"
 #include "devices/Visage.h"
+#include "devices/CaraFaceTracker.h"
 #include "models/ModelTreeRenderer.h"
 #include "particles/ParticleTreeRenderer.h"
 #include "renderer/AmbientOcclusionEffect.h"
@@ -211,6 +212,7 @@ public:
     Faceplus* getFaceplus() { return &_faceplus; }
     Faceshift* getFaceshift() { return &_faceshift; }
     Visage* getVisage() { return &_visage; }
+    CaraFaceTracker* getCara() { return &_cara; }
     FaceTracker* getActiveFaceTracker();
     SixenseManager* getSixenseManager() { return &_sixenseManager; }
     PrioVR* getPrioVR() { return &_prioVR; }
@@ -382,6 +384,7 @@ private:
     void updateFaceplus();
     void updateFaceshift();
     void updateVisage();
+    void updateCara();
     void updateMyAvatarLookAtPosition();
     void updateThreads(float deltaTime);
     void updateMetavoxels(float deltaTime);
@@ -478,6 +481,7 @@ private:
     Faceplus _faceplus;
     Faceshift _faceshift;
     Visage _visage;
+    CaraFaceTracker _cara;
 
     SixenseManager _sixenseManager;
     PrioVR _prioVR;
diff --git a/interface/src/avatar/SkeletonModel.cpp b/interface/src/avatar/SkeletonModel.cpp
index 3caaad1391..dc6a309e70 100644
--- a/interface/src/avatar/SkeletonModel.cpp
+++ b/interface/src/avatar/SkeletonModel.cpp
@@ -52,7 +52,8 @@ const float PALM_PRIORITY = 3.0f;
 
 void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
     setTranslation(_owningAvatar->getPosition());
-    setRotation(_owningAvatar->getOrientation() * glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f)));
+    static const glm::quat refOrientation = glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f));
+    setRotation(_owningAvatar->getOrientation() * refOrientation);
     const float MODEL_SCALE = 0.0006f;
     setScale(glm::vec3(1.0f, 1.0f, 1.0f) * _owningAvatar->getScale() * MODEL_SCALE);
diff --git a/interface/src/devices/CaraFaceTracker.cpp b/interface/src/devices/CaraFaceTracker.cpp
new file mode 100644
index 0000000000..9c67163dca
--- /dev/null
+++ b/interface/src/devices/CaraFaceTracker.cpp
@@ -0,0 +1,459 @@
+//
+//  CaraFaceTracker.cpp
+//  interface/src/devices
+//
+//  Created by Li Zuwei on 7/22/14.
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "CaraFaceTracker.h"
+#include <SharedUtil.h>
+
+//qt (the extraction dropped the bracketed header names; these four are the ones the code below plausibly needs)
+#include <QtCore/QJsonDocument>
+#include <QtCore/QJsonArray>
+#include <QtCore/QJsonObject>
+#include <QtCore/QDebug>
+
+#define PI M_PI
+#define RADTODEG(x) ( (x) * 180.0 / PI )
+#define DEGTORAD(x) ( (x) * PI / 180.0 )
+
+static const QHostAddress CARA_FEATURE_POINT_SERVER_ADDR("127.0.0.1");
+static const quint16 CARA_FEATURE_POINT_SERVER_PORT = 36555;
+static QString sampleJson = "[{\"id\":1, \
+    \"face\":{\"x\":248,\"y\":64,\"width\":278,\"height\":341}, \
+    \"pose\":{\"roll\":2.62934,\"pitch\":-12.2318,\"yaw\":0.936743}, \
+    \"feature_points\":[314,194,326,187,340,187,354,189,367,193,409,190,421,187,435,184,448,183,459,188, \
+    388,207,389,223,390,240,391,257,377,266,384,267,392,268,399,266,407,264,331,209, \
+    341,204,354,204,364,209,353,214,341,214,410,208,420,201,433,200,443,205,434,211, \
+    421,211,362,294,372,290,383,287,393,289,404,286,415,289,426,291,418,300,407,306, \
+    394,308,382,307,371,302,383,295,394,295,404,294,404,295,393,297,383,296], \
+    \"classifiers\":{\"emotion\":{\"smi\":-0.368829,\"sur\":-1.33334,\"neg\":0.00235828,\"att\":1},\"blink\":1}}]";
+
+static const glm::vec3 DEFAULT_HEAD_ORIGIN(0.0f, 0.0f, 0.0f);
+static const float TRANSLATION_SCALE = 1.0f;
+static const int NUM_BLENDSHAPE_COEFF = 30;
+
+struct CaraPerson {
+    struct CaraPose {
+        float roll, pitch, yaw;
+        CaraPose() :
+            roll(0.0f),
+            pitch(0.0f),
+            yaw(0.0f)
+        {
+        }
+    };
+
+    struct CaraEmotion {
+        float smile, surprise, negative, attention;
+        CaraEmotion():
+            smile(0.0f),
+            surprise(0.0f),
+            negative(0.0f),
+            attention(0.0f)
+        {
+        }
+    };
+
+    enum CaraBlink {
+        BLINK_NOT_AVAILABLE,
+        NO_BLINK,
+        BLINK
+    };
+
+    CaraPerson() :
+        id(-1),
+        blink(BLINK_NOT_AVAILABLE)
+    {
+    }
+
+    int id;
+    CaraPose pose;
+    CaraEmotion emotion;
+    CaraBlink blink;
+
+    QString toString() {
+        QString s = QString("id: %1, roll: %2, pitch: %3, yaw: %4, smi: %5, sur: %6, neg: %7, att: %8, blink: %9").
+            arg(id).
+            arg(pose.roll).
+            arg(pose.pitch).
+            arg(pose.yaw).
+            arg(emotion.smile).
+            arg(emotion.surprise).
+            arg(emotion.negative).
+            arg(emotion.attention).
+            arg(blink);
+        return s;
+    }
+};
+
+class CaraPacketDecoder {
+public:
+    static CaraPerson extractOne(const QByteArray& buffer, QJsonParseError* jsonError) {
+        CaraPerson person;
+        QJsonDocument dom = QJsonDocument::fromJson(buffer, jsonError);
+
+        //check for errors
+        if(jsonError->error == QJsonParseError::NoError) {
+            //read the dom structure and populate the blend shapes and head poses
+            //qDebug() << "[Info] Cara Face Tracker Packet Parsing Successful!";
+
+            //begin extracting the packet
+            if(dom.isArray()) {
+                QJsonArray people = dom.array();
+                //extract the first person in the array
+                if(people.size() > 0) {
+                    QJsonValue val = people.at(0);
+                    if(val.isObject()) {
+                        QJsonObject personDOM = val.toObject();
+                        person.id = extractId(personDOM);
+                        person.pose = extractPose(personDOM);
+
+                        //extract the classifier outputs
+                        QJsonObject::const_iterator it = personDOM.constFind("classifiers");
+                        if(it != personDOM.constEnd()) {
+                            QJsonObject classifierDOM = (*it).toObject();
+                            person.emotion = extractEmotion(classifierDOM);
+                            person.blink = extractBlink(classifierDOM);
+                        }
+                    }
+                }
+            }
+        }
+
+        return person;
+    }
+
+private:
+    static int extractId(const QJsonObject& person) {
+        int id = -1;
+        QJsonObject::const_iterator it = person.constFind("id");
+        if(it != person.constEnd()) {
+            id = (*it).toInt(-1);
+        }
+        return id;
+    }
+
+    static CaraPerson::CaraPose extractPose(const QJsonObject& person) {
+        CaraPerson::CaraPose pose;
+        QJsonObject::const_iterator it = person.constFind("pose");
+        if(it != person.constEnd()) {
+            QJsonObject poseDOM = (*it).toObject();
+
+            //look for the roll, pitch, yaw
+            QJsonObject::const_iterator poseIt = poseDOM.constFind("roll");
+            QJsonObject::const_iterator poseEnd = poseDOM.constEnd();
+            if(poseIt != poseEnd) {
+                pose.roll = (float)(*poseIt).toDouble(0.0);
+            }
+            poseIt = poseDOM.constFind("pitch");
+            if(poseIt != poseEnd) {
+                pose.pitch = (float)(*poseIt).toDouble(0.0);
+            }
+            poseIt = poseDOM.constFind("yaw");
+            if(poseIt != poseEnd) {
+                pose.yaw = (float)(*poseIt).toDouble(0.0);
+            }
+        }
+        return pose;
+    }
+
+    static CaraPerson::CaraEmotion extractEmotion(const QJsonObject& classifiers) {
+        CaraPerson::CaraEmotion emotion;
+        QJsonObject::const_iterator it = classifiers.constFind("emotion");
+        if(it != classifiers.constEnd()) {
+            QJsonObject emotionDOM = (*it).toObject();
+
+            //look for smile, surprise, negative, attention responses
+            QJsonObject::const_iterator emoEnd = emotionDOM.constEnd();
+            QJsonObject::const_iterator emoIt = emotionDOM.constFind("smi");
+            if(emoIt != emoEnd) {
+                emotion.smile = (float)(*emoIt).toDouble(0.0);
+            }
+            emoIt = emotionDOM.constFind("sur");
+            if(emoIt != emoEnd) {
+                emotion.surprise = (float)(*emoIt).toDouble(0.0);
+            }
+            emoIt = emotionDOM.constFind("neg");
+            if(emoIt != emoEnd) {
+                emotion.negative = (float)(*emoIt).toDouble(0.0);
+            }
+            emoIt = emotionDOM.constFind("att");
+            if(emoIt != emoEnd) {
+                emotion.attention = (float)(*emoIt).toDouble(0.0);
+            }
+        }
+        return emotion;
+    }
+
+    static CaraPerson::CaraBlink extractBlink(const QJsonObject& classifiers) {
+        CaraPerson::CaraBlink blink = CaraPerson::BLINK_NOT_AVAILABLE;
+        QJsonObject::const_iterator it = classifiers.constFind("blink");
+        if(it != classifiers.constEnd()) {
+            int b = (*it).toInt(CaraPerson::BLINK_NOT_AVAILABLE);
+            switch(b) {
+                case CaraPerson::BLINK_NOT_AVAILABLE:
+                    blink = CaraPerson::BLINK_NOT_AVAILABLE;
+                    break;
+                case CaraPerson::NO_BLINK:
+                    blink = CaraPerson::NO_BLINK;
+                    break;
+                case CaraPerson::BLINK:
+                    blink = CaraPerson::BLINK;
+                    break;
+                default:
+                    blink = CaraPerson::BLINK_NOT_AVAILABLE;
+                    break;
+            }
+        }
+        return blink;
+    }
+};
+
+CaraFaceTracker::CaraFaceTracker() :
+    _lastReceiveTimestamp(0),
+    _previousPitch(0.0f),
+    _previousYaw(0.0f),
+    _previousRoll(0.0f),
+    _eyeGazeLeftPitch(0.0f),
+    _eyeGazeLeftYaw(0.0f),
+    _eyeGazeRightPitch(0.0f),
+    _eyeGazeRightYaw(0.0f),
+    _leftBlinkIndex(0),
+    _rightBlinkIndex(1),
+    _leftEyeOpenIndex(8),
+    _rightEyeOpenIndex(9),
+    _browDownLeftIndex(14),
+    _browDownRightIndex(15),
+    _browUpCenterIndex(16),
+    _browUpLeftIndex(17),
+    _browUpRightIndex(18),
+    _mouthSmileLeftIndex(28),
+    _mouthSmileRightIndex(29),
+    _jawOpenIndex(21)
+{
+    connect(&_udpSocket, SIGNAL(readyRead()), SLOT(readPendingDatagrams()));
+    connect(&_udpSocket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(socketErrorOccurred(QAbstractSocket::SocketError)));
+    connect(&_udpSocket, SIGNAL(stateChanged(QAbstractSocket::SocketState)), SLOT(socketStateChanged(QAbstractSocket::SocketState)));
+
+    bindTo(CARA_FEATURE_POINT_SERVER_PORT);
+
+    _headTranslation = DEFAULT_HEAD_ORIGIN;
+    _blendshapeCoefficients.resize(NUM_BLENDSHAPE_COEFF);
+    _blendshapeCoefficients.fill(0.0f);
+
+    //qDebug() << sampleJson;
+}
+
+CaraFaceTracker::CaraFaceTracker(const QHostAddress& host, quint16 port) :
+    _lastReceiveTimestamp(0),
+    _previousPitch(0.0f),
+    _previousYaw(0.0f),
+    _previousRoll(0.0f),
+    _eyeGazeLeftPitch(0.0f),
+    _eyeGazeLeftYaw(0.0f),
+    _eyeGazeRightPitch(0.0f),
+    _eyeGazeRightYaw(0.0f),
+    _leftBlinkIndex(0),
+    _rightBlinkIndex(1),
+    _leftEyeOpenIndex(8),
+    _rightEyeOpenIndex(9),
+    _browDownLeftIndex(14),
+    _browDownRightIndex(15),
+    _browUpCenterIndex(16),
+    _browUpLeftIndex(17),
+    _browUpRightIndex(18),
+    _mouthSmileLeftIndex(28),
+    _mouthSmileRightIndex(29),
+    _jawOpenIndex(21)
+{
+    connect(&_udpSocket, SIGNAL(readyRead()), SLOT(readPendingDatagrams()));
+    connect(&_udpSocket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(socketErrorOccurred(QAbstractSocket::SocketError)));
+    connect(&_udpSocket, SIGNAL(stateChanged(QAbstractSocket::SocketState)), SLOT(socketStateChanged(QAbstractSocket::SocketState)));
+
+    bindTo(host, port);
+
+    _headTranslation = DEFAULT_HEAD_ORIGIN * TRANSLATION_SCALE;
+    _blendshapeCoefficients.resize(NUM_BLENDSHAPE_COEFF); //set the size of the blendshape coefficients
+    _blendshapeCoefficients.fill(0.0f);
+}
+
+CaraFaceTracker::~CaraFaceTracker() {
+    if(_udpSocket.isOpen())
+        _udpSocket.close();
+}
+
+void CaraFaceTracker::init() {
+
+}
+
+void CaraFaceTracker::reset() {
+
+}
+
+void CaraFaceTracker::bindTo(quint16 port) {
+    bindTo(QHostAddress::Any, port);
+}
+
+void CaraFaceTracker::bindTo(const QHostAddress& host, quint16 port) {
+    if(_udpSocket.isOpen()) {
+        _udpSocket.close();
+    }
+    _udpSocket.bind(host, port);
+}
+
+bool CaraFaceTracker::isActive() const {
+    static const int ACTIVE_TIMEOUT_USECS = 3000000; //3 secs
+    return (usecTimestampNow() - _lastReceiveTimestamp < ACTIVE_TIMEOUT_USECS);
+}
+
+void CaraFaceTracker::update() {
+    // get the euler angles relative to the window
+    glm::vec3 eulers = glm::degrees(safeEulerAngles(_headRotation * glm::quat(glm::radians(glm::vec3(
+        (_eyeGazeLeftPitch + _eyeGazeRightPitch) / 2.0f, (_eyeGazeLeftYaw + _eyeGazeRightYaw) / 2.0f, 0.0f)))));
+
+    //TODO: integrate when Cara has eye gaze estimation
+
+    _estimatedEyePitch = eulers.x;
+    _estimatedEyeYaw = eulers.y;
+}
+
+//private slots and methods
+void CaraFaceTracker::socketErrorOccurred(QAbstractSocket::SocketError socketError) {
+    qDebug() << "[Error] Cara Face Tracker Socket Error: " << _udpSocket.errorString();
+}
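// A quick way to sanity-check CaraPacketDecoder is to run it over the bundled
// sampleJson; a minimal, hypothetical smoke test using only the Qt JSON API
// already included above:
//
//     QJsonParseError jsonError;
//     CaraPerson person = CaraPacketDecoder::extractOne(sampleJson.toUtf8(), &jsonError);
//     if (jsonError.error == QJsonParseError::NoError) {
//         qDebug() << person.toString(); // expect id: 1, roll: 2.62934, ..., blink: 1
//     }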
+void CaraFaceTracker::socketStateChanged(QAbstractSocket::SocketState socketState) {
+    QString state;
+    switch(socketState) {
+        case QAbstractSocket::BoundState:
+            state = "Bound";
+            break;
+        case QAbstractSocket::ClosingState:
+            state = "Closing";
+            break;
+        case QAbstractSocket::ConnectedState:
+            state = "Connected";
+            break;
+        case QAbstractSocket::ConnectingState:
+            state = "Connecting";
+            break;
+        case QAbstractSocket::HostLookupState:
+            state = "Host Lookup";
+            break;
+        case QAbstractSocket::ListeningState:
+            state = "Listening";
+            break;
+        case QAbstractSocket::UnconnectedState:
+            state = "Unconnected";
+            break;
+    }
+    qDebug() << "[Info] Cara Face Tracker Socket: " << state;
+}
+
+void CaraFaceTracker::readPendingDatagrams() {
+    QByteArray buffer;
+    while (_udpSocket.hasPendingDatagrams()) {
+        buffer.resize(_udpSocket.pendingDatagramSize());
+        _udpSocket.readDatagram(buffer.data(), buffer.size());
+        decodePacket(buffer);
+    }
+}
+
+void CaraFaceTracker::decodePacket(const QByteArray& buffer) {
+    //decode the incoming udp packet
+    QJsonParseError jsonError;
+    CaraPerson person = CaraPacketDecoder::extractOne(buffer, &jsonError);
+
+    if(jsonError.error == QJsonParseError::NoError) {
+        //do some noise filtering on the head poses:
+        //reduce the noise first by truncating to 1 decimal place
+        person.pose.roll = glm::floor(person.pose.roll * 10) / 10;
+        person.pose.pitch = glm::floor(person.pose.pitch * 10) / 10;
+        person.pose.yaw = glm::floor(person.pose.yaw * 10) / 10;
+
+        //qDebug() << person.toString();
+
+        glm::quat newRotation(glm::vec3(DEGTORAD(person.pose.pitch), DEGTORAD(person.pose.yaw), DEGTORAD(person.pose.roll)));
+
+        // Compute angular velocity of the head
+        glm::quat r = newRotation * glm::inverse(_headRotation);
+        float theta = 2 * acos(r.w);
+        if (theta > EPSILON) {
+            float rMag = glm::length(glm::vec3(r.x, r.y, r.z));
+            const float AVERAGE_CARA_FRAME_TIME = 0.033f;
+            const float ANGULAR_VELOCITY_MIN = 1.2f;
+            const float YAW_STANDARD_DEV_DEG = 2.5f;
+
+            _headAngularVelocity = theta / AVERAGE_CARA_FRAME_TIME * glm::vec3(r.x, r.y, r.z) / rMag;
+
+            //use the angular velocity for roll and pitch: if it's below the threshold, don't move
+            if(glm::abs(_headAngularVelocity.x) < ANGULAR_VELOCITY_MIN) {
+                person.pose.pitch = _previousPitch;
+            }
+
+            if(glm::abs(_headAngularVelocity.z) < ANGULAR_VELOCITY_MIN) {
+                person.pose.roll = _previousRoll;
+            }
+
+            //for yaw the jitter is too great to use angular velocity (it swings too much);
+            //instead, take the absolute difference between the previous and current yaw
+            //and move only when the difference exceeds the standard deviation, which is
+            //around 2.5 deg (this introduces some jerkiness but avoids lag)
+
+            //below the standard deviation of 2.5 deg: no move
+            if(glm::abs(person.pose.yaw - _previousYaw) < YAW_STANDARD_DEV_DEG) {
+                //qDebug() << "Yaw Diff: " << glm::abs(person.pose.yaw - _previousYaw);
+                person.pose.yaw = _previousYaw;
+            }
+
+            //update the previous angles
+            _previousPitch = person.pose.pitch;
+            _previousYaw = person.pose.yaw;
+            _previousRoll = person.pose.roll;
+
+            //set the new rotation
+            newRotation = glm::quat(glm::vec3(DEGTORAD(person.pose.pitch), DEGTORAD(person.pose.yaw), DEGTORAD(-person.pose.roll)));
+        }
+        else {
+            //no change in position
+            newRotation = glm::quat(glm::vec3(DEGTORAD(_previousPitch), DEGTORAD(_previousYaw), DEGTORAD(-_previousRoll)));
+            _headAngularVelocity = glm::vec3(0,0,0);
+        }
+
+        //update to the new rotation angles
+        _headRotation = newRotation;
+
+        //TODO: head translation, right now it is 0
+
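// A worked example of the yaw deadband above (hypothetical numbers): with
// _previousYaw = 10.0 and an incoming yaw of 11.5, the difference |11.5 - 10.0| = 1.5
// is below YAW_STANDARD_DEV_DEG (2.5), so the yaw holds at 10.0; an incoming yaw of
// 13.0 (difference 3.0) passes through unchanged.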
+        //blendshapes: clip between 0.0f and 1.0f; negative values are ignored (clamped to 0.0f)
+        _blendshapeCoefficients[_leftBlinkIndex] = person.blink == CaraPerson::BLINK ? 1.0f : 0.0f;
+        _blendshapeCoefficients[_rightBlinkIndex] = person.blink == CaraPerson::BLINK ? 1.0f : 0.0f;
+
+        //anger and surprise are mutually exclusive, so we can use this fact
+        //to decide whether to lower or raise the brows
+        _blendshapeCoefficients[_browDownLeftIndex] = person.emotion.negative < 0.0f ? 0.0f : person.emotion.negative;
+        _blendshapeCoefficients[_browDownRightIndex] = person.emotion.negative < 0.0f ? 0.0f : person.emotion.negative;
+        _blendshapeCoefficients[_browUpCenterIndex] = person.emotion.surprise < 0.0f ? 0.0f : person.emotion.surprise;
+        _blendshapeCoefficients[_browUpLeftIndex] = person.emotion.surprise < 0.0f ? 0.0f : person.emotion.surprise;
+        _blendshapeCoefficients[_browUpRightIndex] = person.emotion.surprise < 0.0f ? 0.0f : person.emotion.surprise;
+        _blendshapeCoefficients[_jawOpenIndex] = person.emotion.surprise < 0.0f ? 0.0f : person.emotion.surprise;
+        _blendshapeCoefficients[_mouthSmileLeftIndex] = person.emotion.smile < 0.0f ? 0.0f : person.emotion.smile;
+        _blendshapeCoefficients[_mouthSmileRightIndex] = person.emotion.smile < 0.0f ? 0.0f : person.emotion.smile;
+    }
+    else {
+        qDebug() << "[Error] Cara Face Tracker Decode Error: " << jsonError.errorString();
+    }
+
+    _lastReceiveTimestamp = usecTimestampNow();
+}
+
+float CaraFaceTracker::getBlendshapeCoefficient(int index) const {
+    return (index >= 0 && index < (int)_blendshapeCoefficients.size()) ? _blendshapeCoefficients[index] : 0.0f;
+}
+
diff --git a/interface/src/devices/CaraFaceTracker.h b/interface/src/devices/CaraFaceTracker.h
new file mode 100644
index 0000000000..f51fed0f1b
--- /dev/null
+++ b/interface/src/devices/CaraFaceTracker.h
@@ -0,0 +1,123 @@
+//
+//  CaraFaceTracker.h
+//  interface/src/devices
+//
+//  Created by Li Zuwei on 7/22/14.
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hi_fi_CaraFaceTracker_h
+#define hi_fi_CaraFaceTracker_h
+
+#include <QUdpSocket>
+
+#include "FaceTracker.h"
+
+/*!
+ * \class CaraFaceTracker
+ *
+ * \brief Handles interaction with the Cara software,
+ * which provides head position/orientation and facial features.
+ * \details By default, opens a UDP socket on IPV4_ANY_ADDR with port 36555.
+ * The user needs to run the Cara Face Detection UDP Client with the destination
+ * host address (e.g. 127.0.0.1 for localhost) and destination port 36555.
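 *
 * A hypothetical usage sketch (host and port are the defaults documented above;
 * QHostAddress::LocalHost is Qt's constant for 127.0.0.1):
 * \code
 *     CaraFaceTracker tracker(QHostAddress::LocalHost, 36555);
 *     tracker.update();                 // once per frame
 *     if (tracker.isActive()) {         // true while packets keep arriving
 *         glm::vec3 omega = tracker.getHeadAngularVelocity();
 *     }
 * \endcode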
+**/ + +class CaraFaceTracker : public FaceTracker { + Q_OBJECT + +public: + CaraFaceTracker(); + CaraFaceTracker(const QHostAddress& host, quint16 port); + ~CaraFaceTracker(); + + //initialization + void init(); + void reset(); + + //sockets + void bindTo(quint16 port); + void bindTo(const QHostAddress& host, quint16 port); + bool isActive() const; + + //tracking + void update(); + + //head angular velocity + const glm::vec3& getHeadAngularVelocity() const { return _headAngularVelocity; } + + //eye gaze + float getEyeGazeLeftPitch() const { return _eyeGazeLeftPitch; } + float getEyeGazeLeftYaw() const { return _eyeGazeLeftYaw; } + + float getEyeGazeRightPitch() const { return _eyeGazeRightPitch; } + float getEyeGazeRightYaw() const { return _eyeGazeRightYaw; } + + //blend shapes + float getLeftBlink() const { return getBlendshapeCoefficient(_leftBlinkIndex); } + float getRightBlink() const { return getBlendshapeCoefficient(_rightBlinkIndex); } + float getLeftEyeOpen() const { return getBlendshapeCoefficient(_leftEyeOpenIndex); } + float getRightEyeOpen() const { return getBlendshapeCoefficient(_rightEyeOpenIndex); } + + float getBrowDownLeft() const { return getBlendshapeCoefficient(_browDownLeftIndex); } + float getBrowDownRight() const { return getBlendshapeCoefficient(_browDownRightIndex); } + float getBrowUpCenter() const { return getBlendshapeCoefficient(_browUpCenterIndex); } + float getBrowUpLeft() const { return getBlendshapeCoefficient(_browUpLeftIndex); } + float getBrowUpRight() const { return getBlendshapeCoefficient(_browUpRightIndex); } + + float getMouthSize() const { return getBlendshapeCoefficient(_jawOpenIndex); } + float getMouthSmileLeft() const { return getBlendshapeCoefficient(_mouthSmileLeftIndex); } + float getMouthSmileRight() const { return getBlendshapeCoefficient(_mouthSmileRightIndex); } + +private slots: + + //sockets + void socketErrorOccurred(QAbstractSocket::SocketError socketError); + void readPendingDatagrams(); + void socketStateChanged(QAbstractSocket::SocketState socketState); + +private: + void decodePacket(const QByteArray& buffer); + float getBlendshapeCoefficient(int index) const; + + // sockets + QUdpSocket _udpSocket; + quint64 _lastReceiveTimestamp; + + //head tracking + glm::vec3 _headAngularVelocity; + + //pose history + float _previousPitch; + float _previousYaw; + float _previousRoll; + + // eye gaze degrees + float _eyeGazeLeftPitch; + float _eyeGazeLeftYaw; + float _eyeGazeRightPitch; + float _eyeGazeRightYaw; + + //blend shapes + int _leftBlinkIndex; + int _rightBlinkIndex; + int _leftEyeOpenIndex; + int _rightEyeOpenIndex; + + // Brows + int _browDownLeftIndex; + int _browDownRightIndex; + int _browUpCenterIndex; + int _browUpLeftIndex; + int _browUpRightIndex; + + int _mouthSmileLeftIndex; + int _mouthSmileRightIndex; + + int _jawOpenIndex; +}; + +#endif //endif hi_fi_CaraFaceTracker_h \ No newline at end of file diff --git a/interface/src/renderer/JointState.cpp b/interface/src/renderer/JointState.cpp index 2a4372401e..316dfeb9ca 100644 --- a/interface/src/renderer/JointState.cpp +++ b/interface/src/renderer/JointState.cpp @@ -11,14 +11,17 @@ #include +#include + #include -//#include #include #include "JointState.h" JointState::JointState() : _animationPriority(0.0f), + _transformChanged(true), + _rotationIsValid(false), _positionInParentFrame(0.0f), _distanceToParent(0.0f), _fbxJoint(NULL), @@ -26,7 +29,9 @@ JointState::JointState() : } JointState::JointState(const JointState& other) : _constraint(NULL) { + _transformChanged = 
other._transformChanged; _transform = other._transform; + _rotationIsValid = other._rotationIsValid; _rotation = other._rotation; _rotationInConstrainedFrame = other._rotationInConstrainedFrame; _positionInParentFrame = other._positionInParentFrame; @@ -45,9 +50,21 @@ JointState::~JointState() { } } +glm::quat JointState::getRotation() const { + if (!_rotationIsValid) { + const_cast(this)->_rotation = extractRotation(_transform); + const_cast(this)->_rotationIsValid = true; + } + + return _rotation; +} + void JointState::setFBXJoint(const FBXJoint* joint) { assert(joint != NULL); _rotationInConstrainedFrame = joint->rotation; + _transformChanged = true; + _rotationIsValid = false; + // NOTE: JointState does not own the FBXJoint to which it points. _fbxJoint = joint; if (_constraint) { @@ -70,8 +87,10 @@ void JointState::updateConstraint() { void JointState::copyState(const JointState& state) { _animationPriority = state._animationPriority; + _transformChanged = state._transformChanged; _transform = state._transform; - _rotation = extractRotation(_transform); + _rotationIsValid = state._rotationIsValid; + _rotation = state._rotation; _rotationInConstrainedFrame = state._rotationInConstrainedFrame; _positionInParentFrame = state._positionInParentFrame; _distanceToParent = state._distanceToParent; @@ -88,11 +107,20 @@ void JointState::initTransform(const glm::mat4& parentTransform) { _distanceToParent = glm::length(_positionInParentFrame); } -void JointState::computeTransform(const glm::mat4& parentTransform) { +void JointState::computeTransform(const glm::mat4& parentTransform, bool parentTransformChanged, bool synchronousRotationCompute) { + if (!parentTransformChanged && !_transformChanged) { + return; + } + glm::quat rotationInParentFrame = _fbxJoint->preRotation * _rotationInConstrainedFrame * _fbxJoint->postRotation; glm::mat4 transformInParentFrame = _fbxJoint->preTransform * glm::mat4_cast(rotationInParentFrame) * _fbxJoint->postTransform; - _transform = parentTransform * glm::translate(_fbxJoint->translation) * transformInParentFrame; - _rotation = extractRotation(_transform); + glm::mat4 newTransform = parentTransform * glm::translate(_fbxJoint->translation) * transformInParentFrame; + + if (newTransform != _transform) { + _transform = newTransform; + _transformChanged = true; + _rotationIsValid = false; + } } void JointState::computeVisibleTransform(const glm::mat4& parentTransform) { @@ -103,7 +131,7 @@ void JointState::computeVisibleTransform(const glm::mat4& parentTransform) { } glm::quat JointState::getRotationInBindFrame() const { - return _rotation * _fbxJoint->inverseBindRotation; + return getRotation() * _fbxJoint->inverseBindRotation; } glm::quat JointState::getRotationInParentFrame() const { @@ -126,7 +154,7 @@ void JointState::setRotationInBindFrame(const glm::quat& rotation, float priorit // rotation is from bind- to model-frame assert(_fbxJoint != NULL); if (priority >= _animationPriority) { - glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(_rotation) * rotation * glm::inverse(_fbxJoint->inverseBindRotation); + glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(getRotation()) * rotation * glm::inverse(_fbxJoint->inverseBindRotation); if (constrain && _constraint) { _constraint->softClamp(targetRotation, _rotationInConstrainedFrame, 0.5f); } @@ -139,29 +167,32 @@ void JointState::clearTransformTranslation() { _transform[3][0] = 0.0f; _transform[3][1] = 0.0f; _transform[3][2] = 0.0f; + _transformChanged = true; 
_visibleTransform[3][0] = 0.0f; _visibleTransform[3][1] = 0.0f; _visibleTransform[3][2] = 0.0f; } void JointState::setRotation(const glm::quat& rotation, bool constrain, float priority) { - applyRotationDelta(rotation * glm::inverse(_rotation), true, priority); + applyRotationDelta(rotation * glm::inverse(getRotation()), true, priority); } void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, float priority) { // NOTE: delta is in model-frame assert(_fbxJoint != NULL); - if (priority < _animationPriority) { + if (priority < _animationPriority || delta.null) { return; } _animationPriority = priority; + glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(getRotation()) * delta * getRotation(); if (!constrain || _constraint == NULL) { // no constraints - _rotationInConstrainedFrame = _rotationInConstrainedFrame * glm::inverse(_rotation) * delta * _rotation; - _rotation = delta * _rotation; + _rotationInConstrainedFrame = targetRotation; + _transformChanged = true; + + _rotation = delta * getRotation(); return; } - glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(_rotation) * delta * _rotation; setRotationInConstrainedFrame(targetRotation); } @@ -174,7 +205,7 @@ void JointState::mixRotationDelta(const glm::quat& delta, float mixFactor, float return; } _animationPriority = priority; - glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(_rotation) * delta * _rotation; + glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(getRotation()) * delta * getRotation(); if (mixFactor > 0.0f && mixFactor <= 1.0f) { targetRotation = safeMix(targetRotation, _fbxJoint->rotation, mixFactor); } @@ -198,7 +229,7 @@ void JointState::mixVisibleRotationDelta(const glm::quat& delta, float mixFactor glm::quat JointState::computeParentRotation() const { // R = Rp * Rpre * r * Rpost // Rp = R * (Rpre * r * Rpost)^ - return _rotation * glm::inverse(_fbxJoint->preRotation * _rotationInConstrainedFrame * _fbxJoint->postRotation); + return getRotation() * glm::inverse(_fbxJoint->preRotation * _rotationInConstrainedFrame * _fbxJoint->postRotation); } glm::quat JointState::computeVisibleParentRotation() const { @@ -208,6 +239,7 @@ glm::quat JointState::computeVisibleParentRotation() const { void JointState::setRotationInConstrainedFrame(const glm::quat& targetRotation) { glm::quat parentRotation = computeParentRotation(); _rotationInConstrainedFrame = targetRotation; + _transformChanged = true; // R' = Rp * Rpre * r' * Rpost _rotation = parentRotation * _fbxJoint->preRotation * _rotationInConstrainedFrame * _fbxJoint->postRotation; } @@ -233,6 +265,6 @@ const glm::vec3& JointState::getDefaultTranslationInConstrainedFrame() const { void JointState::slaveVisibleTransform() { _visibleTransform = _transform; - _visibleRotation = _rotation; + _visibleRotation = getRotation(); _visibleRotationInConstrainedFrame = _rotationInConstrainedFrame; -} +} \ No newline at end of file diff --git a/interface/src/renderer/JointState.h b/interface/src/renderer/JointState.h index 94811fe13c..81591e816b 100644 --- a/interface/src/renderer/JointState.h +++ b/interface/src/renderer/JointState.h @@ -33,7 +33,9 @@ public: void copyState(const JointState& state); void initTransform(const glm::mat4& parentTransform); - void computeTransform(const glm::mat4& parentTransform); + // if synchronousRotationCompute is true, then _transform is still computed synchronously, + // but _rotation will be asynchronously extracted + void computeTransform(const 
glm::mat4& parentTransform, bool parentTransformChanged = true, bool synchronousRotationCompute = false); void computeVisibleTransform(const glm::mat4& parentTransform); const glm::mat4& getVisibleTransform() const { return _visibleTransform; } @@ -41,8 +43,10 @@ public: glm::vec3 getVisiblePosition() const { return extractTranslation(_visibleTransform); } const glm::mat4& getTransform() const { return _transform; } + void resetTransformChanged() { _transformChanged = false; } + bool getTransformChanged() const { return _transformChanged; } - glm::quat getRotation() const { return _rotation; } + glm::quat getRotation() const; glm::vec3 getPosition() const { return extractTranslation(_transform); } /// \return rotation from bind to model frame @@ -104,7 +108,9 @@ private: /// debug helper function void loadBindRotation(); + bool _transformChanged; glm::mat4 _transform; // joint- to model-frame + bool _rotationIsValid; glm::quat _rotation; // joint- to model-frame glm::quat _rotationInConstrainedFrame; // rotation in frame where angular constraints would be applied glm::vec3 _positionInParentFrame; // only changes when the Model is scaled diff --git a/interface/src/renderer/Model.cpp b/interface/src/renderer/Model.cpp index 8c19c11ed3..63a94772a7 100644 --- a/interface/src/renderer/Model.cpp +++ b/interface/src/renderer/Model.cpp @@ -922,7 +922,7 @@ void Model::simulate(float deltaTime, bool fullUpdate) { void Model::simulateInternal(float deltaTime) { // NOTE: this is a recursive call that walks all attachments, and their attachments // update the world space transforms for all joints - + // update animations foreach (const AnimationHandlePointer& handle, _runningAnimations) { handle->simulate(deltaTime); @@ -931,8 +931,11 @@ void Model::simulateInternal(float deltaTime) { for (int i = 0; i < _jointStates.size(); i++) { updateJointState(i); } + for (int i = 0; i < _jointStates.size(); i++) { + _jointStates[i].resetTransformChanged(); + } - _shapesAreDirty = ! 
_shapes.isEmpty(); + _shapesAreDirty = !_shapes.isEmpty(); // update the attachment transforms and simulate them const FBXGeometry& geometry = _geometry->getFBXGeometry(); @@ -994,7 +997,7 @@ void Model::updateJointState(int index) { state.computeTransform(parentTransform); } else { const JointState& parentState = _jointStates.at(parentIndex); - state.computeTransform(parentState.getTransform()); + state.computeTransform(parentState.getTransform(), parentState.getTransformChanged()); } } diff --git a/interface/src/ui/overlays/BillboardOverlay.cpp b/interface/src/ui/overlays/BillboardOverlay.cpp index e7d5cef3be..7d85d54fef 100644 --- a/interface/src/ui/overlays/BillboardOverlay.cpp +++ b/interface/src/ui/overlays/BillboardOverlay.cpp @@ -17,10 +17,11 @@ BillboardOverlay::BillboardOverlay() : _fromImage(-1,-1,-1,-1), _scale(1.0f), _isFacingAvatar(true) { + _isLoaded = false; } void BillboardOverlay::render() { - if (!_visible) { + if (!_visible || !_isLoaded) { return; } @@ -85,16 +86,7 @@ void BillboardOverlay::render() { ((float)_fromImage.y() + (float)_fromImage.height()) / (float)_size.height()); glVertex2f(-x, y); } glEnd(); - } else { - glColor4f(0.5f, 0.5f, 0.5f, 1.0f); - glBegin(GL_QUADS); { - glVertex2f(-1.0f, -1.0f); - glVertex2f(1.0f, -1.0f); - glVertex2f(1.0f, 1.0f); - glVertex2f(-1.0f, 1.0f); - } glEnd(); } - } glPopMatrix(); glDisable(GL_TEXTURE_2D); @@ -167,6 +159,7 @@ void BillboardOverlay::setProperties(const QScriptValue &properties) { } void BillboardOverlay::setBillboardURL(const QUrl url) { + _isLoaded = false; QNetworkReply* reply = NetworkAccessManager::getInstance().get(QNetworkRequest(url)); connect(reply, &QNetworkReply::finished, this, &BillboardOverlay::replyFinished); } @@ -175,4 +168,5 @@ void BillboardOverlay::replyFinished() { // replace our byte array with the downloaded data QNetworkReply* reply = static_cast(sender()); _billboard = reply->readAll(); + _isLoaded = true; } diff --git a/interface/src/ui/overlays/ImageOverlay.cpp b/interface/src/ui/overlays/ImageOverlay.cpp index aeea781eb6..7104b3aced 100644 --- a/interface/src/ui/overlays/ImageOverlay.cpp +++ b/interface/src/ui/overlays/ImageOverlay.cpp @@ -24,6 +24,7 @@ ImageOverlay::ImageOverlay() : _textureBound(false), _wantClipFromImage(false) { + _isLoaded = false; } ImageOverlay::~ImageOverlay() { @@ -35,6 +36,7 @@ ImageOverlay::~ImageOverlay() { // TODO: handle setting image multiple times, how do we manage releasing the bound texture? 
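// The setter below follows the same load-guard pattern as BillboardOverlay above:
// clear _isLoaded when a new URL is requested, set it once the network reply has
// arrived, and have render() skip drawing in between.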
 void ImageOverlay::setImageURL(const QUrl& url) {
+    _isLoaded = false;
     NetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance();
     QNetworkReply* reply = networkAccessManager.get(QNetworkRequest(url));
     connect(reply, &QNetworkReply::finished, this, &ImageOverlay::replyFinished);
@@ -47,10 +49,11 @@ void ImageOverlay::replyFinished() {
     QByteArray rawData = reply->readAll();
     _textureImage.loadFromData(rawData);
     _renderImage = true;
+    _isLoaded = true;
 }
 
 void ImageOverlay::render() {
-    if (!_visible) {
+    if (!_visible || !_isLoaded) {
         return; // do nothing if we're not visible
     }
     if (_renderImage && !_textureBound) {
diff --git a/interface/src/ui/overlays/ModelOverlay.cpp b/interface/src/ui/overlays/ModelOverlay.cpp
index 57f098aee3..b1d55de12a 100644
--- a/interface/src/ui/overlays/ModelOverlay.cpp
+++ b/interface/src/ui/overlays/ModelOverlay.cpp
@@ -15,8 +15,10 @@
 ModelOverlay::ModelOverlay() :
     _model(),
     _scale(1.0f),
-    _updateModel(false) {
+    _updateModel(false)
+{
     _model.init();
+    _isLoaded = false;
 }
 
 void ModelOverlay::update(float deltatime) {
@@ -32,6 +34,7 @@ void ModelOverlay::update(float deltatime) {
     } else {
         _model.simulate(deltatime);
     }
+    _isLoaded = _model.isActive();
 }
 
 void ModelOverlay::render() {
@@ -90,6 +93,7 @@ void ModelOverlay::setProperties(const QScriptValue &properties) {
         if (urlValue.isValid()) {
             _url = urlValue.toVariant().toString();
             _updateModel = true;
+            _isLoaded = false;
         }
 
         QScriptValue scaleValue = properties.property("scale");
diff --git a/interface/src/ui/overlays/Overlay.cpp b/interface/src/ui/overlays/Overlay.cpp
index bc7096c471..9d492c6e50 100644
--- a/interface/src/ui/overlays/Overlay.cpp
+++ b/interface/src/ui/overlays/Overlay.cpp
@@ -21,6 +21,7 @@
 Overlay::Overlay() :
     _parent(NULL),
+    _isLoaded(true),
     _alpha(DEFAULT_ALPHA),
     _color(DEFAULT_OVERLAY_COLOR),
     _visible(true),
diff --git a/interface/src/ui/overlays/Overlay.h b/interface/src/ui/overlays/Overlay.h
index f8d6400bf6..c5329688ff 100644
--- a/interface/src/ui/overlays/Overlay.h
+++ b/interface/src/ui/overlays/Overlay.h
@@ -40,6 +40,7 @@ public:
     virtual void render() = 0;
 
     // getters
+    bool isLoaded() { return _isLoaded; }
     bool getVisible() const { return _visible; }
     const xColor& getColor() const { return _color; }
     float getAlpha() const { return _alpha; }
@@ -55,6 +56,7 @@ public:
 
 protected:
     QGLWidget* _parent;
+    bool _isLoaded;
     float _alpha;
     xColor _color;
     bool _visible; // should the overlay be drawn at all
diff --git a/interface/src/ui/overlays/Overlays.cpp b/interface/src/ui/overlays/Overlays.cpp
index 581947c074..5d16bd78e5 100644
--- a/interface/src/ui/overlays/Overlays.cpp
+++ b/interface/src/ui/overlays/Overlays.cpp
@@ -227,11 +227,23 @@ unsigned int Overlays::getOverlayAtPoint(const glm::vec2& point) {
         i.previous();
         unsigned int thisID = i.key();
         Overlay2D* thisOverlay = static_cast<Overlay2D*>(i.value());
-        if (thisOverlay->getVisible() && thisOverlay->getBounds().contains(point.x, point.y, false)) {
+        if (thisOverlay->getVisible() && thisOverlay->isLoaded() && thisOverlay->getBounds().contains(point.x, point.y, false)) {
             return thisID;
         }
     }
     return 0; // not found
 }
+
+bool Overlays::isLoaded(unsigned int id) {
+    QReadLocker lock(&_lock);
+    Overlay* overlay = _overlays2D.value(id);
+    if (!overlay) {
+        overlay = _overlays3D.value(id);
+    }
+    if (!overlay) {
+        return false; // not found
+    }
+
+    return overlay->isLoaded();
+}
diff --git a/interface/src/ui/overlays/Overlays.h b/interface/src/ui/overlays/Overlays.h
index 2fbdb993f4..8bd8224f82 100644
--- a/interface/src/ui/overlays/Overlays.h
+++ b/interface/src/ui/overlays/Overlays.h
@@ -38,6 +38,9 @@ public slots:
 
     /// returns the top-most overlay at the screen point, or 0 if no overlay is at that point
     unsigned int getOverlayAtPoint(const glm::vec2& point);
+
+    /// returns whether the overlay's assets are loaded or not
+    bool isLoaded(unsigned int id);
 
 private:
     QMap<unsigned int, Overlay*> _overlays2D;
diff --git a/libraries/shared/src/SharedUtil.cpp b/libraries/shared/src/SharedUtil.cpp
index e795ea746c..57d1d7faac 100644
--- a/libraries/shared/src/SharedUtil.cpp
+++ b/libraries/shared/src/SharedUtil.cpp
@@ -757,8 +757,7 @@ void setTranslation(glm::mat4& matrix, const glm::vec3& translation) {
 glm::quat extractRotation(const glm::mat4& matrix, bool assumeOrthogonal) {
     // uses the iterative polar decomposition algorithm described by Ken Shoemake at
     // http://www.cs.wisc.edu/graphics/Courses/838-s2002/Papers/polar-decomp.pdf
-    // code adapted from Clyde, https://github.com/threerings/clyde/blob/master/src/main/java/com/threerings/math/Matrix4f.java
-    
+    // code adapted from Clyde, https://github.com/threerings/clyde/blob/master/core/src/main/java/com/threerings/math/Matrix4f.java
     // start with the contents of the upper 3x3 portion of the matrix
     glm::mat3 upper = glm::mat3(matrix);
     if (!assumeOrthogonal) {
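
The JointState changes above exist to avoid re-running extractRotation (an iterative polar decomposition, and therefore comparatively expensive) for every joint on every frame. Reduced to its essentials, the caching pattern looks roughly like the sketch below. This is a hypothetical, standalone version: it uses glm::quat_cast, which assumes an orthonormal upper 3x3 where the real code keeps the more robust extractRotation, and mutable members where the patch uses const_cast.

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    class LazyRotation {
    public:
        // invalidate the cached rotation whenever the transform changes
        void setTransform(const glm::mat4& transform) {
            _transform = transform;
            _rotationIsValid = false;
        }

        // extract the rotation only when somebody actually asks for it
        glm::quat getRotation() const {
            if (!_rotationIsValid) {
                _rotation = glm::quat_cast(glm::mat3(_transform)); // stand-in for extractRotation()
                _rotationIsValid = true;
            }
            return _rotation;
        }

    private:
        glm::mat4 _transform = glm::mat4(1.0f);
        mutable glm::quat _rotation;
        mutable bool _rotationIsValid = false;
    };

With the dirty flag threaded through parent transforms (parentTransformChanged in computeTransform), an unmoving skeleton skips both the matrix multiply and the rotation extraction; Model::simulateInternal then only has to sweep the flags once per frame, as the patch does with resetTransformChanged().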