Mirror of https://thingvellir.net/git/overte (synced 2025-03-27 23:52:03 +01:00)

Merge branch 'master' of https://github.com/highfidelity/hifi into domain-server-auth

This commit is contained in commit ff889882ce.
17 changed files with 719 additions and 45 deletions
@@ -255,13 +255,24 @@ function update(deltaTime){
        }
        frame++;
    }

    var locationChanged = false;
    if (location.hostname != oldHost) {
        print("Changed domain");
        for (model in models) {
            removeIndicators(models[model]);
        }
        oldHost = location.hostname;
        locationChanged = true;
    }

    if (MyAvatar.position.x != avatarOldPosition.x &&
        MyAvatar.position.y != avatarOldPosition.y &&
        MyAvatar.position.z != avatarOldPosition.z) {
    if (MyAvatar.position.x != avatarOldPosition.x ||
        MyAvatar.position.y != avatarOldPosition.y ||
        MyAvatar.position.z != avatarOldPosition.z ||
        locationChanged) {
        avatarOldPosition = MyAvatar.position;

        var SEARCH_RADIUS = 10;
        var SEARCH_RADIUS = 50;
        var foundModels = Models.findModels(MyAvatar.position, SEARCH_RADIUS);
        // Let's remove indicator that got out of radius
        for (model in models) {
@@ -274,7 +285,10 @@ function update(deltaTime){
        for (var i = 0; i < foundModels.length; ++i) {
            var model = foundModels[i];
            if (typeof(models[model.id]) == "undefined") {
                addIndicators(model);
                model.properties = Models.getModelProperties(model);
                if (Vec3.distance(model.properties.position, MyAvatar.position) < SEARCH_RADIUS) {
                    addIndicators(model);
                }
            }
        }

@@ -283,9 +297,9 @@ function update(deltaTime){
        }
    }
}
var oldHost = location.hostname;

function addIndicators(modelID) {
    modelID.properties = Models.getModelProperties(modelID);
    if (modelID.properties.sittingPoints.length > 0) {
        for (var i = 0; i < modelID.properties.sittingPoints.length; ++i) {
            modelID.properties.sittingPoints[i].indicator = new SeatIndicator(modelID.properties, i);
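The condition rewrite in the first hunk is the functional fix here: with &&, the script only noticed movement when all three coordinates changed in the same frame, so sliding along a single axis never refreshed the indicators; with || (plus the new locationChanged flag), any one-axis change counts. A minimal standalone illustration of the difference, in C++ with a stand-in Vec3 type (not part of the script above):

    #include <cstdio>

    struct Vec3 { float x, y, z; };

    // With &&, single-axis movement is missed; with ||, any component
    // change counts as movement (the behavior the hunk adopts).
    bool movedAll(const Vec3& a, const Vec3& b) {
        return a.x != b.x && a.y != b.y && a.z != b.z;
    }
    bool movedAny(const Vec3& a, const Vec3& b) {
        return a.x != b.x || a.y != b.y || a.z != b.z;
    }

    int main() {
        Vec3 oldPos = { 0.0f, 0.0f, 0.0f };
        Vec3 newPos = { 1.0f, 0.0f, 0.0f };  // moved along x only
        printf("&& version: %d\n", movedAll(oldPos, newPos)); // 0: change missed
        printf("|| version: %d\n", movedAny(oldPos, newPos)); // 1: change caught
        return 0;
    }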
@@ -1494,9 +1494,10 @@ glm::vec3 Application::getMouseVoxelWorldCoordinates(const VoxelDetail& mouseVox
}

FaceTracker* Application::getActiveFaceTracker() {
    return _faceshift.isActive() ? static_cast<FaceTracker*>(&_faceshift) :
    return _cara.isActive() ? static_cast<FaceTracker*>(&_cara) :
        (_faceshift.isActive() ? static_cast<FaceTracker*>(&_faceshift) :
        (_faceplus.isActive() ? static_cast<FaceTracker*>(&_faceplus) :
        (_visage.isActive() ? static_cast<FaceTracker*>(&_visage) : NULL));
        (_visage.isActive() ? static_cast<FaceTracker*>(&_visage) : NULL)));
}

struct SendVoxelsOperationArgs {
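The nested ternaries above encode a fixed priority order: Cara wins if active, then Faceshift, then Faceplus, then Visage, with NULL when nothing is tracking. A sketch of the same first-active-wins selection written as a loop, using a hypothetical minimal Tracker interface rather than the app's actual FaceTracker hierarchy:

    #include <vector>

    // Hypothetical stand-in for the FaceTracker base class.
    struct Tracker {
        bool active = false;
        bool isActive() const { return active; }
    };

    // Equivalent to the nested ternaries: scan candidates in priority
    // order and return the first one that reports itself active.
    Tracker* firstActive(const std::vector<Tracker*>& byPriority) {
        for (Tracker* tracker : byPriority) {
            if (tracker && tracker->isActive()) {
                return tracker;
            }
        }
        return nullptr; // matches the NULL fallthrough in the hunk
    }

A loop scales better as trackers are added, but the ternary chain keeps the priority order visible in one expression, which is presumably why the commit extends it instead.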
@@ -1878,6 +1879,19 @@ void Application::updateVisage() {
    _visage.update();
}

void Application::updateCara() {
    bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
    PerformanceWarning warn(showWarnings, "Application::updateCara()");

    // Update Cara
    _cara.update();

    // Copy angular velocity if measured by cara, to the head
    if (_cara.isActive()) {
        _myAvatar->getHead()->setAngularVelocity(_cara.getHeadAngularVelocity());
    }
}

void Application::updateMyAvatarLookAtPosition() {
    PerformanceTimer perfTimer("lookAt");
    bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
@@ -63,6 +63,7 @@
#include "devices/PrioVR.h"
#include "devices/SixenseManager.h"
#include "devices/Visage.h"
#include "devices/CaraFaceTracker.h"
#include "models/ModelTreeRenderer.h"
#include "particles/ParticleTreeRenderer.h"
#include "renderer/AmbientOcclusionEffect.h"
@@ -211,6 +212,7 @@ public:
    Faceplus* getFaceplus() { return &_faceplus; }
    Faceshift* getFaceshift() { return &_faceshift; }
    Visage* getVisage() { return &_visage; }
    CaraFaceTracker* getCara() { return &_cara; }
    FaceTracker* getActiveFaceTracker();
    SixenseManager* getSixenseManager() { return &_sixenseManager; }
    PrioVR* getPrioVR() { return &_prioVR; }
@@ -382,6 +384,7 @@ private:
    void updateFaceplus();
    void updateFaceshift();
    void updateVisage();
    void updateCara();
    void updateMyAvatarLookAtPosition();
    void updateThreads(float deltaTime);
    void updateMetavoxels(float deltaTime);
@@ -478,6 +481,7 @@ private:
    Faceplus _faceplus;
    Faceshift _faceshift;
    Visage _visage;
    CaraFaceTracker _cara;

    SixenseManager _sixenseManager;
    PrioVR _prioVR;
@@ -52,7 +52,8 @@ const float PALM_PRIORITY = 3.0f;

void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
    setTranslation(_owningAvatar->getPosition());
    setRotation(_owningAvatar->getOrientation() * glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f)));
    static const glm::quat refOrientation = glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f));
    setRotation(_owningAvatar->getOrientation() * refOrientation);
    const float MODEL_SCALE = 0.0006f;
    setScale(glm::vec3(1.0f, 1.0f, 1.0f) * _owningAvatar->getScale() * MODEL_SCALE);
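The SkeletonModel change is a small hoist: the 180-degree reference orientation is constant, so it moves into a function-local static const and glm::angleAxis runs once per process instead of once per simulate() call. The shape of that idiom, as a self-contained sketch:

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>
    #include <glm/gtc/constants.hpp>

    glm::quat applyReferenceOrientation(const glm::quat& avatarOrientation) {
        // Initialized on the first call only, then reused every frame.
        static const glm::quat refOrientation =
            glm::angleAxis(glm::pi<float>(), glm::vec3(0.0f, 1.0f, 0.0f));
        return avatarOrientation * refOrientation;
    }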
interface/src/devices/CaraFaceTracker.cpp (new file, 459 lines)
@@ -0,0 +1,459 @@
//
//  CaraFaceTracker.cpp
//  interface/src/devices
//
//  Created by Li Zuwei on 7/22/14.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "CaraFaceTracker.h"
#include <SharedUtil.h>

//qt
#include <QJsonDocument>
#include <QJsonArray>
#include <QJsonObject>
#include <QElapsedTimer>

#define PI M_PI
#define RADTODEG(x) ( (x) * 180.0 / PI )
#define DEGTORAD(x) ( (x) * PI / 180.0 )

static const QHostAddress CARA_FEATURE_POINT_SERVER_ADDR("127.0.0.1");
static const quint16 CARA_FEATURE_POINT_SERVER_PORT = 36555;
static QString sampleJson = "[{\"id\":1, \
    \"face\":{\"x\":248,\"y\":64,\"width\":278,\"height\":341}, \
    \"pose\":{\"roll\":2.62934,\"pitch\":-12.2318,\"yaw\":0.936743}, \
    \"feature_points\":[314,194,326,187,340,187,354,189,367,193,409,190,421,187,435,184,448,183,459,188, \
    388,207,389,223,390,240,391,257,377,266,384,267,392,268,399,266,407,264,331,209, \
    341,204,354,204,364,209,353,214,341,214,410,208,420,201,433,200,443,205,434,211, \
    421,211,362,294,372,290,383,287,393,289,404,286,415,289,426,291,418,300,407,306, \
    394,308,382,307,371,302,383,295,394,295,404,294,404,295,393,297,383,296], \
    \"classifiers\":{\"emotion\":{\"smi\":-0.368829,\"sur\":-1.33334,\"neg\":0.00235828,\"att\":1},\"blink\":1}}]";

static const glm::vec3 DEFAULT_HEAD_ORIGIN(0.0f, 0.0f, 0.0f);
static const float TRANSLATION_SCALE = 1.0f;
static const int NUM_BLENDSHAPE_COEFF = 30;

struct CaraPerson {
    struct CaraPose {
        float roll, pitch, yaw;
        CaraPose() :
            roll(0.0f),
            pitch(0.0f),
            yaw(0.0f)
        {
        }
    };

    struct CaraEmotion {
        float smile, surprise, negative, attention;
        CaraEmotion():
            smile(0.0f),
            surprise(0.0f),
            negative(0.0f),
            attention(0.0f)
        {
        }
    };

    enum CaraBlink {
        BLINK_NOT_AVAILABLE,
        NO_BLINK,
        BLINK
    };

    CaraPerson() :
        id(-1),
        blink(BLINK_NOT_AVAILABLE)
    {
    }

    int id;
    CaraPose pose;
    CaraEmotion emotion;
    CaraBlink blink;

    QString toString() {
        QString s = QString("id: %1, roll: %2, pitch: %3, yaw: %4, smi: %5, sur: %6, neg: %7, att: %8, blink: %9").
            arg(id).
            arg(pose.roll).
            arg(pose.pitch).
            arg(pose.yaw).
            arg(emotion.smile).
            arg(emotion.surprise).
            arg(emotion.negative).
            arg(emotion.attention).
            arg(blink);
        return s;
    }
};

class CaraPacketDecoder {
public:
    static CaraPerson extractOne(const QByteArray& buffer, QJsonParseError* jsonError) {
        CaraPerson person;
        QJsonDocument dom = QJsonDocument::fromJson(buffer, jsonError);

        //check for errors
        if(jsonError->error == QJsonParseError::NoError) {
            //read the dom structure and populate the blend shapes and head poses
            //qDebug() << "[Info] Cara Face Tracker Packet Parsing Successful!";

            //begin extracting the packet
            if(dom.isArray()) {
                QJsonArray people = dom.array();
                //extract the first person in the array
                if(people.size() > 0) {
                    QJsonValue val = people.at(0);
                    if(val.isObject()) {
                        QJsonObject personDOM = val.toObject();
                        person.id = extractId(personDOM);
                        person.pose = extractPose(personDOM);

                        //extract the classifier outputs
                        QJsonObject::const_iterator it = personDOM.constFind("classifiers");
                        if(it != personDOM.constEnd()) {
                            QJsonObject classifierDOM = (*it).toObject();
                            person.emotion = extractEmotion(classifierDOM);
                            person.blink = extractBlink(classifierDOM);
                        }
                    }
                }
            }
        }

        return person;
    }

private:
    static int extractId(const QJsonObject& person) {
        int id = -1;
        QJsonObject::const_iterator it = person.constFind("id");
        if(it != person.constEnd()) {
            id = (*it).toInt(-1);
        }
        return id;
    }

    static CaraPerson::CaraPose extractPose(const QJsonObject& person) {
        CaraPerson::CaraPose pose;
        QJsonObject::const_iterator it = person.constFind("pose");
        if(it != person.constEnd()) {
            QJsonObject poseDOM = (*it).toObject();

            //look for the roll, pitch, yaw;
            QJsonObject::const_iterator poseIt = poseDOM.constFind("roll");
            QJsonObject::const_iterator poseEnd = poseDOM.constEnd();
            if(poseIt != poseEnd) {
                pose.roll = (float)(*poseIt).toDouble(0.0);
            }
            poseIt = poseDOM.constFind("pitch");
            if(poseIt != poseEnd) {
                pose.pitch = (float)(*poseIt).toDouble(0.0);
            }
            poseIt = poseDOM.constFind("yaw");
            if(poseIt != poseEnd) {
                pose.yaw = (float)(*poseIt).toDouble(0.0);
            }
        }
        return pose;
    }

    static CaraPerson::CaraEmotion extractEmotion(const QJsonObject& classifiers) {
        CaraPerson::CaraEmotion emotion;
        QJsonObject::const_iterator it = classifiers.constFind("emotion");
        if(it != classifiers.constEnd()) {
            QJsonObject emotionDOM = (*it).toObject();

            //look for smile, surprise, negative, attention responses
            QJsonObject::const_iterator emoEnd = emotionDOM.constEnd();
            QJsonObject::const_iterator emoIt = emotionDOM.constFind("smi");
            if(emoIt != emoEnd) {
                emotion.smile = (float)(*emoIt).toDouble(0.0);
            }
            emoIt = emotionDOM.constFind("sur");
            if(emoIt != emoEnd) {
                emotion.surprise = (float)(*emoIt).toDouble(0.0);
            }
            emoIt = emotionDOM.constFind("neg");
            if(emoIt != emoEnd) {
                emotion.negative = (float)(*emoIt).toDouble(0.0);
            }
            emoIt = emotionDOM.constFind("att");
            if(emoIt != emoEnd) {
                emotion.attention = (float)(*emoIt).toDouble(0.0);
            }
        }
        return emotion;
    }

    static CaraPerson::CaraBlink extractBlink(const QJsonObject& classifiers) {
        CaraPerson::CaraBlink blink = CaraPerson::BLINK_NOT_AVAILABLE;
        QJsonObject::const_iterator it = classifiers.constFind("blink");
        if(it != classifiers.constEnd()) {
            int b = (*it).toInt(CaraPerson::BLINK_NOT_AVAILABLE);
            switch(b) {
                case CaraPerson::BLINK_NOT_AVAILABLE:
                    blink = CaraPerson::BLINK_NOT_AVAILABLE;
                    break;
                case CaraPerson::NO_BLINK:
                    blink = CaraPerson::NO_BLINK;
                    break;
                case CaraPerson::BLINK:
                    blink = CaraPerson::BLINK;
                    break;
                default:
                    blink = CaraPerson::BLINK_NOT_AVAILABLE;
                    break;
            }
        }
        return blink;
    }
};
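For comparison, the constFind()/constEnd() guards in CaraPacketDecoder can be collapsed with QJsonObject::value(), which returns an undefined QJsonValue for missing keys, combined with QJsonValue::toDouble(default). A sketch of extractPose() in that style (PoseSketch is a stand-in struct, not the CaraPose above):

    #include <QJsonObject>

    struct PoseSketch { float roll, pitch, yaw; };

    // Same behavior as extractPose() above, different Qt idiom:
    // value() on a missing key yields an undefined QJsonValue, whose
    // toObject() is empty and whose toDouble(0.0) falls back cleanly.
    PoseSketch extractPoseSketch(const QJsonObject& person) {
        QJsonObject pose = person.value("pose").toObject();
        PoseSketch result;
        result.roll = (float)pose.value("roll").toDouble(0.0);
        result.pitch = (float)pose.value("pitch").toDouble(0.0);
        result.yaw = (float)pose.value("yaw").toDouble(0.0);
        return result;
    }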

CaraFaceTracker::CaraFaceTracker() :
    _lastReceiveTimestamp(0),
    _previousPitch(0.0f),
    _previousYaw(0.0f),
    _previousRoll(0.0f),
    _eyeGazeLeftPitch(0.0f),
    _eyeGazeLeftYaw(0.0f),
    _eyeGazeRightPitch(0.0f),
    _eyeGazeRightYaw(0),
    _leftBlinkIndex(0),
    _rightBlinkIndex(1),
    _leftEyeOpenIndex(8),
    _rightEyeOpenIndex(9),
    _browDownLeftIndex(14),
    _browDownRightIndex(15),
    _browUpCenterIndex(16),
    _browUpLeftIndex(17),
    _browUpRightIndex(18),
    _mouthSmileLeftIndex(28),
    _mouthSmileRightIndex(29),
    _jawOpenIndex(21)
{
    connect(&_udpSocket, SIGNAL(readyRead()), SLOT(readPendingDatagrams()));
    connect(&_udpSocket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(socketErrorOccurred(QAbstractSocket::SocketError)));
    connect(&_udpSocket, SIGNAL(stateChanged(QAbstractSocket::SocketState)), SLOT(socketStateChanged(QAbstractSocket::SocketState)));

    bindTo(CARA_FEATURE_POINT_SERVER_PORT);

    _headTranslation = DEFAULT_HEAD_ORIGIN;
    _blendshapeCoefficients.resize(NUM_BLENDSHAPE_COEFF);
    _blendshapeCoefficients.fill(0.0f);

    //qDebug() << sampleJson;
}

CaraFaceTracker::CaraFaceTracker(const QHostAddress& host, quint16 port) :
    _lastReceiveTimestamp(0),
    _previousPitch(0.0f),
    _previousYaw(0.0f),
    _previousRoll(0.0f),
    _eyeGazeLeftPitch(0.0f),
    _eyeGazeLeftYaw(0.0f),
    _eyeGazeRightPitch(0.0f),
    _eyeGazeRightYaw(0.0f),
    _leftBlinkIndex(0),
    _rightBlinkIndex(1),
    _leftEyeOpenIndex(8),
    _rightEyeOpenIndex(9),
    _browDownLeftIndex(14),
    _browDownRightIndex(15),
    _browUpCenterIndex(16),
    _browUpLeftIndex(17),
    _browUpRightIndex(18),
    _mouthSmileLeftIndex(28),
    _mouthSmileRightIndex(29),
    _jawOpenIndex(21)
{
    connect(&_udpSocket, SIGNAL(readyRead()), SLOT(readPendingDatagrams()));
    connect(&_udpSocket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(socketErrorOccurred(QAbstractSocket::SocketError)));
    connect(&_udpSocket, SIGNAL(stateChanged(QAbstractSocket::SocketState)), SIGNAL(socketStateChanged(QAbstractSocket::SocketState)));

    bindTo(host, port);

    _headTranslation = DEFAULT_HEAD_ORIGIN * TRANSLATION_SCALE;
    _blendshapeCoefficients.resize(NUM_BLENDSHAPE_COEFF); //set the size of the blendshape coefficients
    _blendshapeCoefficients.fill(0.0f);
}

CaraFaceTracker::~CaraFaceTracker() {
    if(_udpSocket.isOpen())
        _udpSocket.close();
}

void CaraFaceTracker::init() {

}

void CaraFaceTracker::reset() {

}

void CaraFaceTracker::bindTo(quint16 port) {
    bindTo(QHostAddress::Any, port);
}

void CaraFaceTracker::bindTo(const QHostAddress& host, quint16 port) {
    if(_udpSocket.isOpen()) {
        _udpSocket.close();
    }
    _udpSocket.bind(host, port);
}

bool CaraFaceTracker::isActive() const {
    static const int ACTIVE_TIMEOUT_USECS = 3000000; //3 secs
    return (usecTimestampNow() - _lastReceiveTimestamp < ACTIVE_TIMEOUT_USECS);
}

void CaraFaceTracker::update() {
    // get the euler angles relative to the window
    glm::vec3 eulers = glm::degrees(safeEulerAngles(_headRotation * glm::quat(glm::radians(glm::vec3(
        (_eyeGazeLeftPitch + _eyeGazeRightPitch) / 2.0f, (_eyeGazeLeftYaw + _eyeGazeRightYaw) / 2.0f, 0.0f)))));

    //TODO: integrate when cara has eye gaze estimation

    _estimatedEyePitch = eulers.x;
    _estimatedEyeYaw = eulers.y;
}

//private slots and methods
void CaraFaceTracker::socketErrorOccurred(QAbstractSocket::SocketError socketError) {
    qDebug() << "[Error] Cara Face Tracker Socket Error: " << _udpSocket.errorString();
}

void CaraFaceTracker::socketStateChanged(QAbstractSocket::SocketState socketState) {
    QString state;
    switch(socketState) {
        case QAbstractSocket::BoundState:
            state = "Bounded";
            break;
        case QAbstractSocket::ClosingState:
            state = "Closing";
            break;
        case QAbstractSocket::ConnectedState:
            state = "Connected";
            break;
        case QAbstractSocket::ConnectingState:
            state = "Connecting";
            break;
        case QAbstractSocket::HostLookupState:
            state = "Host Lookup";
            break;
        case QAbstractSocket::ListeningState:
            state = "Listening";
            break;
        case QAbstractSocket::UnconnectedState:
            state = "Unconnected";
            break;
    }
    qDebug() << "[Info] Cara Face Tracker Socket: " << socketState;
}

void CaraFaceTracker::readPendingDatagrams() {
    QByteArray buffer;
    while (_udpSocket.hasPendingDatagrams()) {
        buffer.resize(_udpSocket.pendingDatagramSize());
        _udpSocket.readDatagram(buffer.data(), buffer.size());
        decodePacket(buffer);
    }
}

void CaraFaceTracker::decodePacket(const QByteArray& buffer) {
    //decode the incoming udp packet
    QJsonParseError jsonError;
    CaraPerson person = CaraPacketDecoder::extractOne(buffer, &jsonError);

    if(jsonError.error == QJsonParseError::NoError) {
        //do some noise filtering to the head poses
        //reduce the noise first by truncating to 1 dp
        person.pose.roll = glm::floor(person.pose.roll * 10) / 10;
        person.pose.pitch = glm::floor(person.pose.pitch * 10) / 10;
        person.pose.yaw = glm::floor(person.pose.yaw * 10) / 10;

        //qDebug() << person.toString();

        glm::quat newRotation(glm::vec3(DEGTORAD(person.pose.pitch), DEGTORAD(person.pose.yaw), DEGTORAD(person.pose.roll)));

        // Compute angular velocity of the head
        glm::quat r = newRotation * glm::inverse(_headRotation);
        float theta = 2 * acos(r.w);
        if (theta > EPSILON) {
            float rMag = glm::length(glm::vec3(r.x, r.y, r.z));
            const float AVERAGE_CARA_FRAME_TIME = 0.033f;
            const float ANGULAR_VELOCITY_MIN = 1.2f;
            const float YAW_STANDARD_DEV_DEG = 2.5f;

            _headAngularVelocity = theta / AVERAGE_CARA_FRAME_TIME * glm::vec3(r.x, r.y, r.z) / rMag;

            //use the angular velocity for roll and pitch, if it's below the threshold don't move
            if(glm::abs(_headAngularVelocity.x) < ANGULAR_VELOCITY_MIN) {
                person.pose.pitch = _previousPitch;
            }

            if(glm::abs(_headAngularVelocity.z) < ANGULAR_VELOCITY_MIN) {
                person.pose.roll = _previousRoll;
            }

            //for yaw, the jitter is great, you can't use angular velocity because it swings too much
            //use the previous and current yaw, calculate the
            //abs difference and move it the difference is above the standard deviation which is around 2.5
            // (this will introduce some jerks but will not encounter lag)

            // < the standard deviation 2.5 deg, no move
            if(glm::abs(person.pose.yaw - _previousYaw) < YAW_STANDARD_DEV_DEG) {
                //qDebug() << "Yaw Diff: " << glm::abs(person.pose.yaw - _previousYaw);
                person.pose.yaw = _previousYaw;
            }

            //update the previous angles
            _previousPitch = person.pose.pitch;
            _previousYaw = person.pose.yaw;
            _previousRoll = person.pose.roll;

            //set the new rotation
            newRotation = glm::quat(glm::vec3(DEGTORAD(person.pose.pitch), DEGTORAD(person.pose.yaw), DEGTORAD(-person.pose.roll)));
        }
        else {
            //no change in position
            newRotation = glm::quat(glm::vec3(DEGTORAD(_previousPitch), DEGTORAD(_previousYaw), DEGTORAD(-_previousRoll)));
            _headAngularVelocity = glm::vec3(0,0,0);
        }

        //update to new rotation angles
        _headRotation = newRotation;

        //TODO: head translation, right now is 0

        //Do Blendshapes, clip between 0.0f to 1.0f, neg should be ignored
        _blendshapeCoefficients[_leftBlinkIndex] = person.blink == CaraPerson::BLINK ? 1.0f : 0.0f;
        _blendshapeCoefficients[_rightBlinkIndex] = person.blink == CaraPerson::BLINK ? 1.0f : 0.0f;

        //anger and surprised are mutually exclusive so we could try use this fact to determine
        //whether to down the brows or up the brows
        _blendshapeCoefficients[_browDownLeftIndex] = person.emotion.negative < 0.0f ? 0.0f : person.emotion.negative;
        _blendshapeCoefficients[_browDownRightIndex] = person.emotion.negative < 0.0f ? 0.0f : person.emotion.negative;
        _blendshapeCoefficients[_browUpCenterIndex] = person.emotion.surprise < 0.0f ? 0.0f : person.emotion.surprise;
        _blendshapeCoefficients[_browUpLeftIndex] = person.emotion.surprise < 0.0f ? 0.0f : person.emotion.surprise;
        _blendshapeCoefficients[_browUpRightIndex] = person.emotion.surprise < 0.0f ? 0.0f : person.emotion.surprise;
        _blendshapeCoefficients[_jawOpenIndex] = person.emotion.surprise < 0.0f ? 0.0f : person.emotion.surprise;
        _blendshapeCoefficients[_mouthSmileLeftIndex] = person.emotion.smile < 0.0f ? 0.0f : person.emotion.smile;
        _blendshapeCoefficients[_mouthSmileRightIndex] = person.emotion.smile < 0.0f ? 0.0f : person.emotion.smile;
    }
    else {
        qDebug() << "[Error] Cara Face Tracker Decode Error: " << jsonError.errorString();
    }

    _lastReceiveTimestamp = usecTimestampNow();
}

float CaraFaceTracker::getBlendshapeCoefficient(int index) const {
    return (index >= 0 && index < (int)_blendshapeCoefficients.size()) ? _blendshapeCoefficients[index] : 0.0f;
}
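The core math in decodePacket() above: the delta quaternion r = q_new * q_old^-1 encodes an axis-angle rotation, theta = 2*acos(r.w) recovers the angle, the normalized vector part (r.x, r.y, r.z) is the axis, and dividing by the frame time yields an angular velocity in rad/s. A self-contained sketch of just that computation (the 0.033f default mirrors AVERAGE_CARA_FRAME_TIME in the file):

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>
    #include <cmath>

    glm::vec3 angularVelocity(const glm::quat& qOld, const glm::quat& qNew,
                              float frameTime = 0.033f) {
        glm::quat r = qNew * glm::inverse(qOld); // delta rotation
        float w = fminf(fmaxf(r.w, -1.0f), 1.0f); // clamp against float drift
        float theta = 2.0f * acosf(w);            // rotation angle (rad)
        glm::vec3 axis(r.x, r.y, r.z);
        float magnitude = glm::length(axis);
        if (magnitude < 1e-6f) {
            return glm::vec3(0.0f);  // no measurable rotation this frame
        }
        // angle over time, along the unit rotation axis
        return (theta / frameTime) * (axis / magnitude);
    }

One caveat worth knowing when reusing this: for deltas past 180 degrees the quaternion gives the long-way angle, which is fine here since per-frame head deltas are small.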
interface/src/devices/CaraFaceTracker.h (new file, 123 lines)
@@ -0,0 +1,123 @@
//
//  CaraFaceTracker.h
//  interface/src/devices
//
//  Created by Li Zuwei on 7/22/14.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hi_fi_CaraFaceTracker_h
#define hi_fi_CaraFaceTracker_h

#include <QUdpSocket>

#include "FaceTracker.h"

/*!
 * \class CaraFaceTracker
 *
 * \brief Handles interaction with the Cara software,
 * which provides head position/orientation and facial features.
 * \details By default, opens a udp socket with IPV4_ANY_ADDR with port 36555.
 * User needs to run the Cara Face Detection UDP Client with the destination
 * host address (eg: 127.0.0.1 for localhost) and destination port 36555.
 **/

class CaraFaceTracker : public FaceTracker {
    Q_OBJECT

public:
    CaraFaceTracker();
    CaraFaceTracker(const QHostAddress& host, quint16 port);
    ~CaraFaceTracker();

    //initialization
    void init();
    void reset();

    //sockets
    void bindTo(quint16 port);
    void bindTo(const QHostAddress& host, quint16 port);
    bool isActive() const;

    //tracking
    void update();

    //head angular velocity
    const glm::vec3& getHeadAngularVelocity() const { return _headAngularVelocity; }

    //eye gaze
    float getEyeGazeLeftPitch() const { return _eyeGazeLeftPitch; }
    float getEyeGazeLeftYaw() const { return _eyeGazeLeftYaw; }

    float getEyeGazeRightPitch() const { return _eyeGazeRightPitch; }
    float getEyeGazeRightYaw() const { return _eyeGazeRightYaw; }

    //blend shapes
    float getLeftBlink() const { return getBlendshapeCoefficient(_leftBlinkIndex); }
    float getRightBlink() const { return getBlendshapeCoefficient(_rightBlinkIndex); }
    float getLeftEyeOpen() const { return getBlendshapeCoefficient(_leftEyeOpenIndex); }
    float getRightEyeOpen() const { return getBlendshapeCoefficient(_rightEyeOpenIndex); }

    float getBrowDownLeft() const { return getBlendshapeCoefficient(_browDownLeftIndex); }
    float getBrowDownRight() const { return getBlendshapeCoefficient(_browDownRightIndex); }
    float getBrowUpCenter() const { return getBlendshapeCoefficient(_browUpCenterIndex); }
    float getBrowUpLeft() const { return getBlendshapeCoefficient(_browUpLeftIndex); }
    float getBrowUpRight() const { return getBlendshapeCoefficient(_browUpRightIndex); }

    float getMouthSize() const { return getBlendshapeCoefficient(_jawOpenIndex); }
    float getMouthSmileLeft() const { return getBlendshapeCoefficient(_mouthSmileLeftIndex); }
    float getMouthSmileRight() const { return getBlendshapeCoefficient(_mouthSmileRightIndex); }

private slots:

    //sockets
    void socketErrorOccurred(QAbstractSocket::SocketError socketError);
    void readPendingDatagrams();
    void socketStateChanged(QAbstractSocket::SocketState socketState);

private:
    void decodePacket(const QByteArray& buffer);
    float getBlendshapeCoefficient(int index) const;

    // sockets
    QUdpSocket _udpSocket;
    quint64 _lastReceiveTimestamp;

    //head tracking
    glm::vec3 _headAngularVelocity;

    //pose history
    float _previousPitch;
    float _previousYaw;
    float _previousRoll;

    // eye gaze degrees
    float _eyeGazeLeftPitch;
    float _eyeGazeLeftYaw;
    float _eyeGazeRightPitch;
    float _eyeGazeRightYaw;

    //blend shapes
    int _leftBlinkIndex;
    int _rightBlinkIndex;
    int _leftEyeOpenIndex;
    int _rightEyeOpenIndex;

    // Brows
    int _browDownLeftIndex;
    int _browDownRightIndex;
    int _browUpCenterIndex;
    int _browUpLeftIndex;
    int _browUpRightIndex;

    int _mouthSmileLeftIndex;
    int _mouthSmileRightIndex;

    int _jawOpenIndex;
};

#endif //endif hi_fi_CaraFaceTracker_h
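The \details block above suggests an easy way to exercise the tracker without the real Cara client: push one datagram of Cara-shaped JSON at port 36555 on localhost. A throwaway sketch of such a sender, with a trimmed stand-in payload rather than real tracker output:

    #include <QUdpSocket>
    #include <QHostAddress>
    #include <QByteArray>

    int main() {
        QUdpSocket socket;
        // Minimal Cara-shaped payload: one person, a pose, classifiers.
        QByteArray payload =
            "[{\"id\":1,"
            "\"pose\":{\"roll\":2.6,\"pitch\":-12.2,\"yaw\":0.9},"
            "\"classifiers\":{\"emotion\":{\"smi\":0.1,\"sur\":0.0,"
            "\"neg\":0.0,\"att\":1},\"blink\":1}}]";
        // Send to the port CaraFaceTracker binds by default (36555).
        socket.writeDatagram(payload, QHostAddress::LocalHost, 36555);
        return 0;
    }

Repeating the send in a timer loop would also keep isActive() true, since the tracker treats any datagram in the last three seconds as a live connection.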
@@ -11,14 +11,17 @@

#include <glm/gtx/norm.hpp>

#include <QThreadPool>

#include <AngularConstraint.h>
//#include <GeometryUtil.h>
#include <SharedUtil.h>

#include "JointState.h"

JointState::JointState() :
    _animationPriority(0.0f),
    _transformChanged(true),
    _rotationIsValid(false),
    _positionInParentFrame(0.0f),
    _distanceToParent(0.0f),
    _fbxJoint(NULL),
@@ -26,7 +29,9 @@ JointState::JointState() :
}

JointState::JointState(const JointState& other) : _constraint(NULL) {
    _transformChanged = other._transformChanged;
    _transform = other._transform;
    _rotationIsValid = other._rotationIsValid;
    _rotation = other._rotation;
    _rotationInConstrainedFrame = other._rotationInConstrainedFrame;
    _positionInParentFrame = other._positionInParentFrame;
@@ -45,9 +50,21 @@ JointState::~JointState() {
    }
}

glm::quat JointState::getRotation() const {
    if (!_rotationIsValid) {
        const_cast<JointState*>(this)->_rotation = extractRotation(_transform);
        const_cast<JointState*>(this)->_rotationIsValid = true;
    }

    return _rotation;
}

void JointState::setFBXJoint(const FBXJoint* joint) {
    assert(joint != NULL);
    _rotationInConstrainedFrame = joint->rotation;
    _transformChanged = true;
    _rotationIsValid = false;

    // NOTE: JointState does not own the FBXJoint to which it points.
    _fbxJoint = joint;
    if (_constraint) {
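getRotation() above fills a cache from a const accessor via const_cast. The more conventional C++ spelling of the same lazy-extraction pattern marks the cache members mutable; a sketch with a simplified stand-in type (extractRotationSketch is a placeholder that assumes a pure rotation matrix, unlike the real polar-decomposition helper in SharedUtil):

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    class CachedRotation {
    public:
        glm::quat getRotation() const {
            if (!_rotationIsValid) {
                _rotation = extractRotationSketch(_transform); // fill cache
                _rotationIsValid = true;                       // mark clean
            }
            return _rotation;
        }
    private:
        static glm::quat extractRotationSketch(const glm::mat4& m) {
            return glm::quat_cast(m); // assumes no shear/scale
        }
        glm::mat4 _transform{1.0f};
        mutable glm::quat _rotation;         // caches may mutate under const
        mutable bool _rotationIsValid = false;
    };

Either spelling preserves logical const-ness; mutable just makes the intent explicit in the type rather than at each write site.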
@@ -70,8 +87,10 @@ void JointState::updateConstraint() {

void JointState::copyState(const JointState& state) {
    _animationPriority = state._animationPriority;
    _transformChanged = state._transformChanged;
    _transform = state._transform;
    _rotation = extractRotation(_transform);
    _rotationIsValid = state._rotationIsValid;
    _rotation = state._rotation;
    _rotationInConstrainedFrame = state._rotationInConstrainedFrame;
    _positionInParentFrame = state._positionInParentFrame;
    _distanceToParent = state._distanceToParent;
@@ -88,11 +107,20 @@ void JointState::initTransform(const glm::mat4& parentTransform) {
    _distanceToParent = glm::length(_positionInParentFrame);
}

void JointState::computeTransform(const glm::mat4& parentTransform) {
void JointState::computeTransform(const glm::mat4& parentTransform, bool parentTransformChanged, bool synchronousRotationCompute) {
    if (!parentTransformChanged && !_transformChanged) {
        return;
    }

    glm::quat rotationInParentFrame = _fbxJoint->preRotation * _rotationInConstrainedFrame * _fbxJoint->postRotation;
    glm::mat4 transformInParentFrame = _fbxJoint->preTransform * glm::mat4_cast(rotationInParentFrame) * _fbxJoint->postTransform;
    _transform = parentTransform * glm::translate(_fbxJoint->translation) * transformInParentFrame;
    _rotation = extractRotation(_transform);
    glm::mat4 newTransform = parentTransform * glm::translate(_fbxJoint->translation) * transformInParentFrame;

    if (newTransform != _transform) {
        _transform = newTransform;
        _transformChanged = true;
        _rotationIsValid = false;
    }
}

void JointState::computeVisibleTransform(const glm::mat4& parentTransform) {
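The reworked computeTransform() is a dirty-flag scheme: a joint recomputes only when its own state or its parent's world transform changed, and a detected change ripples to children through the parentTransformChanged argument. A minimal sketch of that propagation with a simplified node type (not the real JointState):

    #include <glm/glm.hpp>

    struct Node {
        glm::mat4 local{1.0f};
        glm::mat4 world{1.0f};
        bool changed = true;

        void compute(const glm::mat4& parentWorld, bool parentChanged) {
            if (!parentChanged && !changed) {
                return; // nothing upstream or local moved: skip the multiply
            }
            glm::mat4 newWorld = parentWorld * local;
            if (newWorld != world) {
                world = newWorld;
                changed = true; // children observe this via parentChanged
            }
        }
    };

The comparison against the previous result is what stops the dirty flag from cascading forever: a parent that recomputed to the same matrix does not force its whole subtree to rebuild.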
@@ -103,7 +131,7 @@ void JointState::computeVisibleTransform(const glm::mat4& parentTransform) {
}

glm::quat JointState::getRotationInBindFrame() const {
    return _rotation * _fbxJoint->inverseBindRotation;
    return getRotation() * _fbxJoint->inverseBindRotation;
}

glm::quat JointState::getRotationInParentFrame() const {
@@ -126,7 +154,7 @@ void JointState::setRotationInBindFrame(const glm::quat& rotation, float priorit
    // rotation is from bind- to model-frame
    assert(_fbxJoint != NULL);
    if (priority >= _animationPriority) {
        glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(_rotation) * rotation * glm::inverse(_fbxJoint->inverseBindRotation);
        glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(getRotation()) * rotation * glm::inverse(_fbxJoint->inverseBindRotation);
        if (constrain && _constraint) {
            _constraint->softClamp(targetRotation, _rotationInConstrainedFrame, 0.5f);
        }
@@ -139,29 +167,32 @@ void JointState::clearTransformTranslation() {
    _transform[3][0] = 0.0f;
    _transform[3][1] = 0.0f;
    _transform[3][2] = 0.0f;
    _transformChanged = true;
    _visibleTransform[3][0] = 0.0f;
    _visibleTransform[3][1] = 0.0f;
    _visibleTransform[3][2] = 0.0f;
}

void JointState::setRotation(const glm::quat& rotation, bool constrain, float priority) {
    applyRotationDelta(rotation * glm::inverse(_rotation), true, priority);
    applyRotationDelta(rotation * glm::inverse(getRotation()), true, priority);
}

void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, float priority) {
    // NOTE: delta is in model-frame
    assert(_fbxJoint != NULL);
    if (priority < _animationPriority) {
    if (priority < _animationPriority || delta.null) {
        return;
    }
    _animationPriority = priority;
    glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(getRotation()) * delta * getRotation();
    if (!constrain || _constraint == NULL) {
        // no constraints
        _rotationInConstrainedFrame = _rotationInConstrainedFrame * glm::inverse(_rotation) * delta * _rotation;
        _rotation = delta * _rotation;
        _rotationInConstrainedFrame = targetRotation;
        _transformChanged = true;

        _rotation = delta * getRotation();
        return;
    }
    glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(_rotation) * delta * _rotation;
    setRotationInConstrainedFrame(targetRotation);
}
@@ -174,7 +205,7 @@ void JointState::mixRotationDelta(const glm::quat& delta, float mixFactor, float
        return;
    }
    _animationPriority = priority;
    glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(_rotation) * delta * _rotation;
    glm::quat targetRotation = _rotationInConstrainedFrame * glm::inverse(getRotation()) * delta * getRotation();
    if (mixFactor > 0.0f && mixFactor <= 1.0f) {
        targetRotation = safeMix(targetRotation, _fbxJoint->rotation, mixFactor);
    }
@@ -198,7 +229,7 @@ void JointState::mixVisibleRotationDelta(const glm::quat& delta, float mixFactor
glm::quat JointState::computeParentRotation() const {
    // R = Rp * Rpre * r * Rpost
    // Rp = R * (Rpre * r * Rpost)^
    return _rotation * glm::inverse(_fbxJoint->preRotation * _rotationInConstrainedFrame * _fbxJoint->postRotation);
    return getRotation() * glm::inverse(_fbxJoint->preRotation * _rotationInConstrainedFrame * _fbxJoint->postRotation);
}

glm::quat JointState::computeVisibleParentRotation() const {
@@ -208,6 +239,7 @@ glm::quat JointState::computeVisibleParentRotation() const {
void JointState::setRotationInConstrainedFrame(const glm::quat& targetRotation) {
    glm::quat parentRotation = computeParentRotation();
    _rotationInConstrainedFrame = targetRotation;
    _transformChanged = true;
    // R' = Rp * Rpre * r' * Rpost
    _rotation = parentRotation * _fbxJoint->preRotation * _rotationInConstrainedFrame * _fbxJoint->postRotation;
}
@@ -233,6 +265,6 @@ const glm::vec3& JointState::getDefaultTranslationInConstrainedFrame() const {

void JointState::slaveVisibleTransform() {
    _visibleTransform = _transform;
    _visibleRotation = _rotation;
    _visibleRotation = getRotation();
    _visibleRotationInConstrainedFrame = _rotationInConstrainedFrame;
}
}
@@ -33,7 +33,9 @@ public:
    void copyState(const JointState& state);

    void initTransform(const glm::mat4& parentTransform);
    void computeTransform(const glm::mat4& parentTransform);
    // if synchronousRotationCompute is true, then _transform is still computed synchronously,
    // but _rotation will be asynchronously extracted
    void computeTransform(const glm::mat4& parentTransform, bool parentTransformChanged = true, bool synchronousRotationCompute = false);

    void computeVisibleTransform(const glm::mat4& parentTransform);
    const glm::mat4& getVisibleTransform() const { return _visibleTransform; }
@@ -41,8 +43,10 @@ public:
    glm::vec3 getVisiblePosition() const { return extractTranslation(_visibleTransform); }

    const glm::mat4& getTransform() const { return _transform; }
    void resetTransformChanged() { _transformChanged = false; }
    bool getTransformChanged() const { return _transformChanged; }

    glm::quat getRotation() const { return _rotation; }
    glm::quat getRotation() const;
    glm::vec3 getPosition() const { return extractTranslation(_transform); }

    /// \return rotation from bind to model frame
@@ -104,7 +108,9 @@ private:
    /// debug helper function
    void loadBindRotation();

    bool _transformChanged;
    glm::mat4 _transform; // joint- to model-frame
    bool _rotationIsValid;
    glm::quat _rotation; // joint- to model-frame
    glm::quat _rotationInConstrainedFrame; // rotation in frame where angular constraints would be applied
    glm::vec3 _positionInParentFrame; // only changes when the Model is scaled
@@ -922,7 +922,7 @@ void Model::simulate(float deltaTime, bool fullUpdate) {
void Model::simulateInternal(float deltaTime) {
    // NOTE: this is a recursive call that walks all attachments, and their attachments
    // update the world space transforms for all joints


    // update animations
    foreach (const AnimationHandlePointer& handle, _runningAnimations) {
        handle->simulate(deltaTime);
@@ -931,8 +931,11 @@ void Model::simulateInternal(float deltaTime) {
    for (int i = 0; i < _jointStates.size(); i++) {
        updateJointState(i);
    }
    for (int i = 0; i < _jointStates.size(); i++) {
        _jointStates[i].resetTransformChanged();
    }

    _shapesAreDirty = ! _shapes.isEmpty();
    _shapesAreDirty = !_shapes.isEmpty();

    // update the attachment transforms and simulate them
    const FBXGeometry& geometry = _geometry->getFBXGeometry();
@@ -994,7 +997,7 @@ void Model::updateJointState(int index) {
        state.computeTransform(parentTransform);
    } else {
        const JointState& parentState = _jointStates.at(parentIndex);
        state.computeTransform(parentState.getTransform());
        state.computeTransform(parentState.getTransform(), parentState.getTransformChanged());
    }
}
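Note the two-pass structure the simulateInternal() hunk adds: the first loop updates every joint while the transform-changed flags are still readable (a child consults its parent's flag via getTransformChanged() above), and only a second loop clears them. Clearing inline during the first pass would hide a parent's change from children processed later. A shape-only sketch of the discipline:

    #include <vector>

    struct JointSketch {
        bool transformChanged = false;
        void update() { /* recompute from parent; may set transformChanged */ }
        void resetTransformChanged() { transformChanged = false; }
    };

    void simulateSketch(std::vector<JointSketch>& joints) {
        for (JointSketch& joint : joints) {
            joint.update();                  // parents' flags still visible
        }
        for (JointSketch& joint : joints) {
            joint.resetTransformChanged();   // clean slate for next frame
        }
    }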
@@ -17,10 +17,11 @@ BillboardOverlay::BillboardOverlay()
    : _fromImage(-1,-1,-1,-1),
    _scale(1.0f),
    _isFacingAvatar(true) {
    _isLoaded = false;
}

void BillboardOverlay::render() {
    if (!_visible) {
    if (!_visible || !_isLoaded) {
        return;
    }
|
|||
((float)_fromImage.y() + (float)_fromImage.height()) / (float)_size.height());
|
||||
glVertex2f(-x, y);
|
||||
} glEnd();
|
||||
} else {
|
||||
glColor4f(0.5f, 0.5f, 0.5f, 1.0f);
|
||||
glBegin(GL_QUADS); {
|
||||
glVertex2f(-1.0f, -1.0f);
|
||||
glVertex2f(1.0f, -1.0f);
|
||||
glVertex2f(1.0f, 1.0f);
|
||||
glVertex2f(-1.0f, 1.0f);
|
||||
} glEnd();
|
||||
}
|
||||
|
||||
} glPopMatrix();
|
||||
|
||||
glDisable(GL_TEXTURE_2D);
|
||||
|
@ -167,6 +159,7 @@ void BillboardOverlay::setProperties(const QScriptValue &properties) {
|
|||
}
|
||||
|
||||
void BillboardOverlay::setBillboardURL(const QUrl url) {
|
||||
_isLoaded = false;
|
||||
QNetworkReply* reply = NetworkAccessManager::getInstance().get(QNetworkRequest(url));
|
||||
connect(reply, &QNetworkReply::finished, this, &BillboardOverlay::replyFinished);
|
||||
}
|
||||
|
@ -175,4 +168,5 @@ void BillboardOverlay::replyFinished() {
|
|||
// replace our byte array with the downloaded data
|
||||
QNetworkReply* reply = static_cast<QNetworkReply*>(sender());
|
||||
_billboard = reply->readAll();
|
||||
_isLoaded = true;
|
||||
}
|
||||
|
|
|
@@ -24,6 +24,7 @@ ImageOverlay::ImageOverlay() :
    _textureBound(false),
    _wantClipFromImage(false)
{
    _isLoaded = false;
}

ImageOverlay::~ImageOverlay() {
@@ -35,6 +36,7 @@ ImageOverlay::~ImageOverlay() {

// TODO: handle setting image multiple times, how do we manage releasing the bound texture?
void ImageOverlay::setImageURL(const QUrl& url) {
    _isLoaded = false;
    NetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance();
    QNetworkReply* reply = networkAccessManager.get(QNetworkRequest(url));
    connect(reply, &QNetworkReply::finished, this, &ImageOverlay::replyFinished);
@@ -47,10 +49,11 @@ void ImageOverlay::replyFinished() {
    QByteArray rawData = reply->readAll();
    _textureImage.loadFromData(rawData);
    _renderImage = true;
    _isLoaded = true;
}

void ImageOverlay::render() {
    if (!_visible) {
    if (!_visible || !_isLoaded) {
        return; // do nothing if we're not visible
    }
    if (_renderImage && !_textureBound) {
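All of these overlay hunks converge on one life cycle: clear _isLoaded when a new URL is requested, set it when the reply lands, and have render() (and, below, hit-testing) bail out in between so half-loaded content never draws. A compressed sketch of that pattern using the same Qt networking calls the diff uses (LoadedGatedOverlay is a stand-in class, not part of the codebase):

    #include <QNetworkAccessManager>
    #include <QNetworkRequest>
    #include <QNetworkReply>
    #include <QObject>
    #include <QUrl>

    class LoadedGatedOverlay : public QObject {
        Q_OBJECT
    public:
        void setURL(const QUrl& url) {
            _isLoaded = false; // stale content must not render meanwhile
            QNetworkReply* reply = _nam.get(QNetworkRequest(url));
            connect(reply, &QNetworkReply::finished,
                    this, &LoadedGatedOverlay::replyFinished);
        }
        void render() {
            if (!_visible || !_isLoaded) {
                return; // same guard the hunks add
            }
            // ... draw _data ...
        }
    private slots:
        void replyFinished() {
            QNetworkReply* reply = static_cast<QNetworkReply*>(sender());
            _data = reply->readAll();
            reply->deleteLater();
            _isLoaded = true; // now render() may proceed
        }
    private:
        QNetworkAccessManager _nam;
        QByteArray _data;
        bool _visible = true;
        bool _isLoaded = false;
    };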
@@ -15,8 +15,10 @@
ModelOverlay::ModelOverlay()
    : _model(),
    _scale(1.0f),
    _updateModel(false) {
    _updateModel(false)
{
    _model.init();
    _isLoaded = false;
}

void ModelOverlay::update(float deltatime) {
@@ -32,6 +34,7 @@ void ModelOverlay::update(float deltatime) {
    } else {
        _model.simulate(deltatime);
    }
    _isLoaded = _model.isActive();
}

void ModelOverlay::render() {
@@ -90,6 +93,7 @@ void ModelOverlay::setProperties(const QScriptValue &properties) {
    if (urlValue.isValid()) {
        _url = urlValue.toVariant().toString();
        _updateModel = true;
        _isLoaded = false;
    }

    QScriptValue scaleValue = properties.property("scale");
@@ -21,6 +21,7 @@

Overlay::Overlay() :
    _parent(NULL),
    _isLoaded(true),
    _alpha(DEFAULT_ALPHA),
    _color(DEFAULT_OVERLAY_COLOR),
    _visible(true),
@@ -40,6 +40,7 @@ public:
    virtual void render() = 0;

    // getters
    bool isLoaded() { return _isLoaded; }
    bool getVisible() const { return _visible; }
    const xColor& getColor() const { return _color; }
    float getAlpha() const { return _alpha; }
@@ -55,6 +56,7 @@ public:

protected:
    QGLWidget* _parent;
    bool _isLoaded;
    float _alpha;
    xColor _color;
    bool _visible; // should the overlay be drawn at all
@@ -227,11 +227,23 @@ unsigned int Overlays::getOverlayAtPoint(const glm::vec2& point) {
        i.previous();
        unsigned int thisID = i.key();
        Overlay2D* thisOverlay = static_cast<Overlay2D*>(i.value());
        if (thisOverlay->getVisible() && thisOverlay->getBounds().contains(point.x, point.y, false)) {
        if (thisOverlay->getVisible() && thisOverlay->isLoaded() && thisOverlay->getBounds().contains(point.x, point.y, false)) {
            return thisID;
        }
    }
    return 0; // not found
}

bool Overlays::isLoaded(unsigned int id) {
    QReadLocker lock(&_lock);
    Overlay* overlay = _overlays2D.value(id);
    if (!overlay) {
        _overlays3D.value(id);
    }
    if (!overlay) {
        return false; // not found
    }

    return overlay->isLoaded();
}
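A sketch of the two-map fallback lookup that Overlays::isLoaded() above appears to intend, with one difference called out in the comments: in the hunk as committed, the result of `_overlays3D.value(id);` is discarded, so a 3D overlay can never be found by this path. Stand-in types, not the real Overlay hierarchy:

    #include <QMap>

    struct OverlaySketch { bool isLoaded() const { return true; } };

    bool isLoadedSketch(const QMap<unsigned int, OverlaySketch*>& overlays2D,
                        const QMap<unsigned int, OverlaySketch*>& overlays3D,
                        unsigned int id) {
        OverlaySketch* overlay = overlays2D.value(id); // nullptr if absent
        if (!overlay) {
            overlay = overlays3D.value(id); // assign, so 3D overlays are found
        }
        if (!overlay) {
            return false; // not found in either map
        }
        return overlay->isLoaded();
    }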
@@ -38,6 +38,9 @@ public slots:

    /// returns the top most overlay at the screen point, or 0 if not overlay at that point
    unsigned int getOverlayAtPoint(const glm::vec2& point);

    /// returns whether the overlay's assets are loaded or not
    bool isLoaded(unsigned int id);

private:
    QMap<unsigned int, Overlay*> _overlays2D;
@@ -757,8 +757,7 @@ void setTranslation(glm::mat4& matrix, const glm::vec3& translation) {
glm::quat extractRotation(const glm::mat4& matrix, bool assumeOrthogonal) {
    // uses the iterative polar decomposition algorithm described by Ken Shoemake at
    // http://www.cs.wisc.edu/graphics/Courses/838-s2002/Papers/polar-decomp.pdf
    // code adapted from Clyde, https://github.com/threerings/clyde/blob/master/src/main/java/com/threerings/math/Matrix4f.java

    // code adapted from Clyde, https://github.com/threerings/clyde/blob/master/core/src/main/java/com/threerings/math/Matrix4f.java
    // start with the contents of the upper 3x3 portion of the matrix
    glm::mat3 upper = glm::mat3(matrix);
    if (!assumeOrthogonal) {