Mirror of https://github.com/overte-org/overte.git
Merge pull request #901 from ey6es/shifty
LED tracking and Faceshift support. Also moved all device files to "devices" source folder.
Commit a36fb6a35b
30 changed files with 1518 additions and 94 deletions
44
cmake/modules/FindFaceshift.cmake
Normal file
@ -0,0 +1,44 @@
# Try to find the Faceshift networking library
#
# You must provide a FACESHIFT_ROOT_DIR which contains lib and include directories
#
# Once done this will define
#
#  FACESHIFT_FOUND - system found Faceshift
#  FACESHIFT_INCLUDE_DIRS - the Faceshift include directory
#  FACESHIFT_LIBRARIES - Link this to use Faceshift
#
#  Created on 8/30/2013 by Andrzej Kapolka
#  Copyright (c) 2013 High Fidelity
#

if (FACESHIFT_LIBRARIES AND FACESHIFT_INCLUDE_DIRS)
  # in cache already
  set(FACESHIFT_FOUND TRUE)
else (FACESHIFT_LIBRARIES AND FACESHIFT_INCLUDE_DIRS)
  find_path(FACESHIFT_INCLUDE_DIRS fsbinarystream.h ${FACESHIFT_ROOT_DIR}/include)

  if (APPLE)
    find_library(FACESHIFT_LIBRARIES libfaceshift.a ${FACESHIFT_ROOT_DIR}/lib/MacOS/)
  elseif (UNIX)
    find_library(FACESHIFT_LIBRARIES libfaceshift.a ${FACESHIFT_ROOT_DIR}/lib/UNIX/)
  endif ()

  if (FACESHIFT_INCLUDE_DIRS AND FACESHIFT_LIBRARIES)
    set(FACESHIFT_FOUND TRUE)
  endif (FACESHIFT_INCLUDE_DIRS AND FACESHIFT_LIBRARIES)

  if (FACESHIFT_FOUND)
    if (NOT FACESHIFT_FIND_QUIETLY)
      message(STATUS "Found Faceshift: ${FACESHIFT_LIBRARIES}")
    endif (NOT FACESHIFT_FIND_QUIETLY)
  else (FACESHIFT_FOUND)
    if (FACESHIFT_FIND_REQUIRED)
      message(FATAL_ERROR "Could not find Faceshift")
    endif (FACESHIFT_FIND_REQUIRED)
  endif (FACESHIFT_FOUND)

  # show the FACESHIFT_INCLUDE_DIRS and FACESHIFT_LIBRARIES variables only in the advanced view
  mark_as_advanced(FACESHIFT_INCLUDE_DIRS FACESHIFT_LIBRARIES)

endif (FACESHIFT_LIBRARIES AND FACESHIFT_INCLUDE_DIRS)
interface/CMakeLists.txt

@ -8,6 +8,7 @@ project(${TARGET_NAME})

 # setup for find modules
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../cmake/modules/")
+set(FACESHIFT_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/external/faceshift)
 set(LIBOVR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/external/LibOVR)
 set(LIBVPX_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/external/LibVPX)
 set(LEAP_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/external/Leap)
@ -37,7 +38,7 @@ configure_file(InterfaceConfig.h.in ${PROJECT_BINARY_DIR}/includes/InterfaceConf

 # grab the implementation and header files from src dirs
 file(GLOB INTERFACE_SRCS src/*.cpp src/*.h)
-foreach(SUBDIR avatar ui renderer)
+foreach(SUBDIR avatar devices renderer ui)
   file(GLOB SUBDIR_SRCS src/${SUBDIR}/*.cpp src/${SUBDIR}/*.h)
   set(INTERFACE_SRCS ${INTERFACE_SRCS} ${SUBDIR_SRCS})
 endforeach(SUBDIR)
@ -47,6 +48,7 @@ add_subdirectory(src/starfield)

 find_package(Qt5Core REQUIRED)
 find_package(Qt5Gui REQUIRED)
+find_package(Qt5Multimedia REQUIRED)
 find_package(Qt5Network REQUIRED)
 find_package(Qt5OpenGL REQUIRED)
 find_package(Qt5Svg REQUIRED)
@ -90,6 +92,7 @@ link_hifi_library(avatars ${TARGET_NAME} ${ROOT_DIR})
 link_hifi_library(audio ${TARGET_NAME} ${ROOT_DIR})

 # find required libraries
+find_package(Faceshift)
 find_package(GLM REQUIRED)
 find_package(LibOVR)
 find_package(LibVPX)
@ -108,7 +111,7 @@ if (OPENNI_FOUND AND NOT DISABLE_OPENNI)
     target_link_libraries(${TARGET_NAME} ${OPENNI_LIBRARIES})
 endif (OPENNI_FOUND AND NOT DISABLE_OPENNI)

-qt5_use_modules(${TARGET_NAME} Core Gui Network OpenGL Svg)
+qt5_use_modules(${TARGET_NAME} Core Gui Multimedia Network OpenGL Svg)

 # include headers for interface and InterfaceConfig.
 include_directories(
@ -120,6 +123,7 @@ include_directories(
 # use system flag so warnings are suppressed
 include_directories(
     SYSTEM
+    ${FACESHIFT_INCLUDE_DIRS}
     ${GLM_INCLUDE_DIRS}
     ${LIBOVR_INCLUDE_DIRS}
     ${LIBVPX_INCLUDE_DIRS}
@ -131,6 +135,7 @@ include_directories(
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -isystem ${OPENCV_INCLUDE_DIRS}")
 target_link_libraries(
     ${TARGET_NAME}
+    ${FACESHIFT_LIBRARIES}
     ${LIBVPX_LIBRARIES}
     ${MOTIONDRIVER_LIBRARIES}
     ${OPENCV_LIBRARIES}
11
interface/external/faceshift/CMakeLists.txt
vendored
Normal file
@ -0,0 +1,11 @@
cmake_minimum_required(VERSION 2.8)

set(TARGET_NAME faceshift)
project(${TARGET_NAME})

# grab the implementation and header files
file(GLOB FACESHIFT_SRCS include/*.h src/*.cpp)

include_directories(include)

add_library(${TARGET_NAME} ${FACESHIFT_SRCS})
410
interface/external/faceshift/include/fsbinarystream.h
vendored
Normal file
@ -0,0 +1,410 @@
#pragma once

#ifndef FSBINARYSTREAM_H
#define FSBINARYSTREAM_H

// ==========================================================================
// Copyright (C) 2012 faceshift AG, and/or its licensors. All rights reserved.
//
// the software is free to use and provided "as is", without warranty of any kind.
// faceshift AG does not make and hereby disclaims any express or implied
// warranties including, but not limited to, the warranties of
// non-infringement, merchantability or fitness for a particular purpose,
// or arising from a course of dealing, usage, or trade practice. in no
// event will faceshift AG and/or its licensors be liable for any lost
// revenues, data, or profits, or special, direct, indirect, or
// consequential damages, even if faceshift AG and/or its licensors has
// been advised of the possibility or probability of such damages.
// ==========================================================================


/**
 * Define the HAVE_EIGEN preprocessor symbol if you are using the Eigen library; it allows you to easily convert our tracked data to and from Eigen types.
 * See fsVector3f and fsQuaternionf for more details.
 **/

#ifdef HAVE_EIGEN
#include <Eigen/Core>
#include <Eigen/Geometry>
#endif

#ifdef _MSC_VER
#include <memory>
#else
#include <tr1/memory>
#endif

#include <string>
#include <vector>
#include <stdint.h>

/*******************************************************************************************
 * This first part of the file contains a definition of the data structures holding the
 * tracking results
 ******************************************************************************************/

namespace fs {

/**
 * A floating point three-vector.
 *
 * To keep these networking classes as simple as possible, we do not implement the
 * vector semantics here; use Eigen for that purpose. The class just holds three named floats,
 * and you have to interpret them yourself.
 **/
struct fsVector3f {
    float x,y,z;

    fsVector3f() {}
#ifdef HAVE_EIGEN
    explicit fsVector3f(const Eigen::Matrix<float,3,1> &v) : x(v[0]), y(v[1]), z(v[2]) {}
    Eigen::Map< Eigen::Matrix<float,3,1> > eigen() const { return Eigen::Map<Eigen::Matrix<float,3,1> >((float*)this); }
#endif
};

/**
 * An integer three-vector.
 **/
struct fsVector3i {
    int32_t x,y,z;

    fsVector3i() {}
#ifdef HAVE_EIGEN
    explicit fsVector3i(const Eigen::Matrix<int32_t,3,1> &v) : x(v[0]), y(v[1]), z(v[2]) {}
    Eigen::Map<Eigen::Matrix<int32_t,3,1> > eigen() const { return Eigen::Map<Eigen::Matrix<int32_t,3,1> >((int32_t*)this); }
#endif
};

/**
 * An integer four-vector.
 **/
struct fsVector4i {
    int32_t x,y,z,w;

    fsVector4i() {}
#ifdef HAVE_EIGEN
    explicit fsVector4i(const Eigen::Matrix<int32_t,4,1> &v) : x(v[0]), y(v[1]), z(v[2]), w(v[3]) {}
    Eigen::Map<Eigen::Matrix<int32_t,4,1,Eigen::DontAlign> > eigen() const { return Eigen::Map<Eigen::Matrix<int32_t,4,1,Eigen::DontAlign> >((int32_t*)this); }
#endif
};

/**
 * Structure holding the data of a quaternion.
 *
 * To keep these networking classes as simple as possible, we do not implement the
 * quaternion semantics here. The class just holds four named floats, and you have to interpret them yourself.
 *
 * If you have Eigen you can just convert this class to an Eigen::Quaternionf and use it.
 *
 * The quaternion is defined as w+xi+yj+zk
 **/
struct fsQuaternionf {
    float x,y,z,w;

    fsQuaternionf() {}
#ifdef HAVE_EIGEN
    explicit fsQuaternionf(const Eigen::Quaternionf &q) : x(q.x()), y(q.y()), z(q.z()), w(q.w()) {}
    Eigen::Quaternionf eigen() const { return Eigen::Quaternionf(w,x,y,z); }
#endif
};

/**
 * A structure containing the data tracked for a single frame.
 **/
class fsTrackingData {
public:
    //! time stamp in ms
    double m_timestamp;

    //! flag whether tracking was successful [0,1]
    bool m_trackingSuccessful;

    //! head pose
    fsQuaternionf m_headRotation;
    fsVector3f m_headTranslation;

    //! eye gaze in degrees
    float m_eyeGazeLeftPitch;
    float m_eyeGazeLeftYaw;
    float m_eyeGazeRightPitch;
    float m_eyeGazeRightYaw;

    //! blendshape coefficients
    std::vector<float> m_coeffs;

    //! marker positions - format specified in faceshift
    std::vector< fsVector3f > m_markers;
};

/**
 * A structure containing vertex information
 */
class fsVertexData {
public:
    //! vertex data
    std::vector<fsVector3f> m_vertices;

#ifdef HAVE_EIGEN
    Eigen::Map<Eigen::Matrix<float,3,Eigen::Dynamic> > eigen() { return Eigen::Map<Eigen::Matrix<float,3,Eigen::Dynamic> >((float*)m_vertices.data(),3,m_vertices.size()); }
#endif
};

/**
 * A structure containing mesh information
 */
class fsMeshData {
public:
    //! topology (quads)
    std::vector<fsVector4i> m_quads;

    //! topology (triangles)
    std::vector<fsVector3i> m_tris;

    //! vertex data
    fsVertexData m_vertex_data;

#ifdef HAVE_EIGEN
    Eigen::Map<Eigen::Matrix<int32_t,4,Eigen::Dynamic,Eigen::DontAlign> > quads_eigen() { return Eigen::Map<Eigen::Matrix<int32_t,4,Eigen::Dynamic,Eigen::DontAlign> >((int32_t*)m_quads.data(),4,m_quads.size()); }
    Eigen::Map<Eigen::Matrix<int32_t,3,Eigen::Dynamic> > tris_eigen() { return Eigen::Map<Eigen::Matrix<int32_t,3,Eigen::Dynamic> >((int32_t*)m_tris.data(),3,m_tris.size()); }
    Eigen::Map<Eigen::Matrix<float,3,Eigen::Dynamic> > vertices_eigen() { return m_vertex_data.eigen(); }
#endif

};

/*******************************************************************************************
 * Now follows a definition of data structures encapsulating the network messages
 ******************************************************************************************/

/** Predeclaration of the message types available in faceshift **/

// Inbound
class fsMsgStartCapturing;
class fsMsgStopCapturing;
class fsMsgCalibrateNeutral;
class fsMsgSendMarkerNames;
class fsMsgSendBlendshapeNames;
class fsMsgSendRig;

// Outbound
class fsMsgTrackingState;
class fsMsgMarkerNames;
class fsMsgBlendshapeNames;
class fsMsgRig;

/**
 * Base class of all messages that faceshift sends.
 * A class can be queried for its type, using the id() function for use in a switch statement, or by using a dynamic_cast.
 **/
class fsMsg {
public:
    virtual ~fsMsg() {}

    enum MessageType {
        // Messages to control faceshift via the network
        // These are sent from the client to faceshift
        MSG_IN_START_TRACKING        = 44344,
        MSG_IN_STOP_TRACKING         = 44444,
        MSG_IN_CALIBRATE_NEUTRAL     = 44544,
        MSG_IN_SEND_MARKER_NAMES     = 44644,
        MSG_IN_SEND_BLENDSHAPE_NAMES = 44744,
        MSG_IN_SEND_RIG              = 44844,
        MSG_IN_HEADPOSE_RELATIVE     = 44944,
        MSG_IN_HEADPOSE_ABSOLUTE     = 44945,

        // Messages containing tracking information
        // These are sent from faceshift to the client application
        MSG_OUT_TRACKING_STATE   = 33433,
        MSG_OUT_MARKER_NAMES     = 33533,
        MSG_OUT_BLENDSHAPE_NAMES = 33633,
        MSG_OUT_RIG              = 33733
    };

    virtual MessageType id() const = 0;
};
typedef std::tr1::shared_ptr<fsMsg> fsMsgPtr;


/*************
 * Inbound
 ***********/
class fsMsgStartCapturing : public fsMsg {
public:
    virtual ~fsMsgStartCapturing() {}
    virtual MessageType id() const { return MSG_IN_START_TRACKING; }
};
class fsMsgStopCapturing : public fsMsg {
public:
    virtual ~fsMsgStopCapturing() {}
    virtual MessageType id() const { return MSG_IN_STOP_TRACKING; }
};
class fsMsgCalibrateNeutral : public fsMsg {
public:
    virtual ~fsMsgCalibrateNeutral() {}
    virtual MessageType id() const { return MSG_IN_CALIBRATE_NEUTRAL; }
};
class fsMsgSendMarkerNames : public fsMsg {
public:
    virtual ~fsMsgSendMarkerNames() {}
    virtual MessageType id() const { return MSG_IN_SEND_MARKER_NAMES; }
};
class fsMsgSendBlendshapeNames : public fsMsg {
public:
    virtual ~fsMsgSendBlendshapeNames() {}
    virtual MessageType id() const { return MSG_IN_SEND_BLENDSHAPE_NAMES; }
};
class fsMsgSendRig : public fsMsg {
public:
    virtual ~fsMsgSendRig() {}
    virtual MessageType id() const { return MSG_IN_SEND_RIG; }
};
class fsMsgHeadPoseRelative : public fsMsg {
public:
    virtual ~fsMsgHeadPoseRelative() {}
    virtual MessageType id() const { return MSG_IN_HEADPOSE_RELATIVE; }
};
class fsMsgHeadPoseAbsolute : public fsMsg {
public:
    virtual ~fsMsgHeadPoseAbsolute() {}
    virtual MessageType id() const { return MSG_IN_HEADPOSE_ABSOLUTE; }
};


/*************
 * Outbound
 ***********/
class fsMsgTrackingState : public fsMsg {
public:
    virtual ~fsMsgTrackingState() {}

    /* */ fsTrackingData & tracking_data() /* */ { return m_tracking_data; }
    const fsTrackingData & tracking_data() const { return m_tracking_data; }

    virtual MessageType id() const { return MSG_OUT_TRACKING_STATE; }

private:
    fsTrackingData m_tracking_data;
};
class fsMsgMarkerNames : public fsMsg {
public:
    virtual ~fsMsgMarkerNames() {}

    /* */ std::vector<std::string> & marker_names() /* */ { return m_marker_names; }
    const std::vector<std::string> & marker_names() const { return m_marker_names; }

    virtual MessageType id() const { return MSG_OUT_MARKER_NAMES; }
private:
    std::vector<std::string> m_marker_names;
};
class fsMsgBlendshapeNames : public fsMsg {
public:
    virtual ~fsMsgBlendshapeNames() {}

    /* */ std::vector<std::string> & blendshape_names() /* */ { return m_blendshape_names; }
    const std::vector<std::string> & blendshape_names() const { return m_blendshape_names; }

    virtual MessageType id() const { return MSG_OUT_BLENDSHAPE_NAMES; }
private:
    std::vector<std::string> m_blendshape_names;
};
class fsMsgRig : public fsMsg {
public:
    virtual ~fsMsgRig() {}

    virtual MessageType id() const { return MSG_OUT_RIG; }

    /* */ fsMeshData & mesh() /* */ { return m_mesh; }
    const fsMeshData & mesh() const { return m_mesh; }

    /* */ std::vector<std::string> & blendshape_names() /* */ { return m_blendshape_names; }
    const std::vector<std::string> & blendshape_names() const { return m_blendshape_names; }

    /* */ std::vector<fsVertexData> & blendshapes() /* */ { return m_blendshapes; }
    const std::vector<fsVertexData> & blendshapes() const { return m_blendshapes; }

private:
    //! neutral mesh
    fsMeshData m_mesh;
    //! blendshape names
    std::vector<std::string> m_blendshape_names;
    //! blendshapes
    std::vector<fsVertexData> m_blendshapes;
};
class fsMsgSignal : public fsMsg {
    MessageType m_id;
public:
    explicit fsMsgSignal(MessageType id) : m_id(id) {}
    virtual ~fsMsgSignal() {}
    virtual MessageType id() const { return m_id; }
};

/**
 * Class to parse a faceshift data stream, and to create messages to write into such a stream
 *
 * This needs to be connected with your networking methods by calling
 *
 *   void received(long int, const char *);
 *
 * whenever new data is available. After adding received data to the parser you can parse faceshift messages using
 *
 *   std::tr1::shared_ptr<fsMsg> get_message();
 *
 * to get the next message, if a full block of data has been received. This should be iterated until no more messages are in the buffer.
 *
 * You can also use this to encode messages to send back to faceshift. This works by calling the
 *
 *   void encode_message(std::string &msg_out, const fsMsg &msg);
 *
 * methods (actually the specializations existing for each of our message types). This will encode the message into a
 * binary string in msg_out. You then only need to push the resulting string over the network to faceshift.
 *
 * This class does not handle differences in endianness or other strange things that can happen when pushing data over the network.
 * Should you have to adapt this to such a system, then it should be possible to do this by changing only the write_... and read_...
 * functions in the accompanying cpp file, but so far there was no need for it.
 **/
class fsBinaryStream {
public:
    fsBinaryStream();

    /**
     * Used to push data into the parser. Typically called inside your network receiver routine.
     **/
    void received(long int, const char *);
    /**
     * After pushing data, you can try to extract messages from the stream. Process messages until a null pointer is returned.
     **/
    fsMsgPtr get_message();
    /**
     * When an invalid message is received, the valid field is set to false. No attempt is made to recover from the problem; you will have to disconnect.
     **/
    bool valid() const { return m_valid; }
    void clear() { m_start = 0; m_end = 0; m_valid = true; }

    // Inbound
    static void encode_message(std::string &msg_out, const fsMsgTrackingState &msg);
    static void encode_message(std::string &msg_out, const fsMsgStartCapturing &msg);
    static void encode_message(std::string &msg_out, const fsMsgStopCapturing &msg);
    static void encode_message(std::string &msg_out, const fsMsgCalibrateNeutral &msg);
    static void encode_message(std::string &msg_out, const fsMsgSendMarkerNames &msg);
    static void encode_message(std::string &msg_out, const fsMsgSendBlendshapeNames &msg);
    static void encode_message(std::string &msg_out, const fsMsgSendRig &msg);
    static void encode_message(std::string &msg_out, const fsMsgHeadPoseRelative &msg);
    static void encode_message(std::string &msg_out, const fsMsgHeadPoseAbsolute &msg);

    // Outbound
    static void encode_message(std::string &msg_out, const fsTrackingData &msg);
    static void encode_message(std::string &msg_out, const fsMsgMarkerNames &msg);
    static void encode_message(std::string &msg_out, const fsMsgBlendshapeNames &msg);
    static void encode_message(std::string &msg_out, const fsMsgRig &msg);
    static void encode_message(std::string &msg_out, const fsMsgSignal &msg); // Generic Signal

private:
    std::string m_buffer;
    long int m_start;
    long int m_end;
    bool m_valid;
};

}

#endif // FSBINARYSTREAM_H
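For reference, the receive loop that the fsBinaryStream comments above describe looks roughly like this on the client side. This is an illustrative sketch, not part of the commit: pumpFaceshift() and its arguments are hypothetical names, and only the tracking-state message is handled.

#include <cstdio>
#include "fsbinarystream.h"

// Feed one freshly received network chunk to the parser, then drain all
// complete messages. Chunks may split or merge messages arbitrarily; the
// parser reassembles them internally.
void pumpFaceshift(fs::fsBinaryStream& parser, const char* chunk, long chunkSize) {
    parser.received(chunkSize, chunk);
    for (fs::fsMsgPtr msg = parser.get_message(); msg; msg = parser.get_message()) {
        switch (msg->id()) {
            case fs::fsMsg::MSG_OUT_TRACKING_STATE: {
                // downcast (per the fsMsg comment, id() or dynamic_cast both work)
                fs::fsMsgTrackingState* state = static_cast<fs::fsMsgTrackingState*>(msg.get());
                const fs::fsTrackingData& data = state->tracking_data();
                printf("t=%.1fms tracked=%d coeffs=%d\n", data.m_timestamp,
                    (int)data.m_trackingSuccessful, (int)data.m_coeffs.size());
                break;
            }
            default:
                break; // marker names, blendshape names, rig, etc. elided
        }
    }
    if (!parser.valid()) {
        // stream is corrupt; per the header comments the only recovery is to
        // disconnect and reconnect (or call clear() after resynchronizing)
    }
}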
BIN
interface/external/faceshift/lib/MacOS/libfaceshift.a
vendored
Normal file
Binary file not shown.
BIN
interface/external/faceshift/lib/UNIX/libfaceshift.a
vendored
Normal file
Binary file not shown.
502
interface/external/faceshift/src/fsbinarystream.cpp
vendored
Normal file
@ -0,0 +1,502 @@
// ==========================================================================
// Copyright (C) 2012 faceshift AG, and/or its licensors. All rights reserved.
//
// the software is free to use and provided "as is", without warranty of any kind.
// faceshift AG does not make and hereby disclaims any express or implied
// warranties including, but not limited to, the warranties of
// non-infringement, merchantability or fitness for a particular purpose,
// or arising from a course of dealing, usage, or trade practice. in no
// event will faceshift AG and/or its licensors be liable for any lost
// revenues, data, or profits, or special, direct, indirect, or
// consequential damages, even if faceshift AG and/or its licensors has
// been advised of the possibility or probability of such damages.
// ==========================================================================

#include "fsbinarystream.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FSNETWORKVERSION 1

#ifdef FS_INTERNAL
#include <common/log.hpp>
#else
#define LOG_RELEASE_ERROR(...)   { printf("ERROR:   %20s:%6d", __FILE__, __LINE__); printf(__VA_ARGS__); }
#define LOG_RELEASE_WARNING(...) { printf("WARNING: %20s:%6d", __FILE__, __LINE__); printf(__VA_ARGS__); }
#define LOG_RELEASE_INFO(...)    { printf("INFO:    %20s:%6d", __FILE__, __LINE__); printf(__VA_ARGS__); }
#endif


namespace fs {

// Ids of the submessages for the tracking state
enum BlockId {
    BLOCKID_INFO        = 101,
    BLOCKID_POSE        = 102,
    BLOCKID_BLENDSHAPES = 103,
    BLOCKID_EYES        = 104,
    BLOCKID_MARKERS     = 105
};


typedef long int Size;

struct BlockHeader {
    uint16_t id;
    uint16_t version;
    uint32_t size;
    BlockHeader(uint16_t _id=0,
                uint32_t _size=0,
                uint16_t _version=FSNETWORKVERSION
                ) : id(_id), version(_version), size(_size) {}
};

// Interprets the data at the position start in buffer as a T and increments start by sizeof(T)
// It should be sufficient to change/overload this function when you are on a weird endian system
template<class T> bool read_pod(T &value, const std::string &buffer, Size &start) {
    if (start + sizeof(T) > buffer.size()) return false;
    value = *(const T*)(&buffer[start]);
    start += sizeof(T);
    return true;
}
bool read_pod(std::string &value, const std::string &buffer, Size &start) {
    uint16_t len = 0;
    if (!read_pod(len, buffer, start)) return false;
    if (start + len > Size(buffer.size())) return false; // check whether we have enough data available
    value.resize(len);
    memcpy(&(value[0]), &buffer[start], len);
    start += len;
    return true;
}
template<class T> bool read_vector(std::vector<T> &values, const std::string &buffer, Size &start) {
    uint32_t len = 0;
    if (!read_pod(len, buffer, start)) return false;
    if (start + len*sizeof(T) > buffer.size()) return false;
    values.resize(len);
    for (uint32_t i = 0; i < len; ++i) {
        read_pod(values[i], buffer, start);
    }
    return true;
}
template<class T> bool read_small_vector(std::vector<T> &values, const std::string &buffer, Size &start) {
    uint16_t len = 0;
    if (!read_pod(len, buffer, start)) return false;
    if (start + len*sizeof(T) > buffer.size()) return false;
    values.resize(len);
    bool success = true;
    for (uint16_t i = 0; i < len; ++i) {
        success &= read_pod(values[i], buffer, start);
    }
    return success;
}

// Adds the bitpattern of the data to the end of the buffer.
// It should be sufficient to change/overload this function when you are on a weird endian system
template <class T>
void write_pod(std::string &buffer, const T &value) {
    Size start = buffer.size();
    buffer.resize(start + sizeof(T));
    *(T*)(&buffer[start]) = value;
    start += sizeof(T);
}
// special write function for strings
void write_pod(std::string &buffer, const std::string &value) {
    uint16_t len = uint16_t(value.size()); write_pod(buffer, len);
    buffer.append(value);
}
template<class T> void write_vector(std::string &buffer, const std::vector<T> &values) {
    uint32_t len = values.size();
    write_pod(buffer, len);
    for (uint32_t i = 0; i < len; ++i)
        write_pod(buffer, values[i]);
}
template<class T> void write_small_vector(std::string &buffer, const std::vector<T> &values) {
    uint16_t len = values.size();
    write_pod(buffer, len);
    for (uint16_t i = 0; i < len; ++i)
        write_pod(buffer, values[i]);
}
void update_msg_size(std::string &buffer, Size start) {
    *(uint32_t*)(&buffer[start+4]) = buffer.size() - sizeof(BlockHeader) - start;
}
void update_msg_size(std::string &buffer) {
    *(uint32_t*)(&buffer[4]) = buffer.size() - sizeof(BlockHeader);
}

static void skipHeader(Size &start) {
    start += sizeof(BlockHeader);
}

//! returns whether @param data contains enough data to read the block header
static bool headerAvailable(BlockHeader &header, const std::string &buffer, Size &start, const Size &end) {
    if (end-start >= Size(sizeof(BlockHeader))) {
        header = *(BlockHeader*)(&buffer[start]);
        return true;
    } else {
        return false;
    }
}

//! returns whether @param data contains data for a full block
static bool blockAvailable(const std::string &buffer, Size &start, const Size &end) {
    BlockHeader header;
    if (!headerAvailable(header, buffer, start, end)) return false;
    return end-start >= Size(sizeof(header)+header.size);
}

fsBinaryStream::fsBinaryStream() : m_buffer(), m_start(0), m_end(0), m_valid(true) { m_buffer.resize(64*1024); } // Use a 64kb buffer by default

void fsBinaryStream::received(long int sz, const char *data) {

    long int new_end = m_end + sz;
    if (new_end > Size(m_buffer.size()) && m_start > 0) {
        // If the newly received block is too large to fit into the buffer, but we have already processed data from
        // the start of the buffer, then move memory to the front of the buffer.
        // The buffer only grows, such that it is always large enough to contain the largest message seen so far.
        if (m_end > m_start) memmove(&m_buffer[0], &m_buffer[0] + m_start, m_end - m_start);
        m_end = m_end - m_start;
        m_start = 0;
        new_end = m_end + sz;
    }

    if (new_end > Size(m_buffer.size())) m_buffer.resize(1.5*new_end);

    memcpy(&m_buffer[0] + m_end, data, sz);
    m_end += sz;
}

static bool decodeInfo(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
    bool success = true;
    success &= read_pod<double>(_trackingData.m_timestamp, buffer, start);
    unsigned char tracking_successfull = 0;
    success &= read_pod<unsigned char>(tracking_successfull, buffer, start);
    _trackingData.m_trackingSuccessful = bool(tracking_successfull);
    return success;
}

static bool decodePose(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
    bool success = true;
    success &= read_pod(_trackingData.m_headRotation.x, buffer, start);
    success &= read_pod(_trackingData.m_headRotation.y, buffer, start);
    success &= read_pod(_trackingData.m_headRotation.z, buffer, start);
    success &= read_pod(_trackingData.m_headRotation.w, buffer, start);
    success &= read_pod(_trackingData.m_headTranslation.x, buffer, start);
    success &= read_pod(_trackingData.m_headTranslation.y, buffer, start);
    success &= read_pod(_trackingData.m_headTranslation.z, buffer, start);
    return success;
}

static bool decodeBlendshapes(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
    return read_vector(_trackingData.m_coeffs, buffer, start);
}

static bool decodeEyeGaze(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
    bool success = true;
    success &= read_pod(_trackingData.m_eyeGazeLeftPitch , buffer, start);
    success &= read_pod(_trackingData.m_eyeGazeLeftYaw   , buffer, start);
    success &= read_pod(_trackingData.m_eyeGazeRightPitch, buffer, start);
    success &= read_pod(_trackingData.m_eyeGazeRightYaw  , buffer, start);
    return success;
}

static bool decodeMarkers(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
    return read_small_vector(_trackingData.m_markers, buffer, start);
}

static bool decodeMarkerNames(fsMsgMarkerNames &_msg, const std::string &buffer, Size &start) {
    return read_small_vector(_msg.marker_names(), buffer, start);
}
static bool decodeBlendshapeNames(fsMsgBlendshapeNames &_msg, const std::string &buffer, Size &start) {
    return read_small_vector(_msg.blendshape_names(), buffer, start);
}
static bool decodeRig(fsMsgRig &_msg, const std::string &buffer, Size &start) {
    bool success = true;
    success &= read_vector(_msg.mesh().m_quads, buffer, start);                 // read quads
    success &= read_vector(_msg.mesh().m_tris, buffer, start);                  // read triangles
    success &= read_vector(_msg.mesh().m_vertex_data.m_vertices, buffer, start);// read neutral vertices
    success &= read_small_vector(_msg.blendshape_names(), buffer, start);       // read names
    uint16_t bsize = 0;
    success &= read_pod(bsize, buffer, start);
    _msg.blendshapes().resize(bsize);
    for (uint16_t i = 0; i < bsize; i++)
        success &= read_vector(_msg.blendshapes()[i].m_vertices, buffer, start); // read blendshapes
    return success;
}

bool is_valid_msg(int id) {
    switch(id) {
        case fsMsg::MSG_IN_START_TRACKING       :
        case fsMsg::MSG_IN_STOP_TRACKING        :
        case fsMsg::MSG_IN_CALIBRATE_NEUTRAL    :
        case fsMsg::MSG_IN_SEND_MARKER_NAMES    :
        case fsMsg::MSG_IN_SEND_BLENDSHAPE_NAMES:
        case fsMsg::MSG_IN_SEND_RIG             :
        case fsMsg::MSG_IN_HEADPOSE_RELATIVE    :
        case fsMsg::MSG_IN_HEADPOSE_ABSOLUTE    :
        case fsMsg::MSG_OUT_TRACKING_STATE      :
        case fsMsg::MSG_OUT_MARKER_NAMES        :
        case fsMsg::MSG_OUT_BLENDSHAPE_NAMES    :
        case fsMsg::MSG_OUT_RIG                 : return true;
        default:
            LOG_RELEASE_ERROR("Invalid Message ID %d", id);
            return false;
    }
}

fsMsgPtr fsBinaryStream::get_message() {
    BlockHeader super_block;
    if (!headerAvailable(super_block, m_buffer, m_start, m_end)) return fsMsgPtr();
    if (!is_valid_msg(super_block.id)) { LOG_RELEASE_ERROR("Invalid superblock id"); m_valid = false; return fsMsgPtr(); }
    if (!blockAvailable(m_buffer, m_start, m_end)) return fsMsgPtr();
    skipHeader(m_start);
    long super_block_data_start = m_start;
    switch (super_block.id) {
        case fsMsg::MSG_IN_START_TRACKING: {
            if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
            return fsMsgPtr(new fsMsgStartCapturing());
        }; break;
        case fsMsg::MSG_IN_STOP_TRACKING: {
            if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
            return fsMsgPtr(new fsMsgStopCapturing());
        }; break;
        case fsMsg::MSG_IN_CALIBRATE_NEUTRAL: {
            if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
            return fsMsgPtr(new fsMsgCalibrateNeutral());
        }; break;
        case fsMsg::MSG_IN_SEND_MARKER_NAMES: {
            if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
            return fsMsgPtr(new fsMsgSendMarkerNames());
        }; break;
        case fsMsg::MSG_IN_SEND_BLENDSHAPE_NAMES: {
            if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
            return fsMsgPtr(new fsMsgSendBlendshapeNames());
        }; break;
        case fsMsg::MSG_IN_SEND_RIG: {
            if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
            return fsMsgPtr(new fsMsgSendRig());
        }; break;
        case fsMsg::MSG_IN_HEADPOSE_RELATIVE: {
            if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
            return fsMsgPtr(new fsMsgHeadPoseRelative());
        }; break;
        case fsMsg::MSG_IN_HEADPOSE_ABSOLUTE: {
            if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
            return fsMsgPtr(new fsMsgHeadPoseAbsolute());
        }; break;
        case fsMsg::MSG_OUT_MARKER_NAMES: {
            std::tr1::shared_ptr< fsMsgMarkerNames > msg(new fsMsgMarkerNames());
            if (!decodeMarkerNames(*msg, m_buffer, m_start)) { LOG_RELEASE_ERROR("Could not decode marker names"); m_valid = false; return fsMsgPtr(); }
            uint64_t actual_size = m_start-super_block_data_start;
            if (actual_size != super_block.size) { LOG_RELEASE_ERROR("Block was promised to be of size %d, not %d", super_block.size, actual_size); m_valid = false; return fsMsgPtr(); }
            return msg;
        }; break;
        case fsMsg::MSG_OUT_BLENDSHAPE_NAMES: {
            std::tr1::shared_ptr< fsMsgBlendshapeNames > msg(new fsMsgBlendshapeNames());
            if (!decodeBlendshapeNames(*msg, m_buffer, m_start)) { LOG_RELEASE_ERROR("Could not decode blendshape names"); m_valid = false; return fsMsgPtr(); }
            uint64_t actual_size = m_start-super_block_data_start;
            if (actual_size != super_block.size) { LOG_RELEASE_ERROR("Block was promised to be of size %d, not %d", super_block.size, actual_size); m_valid = false; return fsMsgPtr(); }
            return msg;
        }; break;
        case fsMsg::MSG_OUT_TRACKING_STATE: {
            BlockHeader sub_block;
            uint16_t num_blocks = 0;
            if (!read_pod(num_blocks, m_buffer, m_start)) { LOG_RELEASE_ERROR("Could not read num_blocks"); m_valid = false; return fsMsgPtr(); }
            std::tr1::shared_ptr<fsMsgTrackingState> msg = std::tr1::shared_ptr<fsMsgTrackingState>(new fsMsgTrackingState());
            for (int i = 0; i < num_blocks; i++) {
                if (!headerAvailable(sub_block, m_buffer, m_start, m_end)) { LOG_RELEASE_ERROR("could not read sub-header %d", i); m_valid = false; return fsMsgPtr(); }
                if (!blockAvailable(m_buffer, m_start, m_end)) { LOG_RELEASE_ERROR("could not read sub-block %d", i); m_valid = false; return fsMsgPtr(); }
                skipHeader(m_start);
                long sub_block_data_start = m_start;
                bool success = true;
                switch(sub_block.id) {
                    case BLOCKID_INFO:        success &= decodeInfo(       msg->tracking_data(), m_buffer, m_start); break;
                    case BLOCKID_POSE:        success &= decodePose(       msg->tracking_data(), m_buffer, m_start); break;
                    case BLOCKID_BLENDSHAPES: success &= decodeBlendshapes(msg->tracking_data(), m_buffer, m_start); break;
                    case BLOCKID_EYES:        success &= decodeEyeGaze(    msg->tracking_data(), m_buffer, m_start); break;
                    case BLOCKID_MARKERS:     success &= decodeMarkers(    msg->tracking_data(), m_buffer, m_start); break;
                    default:
                        LOG_RELEASE_ERROR("Unexpected subblock id %d", sub_block.id);
                        m_valid = false; return msg;
                        break;
                }
                if (!success) {
                    LOG_RELEASE_ERROR("Could not decode subblock with id %d", sub_block.id);
                    m_valid = false; return fsMsgPtr();
                }
                uint64_t actual_size = m_start-sub_block_data_start;
                if (actual_size != sub_block.size) {
                    LOG_RELEASE_ERROR("Unexpected number of bytes consumed %d instead of %d for subblock %d id:%d", actual_size, sub_block.size, i, sub_block.id);
                    m_valid = false; return fsMsgPtr();
                }
            }
            uint64_t actual_size = m_start-super_block_data_start;
            if (actual_size != super_block.size) {
                LOG_RELEASE_ERROR("Unexpected number of bytes consumed %d instead of %d", actual_size, super_block.size);
                m_valid = false; return fsMsgPtr();
            }
            return msg;
        }; break;
        case fsMsg::MSG_OUT_RIG: {
            std::tr1::shared_ptr< fsMsgRig > msg(new fsMsgRig());
            if (!decodeRig(*msg, m_buffer, m_start)) { LOG_RELEASE_ERROR("Could not decode rig"); m_valid = false; return fsMsgPtr(); }
            if (m_start-super_block_data_start != super_block.size) { LOG_RELEASE_ERROR("Could not decode rig unexpected size"); m_valid = false; return fsMsgPtr(); }
            return msg;
        }; break;
        default: {
            LOG_RELEASE_ERROR("Unexpected superblock id %d", super_block.id);
            m_valid = false; return fsMsgPtr();
        }; break;
    }
    return fsMsgPtr();
}

static void encodeInfo(std::string &buffer, const fsTrackingData & _trackingData) {
    BlockHeader header(BLOCKID_INFO, sizeof(double) + 1);
    write_pod(buffer, header);

    write_pod(buffer, _trackingData.m_timestamp);
    unsigned char tracking_successfull = _trackingData.m_trackingSuccessful;
    write_pod(buffer, tracking_successfull);
}

static void encodePose(std::string &buffer, const fsTrackingData & _trackingData) {
    BlockHeader header(BLOCKID_POSE, sizeof(float)*7);
    write_pod(buffer, header);

    write_pod(buffer, _trackingData.m_headRotation.x);
    write_pod(buffer, _trackingData.m_headRotation.y);
    write_pod(buffer, _trackingData.m_headRotation.z);
    write_pod(buffer, _trackingData.m_headRotation.w);
    write_pod(buffer, _trackingData.m_headTranslation.x);
    write_pod(buffer, _trackingData.m_headTranslation.y);
    write_pod(buffer, _trackingData.m_headTranslation.z);
}

static void encodeBlendshapes(std::string &buffer, const fsTrackingData & _trackingData) {
    uint32_t num_parameters = _trackingData.m_coeffs.size();
    BlockHeader header(BLOCKID_BLENDSHAPES, sizeof(uint32_t) + sizeof(float)*num_parameters);
    write_pod(buffer, header);
    write_pod(buffer, num_parameters);
    for (uint32_t i = 0; i < num_parameters; i++)
        write_pod(buffer, _trackingData.m_coeffs[i]);
}

static void encodeEyeGaze(std::string &buffer, const fsTrackingData & _trackingData) {
    BlockHeader header(BLOCKID_EYES, sizeof(float)*4);
    write_pod(buffer, header);
    write_pod(buffer, _trackingData.m_eyeGazeLeftPitch );
    write_pod(buffer, _trackingData.m_eyeGazeLeftYaw   );
    write_pod(buffer, _trackingData.m_eyeGazeRightPitch);
    write_pod(buffer, _trackingData.m_eyeGazeRightYaw  );
}

static void encodeMarkers(std::string &buffer, const fsTrackingData & _trackingData) {
    uint16_t numMarkers = _trackingData.m_markers.size();
    BlockHeader header(BLOCKID_MARKERS, sizeof(uint16_t) + sizeof(float)*3*numMarkers);
    write_pod(buffer, header);
    write_pod(buffer, numMarkers);
    for (int i = 0; i < numMarkers; i++) {
        write_pod(buffer, _trackingData.m_markers[i].x);
        write_pod(buffer, _trackingData.m_markers[i].y);
        write_pod(buffer, _trackingData.m_markers[i].z);
    }
}

// Inbound
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgTrackingState &msg) {
    encode_message(msg_out, msg.tracking_data());
}

void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgStartCapturing &msg) {
    BlockHeader header(msg.id());
    write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgStopCapturing &msg) {
    BlockHeader header(msg.id());
    write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgCalibrateNeutral &msg) {
    BlockHeader header(msg.id());
    write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgSendMarkerNames &msg) {
    BlockHeader header(msg.id());
    write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgSendBlendshapeNames &msg) {
    BlockHeader header(msg.id());
    write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgSendRig &msg) {
    BlockHeader header(msg.id());
    write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgHeadPoseRelative &msg) {
    BlockHeader header(msg.id());
    write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgHeadPoseAbsolute &msg) {
    BlockHeader header(msg.id());
    write_pod(msg_out, header);
}

// Outbound
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgSignal &msg) {
    BlockHeader header(msg.id());
    write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsTrackingData &tracking_data) {
    Size start = msg_out.size();

    BlockHeader header(fsMsg::MSG_OUT_TRACKING_STATE);
    write_pod(msg_out, header);

    uint16_t N_blocks = 5;
    write_pod(msg_out, N_blocks);
    encodeInfo(       msg_out, tracking_data);
    encodePose(       msg_out, tracking_data);
    encodeBlendshapes(msg_out, tracking_data);
    encodeEyeGaze(    msg_out, tracking_data);
    encodeMarkers(    msg_out, tracking_data);

    update_msg_size(msg_out, start);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgMarkerNames &msg) {
    Size start = msg_out.size();

    BlockHeader header(msg.id());
    write_pod(msg_out, header);

    write_small_vector(msg_out, msg.marker_names());

    update_msg_size(msg_out, start);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgBlendshapeNames &msg) {
    Size start = msg_out.size();

    BlockHeader header(msg.id());
    write_pod(msg_out, header);

    write_small_vector(msg_out, msg.blendshape_names());

    update_msg_size(msg_out, start);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgRig &msg) {
    Size start = msg_out.size();

    BlockHeader header(msg.id());
    write_pod(msg_out, header);

    write_vector(msg_out, msg.mesh().m_quads);                  // write quads
    write_vector(msg_out, msg.mesh().m_tris);                   // write triangles
    write_vector(msg_out, msg.mesh().m_vertex_data.m_vertices); // write neutral vertices
    write_small_vector(msg_out, msg.blendshape_names());        // write names
    write_pod(msg_out, uint16_t(msg.blendshapes().size()));
    for (uint16_t i = 0; i < uint16_t(msg.blendshapes().size()); i++)
        write_vector(msg_out, msg.blendshapes()[i].m_vertices); // write blendshapes

    update_msg_size(msg_out, start);
}
}
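The read_pod/write_pod comments above call out endianness as the one porting seam: on a weird-endian host only those functions should need changes. A byte-swapping read overload might look like the sketch below. This is illustrative only (read_pod_swapped is not part of the vendored file), and it assumes the faceshift wire format is little-endian, which the raw-memory reads above imply for the x86 hosts the library targets.

#include <algorithm>
#include <cstring>
#include <string>

typedef long int Size; // mirrors the typedef in fsbinarystream.cpp

// Like read_pod, but reverses the byte order while copying, for use on
// big-endian hosts reading the (assumed) little-endian wire format.
template<class T> bool read_pod_swapped(T& value, const std::string& buffer, Size& start) {
    if (start + (Size)sizeof(T) > (Size)buffer.size()) return false;
    unsigned char bytes[sizeof(T)];
    memcpy(bytes, &buffer[start], sizeof(T));
    std::reverse(bytes, bytes + sizeof(T));
    memcpy(&value, bytes, sizeof(T));
    start += sizeof(T);
    return true;
}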
interface/src/Application.cpp

@ -58,13 +58,13 @@

 #include "Application.h"
 #include "LogDisplay.h"
-#include "LeapManager.h"
 #include "Menu.h"
-#include "OculusManager.h"
-#include "Swatch.h"
 #include "Util.h"
+#include "devices/LeapManager.h"
+#include "devices/OculusManager.h"
 #include "renderer/ProgramObject.h"
 #include "ui/TextRenderer.h"
+#include "Swatch.h"

 using namespace std;
@ -1528,18 +1528,28 @@ void Application::update(float deltaTime) {
     // Set where I am looking based on my mouse ray (so that other people can see)
     glm::vec3 lookAtSpot;

-    _isLookingAtOtherAvatar = isLookingAtOtherAvatar(mouseRayOrigin, mouseRayDirection, lookAtSpot);
+    // if we have faceshift, use that to compute the lookat direction
+    glm::vec3 lookAtRayOrigin = mouseRayOrigin, lookAtRayDirection = mouseRayDirection;
+    if (_faceshift.isActive()) {
+        lookAtRayOrigin = _myAvatar.getHead().calculateAverageEyePosition();
+        float averagePitch = (_faceshift.getEyeGazeLeftPitch() + _faceshift.getEyeGazeRightPitch()) / 2.0f;
+        float averageYaw = (_faceshift.getEyeGazeLeftYaw() + _faceshift.getEyeGazeRightYaw()) / 2.0f;
+        lookAtRayDirection = _myAvatar.getHead().getOrientation() *
+            glm::quat(glm::radians(glm::vec3(averagePitch, averageYaw, 0.0f))) * glm::vec3(0.0f, 0.0f, -1.0f);
+    }
+
+    _isLookingAtOtherAvatar = isLookingAtOtherAvatar(lookAtRayOrigin, lookAtRayDirection, lookAtSpot);
     if (_isLookingAtOtherAvatar) {
         // If the mouse is over another avatar's head...
         _myAvatar.getHead().setLookAtPosition(lookAtSpot);
-    } else if (_isHoverVoxel) {
+    } else if (_isHoverVoxel && !_faceshift.isActive()) {
         // Look at the hovered voxel
         lookAtSpot = getMouseVoxelWorldCoordinates(_hoverVoxel);
         _myAvatar.getHead().setLookAtPosition(lookAtSpot);
     } else {
         // Just look in direction of the mouse ray
         const float FAR_AWAY_STARE = TREE_SCALE;
-        lookAtSpot = mouseRayOrigin + mouseRayDirection * FAR_AWAY_STARE;
+        lookAtSpot = lookAtRayOrigin + lookAtRayDirection * FAR_AWAY_STARE;
         _myAvatar.getHead().setLookAtPosition(lookAtSpot);
     }
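The update() change above averages the two eyes' Faceshift gaze angles and rotates the result into world space. Isolated from the application, the math is just the following sketch (the function name and arguments are illustrative):

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

// Combine per-eye gaze pitch/yaw (in degrees) into one head-relative ray and
// rotate it by the head orientation; -z is the avatar's forward direction.
glm::vec3 gazeDirection(const glm::quat& headOrientation,
        float leftPitch, float leftYaw, float rightPitch, float rightYaw) {
    float averagePitch = (leftPitch + rightPitch) / 2.0f;
    float averageYaw = (leftYaw + rightYaw) / 2.0f;
    return headOrientation *
        glm::quat(glm::radians(glm::vec3(averagePitch, averageYaw, 0.0f))) *
        glm::vec3(0.0f, 0.0f, -1.0f);
}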
@ -2276,7 +2286,7 @@ void Application::displaySide(Camera& whichCamera) {
     }

     // Render my own Avatar
-    if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
+    if (_myCamera.getMode() == CAMERA_MODE_MIRROR && !_faceshift.isActive()) {
         _myAvatar.getHead().setLookAtPosition(_myCamera.getPosition());
     }
     _myAvatar.render(Menu::getInstance()->isOptionChecked(MenuOption::Mirror),
@ -3077,6 +3087,7 @@ void Application::resetSensors() {
         _serialHeadSensor.resetAverages();
     }
     _webcam.reset();
+    _faceshift.reset();
     QCursor::setPos(_headMouseX, _headMouseY);
     _myAvatar.reset();
     _myTransmitter.resetLevels();
interface/src/Application.h

@ -33,7 +33,6 @@
 #include "GLCanvas.h"
 #include "PacketHeaders.h"
 #include "PieMenu.h"
-#include "SerialInterface.h"
 #include "Stars.h"
 #include "Swatch.h"
 #include "ToolsPalette.h"
@ -43,10 +42,12 @@
 #include "VoxelPacketProcessor.h"
 #include "VoxelSystem.h"
 #include "VoxelImporter.h"
-#include "Webcam.h"
 #include "avatar/Avatar.h"
 #include "avatar/MyAvatar.h"
 #include "avatar/HandControl.h"
+#include "devices/Faceshift.h"
+#include "devices/SerialInterface.h"
+#include "devices/Webcam.h"
 #include "renderer/AmbientOcclusionEffect.h"
 #include "renderer/GeometryCache.h"
 #include "renderer/GlowEffect.h"
@ -117,6 +118,7 @@ public:
     Environment* getEnvironment() { return &_environment; }
     SerialInterface* getSerialHeadSensor() { return &_serialHeadSensor; }
     Webcam* getWebcam() { return &_webcam; }
+    Faceshift* getFaceshift() { return &_faceshift; }
     BandwidthMeter* getBandwidthMeter() { return &_bandwidthMeter; }
     QSettings* getSettings() { return _settings; }
     Swatch* getSwatch() { return &_swatch; }
@ -259,6 +261,8 @@ private:

     Webcam _webcam;                    // The webcam interface

+    Faceshift _faceshift;
+
     Camera _myCamera;                  // My view onto the world
     Camera _viewFrustumOffsetCamera;   // The camera we use to sometimes show the view frustum from an offset mode
interface/src/Menu.cpp

@ -204,6 +204,13 @@ Menu::Menu() :
                                            appInstance->getWebcam(),
                                            SLOT(setSkeletonTrackingOn(bool)));

+    addCheckableActionToQMenuAndActionHash(viewMenu,
+                                           MenuOption::LEDTracking,
+                                           0,
+                                           false,
+                                           appInstance->getWebcam()->getGrabber(),
+                                           SLOT(setLEDTrackingOn(bool)));
+
     addDisabledActionAndSeparator(viewMenu, "Stats");
     addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::Stats, Qt::Key_Slash);
     addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::Log, Qt::CTRL | Qt::Key_L);
@ -353,6 +360,13 @@ Menu::Menu() :
                                            appInstance->getWebcam()->getGrabber(),
                                            SLOT(setDepthOnly(bool)));

+    addCheckableActionToQMenuAndActionHash(developerMenu,
+                                           MenuOption::Faceshift,
+                                           0,
+                                           false,
+                                           appInstance->getFaceshift(),
+                                           SLOT(setEnabled(bool)));
+
     QMenu* audioDebugMenu = developerMenu->addMenu("Audio Debugging Tools");
     addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoAudio);
     addActionToQMenuAndActionHash(audioDebugMenu,
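Both new menu entries go through addCheckableActionToQMenuAndActionHash(), the application's helper for checkable items. The Qt wiring underneath is roughly the sketch below (illustrative only, not the helper's actual implementation): a checkable QAction whose toggled(bool) signal drives the given slot, so toggling "Faceshift" calls setEnabled(bool) on the Faceshift object and "LED Tracking" calls the grabber's setLEDTrackingOn(bool).

#include <QAction>
#include <QMenu>
#include <QObject>

// Minimal equivalent of a checkable menu entry bound to a setter slot.
QAction* addCheckableItem(QMenu* menu, const QString& name, bool checked,
        QObject* receiver, const char* slot) {
    QAction* action = menu->addAction(name);
    action->setCheckable(true);
    action->setChecked(checked);
    QObject::connect(action, SIGNAL(toggled(bool)), receiver, slot);
    return action;
}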
interface/src/Menu.h

@ -137,6 +137,7 @@ namespace MenuOption {
     const QString ExportVoxels = "Export Voxels";
     const QString HeadMouse = "Head Mouse";
     const QString FaceMode = "Cycle Face Mode";
+    const QString Faceshift = "Faceshift";
     const QString FalseColorByDistance = "FALSE Color By Distance";
     const QString FalseColorBySource = "FALSE Color By Source";
     const QString FalseColorEveryOtherVoxel = "FALSE Color Every Other Randomly";
@ -184,6 +185,7 @@ namespace MenuOption {
     const QString ShowTrueColors = "Show TRUE Colors";
     const QString SimulateLeapHand = "Simulate Leap Hand";
     const QString SkeletonTracking = "Skeleton Tracking";
+    const QString LEDTracking = "LED Tracking";
     const QString Stars = "Stars";
     const QString Stats = "Stats";
     const QString TestPing = "Test Ping";
interface/src/avatar/Avatar.cpp

@ -13,7 +13,6 @@

 #include <NodeList.h>
 #include <NodeTypes.h>
-#include <OculusManager.h>
 #include <PacketHeaders.h>
 #include <SharedUtil.h>
@ -23,6 +22,7 @@
 #include "Head.h"
 #include "Physics.h"
 #include "world.h"
+#include "devices/OculusManager.h"
 #include "ui/TextRenderer.h"

 using namespace std;
@ -102,11 +102,11 @@ Avatar::Avatar(Node* owningNode) :
     _isCollisionsOn(true),
     _leadingAvatar(NULL),
     _voxels(this),
+    _moving(false),
     _initialized(false),
     _handHoldingPosition(0.0f, 0.0f, 0.0f),
     _maxArmLength(0.0f),
-    _pelvisStandingHeight(0.0f),
-    _moving(false)
+    _pelvisStandingHeight(0.0f)
 {
     // give the pointer to our head to inherited _headData variable from AvatarData
     _headData = &_head;
interface/src/avatar/Avatar.h

@ -22,10 +22,9 @@
 #include "Head.h"
 #include "InterfaceConfig.h"
 #include "Skeleton.h"
-#include "SerialInterface.h"
-#include "Transmitter.h"
 #include "world.h"

+#include "devices/SerialInterface.h"
+#include "devices/Transmitter.h"

 static const float MAX_SCALE = 1000.f;
 static const float MIN_SCALE = .005f;
@ -17,7 +17,6 @@
#include "Avatar.h"
#include "Head.h"
#include "Face.h"
#include "Webcam.h"
#include "renderer/ProgramObject.h"

using namespace cv;
interface/src/avatar/Hand.h

@ -8,17 +8,22 @@
 #ifndef hifi_Hand_h
 #define hifi_Hand_h

+#include <vector>
+
+#include <QAction>
+
+#include <glm/glm.hpp>
+
+#include <SharedUtil.h>
+
 #include <AvatarData.h>
 #include <HandData.h>

 #include "Balls.h"
-#include "world.h"
 #include "InterfaceConfig.h"
-#include "SerialInterface.h"
 #include "ParticleSystem.h"
-#include <SharedUtil.h>
-#include <vector>
+#include "world.h"
+#include "devices/SerialInterface.h"

 enum RaveLightsSetting {
     RAVE_LIGHTS_AVATAR = 0,
@@ -160,65 +160,81 @@ void Head::simulate(float deltaTime, bool isMine, float gyroCameraSensitivity) {
_saccade += (_saccadeTarget - _saccade) * 0.50f;

// Update audio trailing average for rendering facial animations
const float AUDIO_AVERAGING_SECS = 0.05;
_averageLoudness = (1.f - deltaTime / AUDIO_AVERAGING_SECS) * _averageLoudness +
(deltaTime / AUDIO_AVERAGING_SECS) * _audioLoudness;

// Detect transition from talking to not; force blink after that and a delay
bool forceBlink = false;
const float TALKING_LOUDNESS = 100.0f;
const float BLINK_AFTER_TALKING = 0.25f;
if (_averageLoudness > TALKING_LOUDNESS) {
_timeWithoutTalking = 0.0f;

} else if (_timeWithoutTalking < BLINK_AFTER_TALKING && (_timeWithoutTalking += deltaTime) >= BLINK_AFTER_TALKING) {
forceBlink = true;
}

// Update audio attack data for facial animation (eyebrows and mouth)
_audioAttack = 0.9 * _audioAttack + 0.1 * fabs(_audioLoudness - _lastLoudness);
_lastLoudness = _audioLoudness;

const float BROW_LIFT_THRESHOLD = 100;
if (_audioAttack > BROW_LIFT_THRESHOLD)
_browAudioLift += sqrt(_audioAttack) * 0.00005;
Faceshift* faceshift = Application::getInstance()->getFaceshift();
if (isMine && faceshift->isActive()) {
_leftEyeBlink = faceshift->getLeftBlink();
_rightEyeBlink = faceshift->getRightBlink();

// set these values based on how they'll be used. if we use faceshift in the long term, we'll want a complete
// mapping between their blendshape coefficients and our avatar features
const float MOUTH_SIZE_SCALE = 2500.0f;
_averageLoudness = faceshift->getMouthSize() * faceshift->getMouthSize() * MOUTH_SIZE_SCALE;
const float BROW_HEIGHT_SCALE = 0.005f;
_browAudioLift = faceshift->getBrowHeight() * BROW_HEIGHT_SCALE;

float clamp = 0.01;
if (_browAudioLift > clamp) { _browAudioLift = clamp; }

_browAudioLift *= 0.7f;

// update eyelid blinking
const float BLINK_SPEED = 10.0f;
const float FULLY_OPEN = 0.0f;
const float FULLY_CLOSED = 1.0f;
if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
// no blinking when brows are raised; blink less with increasing loudness
const float BASE_BLINK_RATE = 15.0f / 60.0f;
const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(_averageLoudness) *
ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
_leftEyeBlinkVelocity = BLINK_SPEED;
_rightEyeBlinkVelocity = BLINK_SPEED;
}
} else {
_leftEyeBlink = glm::clamp(_leftEyeBlink + _leftEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
_rightEyeBlink = glm::clamp(_rightEyeBlink + _rightEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
const float AUDIO_AVERAGING_SECS = 0.05;
_averageLoudness = (1.f - deltaTime / AUDIO_AVERAGING_SECS) * _averageLoudness +
(deltaTime / AUDIO_AVERAGING_SECS) * _audioLoudness;

if (_leftEyeBlink == FULLY_CLOSED) {
_leftEyeBlinkVelocity = -BLINK_SPEED;
// Detect transition from talking to not; force blink after that and a delay
bool forceBlink = false;
const float TALKING_LOUDNESS = 100.0f;
const float BLINK_AFTER_TALKING = 0.25f;
if (_averageLoudness > TALKING_LOUDNESS) {
_timeWithoutTalking = 0.0f;

} else if (_leftEyeBlink == FULLY_OPEN) {
_leftEyeBlinkVelocity = 0.0f;
} else if (_timeWithoutTalking < BLINK_AFTER_TALKING && (_timeWithoutTalking += deltaTime) >= BLINK_AFTER_TALKING) {
forceBlink = true;
}
if (_rightEyeBlink == FULLY_CLOSED) {
_rightEyeBlinkVelocity = -BLINK_SPEED;

// Update audio attack data for facial animation (eyebrows and mouth)
_audioAttack = 0.9f * _audioAttack + 0.1f * fabs(_audioLoudness - _lastLoudness);
_lastLoudness = _audioLoudness;

} else if (_rightEyeBlink == FULLY_OPEN) {
_rightEyeBlinkVelocity = 0.0f;
const float BROW_LIFT_THRESHOLD = 100.0f;
if (_audioAttack > BROW_LIFT_THRESHOLD) {
_browAudioLift += sqrtf(_audioAttack) * 0.00005f;
}

const float CLAMP = 0.01f;
if (_browAudioLift > CLAMP) {
_browAudioLift = CLAMP;
}

_browAudioLift *= 0.7f;

const float BLINK_SPEED = 10.0f;
const float FULLY_OPEN = 0.0f;
const float FULLY_CLOSED = 1.0f;
if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
// no blinking when brows are raised; blink less with increasing loudness
const float BASE_BLINK_RATE = 15.0f / 60.0f;
const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(_averageLoudness) *
ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
_leftEyeBlinkVelocity = BLINK_SPEED;
_rightEyeBlinkVelocity = BLINK_SPEED;
}
} else {
_leftEyeBlink = glm::clamp(_leftEyeBlink + _leftEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
_rightEyeBlink = glm::clamp(_rightEyeBlink + _rightEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);

if (_leftEyeBlink == FULLY_CLOSED) {
_leftEyeBlinkVelocity = -BLINK_SPEED;

} else if (_leftEyeBlink == FULLY_OPEN) {
_leftEyeBlinkVelocity = 0.0f;
}
if (_rightEyeBlink == FULLY_CLOSED) {
_rightEyeBlinkVelocity = -BLINK_SPEED;

} else if (_rightEyeBlink == FULLY_OPEN) {
_rightEyeBlinkVelocity = 0.0f;
}
}
}


// based on the nature of the lookat position, determine if the eyes can look / are looking at it.
if (USING_PHYSICAL_MOHAWK) {
updateHairPhysics(deltaTime);
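
[Editor's note] As a rough sanity check on the blink cadence in the hunk above, assuming shouldDo(interval, deltaTime) is a probabilistic timer that fires on average once every interval seconds (its contract is not shown in this diff):

// loudness   0: glm::max(1.0f, sqrtf(0.0f) * 0.25f) / (15.0f / 60.0f) =  4 seconds between blinks (the 15-per-minute base rate)
// loudness 400: glm::max(1.0f, sqrtf(400.0f) * 0.25f) / (15.0f / 60.0f) = 20 seconds between blinks

Louder sustained audio therefore stretches the interval and suppresses blinking, while forceBlink bypasses the timer entirely once talking stops.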

@@ -17,11 +17,11 @@

#include <VoxelConstants.h>

#include "Face.h"
#include "BendyLine.h"
#include "Face.h"
#include "InterfaceConfig.h"
#include "SerialInterface.h"
#include "world.h"
#include "devices/SerialInterface.h"

enum eyeContactTargets {
LEFT_EYE,

@@ -6,20 +6,19 @@
// Copyright (c) 2012 High Fidelity, Inc. All rights reserved.
//

#include "MyAvatar.h"

#include <vector>

#include <glm/gtx/vector_angle.hpp>

#include <NodeList.h>
#include <NodeTypes.h>
#include <OculusManager.h>
#include <PacketHeaders.h>
#include <SharedUtil.h>

#include "Application.h"
#include "MyAvatar.h"
#include "Physics.h"
#include "devices/OculusManager.h"
#include "ui/TextRenderer.h"

using namespace std;

@@ -333,16 +332,25 @@ void MyAvatar::simulate(float deltaTime, Transmitter* transmitter, float gyroCam
// Update avatar head rotation with sensor data
void MyAvatar::updateFromGyrosAndOrWebcam(bool gyroLook,
float pitchFromTouch) {
Faceshift* faceshift = Application::getInstance()->getFaceshift();
SerialInterface* gyros = Application::getInstance()->getSerialHeadSensor();
Webcam* webcam = Application::getInstance()->getWebcam();
glm::vec3 estimatedPosition, estimatedRotation;
if (gyros->isActive()) {

if (faceshift->isActive()) {
estimatedPosition = faceshift->getHeadTranslation();
estimatedRotation = safeEulerAngles(faceshift->getHeadRotation());

} else if (gyros->isActive()) {
estimatedRotation = gyros->getEstimatedRotation();

} else if (webcam->isActive()) {
estimatedRotation = webcam->getEstimatedRotation();

} else if (_leadingAvatar) {
_head.getFace().clearFrame();
return;

} else {
_head.setMousePitch(pitchFromTouch);
_head.setPitch(pitchFromTouch);

140
interface/src/devices/Faceshift.cpp
Normal file

@@ -0,0 +1,140 @@
//
// Faceshift.cpp
// interface
//
// Created by Andrzej Kapolka on 9/3/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//

#include <QTimer>

#include "Faceshift.h"

using namespace fs;
using namespace std;

Faceshift::Faceshift() :
_enabled(false),
_eyeGazeLeftPitch(0.0f),
_eyeGazeLeftYaw(0.0f),
_eyeGazeRightPitch(0.0f),
_eyeGazeRightYaw(0.0f),
_leftBlink(0.0f),
_rightBlink(0.0f),
_leftBlinkIndex(-1),
_rightBlinkIndex(-1),
_browHeight(0.0f),
_browUpCenterIndex(-1),
_mouthSize(0.0f),
_jawOpenIndex(-1)
{
connect(&_socket, SIGNAL(connected()), SLOT(noteConnected()));
connect(&_socket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(noteError(QAbstractSocket::SocketError)));
connect(&_socket, SIGNAL(readyRead()), SLOT(readFromSocket()));
}

void Faceshift::reset() {
if (isActive()) {
string message;
fsBinaryStream::encode_message(message, fsMsgCalibrateNeutral());
send(message);
}
}

void Faceshift::setEnabled(bool enabled) {
if ((_enabled = enabled)) {
connectSocket();

} else {
_socket.disconnectFromHost();
}
}

void Faceshift::connectSocket() {
if (_enabled) {
qDebug("Faceshift: Connecting...\n");

const quint16 FACESHIFT_PORT = 33433;
_socket.connectToHost("localhost", FACESHIFT_PORT);
}
}

void Faceshift::noteConnected() {
qDebug("Faceshift: Connected.\n");

// request the list of blendshape names
string message;
fsBinaryStream::encode_message(message, fsMsgSendBlendshapeNames());
send(message);
}

void Faceshift::noteError(QAbstractSocket::SocketError error) {
qDebug() << "Faceshift: " << _socket.errorString() << "\n";

// reconnect after a delay
if (_enabled) {
QTimer::singleShot(1000, this, SLOT(connectSocket()));
}
}

void Faceshift::readFromSocket() {
QByteArray buffer = _socket.readAll();
_stream.received(buffer.size(), buffer.constData());
for (fsMsgPtr msg; (msg = _stream.get_message()); ) {
switch (msg->id()) {
case fsMsg::MSG_OUT_TRACKING_STATE: {
const fsTrackingData& data = static_cast<fsMsgTrackingState*>(msg.get())->tracking_data();
if (data.m_trackingSuccessful) {
_headRotation = glm::quat(data.m_headRotation.w, -data.m_headRotation.x,
data.m_headRotation.y, -data.m_headRotation.z);
const float TRANSLATION_SCALE = 0.02f;
_headTranslation = glm::vec3(data.m_headTranslation.x, data.m_headTranslation.y,
-data.m_headTranslation.z) * TRANSLATION_SCALE;
_eyeGazeLeftPitch = -data.m_eyeGazeLeftPitch;
_eyeGazeLeftYaw = data.m_eyeGazeLeftYaw;
_eyeGazeRightPitch = -data.m_eyeGazeRightPitch;
_eyeGazeRightYaw = data.m_eyeGazeRightYaw;

if (_leftBlinkIndex != -1) {
_leftBlink = data.m_coeffs[_leftBlinkIndex];
}
if (_rightBlinkIndex != -1) {
_rightBlink = data.m_coeffs[_rightBlinkIndex];
}
if (_browUpCenterIndex != -1) {
_browHeight = data.m_coeffs[_browUpCenterIndex];
}
if (_jawOpenIndex != -1) {
_mouthSize = data.m_coeffs[_jawOpenIndex];
}
}
break;
}
case fsMsg::MSG_OUT_BLENDSHAPE_NAMES: {
const vector<string>& names = static_cast<fsMsgBlendshapeNames*>(msg.get())->blendshape_names();
for (int i = 0; i < names.size(); i++) {
if (names[i] == "EyeBlink_L") {
_leftBlinkIndex = i;

} else if (names[i] == "EyeBlink_R") {
_rightBlinkIndex = i;

} else if (names[i] == "BrowsU_C") {
_browUpCenterIndex = i;

} else if (names[i] == "JawOpen") {
_jawOpenIndex = i;
}
}
break;
}
default:
break;
}
}
}

void Faceshift::send(const std::string& message) {
_socket.write(message.data(), message.size());
}

90
interface/src/devices/Faceshift.h
Normal file

@@ -0,0 +1,90 @@
//
// Faceshift.h
// interface
//
// Created by Andrzej Kapolka on 9/3/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//

#ifndef __interface__Faceshift__
#define __interface__Faceshift__

#include <QTcpSocket>

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

#include <fsbinarystream.h>

/// Handles interaction with the Faceshift software, which provides head position/orientation and facial features.
class Faceshift : public QObject {
Q_OBJECT

public:

Faceshift();

bool isActive() const { return _socket.state() == QAbstractSocket::ConnectedState; }

const glm::quat& getHeadRotation() const { return _headRotation; }
const glm::vec3& getHeadTranslation() const { return _headTranslation; }

float getEyeGazeLeftPitch() const { return _eyeGazeLeftPitch; }
float getEyeGazeLeftYaw() const { return _eyeGazeLeftYaw; }

float getEyeGazeRightPitch() const { return _eyeGazeRightPitch; }
float getEyeGazeRightYaw() const { return _eyeGazeRightYaw; }

float getLeftBlink() const { return _leftBlink; }
float getRightBlink() const { return _rightBlink; }

float getBrowHeight() const { return _browHeight; }

float getMouthSize() const { return _mouthSize; }

void reset();

public slots:

void setEnabled(bool enabled);

private slots:

void connectSocket();
void noteConnected();
void noteError(QAbstractSocket::SocketError error);
void readFromSocket();

private:

void send(const std::string& message);

QTcpSocket _socket;
fs::fsBinaryStream _stream;
bool _enabled;

glm::quat _headRotation;
glm::vec3 _headTranslation;

float _eyeGazeLeftPitch;
float _eyeGazeLeftYaw;

float _eyeGazeRightPitch;
float _eyeGazeRightYaw;

float _leftBlink;
float _rightBlink;

int _leftBlinkIndex;
int _rightBlinkIndex;

float _browHeight;

int _browUpCenterIndex;

float _mouthSize;

int _jawOpenIndex;
};

#endif /* defined(__interface__Faceshift__) */
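
[Editor's note] A minimal sketch of how a caller might poll this class each frame, using only the accessors declared above plus the getFaceshift() accessor that appears elsewhere in this diff; applyHeadTransform() is a hypothetical stand-in for whatever consumes the pose:

void pollFaceshift() {
    Faceshift* faceshift = Application::getInstance()->getFaceshift();
    if (!faceshift->isActive()) {
        return; // no data until the TCP connection to the Faceshift app is up
    }
    // blendshape-derived facial features (blink, brow, jaw coefficients)
    float leftBlink = faceshift->getLeftBlink();
    float rightBlink = faceshift->getRightBlink();
    float brow = faceshift->getBrowHeight();
    float mouth = faceshift->getMouthSize();
    // head pose in tracker space
    applyHeadTransform(faceshift->getHeadRotation(), faceshift->getHeadTranslation()); // hypothetical consumer
}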
@@ -30,6 +30,7 @@ using namespace xn;

// register types with Qt metatype system
int jointVectorMetaType = qRegisterMetaType<JointVector>("JointVector");
int keyPointVectorMetaType = qRegisterMetaType<KeyPointVector>("KeyPointVector");
int matMetaType = qRegisterMetaType<Mat>("cv::Mat");
int rotatedRectMetaType = qRegisterMetaType<RotatedRect>("cv::RotatedRect");
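
[Editor's note] These registrations are what let cv::Mat, JointVector, and the new KeyPointVector travel through Qt's queued (cross-thread) connections, as in the QMetaObject::invokeMethod call later in this diff. The general pattern, sketched with a hypothetical MyVector type:

#include <QMetaType>
#include <vector>

typedef std::vector<int> MyVector;   // hypothetical custom type

Q_DECLARE_METATYPE(MyVector)         // compile-time declaration, typically in a header

// runtime registration, once at startup, before any queued invocation:
static int myVectorMetaType = qRegisterMetaType<MyVector>("MyVector");

// afterwards the type can be passed between threads:
// QMetaObject::invokeMethod(receiver, "consume", Q_ARG(MyVector, values));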

@@ -63,6 +64,7 @@ const float UNINITIALIZED_FACE_DEPTH = 0.0f;
void Webcam::reset() {
_initialFaceRect = RotatedRect();
_initialFaceDepth = UNINITIALIZED_FACE_DEPTH;
_initialLEDPosition = glm::vec3();

if (_enabled) {
// send a message to the grabber

@@ -140,6 +142,14 @@ void Webcam::renderPreview(int screenWidth, int screenHeight) {
glVertex2f(left + facePoints[3].x * xScale, top + facePoints[3].y * yScale);
glEnd();

glColor3f(0.0f, 1.0f, 0.0f);
glLineWidth(3.0f);
for (KeyPointVector::iterator it = _keyPoints.begin(); it != _keyPoints.end(); it++) {
renderCircle(glm::vec3(left + it->pt.x * xScale, top + it->pt.y * yScale, 0.0f),
it->size * 0.5f, glm::vec3(0.0f, 0.0f, 1.0f), 8);
}
glLineWidth(1.0f);

const int MAX_FPS_CHARACTERS = 30;
char fps[MAX_FPS_CHARACTERS];
sprintf(fps, "FPS: %d", (int)(roundf(_frameCount * 1000000.0f / (usecTimestampNow() - _startTimestamp))));

@@ -155,10 +165,80 @@ Webcam::~Webcam() {
delete _grabber;
}

static glm::vec3 createVec3(const Point2f& pt) {
return glm::vec3(pt.x, -pt.y, 0.0f);
}

static glm::mat3 createMat3(const glm::vec3& p0, const glm::vec3& p1, const glm::vec3& p2) {
glm::vec3 u = glm::normalize(p1 - p0);
glm::vec3 p02 = p2 - p0;
glm::vec3 v = glm::normalize(p02 - u * glm::dot(p02, u));
return glm::mat3(u, v, glm::cross(u, v));
}

/// Computes the 3D transform of the LED assembly from the image space location of the key points representing the LEDs.
/// See T.D. Alter's "3D Pose from 3 Corresponding Points under Weak-Perspective Projection"
/// (http://dspace.mit.edu/bitstream/handle/1721.1/6611/AIM-1378.pdf) and the source code to Freetrack
/// (https://camil.dyndns.org/svn/freetrack/tags/V2.2/Freetrack/Pose.pas), which uses the same algorithm.
static float computeTransformFromKeyPoints(const KeyPointVector& keyPoints, glm::quat& rotation, glm::vec3& position) {
// make sure we have at least three points
if (keyPoints.size() < 3) {
return 0.0f;
}

// bubblesort the first three points from top (greatest) to bottom (least)
glm::vec3 i0 = createVec3(keyPoints[0].pt), i1 = createVec3(keyPoints[1].pt), i2 = createVec3(keyPoints[2].pt);
if (i1.y > i0.y) {
swap(i0, i1);
}
if (i2.y > i1.y) {
swap(i1, i2);
}
if (i1.y > i0.y) {
swap(i0, i1);
}

// model space LED locations and the distances between them
const glm::vec3 M0(2.0f, 0.0f, 0.0f), M1(0.0f, 0.0f, 0.0f), M2(0.0f, -4.0f, 0.0f);
const float R01 = glm::distance(M0, M1), R02 = glm::distance(M0, M2), R12 = glm::distance(M1, M2);

// compute the distances between the image points
float d01 = glm::distance(i0, i1), d02 = glm::distance(i0, i2), d12 = glm::distance(i1, i2);

// compute the terms of the quadratic
float a = (R01 + R02 + R12) * (-R01 + R02 + R12) * (R01 - R02 + R12) * (R01 + R02 - R12);
float b = d01 * d01 * (-R01 * R01 + R02 * R02 + R12 * R12) + d02 * d02 * (R01 * R01 - R02 * R02 + R12 * R12) +
d12 * d12 * (R01 * R01 + R02 * R02 - R12 * R12);
float c = (d01 + d02 + d12) * (-d01 + d02 + d12) * (d01 - d02 + d12) * (d01 + d02 - d12);

// compute the scale
float s = sqrtf((b + sqrtf(b * b - a * c)) / a);

float sigma = (d01 * d01 + d02 * d02 - d12 * d12 <= s * s * (R01 * R01 + R02 * R02 - R12 * R12)) ? 1.0f : -1.0f;

float h1 = sqrtf(s * s * R01 * R01 - d01 * d01);
float h2 = sigma * sqrtf(s * s * R02 * R02 - d02 * d02);

// now we can compute the 3D locations of the model points in camera-centered coordinates
glm::vec3 m0 = glm::vec3(i0.x, i0.y, 0.0f) / s;
glm::vec3 m1 = glm::vec3(i1.x, i1.y, h1) / s;
glm::vec3 m2 = glm::vec3(i2.x, i2.y, h2) / s;

// from those and the model space locations, we can compute the transform
glm::mat3 r1 = createMat3(M0, M1, M2);
glm::mat3 r2 = createMat3(m0, m1, m2);
glm::mat3 r = r2 * glm::transpose(r1);

position = m0 - r * M0;
rotation = glm::quat_cast(r);

return s;
}
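
[Editor's note] For reference, the scale recovered above is the positive root of a quadratic in s^2. Expanding s = sqrtf((b + sqrtf(b * b - a * c)) / a) shows the code solving

a * (s^2)^2 - 2 * b * (s^2) + c = 0,   s^2 = (b + sqrt(b^2 - a * c)) / a

with the larger root chosen, consistent with the weak-perspective derivation in the Alter paper cited in the comment; h1 and h2 are then the recovered depth offsets of the second and third LEDs relative to the first.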

const float METERS_PER_MM = 1.0f / 1000.0f;

void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midFaceDepth,
float aspectRatio, const RotatedRect& faceRect, bool sending, const JointVector& joints) {
void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midFaceDepth, float aspectRatio,
const RotatedRect& faceRect, bool sending, const JointVector& joints, const KeyPointVector& keyPoints) {
if (!_enabled) {
return; // was queued before we shut down; ignore
}

@@ -210,6 +290,7 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
_faceRect = faceRect;
_sending = sending;
_joints = _skeletonTrackingOn ? joints : JointVector();
_keyPoints = keyPoints;
_frameCount++;

const int MAX_FPS = 60;

@@ -248,6 +329,31 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
_estimatedRotation = safeEulerAngles(_estimatedJoints[AVATAR_JOINT_HEAD_BASE].rotation);
_estimatedPosition = _estimatedJoints[AVATAR_JOINT_HEAD_BASE].position;

} else if (!keyPoints.empty()) {
glm::quat rotation;
glm::vec3 position;
float scale = computeTransformFromKeyPoints(keyPoints, rotation, position);
if (scale > 0.0f) {
if (_initialLEDPosition == glm::vec3()) {
_initialLEDPosition = position;
_estimatedPosition = glm::vec3();
_initialLEDRotation = rotation;
_estimatedRotation = glm::vec3();
_initialLEDScale = scale;

} else {
const float Z_SCALE = 5.0f;
position.z += (_initialLEDScale / scale - 1.0f) * Z_SCALE;

const float POSITION_SMOOTHING = 0.5f;
_estimatedPosition = glm::mix(position - _initialLEDPosition, _estimatedPosition, POSITION_SMOOTHING);
const float ROTATION_SMOOTHING = 0.5f;
glm::vec3 eulers = safeEulerAngles(rotation * glm::inverse(_initialLEDRotation));
eulers.y = -eulers.y;
eulers.z = -eulers.z;
_estimatedRotation = glm::mix(eulers, _estimatedRotation, ROTATION_SMOOTHING);
}
}
} else {
// roll is just the angle of the face rect
const float ROTATION_SMOOTHING = 0.95f;
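
[Editor's note] A note on the smoothing above: glm::mix(x, y, a) returns x * (1 - a) + y * a, so each of those updates is an exponential moving average of the fresh sample and the previous estimate. With both smoothing constants at 0.5:

// _estimatedPosition = glm::mix(sample, _estimatedPosition, 0.5f)
//                    = 0.5f * sample + 0.5f * previousEstimate

i.e. each frame moves the estimate halfway toward the new measurement, trading a little latency for jitter rejection.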
@@ -285,8 +391,21 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
QTimer::singleShot(qMax((int)remaining / 1000, 0), _grabber, SLOT(grabFrame()));
}

FrameGrabber::FrameGrabber() : _initialized(false), _videoSendMode(FULL_FRAME_VIDEO), _depthOnly(false), _capture(0),
_searchWindow(0, 0, 0, 0), _smoothedMidFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(), _frameCount(0) {
static SimpleBlobDetector::Params createBlobDetectorParams() {
SimpleBlobDetector::Params params;
params.blobColor = 255;
params.filterByArea = true;
params.minArea = 4;
params.maxArea = 5000;
params.filterByCircularity = false;
params.filterByInertia = false;
params.filterByConvexity = false;
return params;
}

FrameGrabber::FrameGrabber() : _initialized(false), _videoSendMode(FULL_FRAME_VIDEO), _depthOnly(false), _ledTrackingOn(false),
_capture(0), _searchWindow(0, 0, 0, 0), _smoothedMidFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(),
_frameCount(0), _blobDetector(createBlobDetectorParams()) {
}

FrameGrabber::~FrameGrabber() {

@@ -391,6 +510,11 @@ void FrameGrabber::setDepthOnly(bool depthOnly) {
destroyCodecs();
}

void FrameGrabber::setLEDTrackingOn(bool ledTrackingOn) {
_ledTrackingOn = ledTrackingOn;
configureCapture();
}

void FrameGrabber::reset() {
_searchWindow = cv::Rect(0, 0, 0, 0);

@@ -494,7 +618,7 @@ void FrameGrabber::grabFrame() {
float depthBitrateMultiplier = 1.0f;
Mat faceTransform;
float aspectRatio;
if (_videoSendMode == FULL_FRAME_VIDEO) {
if (_ledTrackingOn || _videoSendMode == FULL_FRAME_VIDEO) {
// no need to find the face if we're sending full frame video
_smoothedFaceRect = RotatedRect(Point2f(color.cols / 2.0f, color.rows / 2.0f), Size2f(color.cols, color.rows), 0.0f);
encodedWidth = color.cols;

@@ -568,6 +692,21 @@ void FrameGrabber::grabFrame() {
aspectRatio = _smoothedFaceRect.size.width / _smoothedFaceRect.size.height;
}

KeyPointVector keyPoints;
if (_ledTrackingOn) {
// convert to grayscale
cvtColor(color, _grayFrame, format == GL_RGB ? CV_RGB2GRAY : CV_BGR2GRAY);

// apply threshold
threshold(_grayFrame, _grayFrame, 28.0, 255.0, THRESH_BINARY);

// convert back so that we can see
cvtColor(_grayFrame, color, format == GL_RGB ? CV_GRAY2RGB : CV_GRAY2BGR);

// find the locations of the LEDs, which should show up as blobs
_blobDetector.detect(_grayFrame, keyPoints);
}

const ushort ELEVEN_BIT_MINIMUM = 0;
const uchar EIGHT_BIT_MIDPOINT = 128;
double depthOffset;
@ -616,7 +755,7 @@ void FrameGrabber::grabFrame() {
|
|||
_frameCount++;
|
||||
|
||||
QByteArray payload;
|
||||
if (_videoSendMode != NO_VIDEO) {
|
||||
if (!_ledTrackingOn && _videoSendMode != NO_VIDEO) {
|
||||
// start the payload off with the aspect ratio (zero for full frame)
|
||||
payload.append((const char*)&aspectRatio, sizeof(float));
|
||||
|
||||
|
@ -790,7 +929,7 @@ void FrameGrabber::grabFrame() {
|
|||
QMetaObject::invokeMethod(Application::getInstance()->getWebcam(), "setFrame",
|
||||
Q_ARG(cv::Mat, color), Q_ARG(int, format), Q_ARG(cv::Mat, _grayDepthFrame), Q_ARG(float, _smoothedMidFaceDepth),
|
||||
Q_ARG(float, aspectRatio), Q_ARG(cv::RotatedRect, _smoothedFaceRect), Q_ARG(bool, !payload.isEmpty()),
|
||||
Q_ARG(JointVector, joints));
|
||||
Q_ARG(JointVector, joints), Q_ARG(KeyPointVector, keyPoints));
|
||||
}
|
||||
|
||||
bool FrameGrabber::init() {
|
||||
|
@ -840,18 +979,28 @@ bool FrameGrabber::init() {
|
|||
cvSetCaptureProperty(_capture, CV_CAP_PROP_FRAME_WIDTH, IDEAL_FRAME_WIDTH);
|
||||
cvSetCaptureProperty(_capture, CV_CAP_PROP_FRAME_HEIGHT, IDEAL_FRAME_HEIGHT);
|
||||
|
||||
configureCapture();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void FrameGrabber::configureCapture() {
|
||||
#ifdef HAVE_OPENNI
|
||||
if (_depthGenerator.IsValid()) {
|
||||
return; // don't bother handling LED tracking with depth camera
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef __APPLE__
|
||||
configureCamera(0x5ac, 0x8510, false, 0.975, 0.5, 1.0, 0.5, true, 0.5);
|
||||
configureCamera(0x5ac, 0x8510, false, _ledTrackingOn ? 1.0 : 0.975, 0.5, 1.0, 0.5, true, 0.5);
|
||||
#else
|
||||
cvSetCaptureProperty(_capture, CV_CAP_PROP_EXPOSURE, 0.5);
|
||||
cvSetCaptureProperty(_capture, CV_CAP_PROP_CONTRAST, 0.5);
|
||||
cvSetCaptureProperty(_capture, CV_CAP_PROP_CONTRAST, _ledTrackingOn ? 1.0 : 0.5);
|
||||
cvSetCaptureProperty(_capture, CV_CAP_PROP_SATURATION, 0.5);
|
||||
cvSetCaptureProperty(_capture, CV_CAP_PROP_BRIGHTNESS, 0.5);
|
||||
cvSetCaptureProperty(_capture, CV_CAP_PROP_BRIGHTNESS, _ledTrackingOn ? 0.0 : 0.5);
|
||||
cvSetCaptureProperty(_capture, CV_CAP_PROP_HUE, 0.5);
|
||||
cvSetCaptureProperty(_capture, CV_CAP_PROP_GAIN, 0.5);
|
||||
#endif
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void FrameGrabber::updateHSVFrame(const Mat& frame, int format) {
|
|
@ -10,7 +10,6 @@
|
|||
#define __interface__Webcam__
|
||||
|
||||
#include <QMetaType>
|
||||
#include <QObject>
|
||||
#include <QThread>
|
||||
#include <QVector>
|
||||
|
||||
|
@ -35,7 +34,9 @@ class FrameGrabber;
|
|||
class Joint;
|
||||
|
||||
typedef QVector<Joint> JointVector;
|
||||
typedef std::vector<cv::KeyPoint> KeyPointVector;
|
||||
|
||||
/// Handles interaction with the webcam (including depth cameras such as the Kinect).
|
||||
class Webcam : public QObject {
|
||||
Q_OBJECT
|
||||
|
||||
|
@ -68,8 +69,8 @@ public:
|
|||
public slots:
|
||||
|
||||
void setEnabled(bool enabled);
|
||||
void setFrame(const cv::Mat& color, int format, const cv::Mat& depth, float midFaceDepth,
|
||||
float aspectRatio, const cv::RotatedRect& faceRect, bool sending, const JointVector& joints);
|
||||
void setFrame(const cv::Mat& color, int format, const cv::Mat& depth, float midFaceDepth, float aspectRatio,
|
||||
const cv::RotatedRect& faceRect, bool sending, const JointVector& joints, const KeyPointVector& keyPoints);
|
||||
void setSkeletonTrackingOn(bool toggle) { _skeletonTrackingOn = toggle; };
|
||||
|
||||
private:
|
||||
|
@ -88,6 +89,11 @@ private:
|
|||
cv::RotatedRect _initialFaceRect;
|
||||
float _initialFaceDepth;
|
||||
JointVector _joints;
|
||||
KeyPointVector _keyPoints;
|
||||
|
||||
glm::quat _initialLEDRotation;
|
||||
glm::vec3 _initialLEDPosition;
|
||||
float _initialLEDScale;
|
||||
|
||||
uint64_t _startTimestamp;
|
||||
int _frameCount;
|
||||
|
@ -101,6 +107,7 @@ private:
|
|||
bool _skeletonTrackingOn;
|
||||
};
|
||||
|
||||
/// Acquires and processes video frames in a dedicated thread.
|
||||
class FrameGrabber : public QObject {
|
||||
Q_OBJECT
|
||||
|
||||
|
@ -113,6 +120,7 @@ public slots:
|
|||
|
||||
void cycleVideoSendMode();
|
||||
void setDepthOnly(bool depthOnly);
|
||||
void setLEDTrackingOn(bool ledTrackingOn);
|
||||
void reset();
|
||||
void shutdown();
|
||||
void grabFrame();
|
||||
|
@ -124,10 +132,12 @@ private:
|
|||
bool init();
|
||||
void updateHSVFrame(const cv::Mat& frame, int format);
|
||||
void destroyCodecs();
|
||||
void configureCapture();
|
||||
|
||||
bool _initialized;
|
||||
VideoSendMode _videoSendMode;
|
||||
bool _depthOnly;
|
||||
bool _ledTrackingOn;
|
||||
CvCapture* _capture;
|
||||
cv::CascadeClassifier _faceCascade;
|
||||
cv::Mat _hsvFrame;
|
||||
|
@ -147,6 +157,9 @@ private:
|
|||
QByteArray _encodedFace;
|
||||
cv::RotatedRect _smoothedFaceRect;
|
||||
|
||||
cv::SimpleBlobDetector _blobDetector;
|
||||
cv::Mat _grayFrame;
|
||||
|
||||
#ifdef HAVE_OPENNI
|
||||
xn::Context _xnContext;
|
||||
xn::DepthGenerator _depthGenerator;
|
||||
|
@ -158,6 +171,7 @@ private:
|
|||
#endif
|
||||
};
|
||||
|
||||
/// Contains the 3D transform and 2D projected position of a tracked joint.
|
||||
class Joint {
|
||||
public:
|
||||
|
||||
|
@ -171,6 +185,7 @@ public:
|
|||
};
|
||||
|
||||
Q_DECLARE_METATYPE(JointVector)
|
||||
Q_DECLARE_METATYPE(KeyPointVector)
|
||||
Q_DECLARE_METATYPE(cv::Mat)
|
||||
Q_DECLARE_METATYPE(cv::RotatedRect)
|
||||
|