Merge branch 'master' of github.com:worklist/hifi into assignment

Stephen Birarda 2013-09-06 11:04:18 -07:00
commit d412e0611d
69 changed files with 1922 additions and 493 deletions

View file

@@ -0,0 +1,44 @@
# Try to find the Faceshift networking library
#
# You must provide a FACESHIFT_ROOT_DIR which contains lib and include directories
#
# Once done this will define
#
# FACESHIFT_FOUND - system found Faceshift
# FACESHIFT_INCLUDE_DIRS - the Faceshift include directory
# FACESHIFT_LIBRARIES - Link this to use Faceshift
#
# Created on 8/30/2013 by Andrzej Kapolka
# Copyright (c) 2013 High Fidelity
#
if (FACESHIFT_LIBRARIES AND FACESHIFT_INCLUDE_DIRS)
# in cache already
set(FACESHIFT_FOUND TRUE)
else (FACESHIFT_LIBRARIES AND FACESHIFT_INCLUDE_DIRS)
find_path(FACESHIFT_INCLUDE_DIRS fsbinarystream.h ${FACESHIFT_ROOT_DIR}/include)
if (APPLE)
find_library(FACESHIFT_LIBRARIES libfaceshift.a ${FACESHIFT_ROOT_DIR}/lib/MacOS/)
elseif (UNIX)
find_library(FACESHIFT_LIBRARIES libfaceshift.a ${FACESHIFT_ROOT_DIR}/lib/UNIX/)
endif ()
if (FACESHIFT_INCLUDE_DIRS AND FACESHIFT_LIBRARIES)
set(FACESHIFT_FOUND TRUE)
endif (FACESHIFT_INCLUDE_DIRS AND FACESHIFT_LIBRARIES)
if (FACESHIFT_FOUND)
if (NOT FACESHIFT_FIND_QUIETLY)
message(STATUS "Found Faceshift: ${FACESHIFT_LIBRARIES}")
endif (NOT FACESHIFT_FIND_QUIETLY)
else (FACESHIFT_FOUND)
if (FACESHIFT_FIND_REQUIRED)
message(FATAL_ERROR "Could not find Faceshift")
endif (FACESHIFT_FIND_REQUIRED)
endif (FACESHIFT_FOUND)
# show the FACESHIFT_INCLUDE_DIRS and FACESHIFT_LIBRARIES variables only in the advanced view
mark_as_advanced(FACESHIFT_INCLUDE_DIRS FACESHIFT_LIBRARIES)
endif (FACESHIFT_LIBRARIES AND FACESHIFT_INCLUDE_DIRS)

View file

@@ -8,6 +8,7 @@ project(${TARGET_NAME})
# setup for find modules
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../cmake/modules/")
set(FACESHIFT_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/external/faceshift)
set(LIBOVR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/external/LibOVR)
set(LIBVPX_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/external/LibVPX)
set(LEAP_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/external/Leap)
@@ -37,7 +38,7 @@ configure_file(InterfaceConfig.h.in ${PROJECT_BINARY_DIR}/includes/InterfaceConf
# grab the implementation and header files from src dirs
file(GLOB INTERFACE_SRCS src/*.cpp src/*.h)
foreach(SUBDIR avatar ui renderer)
foreach(SUBDIR avatar devices renderer ui)
file(GLOB SUBDIR_SRCS src/${SUBDIR}/*.cpp src/${SUBDIR}/*.h)
set(INTERFACE_SRCS ${INTERFACE_SRCS} ${SUBDIR_SRCS})
endforeach(SUBDIR)
@@ -47,11 +48,13 @@ add_subdirectory(src/starfield)
find_package(Qt5Core REQUIRED)
find_package(Qt5Gui REQUIRED)
find_package(Qt5Multimedia REQUIRED)
find_package(Qt5Network REQUIRED)
find_package(Qt5OpenGL REQUIRED)
find_package(Qt5Svg REQUIRED)
if (APPLE)
set(MACOSX_BUNDLE_BUNDLE_NAME Interface)
# set how the icon shows up in the Info.plist file
SET(MACOSX_BUNDLE_ICON_FILE interface.icns)
@@ -89,6 +92,7 @@ link_hifi_library(avatars ${TARGET_NAME} ${ROOT_DIR})
link_hifi_library(audio ${TARGET_NAME} ${ROOT_DIR})
# find required libraries
find_package(Faceshift)
find_package(GLM REQUIRED)
find_package(LibOVR)
find_package(LibVPX)
@@ -107,7 +111,7 @@ if (OPENNI_FOUND AND NOT DISABLE_OPENNI)
target_link_libraries(${TARGET_NAME} ${OPENNI_LIBRARIES})
endif (OPENNI_FOUND AND NOT DISABLE_OPENNI)
qt5_use_modules(${TARGET_NAME} Core Gui Network OpenGL Svg)
qt5_use_modules(${TARGET_NAME} Core Gui Multimedia Network OpenGL Svg)
# include headers for interface and InterfaceConfig.
include_directories(
@@ -119,6 +123,7 @@ include_directories(
# use system flag so warnings are suppressed
include_directories(
SYSTEM
${FACESHIFT_INCLUDE_DIRS}
${GLM_INCLUDE_DIRS}
${LIBOVR_INCLUDE_DIRS}
${LIBVPX_INCLUDE_DIRS}
@@ -130,6 +135,7 @@ include_directories(
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -isystem ${OPENCV_INCLUDE_DIRS}")
target_link_libraries(
${TARGET_NAME}
${FACESHIFT_LIBRARIES}
${LIBVPX_LIBRARIES}
${MOTIONDRIVER_LIBRARIES}
${OPENCV_LIBRARIES}

View file

@@ -0,0 +1,11 @@
cmake_minimum_required(VERSION 2.8)
set(TARGET_NAME faceshift)
project(${TARGET_NAME})
# grab the implementation and header files
file(GLOB FACESHIFT_SRCS include/*.h src/*.cpp)
include_directories(include)
add_library(${TARGET_NAME} ${FACESHIFT_SRCS})

View file

@@ -0,0 +1,410 @@
#pragma once
#ifndef FSBINARYSTREAM_H
#define FSBINARYSTREAM_H
// ==========================================================================
// Copyright (C) 2012 faceshift AG, and/or its licensors. All rights reserved.
//
// the software is free to use and provided "as is", without warranty of any kind.
// faceshift AG does not make and hereby disclaims any express or implied
// warranties including, but not limited to, the warranties of
// non-infringement, merchantability or fitness for a particular purpose,
// or arising from a course of dealing, usage, or trade practice. in no
// event will faceshift AG and/or its licensors be liable for any lost
// revenues, data, or profits, or special, direct, indirect, or
// consequential damages, even if faceshift AG and/or its licensors has
// been advised of the possibility or probability of such damages.
// ==========================================================================
/**
* Define the HAVE_EIGEN preprocessor symbol if you are using the Eigen library; it allows you to easily convert our tracked data to and from Eigen.
* See fsVector3f and fsQuaternionf for more details.
**/
#ifdef HAVE_EIGEN
#include <Eigen/Core>
#include <Eigen/Geometry>
#endif
#ifdef _MSC_VER
#include <memory>
#else
#include <tr1/memory>
#endif
#include <string>
#include <vector>
#include <stdint.h>
/*******************************************************************************************
* This first part of the file contains a definition of the data structures holding the
* tracking results
******************************************************************************************/
namespace fs {
/**
* A floating point three-vector.
*
* To keep these networking classes as simple as possible, we do not implement the
* vector semantics here; use Eigen for that purpose. The class just holds three named floats,
* and you have to interpret them yourself.
**/
struct fsVector3f {
float x,y,z;
fsVector3f() {}
#ifdef HAVE_EIGEN
explicit fsVector3f(const Eigen::Matrix<float,3,1> &v) : x(v[0]), y(v[1]), z(v[2]) {}
Eigen::Map< Eigen::Matrix<float,3,1> > eigen() const { return Eigen::Map<Eigen::Matrix<float,3,1> >((float*)this); }
#endif
};
/**
* An integer three-vector.
**/
struct fsVector3i {
int32_t x,y,z;
fsVector3i() {}
#ifdef HAVE_EIGEN
explicit fsVector3i(const Eigen::Matrix<int32_t,3,1> &v) : x(v[0]), y(v[1]), z(v[2]) {}
Eigen::Map<Eigen::Matrix<int32_t,3,1> > eigen() const { return Eigen::Map<Eigen::Matrix<int32_t,3,1> >((int32_t*)this); }
#endif
};
/**
* An integer four-vector.
**/
struct fsVector4i {
int32_t x,y,z,w;
fsVector4i() {}
#ifdef HAVE_EIGEN
explicit fsVector4i(const Eigen::Matrix<int32_t,4,1> &v) : x(v[0]), y(v[1]), z(v[2]), w(v[3]) {}
Eigen::Map<Eigen::Matrix<int32_t,4,1,Eigen::DontAlign> > eigen() const { return Eigen::Map<Eigen::Matrix<int32_t,4,1,Eigen::DontAlign> >((int32_t*)this); }
#endif
};
/**
* Structure holding the data of a quaternion.
*
* To keep these networking classes as simple as possible, we do not implement the
* quaternion semantics here. The class just holds four named floats, and you have to interpret them yourself.
*
* If you have Eigen you can just cast this class to an Eigen::Quaternionf and use it.
*
* The quaternion is defined as w+xi+yj+zk
**/
struct fsQuaternionf {
float x,y,z,w;
fsQuaternionf() {}
#ifdef HAVE_EIGEN
explicit fsQuaternionf(const Eigen::Quaternionf &q) : x(q.x()), y(q.y()), z(q.z()), w(q.w()) {}
Eigen::Quaternionf eigen() const { return Eigen::Quaternionf(w,x,y,z); }
#endif
};
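/**
* A minimal interop sketch (illustrative, not part of the original API): with HAVE_EIGEN
* defined, a tracked pose converts to Eigen types directly, e.g.
*
*   Eigen::Quaternionf rotation = trackingData.m_headRotation.eigen();    // w+xi+yj+zk
*   Eigen::Matrix3f frame = rotation.toRotationMatrix();
*   Eigen::Vector3f translation = trackingData.m_headTranslation.eigen();
*
* where trackingData is an fsTrackingData as defined below.
**/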
/**
* A structure containing the data tracked for a single frame.
**/
class fsTrackingData {
public:
//! time stamp in ms
double m_timestamp;
//! flag whether tracking was successful [0,1]
bool m_trackingSuccessful;
//! head pose
fsQuaternionf m_headRotation;
fsVector3f m_headTranslation;
//! eye gaze in degrees
float m_eyeGazeLeftPitch;
float m_eyeGazeLeftYaw;
float m_eyeGazeRightPitch;
float m_eyeGazeRightYaw;
//! blendshape coefficients
std::vector<float> m_coeffs;
//! marker positions - format specified in faceshift
std::vector< fsVector3f > m_markers;
};
/**
* A structure containing vertex information
*/
class fsVertexData {
public:
//! vertex data
std::vector<fsVector3f> m_vertices;
#ifdef HAVE_EIGEN
Eigen::Map<Eigen::Matrix<float,3,Eigen::Dynamic> > eigen() { return Eigen::Map<Eigen::Matrix<float,3,Eigen::Dynamic> >((float*)m_vertices.data(),3,m_vertices.size()); }
#endif
};
/**
* A structure containing mesh information
*/
class fsMeshData {
public:
//! topology (quads)
std::vector<fsVector4i> m_quads;
//! topology (triangles)
std::vector<fsVector3i> m_tris;
//! vertex data
fsVertexData m_vertex_data;
#ifdef HAVE_EIGEN
Eigen::Map<Eigen::Matrix<int32_t,4,Eigen::Dynamic,Eigen::DontAlign> > quads_eigen() { return Eigen::Map<Eigen::Matrix<int32_t,4,Eigen::Dynamic,Eigen::DontAlign> >((int32_t*)m_quads.data(),4,m_quads.size()); }
Eigen::Map<Eigen::Matrix<int32_t,3,Eigen::Dynamic> > tris_eigen() { return Eigen::Map<Eigen::Matrix<int32_t,3,Eigen::Dynamic> >((int32_t*)m_tris.data(),3,m_tris.size()); }
Eigen::Map<Eigen::Matrix<float,3,Eigen::Dynamic> > vertices_eigen() { return m_vertex_data.eigen(); }
#endif
};
/*******************************************************************************************
* Now follows a definition of the data structures encapsulating the network messages
******************************************************************************************/
/** Forward declarations of the message types available in faceshift **/
// Inbound
class fsMsgStartCapturing;
class fsMsgStopCapturing;
class fsMsgCalibrateNeutral;
class fsMsgSendMarkerNames;
class fsMsgSendBlendshapeNames;
class fsMsgSendRig;
// Outbound
class fsMsgTrackingState;
class fsMsgMarkerNames;
class fsMsgBlendshapeNames;
class fsMsgRig;
/**
* Base class of all messages that faceshift sends.
* A message can be queried for its type using the id() function (for use in a switch statement) or by using a dynamic_cast.
**/
class fsMsg {
public:
virtual ~fsMsg() {}
enum MessageType {
// Messages to control faceshift via the network
// These are sent from the client to faceshift
MSG_IN_START_TRACKING = 44344,
MSG_IN_STOP_TRACKING = 44444,
MSG_IN_CALIBRATE_NEUTRAL = 44544,
MSG_IN_SEND_MARKER_NAMES = 44644,
MSG_IN_SEND_BLENDSHAPE_NAMES = 44744,
MSG_IN_SEND_RIG = 44844,
MSG_IN_HEADPOSE_RELATIVE = 44944,
MSG_IN_HEADPOSE_ABSOLUTE = 44945,
// Messages containing tracking information
// These are sent from faceshift to the client application
MSG_OUT_TRACKING_STATE = 33433,
MSG_OUT_MARKER_NAMES = 33533,
MSG_OUT_BLENDSHAPE_NAMES = 33633,
MSG_OUT_RIG = 33733
};
virtual MessageType id() const = 0;
};
typedef std::tr1::shared_ptr<fsMsg> fsMsgPtr;
/*************
* Inbound
***********/
class fsMsgStartCapturing : public fsMsg {
public:
virtual ~fsMsgStartCapturing() {}
virtual MessageType id() const { return MSG_IN_START_TRACKING; }
};
class fsMsgStopCapturing : public fsMsg {
public:
virtual ~fsMsgStopCapturing() {}
virtual MessageType id() const { return MSG_IN_STOP_TRACKING; }
};
class fsMsgCalibrateNeutral : public fsMsg {
public:
virtual ~fsMsgCalibrateNeutral() {}
virtual MessageType id() const { return MSG_IN_CALIBRATE_NEUTRAL; }
};
class fsMsgSendMarkerNames : public fsMsg {
public:
virtual ~fsMsgSendMarkerNames() {}
virtual MessageType id() const { return MSG_IN_SEND_MARKER_NAMES; }
};
class fsMsgSendBlendshapeNames : public fsMsg {
public:
virtual ~fsMsgSendBlendshapeNames() {}
virtual MessageType id() const { return MSG_IN_SEND_BLENDSHAPE_NAMES; }
};
class fsMsgSendRig : public fsMsg {
public:
virtual ~fsMsgSendRig() {}
virtual MessageType id() const { return MSG_IN_SEND_RIG; }
};
class fsMsgHeadPoseRelative : public fsMsg {
public:
virtual ~fsMsgHeadPoseRelative() {}
virtual MessageType id() const { return MSG_IN_HEADPOSE_RELATIVE; }
};
class fsMsgHeadPoseAbsolute : public fsMsg {
public:
virtual ~fsMsgHeadPoseAbsolute() {}
virtual MessageType id() const { return MSG_IN_HEADPOSE_ABSOLUTE; }
};
/*************
* Outbound
***********/
class fsMsgTrackingState : public fsMsg {
public:
virtual ~fsMsgTrackingState() {}
/* */ fsTrackingData & tracking_data() /* */ { return m_tracking_data; }
const fsTrackingData & tracking_data() const { return m_tracking_data; }
virtual MessageType id() const { return MSG_OUT_TRACKING_STATE; }
private:
fsTrackingData m_tracking_data;
};
class fsMsgMarkerNames : public fsMsg {
public:
virtual ~fsMsgMarkerNames() {}
/* */ std::vector<std::string> & marker_names() /* */ { return m_marker_names; }
const std::vector<std::string> & marker_names() const { return m_marker_names; }
virtual MessageType id() const { return MSG_OUT_MARKER_NAMES; }
private:
std::vector<std::string> m_marker_names;
};
class fsMsgBlendshapeNames : public fsMsg {
public:
virtual ~fsMsgBlendshapeNames() {}
/* */ std::vector<std::string> & blendshape_names() /* */ { return m_blendshape_names; }
const std::vector<std::string> & blendshape_names() const { return m_blendshape_names; }
virtual MessageType id() const { return MSG_OUT_BLENDSHAPE_NAMES; }
private:
std::vector<std::string> m_blendshape_names;
};
class fsMsgRig : public fsMsg {
public:
virtual ~fsMsgRig() {}
virtual MessageType id() const { return MSG_OUT_RIG; }
/* */ fsMeshData & mesh() /* */ { return m_mesh; }
const fsMeshData & mesh() const { return m_mesh; }
/* */ std::vector<std::string> & blendshape_names() /* */ { return m_blendshape_names; }
const std::vector<std::string> & blendshape_names() const { return m_blendshape_names; }
/* */ std::vector<fsVertexData> & blendshapes() /* */ { return m_blendshapes; }
const std::vector<fsVertexData> & blendshapes() const { return m_blendshapes; }
private:
//! neutral mesh
fsMeshData m_mesh;
//! blendshape names
std::vector<std::string> m_blendshape_names;
//! blendshapes
std::vector<fsVertexData> m_blendshapes;
};
class fsMsgSignal : public fsMsg {
MessageType m_id;
public:
explicit fsMsgSignal(MessageType id) : m_id(id) {}
virtual ~fsMsgSignal() {}
virtual MessageType id() const { return m_id; }
};
/**
* Class to parse a faceshift data stream, and to create messages to write into such a stream
*
* This needs to be connected with your networking methods by calling
*
* void received(long int, const char *);
*
* whenever new data is available. After adding received data to the parser, you can extract faceshift messages using
*
* std::tr1::shared_ptr<fsMsg> get_message();
*
* which returns the next message once a full block of data has been received. This should be iterated until no more messages are in the buffer.
*
* You can also use this to encode messages to send back to faceshift. This works by calling the
*
* void encode_message(std::string &msg_out, const fsMsg &msg);
*
* methods (actually the overloads existing for each of our message types). This will encode the message into a
* binary string in msg_out. You then only need to push the resulting string over the network to faceshift.
*
* This class does not handle differences in endianness or other surprises that can occur when pushing data over the network.
* Should you have to adapt this to such a system, it should be possible to do so by changing only the write_... and read_...
* functions in the accompanying cpp file, but so far there has been no need for it.
**/
class fsBinaryStream {
public:
fsBinaryStream();
/**
* Use this to push data into the parser. Typically called inside your network receive routine.
**/
void received(long int, const char *);
/**
* After pushing data, you can try to extract messages from the stream. Process messages until a null pointer is returned.
**/
fsMsgPtr get_message();
/**
* When an invalid message is received, the valid flag is set to false. No attempt is made to recover from the problem; you will have to disconnect.
**/
bool valid() const { return m_valid; }
void clear() { m_start = 0; m_end = 0; m_valid=true; }
// Inbound
static void encode_message(std::string &msg_out, const fsMsgTrackingState &msg);
static void encode_message(std::string &msg_out, const fsMsgStartCapturing &msg);
static void encode_message(std::string &msg_out, const fsMsgStopCapturing &msg);
static void encode_message(std::string &msg_out, const fsMsgCalibrateNeutral &msg);
static void encode_message(std::string &msg_out, const fsMsgSendMarkerNames &msg);
static void encode_message(std::string &msg_out, const fsMsgSendBlendshapeNames &msg);
static void encode_message(std::string &msg_out, const fsMsgSendRig &msg);
static void encode_message(std::string &msg_out, const fsMsgHeadPoseRelative &msg);
static void encode_message(std::string &msg_out, const fsMsgHeadPoseAbsolute &msg);
// Outbound
static void encode_message(std::string &msg_out, const fsTrackingData &msg);
static void encode_message(std::string &msg_out, const fsMsgMarkerNames &msg);
static void encode_message(std::string &msg_out, const fsMsgBlendshapeNames &msg);
static void encode_message(std::string &msg_out, const fsMsgRig &msg);
static void encode_message(std::string &msg_out, const fsMsgSignal &msg); // Generic Signal
private:
std::string m_buffer;
long int m_start;
long int m_end;
bool m_valid;
};
}
#endif // FSBINARYSTREAM_H
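The receive loop described in the class comment above looks roughly like the following sketch. It assumes a blocking POSIX TCP socket already connected to faceshift; the function name, fd, and chunk size are illustrative, not part of this commit.

#include <sys/socket.h>
#include "fsbinarystream.h"
// Read once from the socket, then drain every fully received message.
void pumpFaceshift(int fd, fs::fsBinaryStream& parser, fs::fsTrackingData& latest) {
    char chunk[4096];
    long int n = recv(fd, chunk, sizeof(chunk), 0);
    if (n <= 0) {
        return; // disconnected or nothing to read
    }
    parser.received(n, chunk);
    while (fs::fsMsgPtr msg = parser.get_message()) {
        if (msg->id() == fs::fsMsg::MSG_OUT_TRACKING_STATE) {
            // id() identifies the concrete type, so the static_cast is safe
            latest = static_cast<fs::fsMsgTrackingState*>(msg.get())->tracking_data();
        }
    }
    if (!parser.valid()) {
        // an undecodable block poisons the stream: disconnect, clear(), reconnect
    }
}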

Binary file not shown.

Binary file not shown.

View file

@@ -0,0 +1,502 @@
// ==========================================================================
// Copyright (C) 2012 faceshift AG, and/or its licensors. All rights reserved.
//
// the software is free to use and provided "as is", without warranty of any kind.
// faceshift AG does not make and hereby disclaims any express or implied
// warranties including, but not limited to, the warranties of
// non-infringement, merchantability or fitness for a particular purpose,
// or arising from a course of dealing, usage, or trade practice. in no
// event will faceshift AG and/or its licensors be liable for any lost
// revenues, data, or profits, or special, direct, indirect, or
// consequential damages, even if faceshift AG and/or its licensors has
// been advised of the possibility or probability of such damages.
// ==========================================================================
#include "fsbinarystream.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#define FSNETWORKVERSION 1
#ifdef FS_INTERNAL
#include <common/log.hpp>
#else
#define LOG_RELEASE_ERROR(...) { printf("ERROR: %20s:%6d", __FILE__, __LINE__); printf(__VA_ARGS__); }
#define LOG_RELEASE_WARNING(...) { printf("WARNING: %20s:%6d", __FILE__, __LINE__); printf(__VA_ARGS__); }
#define LOG_RELEASE_INFO(...) { printf("INFO: %20s:%6d", __FILE__, __LINE__); printf(__VA_ARGS__); }
#endif
namespace fs {
// Ids of the submessages for the tracking state
enum BlockId {
BLOCKID_INFO = 101,
BLOCKID_POSE = 102,
BLOCKID_BLENDSHAPES = 103,
BLOCKID_EYES = 104,
BLOCKID_MARKERS = 105
};
typedef long int Size;
struct BlockHeader {
uint16_t id;
uint16_t version;
uint32_t size;
BlockHeader(uint16_t _id=0,
uint32_t _size=0,
uint16_t _version=FSNETWORKVERSION
) : id(_id), version(_version), size(_size) {}
};
// Interprets the data at the position start in buffer as a T and increments start by sizeof(T)
// It should be sufficient to change/overload this function when you are on a weird-endian system
template<class T> bool read_pod(T &value, const std::string &buffer, Size &start) {
if(start+sizeof(T) > buffer.size()) return false;
value = *(const T*)(&buffer[start]);
start += sizeof(T);
return true;
}
bool read_pod(std::string &value, const std::string &buffer, Size &start) {
uint16_t len = 0;
if(!read_pod(len, buffer, start)) return false;
if(start+len>Size(buffer.size())) return false; // check whether we have enough data available
value.resize(len);
memcpy(&(value[0]), &buffer[start], len);
start+=len;
return true;
}
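// A sketch of the endianness overload point mentioned in the comment above read_pod
// (illustrative; not part of the original file): on a big-endian host, fixed-width
// reads could be specialized to byte-swap before returning, e.g.
//
// template<> bool read_pod<uint32_t>(uint32_t &value, const std::string &buffer, Size &start) {
//     if (start + sizeof(uint32_t) > buffer.size()) return false;
//     uint32_t raw;
//     memcpy(&raw, &buffer[start], sizeof(raw));
//     value = ((raw & 0x000000FFu) << 24) | ((raw & 0x0000FF00u) << 8) |
//             ((raw & 0x00FF0000u) >> 8) | ((raw & 0xFF000000u) >> 24);
//     start += sizeof(uint32_t);
//     return true;
// }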
template<class T> bool read_vector(std::vector<T> & values, const std::string & buffer, Size & start) {
uint32_t len = 0;
if( !read_pod(len, buffer, start)) return false;
if( start+len*sizeof(T) > buffer.size() ) return false;
values.resize(len);
for(uint32_t i = 0; i < len; ++i) {
read_pod(values[i],buffer,start);
}
return true;
}
template<class T> bool read_small_vector(std::vector<T> & values, const std::string & buffer, Size & start) {
uint16_t len = 0;
if( !read_pod(len, buffer, start)) return false;
if( start+len*sizeof(T) > buffer.size() ) return false;
values.resize(len);
bool success = true;
for(uint16_t i = 0; i < len; ++i) {
success &= read_pod(values[i],buffer,start);
}
return success;
}
// Adds the bit pattern of the data to the end of the buffer.
// It should be sufficient to change/overload this function when you are on a weird-endian system
template <class T>
void write_pod(std::string &buffer, const T &value) {
Size start = buffer.size();
buffer.resize(start + sizeof(T));
*(T*)(&buffer[start]) = value;
start += sizeof(T);
}
// special write function for strings
void write_pod(std::string &buffer, const std::string &value) {
uint16_t len = uint16_t(value.size()); write_pod(buffer, len);
buffer.append(value);
}
template<class T> void write_vector(std::string & buffer, const std::vector<T> & values) {
uint32_t len = values.size();
write_pod(buffer,len);
for(uint32_t i = 0; i < len; ++i)
write_pod(buffer,values[i]);
}
template<class T> void write_small_vector(std::string & buffer, const std::vector<T> & values) {
uint16_t len = values.size();
write_pod(buffer,len);
for(uint16_t i = 0; i < len; ++i)
write_pod(buffer,values[i]);
}
void update_msg_size(std::string &buffer, Size start) {
*(uint32_t*)(&buffer[start+4]) = buffer.size() - sizeof(BlockHeader) - start;
}
void update_msg_size(std::string &buffer) {
*(uint32_t*)(&buffer[4]) = buffer.size() - sizeof(BlockHeader);
}
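// The "+4" above is the offset of BlockHeader::size within the 8-byte header
// (id:2, version:2, size:4, no padding). A compile-time check of that layout
// assumption could read (illustrative; not in the original file):
//
// static_assert(sizeof(BlockHeader) == 8, "wire format expects an 8-byte block header");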
static void skipHeader( Size &start) {
start += sizeof(BlockHeader);
}
//! returns whether @param buffer contains enough data to read the block header
static bool headerAvailable(BlockHeader &header, const std::string &buffer, Size &start, const Size &end) {
if (end-start >= Size(sizeof(BlockHeader))) {
header = *(BlockHeader*)(&buffer[start]);
return true;
} else {
return false;
}
}
//! returns whether @param buffer contains data for a full block
static bool blockAvailable(const std::string &buffer, Size &start, const Size &end) {
BlockHeader header;
if (!headerAvailable(header, buffer, start, end)) return false;
return end-start >= Size(sizeof(header)+header.size);
}
fsBinaryStream::fsBinaryStream() : m_buffer(), m_start(0), m_end(0), m_valid(true) { m_buffer.resize(64*1024); } // Use a 64kb buffer by default
void fsBinaryStream::received(long int sz, const char *data) {
long int new_end = m_end + sz;
if (new_end > Size(m_buffer.size()) && m_start>0) {
// If the newly received block is too large to fit into the buffer, but we have already processed data from the start of the buffer, then
// move the unprocessed bytes to the front of the buffer.
// The buffer only grows, such that it is always large enough to contain the largest message seen so far.
if (m_end>m_start) memmove(&m_buffer[0], &m_buffer[0] + m_start, m_end - m_start);
m_end = m_end - m_start;
m_start = 0;
new_end = m_end + sz;
}
if (new_end > Size(m_buffer.size())) m_buffer.resize(1.5*new_end);
memcpy(&m_buffer[0] + m_end, data, sz);
m_end += sz;
}
static bool decodeInfo(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
bool success = true;
success &= read_pod<double>(_trackingData.m_timestamp, buffer, start);
unsigned char tracking_successfull = 0;
success &= read_pod<unsigned char>( tracking_successfull, buffer, start );
_trackingData.m_trackingSuccessful = bool(tracking_successfull);
return success;
}
static bool decodePose(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
bool success = true;
success &= read_pod(_trackingData.m_headRotation.x, buffer, start);
success &= read_pod(_trackingData.m_headRotation.y, buffer, start);
success &= read_pod(_trackingData.m_headRotation.z, buffer, start);
success &= read_pod(_trackingData.m_headRotation.w, buffer, start);
success &= read_pod(_trackingData.m_headTranslation.x, buffer, start);
success &= read_pod(_trackingData.m_headTranslation.y, buffer, start);
success &= read_pod(_trackingData.m_headTranslation.z, buffer, start);
return success;
}
static bool decodeBlendshapes(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
return read_vector(_trackingData.m_coeffs, buffer, start);
}
static bool decodeEyeGaze(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
bool success = true;
success &= read_pod(_trackingData.m_eyeGazeLeftPitch , buffer, start);
success &= read_pod(_trackingData.m_eyeGazeLeftYaw , buffer, start);
success &= read_pod(_trackingData.m_eyeGazeRightPitch, buffer, start);
success &= read_pod(_trackingData.m_eyeGazeRightYaw , buffer, start);
return success;
}
static bool decodeMarkers(fsTrackingData & _trackingData, const std::string &buffer, Size &start) {
return read_small_vector( _trackingData.m_markers, buffer, start );
}
static bool decodeMarkerNames(fsMsgMarkerNames &_msg, const std::string &buffer, Size &start) {
return read_small_vector(_msg.marker_names(), buffer, start);
}
static bool decodeBlendshapeNames(fsMsgBlendshapeNames &_msg, const std::string &buffer, Size &start) {
return read_small_vector(_msg.blendshape_names(), buffer, start);
}
static bool decodeRig(fsMsgRig &_msg, const std::string &buffer, Size &start) {
bool success = true;
success &= read_vector(_msg.mesh().m_quads,buffer,start); // read quads
success &= read_vector(_msg.mesh().m_tris,buffer,start); // read triangles
success &= read_vector(_msg.mesh().m_vertex_data.m_vertices,buffer,start);// read neutral vertices
success &= read_small_vector(_msg.blendshape_names(),buffer,start); // read names
uint16_t bsize = 0;
success &= read_pod(bsize,buffer,start);
_msg.blendshapes().resize(bsize);
for(uint16_t i = 0;i < bsize; i++)
success &= read_vector(_msg.blendshapes()[i].m_vertices,buffer,start); // read blendshapes
return success;
}
bool is_valid_msg(int id) {
switch(id) {
case fsMsg::MSG_IN_START_TRACKING :
case fsMsg::MSG_IN_STOP_TRACKING :
case fsMsg::MSG_IN_CALIBRATE_NEUTRAL :
case fsMsg::MSG_IN_SEND_MARKER_NAMES :
case fsMsg::MSG_IN_SEND_BLENDSHAPE_NAMES:
case fsMsg::MSG_IN_SEND_RIG :
case fsMsg::MSG_IN_HEADPOSE_RELATIVE :
case fsMsg::MSG_IN_HEADPOSE_ABSOLUTE :
case fsMsg::MSG_OUT_TRACKING_STATE :
case fsMsg::MSG_OUT_MARKER_NAMES :
case fsMsg::MSG_OUT_BLENDSHAPE_NAMES :
case fsMsg::MSG_OUT_RIG : return true;
default:
LOG_RELEASE_ERROR("Invalid Message ID %d", id);
return false;
}
}
fsMsgPtr fsBinaryStream::get_message() {
BlockHeader super_block;
if( !headerAvailable(super_block, m_buffer, m_start, m_end) ) return fsMsgPtr();
if (!is_valid_msg(super_block.id)) { LOG_RELEASE_ERROR("Invalid superblock id"); m_valid = false; return fsMsgPtr(); }
if( !blockAvailable( m_buffer, m_start, m_end) ) return fsMsgPtr();
skipHeader(m_start);
long super_block_data_start = m_start;
switch (super_block.id) {
case fsMsg::MSG_IN_START_TRACKING: {
if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
return fsMsgPtr(new fsMsgStartCapturing() );
}; break;
case fsMsg::MSG_IN_STOP_TRACKING: {
if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
return fsMsgPtr(new fsMsgStopCapturing() );
}; break;
case fsMsg::MSG_IN_CALIBRATE_NEUTRAL: {
if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
return fsMsgPtr(new fsMsgCalibrateNeutral() );
}; break;
case fsMsg::MSG_IN_SEND_MARKER_NAMES: {
if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
return fsMsgPtr(new fsMsgSendMarkerNames() );
}; break;
case fsMsg::MSG_IN_SEND_BLENDSHAPE_NAMES: {
if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
return fsMsgPtr(new fsMsgSendBlendshapeNames() );
}; break;
case fsMsg::MSG_IN_SEND_RIG: {
if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
return fsMsgPtr(new fsMsgSendRig() );
}; break;
case fsMsg::MSG_IN_HEADPOSE_RELATIVE: {
if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
return fsMsgPtr(new fsMsgHeadPoseRelative() );
}; break;
case fsMsg::MSG_IN_HEADPOSE_ABSOLUTE: {
if (super_block.size > 0) { LOG_RELEASE_ERROR("Expected Size to be 0, not %d", super_block.size); m_valid = false; return fsMsgPtr(); }
return fsMsgPtr(new fsMsgHeadPoseAbsolute() );
}; break;
case fsMsg::MSG_OUT_MARKER_NAMES: {
std::tr1::shared_ptr< fsMsgMarkerNames > msg(new fsMsgMarkerNames());
if( !decodeMarkerNames(*msg, m_buffer, m_start )) { LOG_RELEASE_ERROR("Could not decode marker names"); m_valid = false; return fsMsgPtr(); }
uint64_t actual_size = m_start-super_block_data_start;
if( actual_size != super_block.size ) { LOG_RELEASE_ERROR("Block was promised to be of size %d, not %d", super_block.size, actual_size); m_valid = false; return fsMsgPtr(); }
return msg;
}; break;
case fsMsg::MSG_OUT_BLENDSHAPE_NAMES: {
std::tr1::shared_ptr< fsMsgBlendshapeNames > msg(new fsMsgBlendshapeNames() );
if( !decodeBlendshapeNames(*msg, m_buffer, m_start) ) { LOG_RELEASE_ERROR("Could not decode blendshape names"); m_valid = false; return fsMsgPtr(); }
uint64_t actual_size = m_start-super_block_data_start;
if( actual_size != super_block.size ) { LOG_RELEASE_ERROR("Block was promised to be of size %d, not %d", super_block.size, actual_size); m_valid = false; return fsMsgPtr(); }
return msg;
}; break;
case fsMsg::MSG_OUT_TRACKING_STATE: {
BlockHeader sub_block;
uint16_t num_blocks = 0;
if( !read_pod(num_blocks, m_buffer, m_start) ) { LOG_RELEASE_ERROR("Could not read num_blocks"); m_valid = false; return fsMsgPtr(); }
std::tr1::shared_ptr<fsMsgTrackingState> msg = std::tr1::shared_ptr<fsMsgTrackingState>(new fsMsgTrackingState());
for(int i = 0; i < num_blocks; i++) {
if( !headerAvailable(sub_block, m_buffer, m_start, m_end) ) { LOG_RELEASE_ERROR("could not read sub-header %d", i); m_valid = false; return fsMsgPtr(); }
if( !blockAvailable( m_buffer, m_start, m_end) ) { LOG_RELEASE_ERROR("could not read sub-block %d", i); m_valid = false; return fsMsgPtr(); }
skipHeader(m_start);
long sub_block_data_start = m_start;
bool success = true;
switch(sub_block.id) {
case BLOCKID_INFO: success &= decodeInfo( msg->tracking_data(), m_buffer, m_start); break;
case BLOCKID_POSE: success &= decodePose( msg->tracking_data(), m_buffer, m_start); break;
case BLOCKID_BLENDSHAPES: success &= decodeBlendshapes(msg->tracking_data(), m_buffer, m_start); break;
case BLOCKID_EYES: success &= decodeEyeGaze( msg->tracking_data(), m_buffer, m_start); break;
case BLOCKID_MARKERS: success &= decodeMarkers( msg->tracking_data(), m_buffer, m_start); break;
default:
LOG_RELEASE_ERROR("Unexpected subblock id %d", sub_block.id);
m_valid = false; return msg;
break;
}
if(!success) {
LOG_RELEASE_ERROR("Could not decode subblock with id %d", sub_block.id);
m_valid = false; return fsMsgPtr();
}
uint64_t actual_size = m_start-sub_block_data_start;
if( actual_size != sub_block.size ) {
LOG_RELEASE_ERROR("Unexpected number of bytes consumed %d instead of %d for subblock %d id:%d", actual_size, sub_block.size, i, sub_block.id);
m_valid = false; return fsMsgPtr();
}
}
uint64_t actual_size = m_start-super_block_data_start;
if( actual_size != super_block.size ) {
LOG_RELEASE_ERROR("Unexpected number of bytes consumed %d instead of %d", actual_size, super_block.size);
m_valid = false; return fsMsgPtr();
}
return msg;
}; break;
case fsMsg::MSG_OUT_RIG: {
std::tr1::shared_ptr< fsMsgRig > msg(new fsMsgRig() );
if( !decodeRig(*msg, m_buffer, m_start) ) { LOG_RELEASE_ERROR("Could not decode rig"); m_valid = false; return fsMsgPtr(); }
if( m_start-super_block_data_start != super_block.size ) { LOG_RELEASE_ERROR("Could not decode rig unexpected size"); m_valid = false; return fsMsgPtr(); }
return msg;
}; break;
default: {
LOG_RELEASE_ERROR("Unexpected superblock id %d", super_block.id);
m_valid = false; return fsMsgPtr();
}; break;
}
return fsMsgPtr();
}
static void encodeInfo(std::string &buffer, const fsTrackingData & _trackingData) {
BlockHeader header(BLOCKID_INFO, sizeof(double) + 1);
write_pod(buffer, header);
write_pod(buffer, _trackingData.m_timestamp);
unsigned char tracking_successfull = _trackingData.m_trackingSuccessful;
write_pod( buffer, tracking_successfull );
}
static void encodePose(std::string &buffer, const fsTrackingData & _trackingData) {
BlockHeader header(BLOCKID_POSE, sizeof(float)*7);
write_pod(buffer, header);
write_pod(buffer, _trackingData.m_headRotation.x);
write_pod(buffer, _trackingData.m_headRotation.y);
write_pod(buffer, _trackingData.m_headRotation.z);
write_pod(buffer, _trackingData.m_headRotation.w);
write_pod(buffer, _trackingData.m_headTranslation.x);
write_pod(buffer, _trackingData.m_headTranslation.y);
write_pod(buffer, _trackingData.m_headTranslation.z);
}
static void encodeBlendshapes(std::string &buffer, const fsTrackingData & _trackingData) {
uint32_t num_parameters = _trackingData.m_coeffs.size();
BlockHeader header(BLOCKID_BLENDSHAPES, sizeof(uint32_t) + sizeof(float)*num_parameters);
write_pod(buffer, header);
write_pod(buffer, num_parameters);
for(uint32_t i = 0; i < num_parameters; i++)
write_pod(buffer, _trackingData.m_coeffs[i]);
}
static void encodeEyeGaze(std::string &buffer, const fsTrackingData & _trackingData) {
BlockHeader header(BLOCKID_EYES, sizeof(float)*4);
write_pod(buffer, header);
write_pod(buffer, _trackingData.m_eyeGazeLeftPitch );
write_pod(buffer, _trackingData.m_eyeGazeLeftYaw );
write_pod(buffer, _trackingData.m_eyeGazeRightPitch);
write_pod(buffer, _trackingData.m_eyeGazeRightYaw );
}
static void encodeMarkers(std::string &buffer, const fsTrackingData & _trackingData) {
uint16_t numMarkers = _trackingData.m_markers.size();
BlockHeader header(BLOCKID_MARKERS, sizeof(uint16_t) + sizeof(float)*3*numMarkers);
write_pod(buffer, header);
write_pod(buffer, numMarkers);
for(int i = 0; i < numMarkers; i++) {
write_pod(buffer, _trackingData.m_markers[i].x);
write_pod(buffer, _trackingData.m_markers[i].y);
write_pod(buffer, _trackingData.m_markers[i].z);
}
}
// Inbound
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgTrackingState &msg) {
encode_message(msg_out, msg.tracking_data());
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgStartCapturing &msg) {
BlockHeader header(msg.id());
write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgStopCapturing &msg) {
BlockHeader header(msg.id());
write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgCalibrateNeutral &msg) {
BlockHeader header(msg.id());
write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgSendMarkerNames &msg) {
BlockHeader header(msg.id());
write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgSendBlendshapeNames &msg) {
BlockHeader header(msg.id());
write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgSendRig &msg) {
BlockHeader header(msg.id());
write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgHeadPoseRelative &msg) {
BlockHeader header(msg.id());
write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgHeadPoseAbsolute &msg) {
BlockHeader header(msg.id());
write_pod(msg_out, header);
}
// Outbound
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgSignal &msg) {
BlockHeader header(msg.id());
write_pod(msg_out, header);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsTrackingData &tracking_data) {
Size start = msg_out.size();
BlockHeader header(fsMsg::MSG_OUT_TRACKING_STATE);
write_pod(msg_out, header);
uint16_t N_blocks = 5;
write_pod(msg_out, N_blocks);
encodeInfo( msg_out, tracking_data);
encodePose( msg_out, tracking_data);
encodeBlendshapes(msg_out, tracking_data);
encodeEyeGaze( msg_out, tracking_data);
encodeMarkers( msg_out, tracking_data);
update_msg_size(msg_out, start);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgMarkerNames &msg) {
Size start = msg_out.size();
BlockHeader header(msg.id());
write_pod(msg_out, header);
write_small_vector(msg_out,msg.marker_names());
update_msg_size(msg_out, start);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgBlendshapeNames &msg) {
Size start = msg_out.size();
BlockHeader header(msg.id());
write_pod(msg_out, header);
write_small_vector(msg_out,msg.blendshape_names());
update_msg_size(msg_out, start);
}
void fsBinaryStream::encode_message(std::string &msg_out, const fsMsgRig &msg) {
Size start = msg_out.size();
BlockHeader header(msg.id());
write_pod(msg_out, header);
write_vector(msg_out, msg.mesh().m_quads); // write quads
write_vector(msg_out, msg.mesh().m_tris);// write triangles
write_vector(msg_out, msg.mesh().m_vertex_data.m_vertices);// write neutral vertices
write_small_vector(msg_out, msg.blendshape_names());// write names
write_pod(msg_out,uint16_t(msg.blendshapes().size()));
for(uint16_t i = 0;i < uint16_t(msg.blendshapes().size()); i++)
write_vector(msg_out, msg.blendshapes()[i].m_vertices); // write blendshapes
update_msg_size(msg_out, start);
}
}
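Encoding runs the other way: serialize a message into a string with encode_message() and push the bytes over the socket. A minimal sketch, under the same illustrative socket assumption as the receive-loop sketch above:

#include <sys/socket.h>
#include "fsbinarystream.h"
// Ask faceshift to start streaming tracking state to this client.
void requestTracking(int fd) {
    std::string out;
    fs::fsBinaryStream::encode_message(out, fs::fsMsgStartCapturing());
    send(fd, out.data(), out.size(), 0); // error handling omitted
}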

View file

@@ -58,13 +58,13 @@
#include "Application.h"
#include "LogDisplay.h"
#include "LeapManager.h"
#include "Menu.h"
#include "OculusManager.h"
#include "Swatch.h"
#include "Util.h"
#include "devices/LeapManager.h"
#include "devices/OculusManager.h"
#include "renderer/ProgramObject.h"
#include "ui/TextRenderer.h"
#include "Swatch.h"
using namespace std;
@@ -237,6 +237,16 @@ Application::~Application() {
NodeList::getInstance()->removeHook(this);
_sharedVoxelSystem.changeTree(new VoxelTree);
_audio.shutdown();
delete Menu::getInstance();
delete _oculusProgram;
delete _settings;
delete _networkAccessManager;
delete _followMode;
delete _glWidget;
}
void Application::initializeGL() {
@@ -418,10 +428,14 @@ void Application::resizeGL(int width, int height) {
glViewport(0, 0, width, height); // shouldn't this account for the menu???
updateProjectionMatrix();
glLoadIdentity();
}
void Application::updateProjectionMatrix() {
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// On window reshape, we need to tell OpenGL about our new setting
float left, right, bottom, top, nearVal, farVal;
glm::vec4 nearClipPlane, farClipPlane;
_viewFrustum.computeOffAxisFrustum(left, right, bottom, top, nearVal, farVal, nearClipPlane, farClipPlane);
@@ -435,7 +449,6 @@ void Application::resizeGL(int width, int height) {
glFrustum(left, right, bottom, top, nearVal, farVal);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
void Application::controlledBroadcastToNodes(unsigned char* broadcastData, size_t dataBytes,
@@ -603,7 +616,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
} else {
_myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0, 0.001, 0));
}
resizeGL(_glWidget->width(), _glWidget->height());
updateProjectionMatrix();
break;
case Qt::Key_K:
@@ -613,7 +626,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
} else {
_myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0, -0.001, 0));
}
resizeGL(_glWidget->width(), _glWidget->height());
updateProjectionMatrix();
break;
case Qt::Key_J:
@@ -623,7 +636,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
} else {
_myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(-0.001, 0, 0));
}
resizeGL(_glWidget->width(), _glWidget->height());
updateProjectionMatrix();
break;
case Qt::Key_M:
@@ -633,7 +646,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
} else {
_myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0.001, 0, 0));
}
resizeGL(_glWidget->width(), _glWidget->height());
updateProjectionMatrix();
break;
case Qt::Key_U:
@@ -643,7 +656,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
} else {
_myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0, 0, -0.001));
}
resizeGL(_glWidget->width(), _glWidget->height());
updateProjectionMatrix();
break;
case Qt::Key_Y:
@@ -653,7 +666,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
} else {
_myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0, 0, 0.001));
}
resizeGL(_glWidget->width(), _glWidget->height());
updateProjectionMatrix();
break;
case Qt::Key_H:
Menu::getInstance()->triggerOption(MenuOption::Mirror);
@@ -1518,18 +1531,28 @@ void Application::update(float deltaTime) {
// Set where I am looking based on my mouse ray (so that other people can see)
glm::vec3 lookAtSpot;
_isLookingAtOtherAvatar = isLookingAtOtherAvatar(mouseRayOrigin, mouseRayDirection, lookAtSpot);
// if we have faceshift, use that to compute the lookat direction
glm::vec3 lookAtRayOrigin = mouseRayOrigin, lookAtRayDirection = mouseRayDirection;
if (_faceshift.isActive()) {
lookAtRayOrigin = _myAvatar.getHead().calculateAverageEyePosition();
float averagePitch = (_faceshift.getEyeGazeLeftPitch() + _faceshift.getEyeGazeRightPitch()) / 2.0f;
float averageYaw = (_faceshift.getEyeGazeLeftYaw() + _faceshift.getEyeGazeRightYaw()) / 2.0f;
lookAtRayDirection = _myAvatar.getHead().getOrientation() *
glm::quat(glm::radians(glm::vec3(averagePitch, averageYaw, 0.0f))) * glm::vec3(0.0f, 0.0f, -1.0f);
}
_isLookingAtOtherAvatar = isLookingAtOtherAvatar(lookAtRayOrigin, lookAtRayDirection, lookAtSpot);
if (_isLookingAtOtherAvatar) {
// If the mouse is over another avatar's head...
_myAvatar.getHead().setLookAtPosition(lookAtSpot);
} else if (_isHoverVoxel) {
} else if (_isHoverVoxel && !_faceshift.isActive()) {
// Look at the hovered voxel
lookAtSpot = getMouseVoxelWorldCoordinates(_hoverVoxel);
_myAvatar.getHead().setLookAtPosition(lookAtSpot);
} else {
// Just look in the direction of the look-at ray
const float FAR_AWAY_STARE = TREE_SCALE;
lookAtSpot = mouseRayOrigin + mouseRayDirection * FAR_AWAY_STARE;
lookAtSpot = lookAtRayOrigin + lookAtRayDirection * FAR_AWAY_STARE;
_myAvatar.getHead().setLookAtPosition(lookAtSpot);
}
@@ -1780,6 +1803,20 @@ void Application::update(float deltaTime) {
_myCamera.setModeShiftRate(1.0f);
}
}
if (Menu::getInstance()->isOptionChecked(MenuOption::OffAxisProjection)) {
if (_faceshift.isActive()) {
const float EYE_OFFSET_SCALE = 0.005f;
glm::vec3 position = _faceshift.getHeadTranslation() * EYE_OFFSET_SCALE;
_myCamera.setEyeOffsetPosition(glm::vec3(-position.x, position.y, position.z));
updateProjectionMatrix();
} else if (_webcam.isActive()) {
const float EYE_OFFSET_SCALE = 5.0f;
_myCamera.setEyeOffsetPosition(_webcam.getEstimatedPosition() * EYE_OFFSET_SCALE);
updateProjectionMatrix();
}
}
}
// Update bandwidth dialog, if any
@@ -1878,6 +1915,7 @@ void Application::updateAvatar(float deltaTime) {
_myAvatar.setCameraAspectRatio(_viewFrustum.getAspectRatio());
_myAvatar.setCameraNearClip(_viewFrustum.getNearClip());
_myAvatar.setCameraFarClip(_viewFrustum.getFarClip());
_myAvatar.setCameraEyeOffsetPosition(_viewFrustum.getEyeOffsetPosition());
NodeList* nodeList = NodeList::getInstance();
if (nodeList->getOwnerID() != UNKNOWN_NODE_ID) {
@@ -2266,7 +2304,7 @@ void Application::displaySide(Camera& whichCamera) {
}
// Render my own Avatar
if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
if (_myCamera.getMode() == CAMERA_MODE_MIRROR && !_faceshift.isActive()) {
_myAvatar.getHead().setLookAtPosition(_myCamera.getPosition());
}
_myAvatar.render(Menu::getInstance()->isOptionChecked(MenuOption::Mirror),
@@ -3067,6 +3105,7 @@ void Application::resetSensors() {
_serialHeadSensor.resetAverages();
}
_webcam.reset();
_faceshift.reset();
QCursor::setPos(_headMouseX, _headMouseY);
_myAvatar.reset();
_myTransmitter.resetLevels();

View file

@@ -33,7 +33,6 @@
#include "GLCanvas.h"
#include "PacketHeaders.h"
#include "PieMenu.h"
#include "SerialInterface.h"
#include "Stars.h"
#include "Swatch.h"
#include "ToolsPalette.h"
@@ -43,10 +42,12 @@
#include "VoxelPacketProcessor.h"
#include "VoxelSystem.h"
#include "VoxelImporter.h"
#include "Webcam.h"
#include "avatar/Avatar.h"
#include "avatar/MyAvatar.h"
#include "avatar/HandControl.h"
#include "devices/Faceshift.h"
#include "devices/SerialInterface.h"
#include "devices/Webcam.h"
#include "renderer/AmbientOcclusionEffect.h"
#include "renderer/GeometryCache.h"
#include "renderer/GlowEffect.h"
@@ -117,6 +118,7 @@ public:
Environment* getEnvironment() { return &_environment; }
SerialInterface* getSerialHeadSensor() { return &_serialHeadSensor; }
Webcam* getWebcam() { return &_webcam; }
Faceshift* getFaceshift() { return &_faceshift; }
BandwidthMeter* getBandwidthMeter() { return &_bandwidthMeter; }
QSettings* getSettings() { return _settings; }
Swatch* getSwatch() { return &_swatch; }
@@ -176,6 +178,7 @@ private slots:
private:
void resetCamerasOnResizeGL(Camera& camera, int width, int height);
void updateProjectionMatrix();
static bool sendVoxelsOperation(VoxelNode* node, void* extraData);
static void processAvatarVoxelURLMessage(unsigned char* packetData, size_t dataBytes);
@@ -259,6 +262,8 @@ private:
Webcam _webcam; // The webcam interface
Faceshift _faceshift;
Camera _myCamera; // My view onto the world
Camera _viewFrustumOffsetCamera; // The camera we use to sometimes show the view frustum from an offset mode

View file

@@ -448,7 +448,7 @@ Audio::Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples) :
gettimeofday(&_lastReceiveTime, NULL);
}
Audio::~Audio() {
void Audio::shutdown() {
if (_stream) {
outputPortAudioError(Pa_CloseStream(_stream));
outputPortAudioError(Pa_Terminate());

View file

@@ -33,26 +33,27 @@ class Audio : public QObject {
public:
// initializes audio I/O
Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples);
~Audio();
void shutdown();
void reset();
void render(int screenWidth, int screenHeight);
void addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes);
float getLastInputLoudness() const { return _lastInputLoudness; };
float getLastInputLoudness() const { return _lastInputLoudness; }
void setLastAcceleration(glm::vec3 lastAcceleration) { _lastAcceleration = lastAcceleration; };
void setLastVelocity(glm::vec3 lastVelocity) { _lastVelocity = lastVelocity; };
void setLastAcceleration(const glm::vec3 lastAcceleration) { _lastAcceleration = lastAcceleration; }
void setLastVelocity(const glm::vec3 lastVelocity) { _lastVelocity = lastVelocity; }
void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; };
int getJitterBufferSamples() { return _jitterBufferSamples; };
void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; }
int getJitterBufferSamples() { return _jitterBufferSamples; }
void lowPassFilter(int16_t* inputBuffer);
void startCollisionSound(float magnitude, float frequency, float noise, float duration);
float getCollisionSoundMagnitude() { return _collisionSoundMagnitude; };
float getCollisionSoundMagnitude() { return _collisionSoundMagnitude; }
void ping();
@@ -61,8 +62,8 @@ public:
// The results of the analysis are written to the log.
bool eventuallyAnalyzePing();
void setListenMode(AudioRingBuffer::ListenMode mode) { _listenMode = mode; };
void setListenRadius(float radius) { _listenRadius = radius; };
void setListenMode(AudioRingBuffer::ListenMode mode) { _listenMode = mode; }
void setListenRadius(float radius) { _listenRadius = radius; }
void addListenSource(int sourceID);
void removeListenSource(int sourceID);
void clearListenSources();

View file

@@ -38,6 +38,10 @@ Balls::Balls(int numberOfBalls) {
_origin = glm::vec3(0, 0, 0);
}
Balls::~Balls() {
delete[] _balls;
}
void Balls::moveOrigin(const glm::vec3& newOrigin) {
glm::vec3 delta = newOrigin - _origin;
if (glm::length(delta) > EPSILON) {

View file

@@ -14,6 +14,7 @@ const int NUMBER_SPRINGS = 4;
class Balls {
public:
Balls(int numberOfBalls);
~Balls();
void simulate(float deltaTime);
void render();

View file

@@ -38,13 +38,31 @@ static sockaddr getZeroAddress() {
return addr;
}
Environment::Environment()
: _initialized(false) {
}
Environment::~Environment() {
if (_initialized) {
delete _skyFromAtmosphereProgram;
delete _skyFromSpaceProgram;
}
}
void Environment::init() {
if (_initialized) {
qDebug("[ERROR] Environment is already initialized.\n");
return;
}
switchToResourcesParentIfRequired();
_skyFromAtmosphereProgram = createSkyProgram("Atmosphere", _skyFromAtmosphereUniformLocations);
_skyFromSpaceProgram = createSkyProgram("Space", _skyFromSpaceUniformLocations);
// start off with default-constructed environment data
_data[getZeroAddress()][0];
_initialized = true;
}
void Environment::resetToDefault() {

View file

@@ -22,6 +22,8 @@ class ProgramObject;
class Environment {
public:
Environment();
~Environment();
void init();
void resetToDefault();
@@ -40,6 +42,7 @@ private:
void renderAtmosphere(Camera& camera, const EnvironmentData& data);
bool _initialized;
ProgramObject* _skyFromAtmosphereProgram;
ProgramObject* _skyFromSpaceProgram;

View file

@@ -204,6 +204,18 @@ Menu::Menu() :
appInstance->getWebcam(),
SLOT(setSkeletonTrackingOn(bool)));
addCheckableActionToQMenuAndActionHash(viewMenu,
MenuOption::LEDTracking,
0,
false,
appInstance->getWebcam()->getGrabber(),
SLOT(setLEDTrackingOn(bool)));
addCheckableActionToQMenuAndActionHash(viewMenu,
MenuOption::OffAxisProjection,
0,
false);
addDisabledActionAndSeparator(viewMenu, "Stats");
addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::Stats, Qt::Key_Slash);
addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::Log, Qt::CTRL | Qt::Key_L);
@@ -353,6 +365,13 @@ Menu::Menu() :
appInstance->getWebcam()->getGrabber(),
SLOT(setDepthOnly(bool)));
addCheckableActionToQMenuAndActionHash(developerMenu,
MenuOption::Faceshift,
0,
false,
appInstance->getFaceshift(),
SLOT(setEnabled(bool)));
QMenu* audioDebugMenu = developerMenu->addMenu("Audio Debugging Tools");
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoAudio);
addActionToQMenuAndActionHash(audioDebugMenu,
@@ -413,6 +432,11 @@ Menu::Menu() :
addCheckableActionToQMenuAndActionHash(developerMenu, MenuOption::DestructiveAddVoxel);
}
Menu::~Menu() {
bandwidthDetailsClosed();
voxelStatsDetailsClosed();
}
void Menu::loadSettings(QSettings* settings) {
if (!settings) {
settings = Application::getInstance()->getSettings();
@@ -855,8 +879,10 @@ void Menu::bandwidthDetails() {
}
void Menu::bandwidthDetailsClosed() {
delete _bandwidthDialog;
_bandwidthDialog = NULL;
if (_bandwidthDialog) {
delete _bandwidthDialog;
_bandwidthDialog = NULL;
}
}
void Menu::voxelStatsDetails() {
@@ -870,8 +896,10 @@ void Menu::voxelStatsDetails() {
}
void Menu::voxelStatsDetailsClosed() {
delete _voxelStatsDialog;
_voxelStatsDialog = NULL;
if (_voxelStatsDialog) {
delete _voxelStatsDialog;
_voxelStatsDialog = NULL;
}
}
void Menu::cycleFrustumRenderMode() {

View file

@@ -31,10 +31,14 @@ struct ViewFrustumOffset {
float up;
};
class BandwidthDialog;
class VoxelStatsDialog;
class Menu : public QMenuBar {
Q_OBJECT
public:
static Menu* getInstance();
~Menu();
bool isOptionChecked(const QString& menuOption);
void triggerOption(const QString& menuOption);
@@ -133,6 +137,7 @@ namespace MenuOption {
const QString ExportVoxels = "Export Voxels";
const QString HeadMouse = "Head Mouse";
const QString FaceMode = "Cycle Face Mode";
const QString Faceshift = "Faceshift";
const QString FalseColorByDistance = "FALSE Color By Distance";
const QString FalseColorBySource = "FALSE Color By Source";
const QString FalseColorEveryOtherVoxel = "FALSE Color Every Other Randomly";
@@ -165,6 +170,7 @@ namespace MenuOption {
const QString LowRes = "Lower Resolution While Moving";
const QString Mirror = "Mirror";
const QString OcclusionCulling = "Occlusion Culling";
const QString OffAxisProjection = "Off-Axis Projection";
const QString Oscilloscope = "Audio Oscilloscope";
const QString Pair = "Pair";
const QString PasteVoxels = "Paste";
@@ -180,6 +186,7 @@ namespace MenuOption {
const QString ShowTrueColors = "Show TRUE Colors";
const QString SimulateLeapHand = "Simulate Leap Hand";
const QString SkeletonTracking = "Skeleton Tracking";
const QString LEDTracking = "LED Tracking";
const QString Stars = "Stars";
const QString Stats = "Stats";
const QString TestPing = "Test Ping";

View file

@@ -51,8 +51,12 @@ GLubyte identityIndices[] = { 0,2,1, 0,3,2, // Z-
10,11,15, 10,15,14, // Y+
4,5,6, 4,6,7 }; // Z+
VoxelSystem::VoxelSystem(float treeScale, int maxVoxels) :
NodeData(NULL), _treeScale(treeScale), _maxVoxels(maxVoxels) {
VoxelSystem::VoxelSystem(float treeScale, int maxVoxels)
: NodeData(NULL),
_treeScale(treeScale),
_maxVoxels(maxVoxels),
_initialized(false) {
_voxelsInReadArrays = _voxelsInWriteArrays = _voxelsUpdated = 0;
_writeRenderFullVBO = true;
_readRenderFullVBO = true;
@@ -115,16 +119,21 @@ void VoxelSystem::clearFreeBufferIndexes() {
}
VoxelSystem::~VoxelSystem() {
glDeleteBuffers(1, &_vboVerticesID);
glDeleteBuffers(1, &_vboNormalsID);
glDeleteBuffers(1, &_vboColorsID);
glDeleteBuffers(1, &_vboIndicesID);
delete[] _readVerticesArray;
delete[] _writeVerticesArray;
delete[] _readColorsArray;
delete[] _writeColorsArray;
delete[] _writeVoxelDirtyArray;
delete[] _readVoxelDirtyArray;
if (_initialized) {
// Destroy glBuffers
glDeleteBuffers(1, &_vboVerticesID);
glDeleteBuffers(1, &_vboNormalsID);
glDeleteBuffers(1, &_vboColorsID);
glDeleteBuffers(1, &_vboIndicesID);
delete[] _readVerticesArray;
delete[] _writeVerticesArray;
delete[] _readColorsArray;
delete[] _writeColorsArray;
delete[] _writeVoxelDirtyArray;
delete[] _readVoxelDirtyArray;
}
delete _tree;
pthread_mutex_destroy(&_bufferWriteLock);
pthread_mutex_destroy(&_treeLock);
@@ -536,9 +545,14 @@ glm::vec3 VoxelSystem::computeVoxelVertex(const glm::vec3& startVertex, float vo
return startVertex + glm::vec3(identityVertex[0], identityVertex[1], identityVertex[2]) * voxelScale;
}
ProgramObject* VoxelSystem::_perlinModulateProgram = 0;
ProgramObject VoxelSystem::_perlinModulateProgram;
void VoxelSystem::init() {
if (_initialized) {
qDebug("[ERROR] VoxelSystem is already initialized.\n");
return;
}
_callsToTreesToArrays = 0;
_setupNewVoxelsForDrawingLastFinished = 0;
_setupNewVoxelsForDrawingLastElapsed = 0;
@@ -549,19 +563,6 @@ void VoxelSystem::init() {
_voxelsInWriteArrays = 0;
_voxelsInReadArrays = 0;
// we will track individual dirty sections with these arrays of bools
_writeVoxelDirtyArray = new bool[_maxVoxels];
memset(_writeVoxelDirtyArray, false, _maxVoxels * sizeof(bool));
_readVoxelDirtyArray = new bool[_maxVoxels];
memset(_readVoxelDirtyArray, false, _maxVoxels * sizeof(bool));
// prep the data structures for incoming voxel data
_writeVerticesArray = new GLfloat[VERTEX_POINTS_PER_VOXEL * _maxVoxels];
_readVerticesArray = new GLfloat[VERTEX_POINTS_PER_VOXEL * _maxVoxels];
_writeColorsArray = new GLubyte[VERTEX_POINTS_PER_VOXEL * _maxVoxels];
_readColorsArray = new GLubyte[VERTEX_POINTS_PER_VOXEL * _maxVoxels];
GLuint* indicesArray = new GLuint[INDICES_PER_VOXEL * _maxVoxels];
// populate the indicesArray
@@ -615,20 +616,34 @@ void VoxelSystem::init() {
// delete the indices and normals arrays that are no longer needed
delete[] indicesArray;
delete[] normalsArray;
// we will track individual dirty sections with these arrays of bools
_writeVoxelDirtyArray = new bool[_maxVoxels];
memset(_writeVoxelDirtyArray, false, _maxVoxels * sizeof(bool));
_readVoxelDirtyArray = new bool[_maxVoxels];
memset(_readVoxelDirtyArray, false, _maxVoxels * sizeof(bool));
// prep the data structures for incoming voxel data
_writeVerticesArray = new GLfloat[VERTEX_POINTS_PER_VOXEL * _maxVoxels];
_readVerticesArray = new GLfloat[VERTEX_POINTS_PER_VOXEL * _maxVoxels];
_writeColorsArray = new GLubyte[VERTEX_POINTS_PER_VOXEL * _maxVoxels];
_readColorsArray = new GLubyte[VERTEX_POINTS_PER_VOXEL * _maxVoxels];
// create our simple fragment shader if we're the first system to init
if (_perlinModulateProgram != 0) {
return;
if (!_perlinModulateProgram.isLinked()) {
switchToResourcesParentIfRequired();
_perlinModulateProgram.addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/perlin_modulate.vert");
_perlinModulateProgram.addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/perlin_modulate.frag");
_perlinModulateProgram.link();
_perlinModulateProgram.bind();
_perlinModulateProgram.setUniformValue("permutationNormalTexture", 0);
_perlinModulateProgram.release();
}
switchToResourcesParentIfRequired();
_perlinModulateProgram = new ProgramObject();
_perlinModulateProgram->addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/perlin_modulate.vert");
_perlinModulateProgram->addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/perlin_modulate.frag");
_perlinModulateProgram->link();
_perlinModulateProgram->bind();
_perlinModulateProgram->setUniformValue("permutationNormalTexture", 0);
_perlinModulateProgram->release();
_initialized = true;
}
void VoxelSystem::changeTree(VoxelTree* newTree) {
@ -765,7 +780,7 @@ void VoxelSystem::applyScaleAndBindProgram(bool texture) {
glScalef(_treeScale, _treeScale, _treeScale);
if (texture) {
_perlinModulateProgram->bind();
_perlinModulateProgram.bind();
glBindTexture(GL_TEXTURE_2D, Application::getInstance()->getTextureCache()->getPermutationNormalTextureID());
}
}
@ -775,7 +790,7 @@ void VoxelSystem::removeScaleAndReleaseProgram(bool texture) {
glPopMatrix();
if (texture) {
_perlinModulateProgram->release();
_perlinModulateProgram.release();
glBindTexture(GL_TEXTURE_2D, 0);
}
}

View file

@ -138,6 +138,7 @@ private:
VoxelSystem(const VoxelSystem&);
VoxelSystem& operator= (const VoxelSystem&);
bool _initialized;
int _callsToTreesToArrays;
VoxelNodeBag _removedVoxels;
@ -209,8 +210,8 @@ private:
void updatePartialVBOs(); // multiple segments, only dirty voxels
bool _voxelsDirty;
static ProgramObject* _perlinModulateProgram;
static ProgramObject _perlinModulateProgram;
int _hookID;
std::vector<glBufferIndex> _freeIndexes;

View file

@ -13,7 +13,6 @@
#include <NodeList.h>
#include <NodeTypes.h>
#include <OculusManager.h>
#include <PacketHeaders.h>
#include <SharedUtil.h>
@ -23,6 +22,7 @@
#include "Head.h"
#include "Physics.h"
#include "world.h"
#include "devices/OculusManager.h"
#include "ui/TextRenderer.h"
using namespace std;
@ -102,11 +102,11 @@ Avatar::Avatar(Node* owningNode) :
_isCollisionsOn(true),
_leadingAvatar(NULL),
_voxels(this),
_moving(false),
_initialized(false),
_handHoldingPosition(0.0f, 0.0f, 0.0f),
_maxArmLength(0.0f),
_pelvisStandingHeight(0.0f),
_moving(false)
_pelvisStandingHeight(0.0f)
{
// give the pointer to our head to inherited _headData variable from AvatarData
_headData = &_head;

View file

@ -22,10 +22,9 @@
#include "Head.h"
#include "InterfaceConfig.h"
#include "Skeleton.h"
#include "SerialInterface.h"
#include "Transmitter.h"
#include "world.h"
#include "devices/SerialInterface.h"
#include "devices/Transmitter.h"
static const float MAX_SCALE = 1000.f;
static const float MIN_SCALE = .005f;

View file

@ -22,25 +22,38 @@ const int BONE_ELEMENTS_PER_VOXEL = BONE_ELEMENTS_PER_VERTEX * VERTICES_PER_VOXE
AvatarVoxelSystem::AvatarVoxelSystem(Avatar* avatar) :
VoxelSystem(AVATAR_TREE_SCALE, MAX_VOXELS_PER_AVATAR),
_mode(0), _avatar(avatar), _voxelReply(0) {
_initialized(false),
_mode(0),
_avatar(avatar),
_voxelReply(0) {
// we may have been created in the network thread, but we live in the main thread
moveToThread(Application::getInstance()->thread());
}
AvatarVoxelSystem::~AvatarVoxelSystem() {
delete[] _readBoneIndicesArray;
delete[] _readBoneWeightsArray;
delete[] _writeBoneIndicesArray;
delete[] _writeBoneWeightsArray;
AvatarVoxelSystem::~AvatarVoxelSystem() {
if (_initialized) {
delete[] _readBoneIndicesArray;
delete[] _readBoneWeightsArray;
delete[] _writeBoneIndicesArray;
delete[] _writeBoneWeightsArray;
glDeleteBuffers(1, &_vboBoneIndicesID);
glDeleteBuffers(1, &_vboBoneWeightsID);
}
}
ProgramObject* AvatarVoxelSystem::_skinProgram = 0;
ProgramObject AvatarVoxelSystem::_skinProgram;
int AvatarVoxelSystem::_boneMatricesLocation;
int AvatarVoxelSystem::_boneIndicesLocation;
int AvatarVoxelSystem::_boneWeightsLocation;
void AvatarVoxelSystem::init() {
if (_initialized) {
qDebug("[ERROR] AvatarVoxelSystem is already initialized.\n");
return;
}
VoxelSystem::init();
// prep the data structures for incoming voxel data
@ -61,16 +74,16 @@ void AvatarVoxelSystem::init() {
glBufferData(GL_ARRAY_BUFFER, BONE_ELEMENTS_PER_VOXEL * sizeof(GLfloat) * _maxVoxels, NULL, GL_DYNAMIC_DRAW);
// load our skin program if this is the first avatar system to initialize
if (_skinProgram != 0) {
return;
if (!_skinProgram.isLinked()) {
_skinProgram.addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/skin_voxels.vert");
_skinProgram.link();
}
_skinProgram = new ProgramObject();
_skinProgram->addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/skin_voxels.vert");
_skinProgram->link();
_boneMatricesLocation = _skinProgram->uniformLocation("boneMatrices");
_boneIndicesLocation = _skinProgram->attributeLocation("boneIndices");
_boneWeightsLocation = _skinProgram->attributeLocation("boneWeights");
_boneMatricesLocation = _skinProgram.uniformLocation("boneMatrices");
_boneIndicesLocation = _skinProgram.attributeLocation("boneIndices");
_boneWeightsLocation = _skinProgram.attributeLocation("boneWeights");
_initialized = true;
}
void AvatarVoxelSystem::removeOutOfView() {
@ -202,7 +215,7 @@ void AvatarVoxelSystem::updateVBOSegment(glBufferIndex segmentStart, glBufferInd
}
void AvatarVoxelSystem::applyScaleAndBindProgram(bool texture) {
_skinProgram->bind();
_skinProgram.bind();
// the base matrix includes centering and scale
QMatrix4x4 baseMatrix;
@ -222,21 +235,21 @@ void AvatarVoxelSystem::applyScaleAndBindProgram(bool texture) {
boneMatrices[i].translate(-bindPosition.x, -bindPosition.y, -bindPosition.z);
boneMatrices[i] *= baseMatrix;
}
_skinProgram->setUniformValueArray(_boneMatricesLocation, boneMatrices, NUM_AVATAR_JOINTS);
_skinProgram.setUniformValueArray(_boneMatricesLocation, boneMatrices, NUM_AVATAR_JOINTS);
glBindBuffer(GL_ARRAY_BUFFER, _vboBoneIndicesID);
glVertexAttribPointer(_boneIndicesLocation, BONE_ELEMENTS_PER_VERTEX, GL_UNSIGNED_BYTE, false, 0, 0);
_skinProgram->enableAttributeArray(_boneIndicesLocation);
_skinProgram.enableAttributeArray(_boneIndicesLocation);
glBindBuffer(GL_ARRAY_BUFFER, _vboBoneWeightsID);
_skinProgram->setAttributeBuffer(_boneWeightsLocation, GL_FLOAT, 0, BONE_ELEMENTS_PER_VERTEX);
_skinProgram->enableAttributeArray(_boneWeightsLocation);
_skinProgram.setAttributeBuffer(_boneWeightsLocation, GL_FLOAT, 0, BONE_ELEMENTS_PER_VERTEX);
_skinProgram.enableAttributeArray(_boneWeightsLocation);
}
void AvatarVoxelSystem::removeScaleAndReleaseProgram(bool texture) {
_skinProgram->release();
_skinProgram->disableAttributeArray(_boneIndicesLocation);
_skinProgram->disableAttributeArray(_boneWeightsLocation);
_skinProgram.release();
_skinProgram.disableAttributeArray(_boneIndicesLocation);
_skinProgram.disableAttributeArray(_boneWeightsLocation);
}
void AvatarVoxelSystem::handleVoxelDownloadProgress(qint64 bytesReceived, qint64 bytesTotal) {

View file

@ -58,6 +58,7 @@ private:
void computeBoneIndicesAndWeights(const glm::vec3& vertex, BoneIndices& indices, glm::vec4& weights) const;
bool _initialized;
int _mode;
Avatar* _avatar;
@ -73,8 +74,8 @@ private:
GLuint _vboBoneWeightsID;
QNetworkReply* _voxelReply;
static ProgramObject* _skinProgram;
static ProgramObject _skinProgram;
static int _boneMatricesLocation;
static int _boneIndicesLocation;
static int _boneWeightsLocation;

View file

@ -17,14 +17,14 @@
#include "Avatar.h"
#include "Head.h"
#include "Face.h"
#include "Webcam.h"
#include "renderer/ProgramObject.h"
using namespace cv;
ProgramObject* Face::_videoProgram = 0;
bool Face::_initialized = false;
ProgramObject Face::_videoProgram;
Face::Locations Face::_videoProgramLocations;
ProgramObject* Face::_texturedProgram = 0;
ProgramObject Face::_texturedProgram;
Face::Locations Face::_texturedProgramLocations;
GLuint Face::_vboID;
GLuint Face::_iboID;
@ -293,9 +293,9 @@ bool Face::render(float alpha) {
const int INDICES_PER_TRIANGLE = 3;
const int INDEX_COUNT = QUAD_COUNT * TRIANGLES_PER_QUAD * INDICES_PER_TRIANGLE;
if (_videoProgram == 0) {
_videoProgram = loadProgram(QString(), "colorTexture", _videoProgramLocations);
_texturedProgram = loadProgram("_textured", "permutationNormalTexture", _texturedProgramLocations);
if (!_initialized) {
loadProgram(_videoProgram, QString(), "colorTexture", _videoProgramLocations);
loadProgram(_texturedProgram, "_textured", "permutationNormalTexture", _texturedProgramLocations);
glGenBuffers(1, &_vboID);
glBindBuffer(GL_ARRAY_BUFFER, _vboID);
@ -327,7 +327,8 @@ bool Face::render(float alpha) {
}
glBufferData(GL_ELEMENT_ARRAY_BUFFER, INDEX_COUNT * sizeof(int), indices, GL_STATIC_DRAW);
delete[] indices;
_initialized = true;
} else {
glBindBuffer(GL_ARRAY_BUFFER, _vboID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _iboID);
@ -336,14 +337,14 @@ bool Face::render(float alpha) {
glActiveTexture(GL_TEXTURE1);
ProgramObject* program = _videoProgram;
ProgramObject* program = &_videoProgram;
Locations* locations = &_videoProgramLocations;
if (_colorTextureID != 0) {
glBindTexture(GL_TEXTURE_2D, _colorTextureID);
} else {
glBindTexture(GL_TEXTURE_2D, Application::getInstance()->getTextureCache()->getPermutationNormalTextureID());
program = _texturedProgram;
program = &_texturedProgram;
locations = &_texturedProgramLocations;
}
program->bind();
@ -467,20 +468,17 @@ void Face::destroyCodecs() {
}
}
ProgramObject* Face::loadProgram(const QString& suffix, const char* secondTextureUniform, Locations& locations) {
ProgramObject* program = new ProgramObject();
program->addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/face" + suffix + ".vert");
program->addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/face" + suffix + ".frag");
program->link();
void Face::loadProgram(ProgramObject& program, const QString& suffix, const char* secondTextureUniform, Locations& locations) {
program.addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/face" + suffix + ".vert");
program.addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/face" + suffix + ".frag");
program.link();
program->bind();
program->setUniformValue("depthTexture", 0);
program->setUniformValue(secondTextureUniform, 1);
program->release();
program.bind();
program.setUniformValue("depthTexture", 0);
program.setUniformValue(secondTextureUniform, 1);
program.release();
locations.texCoordCorner = program->uniformLocation("texCoordCorner");
locations.texCoordRight = program->uniformLocation("texCoordRight");
locations.texCoordUp = program->uniformLocation("texCoordUp");
return program;
locations.texCoordCorner = program.uniformLocation("texCoordCorner");
locations.texCoordRight = program.uniformLocation("texCoordRight");
locations.texCoordUp = program.uniformLocation("texCoordUp");
}
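Alongside the guarded init, this commit converts every shared shader program (_perlinModulateProgram, _skinProgram, _videoProgram, _texturedProgram, _irisProgram) from a heap-allocated static pointer to a static by-value ProgramObject, using isLinked() instead of a null check to decide whether the first instance still needs to load the shaders. A condensed sketch of the pattern, with an illustrative class name and a placeholder shader path:

    class Renderable {
    public:
        void init() {
            if (!_program.isLinked()) { // only the first instance loads and links
                _program.addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/example.vert");
                _program.link();
            }
        }
    private:
        static ProgramObject _program; // shared by all instances
    };
    ProgramObject Renderable::_program;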

View file

@ -77,12 +77,14 @@ private:
int texCoordUp;
};
static ProgramObject* loadProgram(const QString& suffix, const char* secondTextureUniform, Locations& locations);
static void loadProgram(ProgramObject& program, const QString& suffix, const char* secondTextureUniform, Locations& locations);
static ProgramObject* _videoProgram;
static bool _initialized;
static ProgramObject _videoProgram;
static Locations _videoProgramLocations;
static ProgramObject* _texturedProgram;
static ProgramObject _texturedProgram;
static Locations _texturedProgramLocations;
static GLuint _vboID;

View file

@ -8,17 +8,22 @@
#ifndef hifi_Hand_h
#define hifi_Hand_h
#include <vector>
#include <QAction>
#include <glm/glm.hpp>
#include <SharedUtil.h>
#include <AvatarData.h>
#include <HandData.h>
#include "Balls.h"
#include "world.h"
#include "InterfaceConfig.h"
#include "SerialInterface.h"
#include "ParticleSystem.h"
#include <SharedUtil.h>
#include <vector>
#include "world.h"
#include "devices/SerialInterface.h"
enum RaveLightsSetting {
RAVE_LIGHTS_AVATAR = 0,

View file

@ -18,7 +18,6 @@
using namespace std;
const int MOHAWK_TRIANGLES = 50;
const bool USING_PHYSICAL_MOHAWK = true;
const float EYE_RIGHT_OFFSET = 0.27f;
const float EYE_UP_OFFSET = 0.36f;
@ -47,7 +46,7 @@ const float IRIS_RADIUS = 0.007;
const float IRIS_PROTRUSION = 0.0145f;
const char IRIS_TEXTURE_FILENAME[] = "resources/images/iris.png";
ProgramObject* Head::_irisProgram = 0;
ProgramObject Head::_irisProgram;
GLuint Head::_irisTextureID;
int Head::_eyePositionLocation;
@ -76,8 +75,7 @@ Head::Head(Avatar* owningAvatar) :
_returnSpringScale(1.0f),
_bodyRotation(0.0f, 0.0f, 0.0f),
_renderLookatVectors(false),
_mohawkTriangleFan(NULL),
_mohawkColors(NULL),
_mohawkInitialized(false),
_saccade(0.0f, 0.0f, 0.0f),
_saccadeTarget(0.0f, 0.0f, 0.0f),
_leftEyeBlink(0.0f),
@ -99,16 +97,15 @@ Head::Head(Avatar* owningAvatar) :
}
void Head::init() {
if (_irisProgram == 0) {
if (!_irisProgram.isLinked()) {
switchToResourcesParentIfRequired();
_irisProgram = new ProgramObject();
_irisProgram->addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/iris.vert");
_irisProgram->addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/iris.frag");
_irisProgram->link();
_irisProgram->setUniformValue("texture", 0);
_eyePositionLocation = _irisProgram->uniformLocation("eyePosition");
_irisProgram.addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/iris.vert");
_irisProgram.addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/iris.frag");
_irisProgram.link();
_irisProgram.setUniformValue("texture", 0);
_eyePositionLocation = _irisProgram.uniformLocation("eyePosition");
QImage image = QImage(IRIS_TEXTURE_FILENAME).convertToFormat(QImage::Format_ARGB32);
glGenTextures(1, &_irisTextureID);
@ -163,65 +160,81 @@ void Head::simulate(float deltaTime, bool isMine, float gyroCameraSensitivity) {
_saccade += (_saccadeTarget - _saccade) * 0.50f;
// Update audio trailing average for rendering facial animations
const float AUDIO_AVERAGING_SECS = 0.05;
_averageLoudness = (1.f - deltaTime / AUDIO_AVERAGING_SECS) * _averageLoudness +
(deltaTime / AUDIO_AVERAGING_SECS) * _audioLoudness;
// Detect transition from talking to not; force blink after that and a delay
bool forceBlink = false;
const float TALKING_LOUDNESS = 100.0f;
const float BLINK_AFTER_TALKING = 0.25f;
if (_averageLoudness > TALKING_LOUDNESS) {
_timeWithoutTalking = 0.0f;
} else if (_timeWithoutTalking < BLINK_AFTER_TALKING && (_timeWithoutTalking += deltaTime) >= BLINK_AFTER_TALKING) {
forceBlink = true;
}
// Update audio attack data for facial animation (eyebrows and mouth)
_audioAttack = 0.9 * _audioAttack + 0.1 * fabs(_audioLoudness - _lastLoudness);
_lastLoudness = _audioLoudness;
const float BROW_LIFT_THRESHOLD = 100;
if (_audioAttack > BROW_LIFT_THRESHOLD)
_browAudioLift += sqrt(_audioAttack) * 0.00005;
Faceshift* faceshift = Application::getInstance()->getFaceshift();
if (isMine && faceshift->isActive()) {
_leftEyeBlink = faceshift->getLeftBlink();
_rightEyeBlink = faceshift->getRightBlink();
// set these values based on how they'll be used. if we use faceshift in the long term, we'll want a complete
// mapping between their blendshape coefficients and our avatar features
const float MOUTH_SIZE_SCALE = 2500.0f;
_averageLoudness = faceshift->getMouthSize() * faceshift->getMouthSize() * MOUTH_SIZE_SCALE;
const float BROW_HEIGHT_SCALE = 0.005f;
_browAudioLift = faceshift->getBrowHeight() * BROW_HEIGHT_SCALE;
float clamp = 0.01;
if (_browAudioLift > clamp) { _browAudioLift = clamp; }
_browAudioLift *= 0.7f;
// update eyelid blinking
const float BLINK_SPEED = 10.0f;
const float FULLY_OPEN = 0.0f;
const float FULLY_CLOSED = 1.0f;
if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
// no blinking when brows are raised; blink less with increasing loudness
const float BASE_BLINK_RATE = 15.0f / 60.0f;
const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(_averageLoudness) *
ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
_leftEyeBlinkVelocity = BLINK_SPEED;
_rightEyeBlinkVelocity = BLINK_SPEED;
}
} else {
_leftEyeBlink = glm::clamp(_leftEyeBlink + _leftEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
_rightEyeBlink = glm::clamp(_rightEyeBlink + _rightEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
const float AUDIO_AVERAGING_SECS = 0.05;
_averageLoudness = (1.f - deltaTime / AUDIO_AVERAGING_SECS) * _averageLoudness +
(deltaTime / AUDIO_AVERAGING_SECS) * _audioLoudness;
if (_leftEyeBlink == FULLY_CLOSED) {
_leftEyeBlinkVelocity = -BLINK_SPEED;
// Detect transition from talking to not; force blink after that and a delay
bool forceBlink = false;
const float TALKING_LOUDNESS = 100.0f;
const float BLINK_AFTER_TALKING = 0.25f;
if (_averageLoudness > TALKING_LOUDNESS) {
_timeWithoutTalking = 0.0f;
} else if (_leftEyeBlink == FULLY_OPEN) {
_leftEyeBlinkVelocity = 0.0f;
} else if (_timeWithoutTalking < BLINK_AFTER_TALKING && (_timeWithoutTalking += deltaTime) >= BLINK_AFTER_TALKING) {
forceBlink = true;
}
if (_rightEyeBlink == FULLY_CLOSED) {
_rightEyeBlinkVelocity = -BLINK_SPEED;
// Update audio attack data for facial animation (eyebrows and mouth)
_audioAttack = 0.9f * _audioAttack + 0.1f * fabs(_audioLoudness - _lastLoudness);
_lastLoudness = _audioLoudness;
} else if (_rightEyeBlink == FULLY_OPEN) {
_rightEyeBlinkVelocity = 0.0f;
const float BROW_LIFT_THRESHOLD = 100.0f;
if (_audioAttack > BROW_LIFT_THRESHOLD) {
_browAudioLift += sqrtf(_audioAttack) * 0.00005f;
}
const float CLAMP = 0.01f;
if (_browAudioLift > CLAMP) {
_browAudioLift = CLAMP;
}
_browAudioLift *= 0.7f;
const float BLINK_SPEED = 10.0f;
const float FULLY_OPEN = 0.0f;
const float FULLY_CLOSED = 1.0f;
if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
// no blinking when brows are raised; blink less with increasing loudness
const float BASE_BLINK_RATE = 15.0f / 60.0f;
const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(_averageLoudness) *
ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
_leftEyeBlinkVelocity = BLINK_SPEED;
_rightEyeBlinkVelocity = BLINK_SPEED;
}
} else {
_leftEyeBlink = glm::clamp(_leftEyeBlink + _leftEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
_rightEyeBlink = glm::clamp(_rightEyeBlink + _rightEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
if (_leftEyeBlink == FULLY_CLOSED) {
_leftEyeBlinkVelocity = -BLINK_SPEED;
} else if (_leftEyeBlink == FULLY_OPEN) {
_leftEyeBlinkVelocity = 0.0f;
}
if (_rightEyeBlink == FULLY_CLOSED) {
_rightEyeBlinkVelocity = -BLINK_SPEED;
} else if (_rightEyeBlink == FULLY_OPEN) {
_rightEyeBlinkVelocity = 0.0f;
}
}
}
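In outline, the reworked simulate() block above now branches once on the tracker; this is a restatement of the hunk, not new behavior:

    if (isMine && faceshift->isActive()) {
        // drive blinks, loudness, and brow lift directly from the tracked
        // blendshape coefficients, scaled by MOUTH_SIZE_SCALE and BROW_HEIGHT_SCALE
    } else {
        // fall back to audio: keep a running loudness average, force a blink
        // shortly after talking stops, and integrate blink velocities
        // between FULLY_OPEN and FULLY_CLOSED
    }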
// based on the nature of the lookat position, determine if the eyes can look / are looking at it.
if (USING_PHYSICAL_MOHAWK) {
updateHairPhysics(deltaTime);
@ -334,9 +347,7 @@ void Head::render(float alpha) {
void Head::setScale (float scale) {
_scale = scale;
delete[] _mohawkTriangleFan;
delete[] _mohawkColors;
createMohawk();
if (USING_PHYSICAL_MOHAWK) {
@ -363,8 +374,6 @@ void Head::createMohawk() {
float height = _scale * (0.08f + randFloat() * 0.05f);
float variance = 0.03 + randFloat() * 0.03f;
const float RAD_PER_TRIANGLE = (2.3f + randFloat() * 0.2f) / (float)MOHAWK_TRIANGLES;
_mohawkTriangleFan = new glm::vec3[MOHAWK_TRIANGLES];
_mohawkColors = new glm::vec3[MOHAWK_TRIANGLES];
_mohawkTriangleFan[0] = glm::vec3(0, 0, 0);
glm::vec3 basicColor(randFloat(), randFloat(), randFloat());
_mohawkColors[0] = basicColor;
@ -382,14 +391,9 @@ void Head::createMohawk() {
void Head::renderMohawk() {
if (!_mohawkTriangleFan) {
if (!_mohawkInitialized) {
createMohawk();
// if we get here and still don't have a mohawk then we don't know who we are
// so return out since we can't render it yet
if (!_mohawkTriangleFan) {
return;
}
_mohawkInitialized = true;
}
if (USING_PHYSICAL_MOHAWK) {
@ -649,7 +653,7 @@ void Head::renderEyeBalls() {
glutSolidSphere(_scale * EYEBALL_RADIUS, 30, 30);
glPopMatrix();
_irisProgram->bind();
_irisProgram.bind();
glBindTexture(GL_TEXTURE_2D, _irisTextureID);
glEnable(GL_TEXTURE_2D);
@ -671,7 +675,7 @@ void Head::renderEyeBalls() {
_scale * IRIS_RADIUS); // flatten the iris
// this ugliness is simply to invert the model transform and get the eye position in model space
_irisProgram->setUniform(_eyePositionLocation, (glm::inverse(rotation) *
_irisProgram.setUniform(_eyePositionLocation, (glm::inverse(rotation) *
(Application::getInstance()->getCamera()->getPosition() - _leftEyePosition) +
glm::vec3(0.0f, 0.0f, _scale * IRIS_PROTRUSION)) * glm::vec3(1.0f / (_scale * IRIS_RADIUS * 2.0f),
1.0f / (_scale * IRIS_RADIUS * 2.0f), 1.0f / (_scale * IRIS_RADIUS)));
@ -695,7 +699,7 @@ void Head::renderEyeBalls() {
_scale * IRIS_RADIUS); // flatten the iris
// this ugliness is simply to invert the model transform and get the eye position in model space
_irisProgram->setUniform(_eyePositionLocation, (glm::inverse(rotation) *
_irisProgram.setUniform(_eyePositionLocation, (glm::inverse(rotation) *
(Application::getInstance()->getCamera()->getPosition() - _rightEyePosition) +
glm::vec3(0.0f, 0.0f, _scale * IRIS_PROTRUSION)) * glm::vec3(1.0f / (_scale * IRIS_RADIUS * 2.0f),
1.0f / (_scale * IRIS_RADIUS * 2.0f), 1.0f / (_scale * IRIS_RADIUS)));
@ -704,7 +708,7 @@ void Head::renderEyeBalls() {
}
glPopMatrix();
_irisProgram->release();
_irisProgram.release();
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);

View file

@ -17,11 +17,11 @@
#include <VoxelConstants.h>
#include "Face.h"
#include "BendyLine.h"
#include "Face.h"
#include "InterfaceConfig.h"
#include "SerialInterface.h"
#include "world.h"
#include "devices/SerialInterface.h"
enum eyeContactTargets {
LEFT_EYE,
@ -29,6 +29,7 @@ enum eyeContactTargets {
MOUTH
};
const int MOHAWK_TRIANGLES = 50;
const int NUM_HAIR_TUFTS = 4;
class Avatar;
@ -71,7 +72,7 @@ public:
Face& getFace() { return _face; }
const bool getReturnToCenter() const { return _returnHeadToCenter; } // Do you want head to try to return to center (depends on interface detected)
float getAverageLoudness() {return _averageLoudness;};
float getAverageLoudness() const { return _averageLoudness; }
glm::vec3 calculateAverageEyePosition() { return _leftEyePosition + (_rightEyePosition - _leftEyePosition ) * ONE_HALF; }
float yawRate;
@ -87,7 +88,7 @@ private:
glm::vec3 right;
glm::vec3 front;
};
float _renderAlpha;
bool _returnHeadToCenter;
glm::vec3 _skinColor;
@ -112,8 +113,9 @@ private:
glm::vec3 _bodyRotation;
bool _renderLookatVectors;
BendyLine _hairTuft[NUM_HAIR_TUFTS];
glm::vec3* _mohawkTriangleFan;
glm::vec3* _mohawkColors;
bool _mohawkInitialized;
glm::vec3 _mohawkTriangleFan[MOHAWK_TRIANGLES];
glm::vec3 _mohawkColors[MOHAWK_TRIANGLES];
glm::vec3 _saccade;
glm::vec3 _saccadeTarget;
float _leftEyeBlink;
@ -128,8 +130,8 @@ private:
bool _cameraFollowsHead;
float _cameraFollowHeadRate;
Face _face;
static ProgramObject* _irisProgram;
static ProgramObject _irisProgram;
static GLuint _irisTextureID;
static int _eyePositionLocation;

View file

@ -6,20 +6,19 @@
// Copyright (c) 2012 High Fidelity, Inc. All rights reserved.
//
#include "MyAvatar.h"
#include <vector>
#include <glm/gtx/vector_angle.hpp>
#include <NodeList.h>
#include <NodeTypes.h>
#include <OculusManager.h>
#include <PacketHeaders.h>
#include <SharedUtil.h>
#include "Application.h"
#include "MyAvatar.h"
#include "Physics.h"
#include "devices/OculusManager.h"
#include "ui/TextRenderer.h"
using namespace std;
@ -333,16 +332,25 @@ void MyAvatar::simulate(float deltaTime, Transmitter* transmitter, float gyroCam
// Update avatar head rotation with sensor data
void MyAvatar::updateFromGyrosAndOrWebcam(bool gyroLook,
float pitchFromTouch) {
Faceshift* faceshift = Application::getInstance()->getFaceshift();
SerialInterface* gyros = Application::getInstance()->getSerialHeadSensor();
Webcam* webcam = Application::getInstance()->getWebcam();
glm::vec3 estimatedPosition, estimatedRotation;
if (gyros->isActive()) {
if (faceshift->isActive()) {
estimatedPosition = faceshift->getHeadTranslation();
estimatedRotation = safeEulerAngles(faceshift->getHeadRotation());
} else if (gyros->isActive()) {
estimatedRotation = gyros->getEstimatedRotation();
} else if (webcam->isActive()) {
estimatedRotation = webcam->getEstimatedRotation();
} else if (_leadingAvatar) {
_head.getFace().clearFrame();
return;
} else {
_head.setMousePitch(pitchFromTouch);
_head.setPitch(pitchFromTouch);

View file

@ -0,0 +1,140 @@
//
// Faceshift.cpp
// interface
//
// Created by Andrzej Kapolka on 9/3/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//
#include <QTimer>
#include "Faceshift.h"
using namespace fs;
using namespace std;
Faceshift::Faceshift() :
_enabled(false),
_eyeGazeLeftPitch(0.0f),
_eyeGazeLeftYaw(0.0f),
_eyeGazeRightPitch(0.0f),
_eyeGazeRightYaw(0.0f),
_leftBlink(0.0f),
_rightBlink(0.0f),
_leftBlinkIndex(-1),
_rightBlinkIndex(-1),
_browHeight(0.0f),
_browUpCenterIndex(-1),
_mouthSize(0.0f),
_jawOpenIndex(-1)
{
connect(&_socket, SIGNAL(connected()), SLOT(noteConnected()));
connect(&_socket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(noteError(QAbstractSocket::SocketError)));
connect(&_socket, SIGNAL(readyRead()), SLOT(readFromSocket()));
}
void Faceshift::reset() {
if (isActive()) {
string message;
fsBinaryStream::encode_message(message, fsMsgCalibrateNeutral());
send(message);
}
}
void Faceshift::setEnabled(bool enabled) {
if ((_enabled = enabled)) {
connectSocket();
} else {
_socket.disconnectFromHost();
}
}
void Faceshift::connectSocket() {
if (_enabled) {
qDebug("Faceshift: Connecting...\n");
const quint16 FACESHIFT_PORT = 33433;
_socket.connectToHost("localhost", FACESHIFT_PORT);
}
}
void Faceshift::noteConnected() {
qDebug("Faceshift: Connected.\n");
// request the list of blendshape names
string message;
fsBinaryStream::encode_message(message, fsMsgSendBlendshapeNames());
send(message);
}
void Faceshift::noteError(QAbstractSocket::SocketError error) {
qDebug() << "Faceshift: " << _socket.errorString() << "\n";
// reconnect after a delay
if (_enabled) {
QTimer::singleShot(1000, this, SLOT(connectSocket()));
}
}
void Faceshift::readFromSocket() {
QByteArray buffer = _socket.readAll();
_stream.received(buffer.size(), buffer.constData());
for (fsMsgPtr msg; (msg = _stream.get_message()); ) {
switch (msg->id()) {
case fsMsg::MSG_OUT_TRACKING_STATE: {
const fsTrackingData& data = static_cast<fsMsgTrackingState*>(msg.get())->tracking_data();
if (data.m_trackingSuccessful) {
_headRotation = glm::quat(data.m_headRotation.w, -data.m_headRotation.x,
data.m_headRotation.y, -data.m_headRotation.z);
const float TRANSLATION_SCALE = 0.02f;
_headTranslation = glm::vec3(data.m_headTranslation.x, data.m_headTranslation.y,
-data.m_headTranslation.z) * TRANSLATION_SCALE;
_eyeGazeLeftPitch = -data.m_eyeGazeLeftPitch;
_eyeGazeLeftYaw = data.m_eyeGazeLeftYaw;
_eyeGazeRightPitch = -data.m_eyeGazeRightPitch;
_eyeGazeRightYaw = data.m_eyeGazeRightYaw;
if (_leftBlinkIndex != -1) {
_leftBlink = data.m_coeffs[_leftBlinkIndex];
}
if (_rightBlinkIndex != -1) {
_rightBlink = data.m_coeffs[_rightBlinkIndex];
}
if (_browUpCenterIndex != -1) {
_browHeight = data.m_coeffs[_browUpCenterIndex];
}
if (_jawOpenIndex != -1) {
_mouthSize = data.m_coeffs[_jawOpenIndex];
}
}
break;
}
case fsMsg::MSG_OUT_BLENDSHAPE_NAMES: {
const vector<string>& names = static_cast<fsMsgBlendshapeNames*>(msg.get())->blendshape_names();
for (int i = 0; i < (int)names.size(); i++) {
if (names[i] == "EyeBlink_L") {
_leftBlinkIndex = i;
} else if (names[i] == "EyeBlink_R") {
_rightBlinkIndex = i;
} else if (names[i] == "BrowsU_C") {
_browUpCenterIndex = i;
} else if (names[i] == "JawOpen") {
_jawOpenIndex = i;
}
}
break;
}
default:
break;
}
}
}
void Faceshift::send(const std::string& message) {
_socket.write(message.data(), message.size());
}
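For context, the MyAvatar hunk earlier in this commit is the main consumer of this class; a condensed sketch of that per-frame call site:

    Faceshift* faceshift = Application::getInstance()->getFaceshift();
    if (faceshift->isActive()) { // true while the TCP socket is connected
        glm::vec3 estimatedPosition = faceshift->getHeadTranslation();
        glm::vec3 estimatedRotation = safeEulerAngles(faceshift->getHeadRotation());
    }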

View file

@ -0,0 +1,90 @@
//
// Faceshift.h
// interface
//
// Created by Andrzej Kapolka on 9/3/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//
#ifndef __interface__Faceshift__
#define __interface__Faceshift__
#include <QTcpSocket>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <fsbinarystream.h>
/// Handles interaction with the Faceshift software, which provides head position/orientation and facial features.
class Faceshift : public QObject {
Q_OBJECT
public:
Faceshift();
bool isActive() const { return _socket.state() == QAbstractSocket::ConnectedState; }
const glm::quat& getHeadRotation() const { return _headRotation; }
const glm::vec3& getHeadTranslation() const { return _headTranslation; }
float getEyeGazeLeftPitch() const { return _eyeGazeLeftPitch; }
float getEyeGazeLeftYaw() const { return _eyeGazeLeftYaw; }
float getEyeGazeRightPitch() const { return _eyeGazeRightPitch; }
float getEyeGazeRightYaw() const { return _eyeGazeRightYaw; }
float getLeftBlink() const { return _leftBlink; }
float getRightBlink() const { return _rightBlink; }
float getBrowHeight() const { return _browHeight; }
float getMouthSize() const { return _mouthSize; }
void reset();
public slots:
void setEnabled(bool enabled);
private slots:
void connectSocket();
void noteConnected();
void noteError(QAbstractSocket::SocketError error);
void readFromSocket();
private:
void send(const std::string& message);
QTcpSocket _socket;
fs::fsBinaryStream _stream;
bool _enabled;
glm::quat _headRotation;
glm::vec3 _headTranslation;
float _eyeGazeLeftPitch;
float _eyeGazeLeftYaw;
float _eyeGazeRightPitch;
float _eyeGazeRightYaw;
float _leftBlink;
float _rightBlink;
int _leftBlinkIndex;
int _rightBlinkIndex;
float _browHeight;
int _browUpCenterIndex;
float _mouthSize;
int _jawOpenIndex;
};
#endif /* defined(__interface__Faceshift__) */

View file

@ -29,6 +29,12 @@ Transmitter::Transmitter() :
}
Transmitter::~Transmitter() {
if (_lastReceivedPacket) {
delete _lastReceivedPacket;
}
}
void Transmitter::checkForLostTransmitter() {
// If we are in motion, check for loss of transmitter packets
if (glm::length(_estimatedRotation) > 0.f) {

View file

@ -25,6 +25,7 @@ class Transmitter
{
public:
Transmitter();
~Transmitter();
void render();
void checkForLostTransmitter();
void resetLevels();

View file

@ -30,6 +30,7 @@ using namespace xn;
// register types with Qt metatype system
int jointVectorMetaType = qRegisterMetaType<JointVector>("JointVector");
int keyPointVectorMetaType = qRegisterMetaType<KeyPointVector>("KeyPointVector");
int matMetaType = qRegisterMetaType<Mat>("cv::Mat");
int rotatedRectMetaType = qRegisterMetaType<RotatedRect>("cv::RotatedRect");
@ -63,6 +64,7 @@ const float UNINITIALIZED_FACE_DEPTH = 0.0f;
void Webcam::reset() {
_initialFaceRect = RotatedRect();
_initialFaceDepth = UNINITIALIZED_FACE_DEPTH;
_initialLEDPosition = glm::vec3();
if (_enabled) {
// send a message to the grabber
@ -140,6 +142,14 @@ void Webcam::renderPreview(int screenWidth, int screenHeight) {
glVertex2f(left + facePoints[3].x * xScale, top + facePoints[3].y * yScale);
glEnd();
glColor3f(0.0f, 1.0f, 0.0f);
glLineWidth(3.0f);
for (KeyPointVector::iterator it = _keyPoints.begin(); it != _keyPoints.end(); it++) {
renderCircle(glm::vec3(left + it->pt.x * xScale, top + it->pt.y * yScale, 0.0f),
it->size * 0.5f, glm::vec3(0.0f, 0.0f, 1.0f), 8);
}
glLineWidth(1.0f);
const int MAX_FPS_CHARACTERS = 30;
char fps[MAX_FPS_CHARACTERS];
sprintf(fps, "FPS: %d", (int)(roundf(_frameCount * 1000000.0f / (usecTimestampNow() - _startTimestamp))));
@ -155,10 +165,80 @@ Webcam::~Webcam() {
delete _grabber;
}
static glm::vec3 createVec3(const Point2f& pt) {
return glm::vec3(pt.x, -pt.y, 0.0f);
}
static glm::mat3 createMat3(const glm::vec3& p0, const glm::vec3& p1, const glm::vec3& p2) {
glm::vec3 u = glm::normalize(p1 - p0);
glm::vec3 p02 = p2 - p0;
glm::vec3 v = glm::normalize(p02 - u * glm::dot(p02, u));
return glm::mat3(u, v, glm::cross(u, v));
}
/// Computes the 3D transform of the LED assembly from the image space location of the key points representing the LEDs.
/// See T.D. Alter's "3D Pose from 3 Corresponding Points under Weak-Perspective Projection"
/// (http://dspace.mit.edu/bitstream/handle/1721.1/6611/AIM-1378.pdf) and the source code to Freetrack
/// (https://camil.dyndns.org/svn/freetrack/tags/V2.2/Freetrack/Pose.pas), which uses the same algorithm.
static float computeTransformFromKeyPoints(const KeyPointVector& keyPoints, glm::quat& rotation, glm::vec3& position) {
// make sure we have at least three points
if (keyPoints.size() < 3) {
return 0.0f;
}
// bubblesort the first three points from top (greatest) to bottom (least)
glm::vec3 i0 = createVec3(keyPoints[0].pt), i1 = createVec3(keyPoints[1].pt), i2 = createVec3(keyPoints[2].pt);
if (i1.y > i0.y) {
swap(i0, i1);
}
if (i2.y > i1.y) {
swap(i1, i2);
}
if (i1.y > i0.y) {
swap(i0, i1);
}
// model space LED locations and the distances between them
const glm::vec3 M0(2.0f, 0.0f, 0.0f), M1(0.0f, 0.0f, 0.0f), M2(0.0f, -4.0f, 0.0f);
const float R01 = glm::distance(M0, M1), R02 = glm::distance(M0, M2), R12 = glm::distance(M1, M2);
// compute the distances between the image points
float d01 = glm::distance(i0, i1), d02 = glm::distance(i0, i2), d12 = glm::distance(i1, i2);
// compute the terms of the quadratic
float a = (R01 + R02 + R12) * (-R01 + R02 + R12) * (R01 - R02 + R12) * (R01 + R02 - R12);
float b = d01 * d01 * (-R01 * R01 + R02 * R02 + R12 * R12) + d02 * d02 * (R01 * R01 - R02 * R02 + R12 * R12) +
d12 * d12 * (R01 * R01 + R02 * R02 - R12 * R12);
float c = (d01 + d02 + d12) * (-d01 + d02 + d12) * (d01 - d02 + d12) * (d01 + d02 - d12);
// compute the scale
float s = sqrtf((b + sqrtf(b * b - a * c)) / a);
float sigma = (d01 * d01 + d02 * d02 - d12 * d12 <= s * s * (R01 * R01 + R02 * R02 - R12 * R12)) ? 1.0f : -1.0f;
float h1 = sqrtf(s * s * R01 * R01 - d01 * d01);
float h2 = sigma * sqrtf(s * s * R02 * R02 - d02 * d02);
// now we can compute the 3D locations of the model points in camera-centered coordinates
glm::vec3 m0 = glm::vec3(i0.x, i0.y, 0.0f) / s;
glm::vec3 m1 = glm::vec3(i1.x, i1.y, h1) / s;
glm::vec3 m2 = glm::vec3(i2.x, i2.y, h2) / s;
// from those and the model space locations, we can compute the transform
glm::mat3 r1 = createMat3(M0, M1, M2);
glm::mat3 r2 = createMat3(m0, m1, m2);
glm::mat3 r = r2 * glm::transpose(r1);
position = m0 - r * M0;
rotation = glm::quat_cast(r);
return s;
}
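A hypothetical sanity check for computeTransformFromKeyPoints, usable if dropped into this file: feeding it an unrotated, unit-scale projection of the model points themselves (remembering that createVec3 negates image y) should recover the identity transform:

    KeyPointVector keyPoints(3);
    keyPoints[0].pt = cv::Point2f(2.0f, 0.0f); // image of M0(2, 0, 0)
    keyPoints[1].pt = cv::Point2f(0.0f, 0.0f); // image of M1(0, 0, 0)
    keyPoints[2].pt = cv::Point2f(0.0f, 4.0f); // image of M2(0, -4, 0)
    glm::quat rotation;
    glm::vec3 position;
    float scale = computeTransformFromKeyPoints(keyPoints, rotation, position);
    // expect scale == 1, rotation == identity, position == (0, 0, 0);
    // for this input a, b, and c all equal 256, so the quadratic gives s = 1 exactly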
const float METERS_PER_MM = 1.0f / 1000.0f;
void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midFaceDepth,
float aspectRatio, const RotatedRect& faceRect, bool sending, const JointVector& joints) {
void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midFaceDepth, float aspectRatio,
const RotatedRect& faceRect, bool sending, const JointVector& joints, const KeyPointVector& keyPoints) {
if (!_enabled) {
return; // was queued before we shut down; ignore
}
@ -210,6 +290,7 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
_faceRect = faceRect;
_sending = sending;
_joints = _skeletonTrackingOn ? joints : JointVector();
_keyPoints = keyPoints;
_frameCount++;
const int MAX_FPS = 60;
@ -248,6 +329,31 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
_estimatedRotation = safeEulerAngles(_estimatedJoints[AVATAR_JOINT_HEAD_BASE].rotation);
_estimatedPosition = _estimatedJoints[AVATAR_JOINT_HEAD_BASE].position;
} else if (!keyPoints.empty()) {
glm::quat rotation;
glm::vec3 position;
float scale = computeTransformFromKeyPoints(keyPoints, rotation, position);
if (scale > 0.0f) {
if (_initialLEDPosition == glm::vec3()) {
_initialLEDPosition = position;
_estimatedPosition = glm::vec3();
_initialLEDRotation = rotation;
_estimatedRotation = glm::vec3();
_initialLEDScale = scale;
} else {
const float Z_SCALE = 5.0f;
position.z += (_initialLEDScale / scale - 1.0f) * Z_SCALE;
const float POSITION_SMOOTHING = 0.5f;
_estimatedPosition = glm::mix(position - _initialLEDPosition, _estimatedPosition, POSITION_SMOOTHING);
const float ROTATION_SMOOTHING = 0.5f;
glm::vec3 eulers = safeEulerAngles(rotation * glm::inverse(_initialLEDRotation));
eulers.y = -eulers.y;
eulers.z = -eulers.z;
_estimatedRotation = glm::mix(eulers, _estimatedRotation, ROTATION_SMOOTHING);
}
}
} else {
// roll is just the angle of the face rect
const float ROTATION_SMOOTHING = 0.95f;
@ -285,8 +391,21 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
QTimer::singleShot(qMax((int)remaining / 1000, 0), _grabber, SLOT(grabFrame()));
}
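The LED branch above follows a calibrate-then-smooth pattern: the first successful solve defines the neutral pose, and each later solve is blended into the running estimate. Schematically, using the names from the hunk:

    if (_initialLEDPosition == glm::vec3()) {
        _initialLEDPosition = position; // first good frame becomes the neutral pose
    } else {
        const float POSITION_SMOOTHING = 0.5f; // 0 = raw samples, 1 = frozen estimate
        _estimatedPosition = glm::mix(position - _initialLEDPosition, _estimatedPosition, POSITION_SMOOTHING);
    }

Note that depth is not solved directly: the change in recovered scale relative to _initialLEDScale, multiplied by Z_SCALE, stands in for movement along z.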
FrameGrabber::FrameGrabber() : _initialized(false), _videoSendMode(FULL_FRAME_VIDEO), _depthOnly(false), _capture(0),
_searchWindow(0, 0, 0, 0), _smoothedMidFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(), _frameCount(0) {
static SimpleBlobDetector::Params createBlobDetectorParams() {
SimpleBlobDetector::Params params;
params.blobColor = 255;
params.filterByArea = true;
params.minArea = 4;
params.maxArea = 5000;
params.filterByCircularity = false;
params.filterByInertia = false;
params.filterByConvexity = false;
return params;
}
FrameGrabber::FrameGrabber() : _initialized(false), _videoSendMode(FULL_FRAME_VIDEO), _depthOnly(false), _ledTrackingOn(false),
_capture(0), _searchWindow(0, 0, 0, 0), _smoothedMidFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(),
_frameCount(0), _blobDetector(createBlobDetectorParams()) {
}
FrameGrabber::~FrameGrabber() {
@ -391,6 +510,11 @@ void FrameGrabber::setDepthOnly(bool depthOnly) {
destroyCodecs();
}
void FrameGrabber::setLEDTrackingOn(bool ledTrackingOn) {
_ledTrackingOn = ledTrackingOn;
configureCapture();
}
void FrameGrabber::reset() {
_searchWindow = cv::Rect(0, 0, 0, 0);
@ -494,7 +618,7 @@ void FrameGrabber::grabFrame() {
float depthBitrateMultiplier = 1.0f;
Mat faceTransform;
float aspectRatio;
if (_videoSendMode == FULL_FRAME_VIDEO) {
if (_ledTrackingOn || _videoSendMode == FULL_FRAME_VIDEO) {
// no need to find the face if we're sending full frame video or tracking LEDs
_smoothedFaceRect = RotatedRect(Point2f(color.cols / 2.0f, color.rows / 2.0f), Size2f(color.cols, color.rows), 0.0f);
encodedWidth = color.cols;
@ -568,6 +692,21 @@ void FrameGrabber::grabFrame() {
aspectRatio = _smoothedFaceRect.size.width / _smoothedFaceRect.size.height;
}
KeyPointVector keyPoints;
if (_ledTrackingOn) {
// convert to grayscale
cvtColor(color, _grayFrame, format == GL_RGB ? CV_RGB2GRAY : CV_BGR2GRAY);
// apply threshold
threshold(_grayFrame, _grayFrame, 28.0, 255.0, THRESH_BINARY);
// convert back so that we can see
cvtColor(_grayFrame, color, format == GL_RGB ? CV_GRAY2RGB : CV_GRAY2BGR);
// find the locations of the LEDs, which should show up as blobs
_blobDetector.detect(_grayFrame, keyPoints);
}
const ushort ELEVEN_BIT_MINIMUM = 0;
const uchar EIGHT_BIT_MIDPOINT = 128;
double depthOffset;
@ -616,7 +755,7 @@ void FrameGrabber::grabFrame() {
_frameCount++;
QByteArray payload;
if (_videoSendMode != NO_VIDEO) {
if (!_ledTrackingOn && _videoSendMode != NO_VIDEO) {
// start the payload off with the aspect ratio (zero for full frame)
payload.append((const char*)&aspectRatio, sizeof(float));
@ -790,7 +929,7 @@ void FrameGrabber::grabFrame() {
QMetaObject::invokeMethod(Application::getInstance()->getWebcam(), "setFrame",
Q_ARG(cv::Mat, color), Q_ARG(int, format), Q_ARG(cv::Mat, _grayDepthFrame), Q_ARG(float, _smoothedMidFaceDepth),
Q_ARG(float, aspectRatio), Q_ARG(cv::RotatedRect, _smoothedFaceRect), Q_ARG(bool, !payload.isEmpty()),
Q_ARG(JointVector, joints));
Q_ARG(JointVector, joints), Q_ARG(KeyPointVector, keyPoints));
}
bool FrameGrabber::init() {
@ -840,18 +979,28 @@ bool FrameGrabber::init() {
cvSetCaptureProperty(_capture, CV_CAP_PROP_FRAME_WIDTH, IDEAL_FRAME_WIDTH);
cvSetCaptureProperty(_capture, CV_CAP_PROP_FRAME_HEIGHT, IDEAL_FRAME_HEIGHT);
configureCapture();
return true;
}
void FrameGrabber::configureCapture() {
#ifdef HAVE_OPENNI
if (_depthGenerator.IsValid()) {
return; // don't bother handling LED tracking with depth camera
}
#endif
#ifdef __APPLE__
configureCamera(0x5ac, 0x8510, false, 0.975, 0.5, 1.0, 0.5, true, 0.5);
configureCamera(0x5ac, 0x8510, false, _ledTrackingOn ? 1.0 : 0.975, 0.5, 1.0, 0.5, true, 0.5);
#else
cvSetCaptureProperty(_capture, CV_CAP_PROP_EXPOSURE, 0.5);
cvSetCaptureProperty(_capture, CV_CAP_PROP_CONTRAST, 0.5);
cvSetCaptureProperty(_capture, CV_CAP_PROP_CONTRAST, _ledTrackingOn ? 1.0 : 0.5);
cvSetCaptureProperty(_capture, CV_CAP_PROP_SATURATION, 0.5);
cvSetCaptureProperty(_capture, CV_CAP_PROP_BRIGHTNESS, 0.5);
cvSetCaptureProperty(_capture, CV_CAP_PROP_BRIGHTNESS, _ledTrackingOn ? 0.0 : 0.5);
cvSetCaptureProperty(_capture, CV_CAP_PROP_HUE, 0.5);
cvSetCaptureProperty(_capture, CV_CAP_PROP_GAIN, 0.5);
#endif
return true;
}
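The capture settings and the grabFrame() changes work together: with contrast at maximum and brightness at zero, the LEDs become effectively the only bright pixels, so a fixed threshold plus blob detection finds them. Condensed from the hunks above:

    cvtColor(color, grayFrame, format == GL_RGB ? CV_RGB2GRAY : CV_BGR2GRAY);
    threshold(grayFrame, grayFrame, 28.0, 255.0, THRESH_BINARY); // LEDs survive, the scene goes black
    blobDetector.detect(grayFrame, keyPoints); // one KeyPoint per visible LED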
void FrameGrabber::updateHSVFrame(const Mat& frame, int format) {

View file

@ -10,7 +10,6 @@
#define __interface__Webcam__
#include <QMetaType>
#include <QObject>
#include <QThread>
#include <QVector>
@ -35,7 +34,9 @@ class FrameGrabber;
class Joint;
typedef QVector<Joint> JointVector;
typedef std::vector<cv::KeyPoint> KeyPointVector;
/// Handles interaction with the webcam (including depth cameras such as the Kinect).
class Webcam : public QObject {
Q_OBJECT
@ -68,8 +69,8 @@ public:
public slots:
void setEnabled(bool enabled);
void setFrame(const cv::Mat& color, int format, const cv::Mat& depth, float midFaceDepth,
float aspectRatio, const cv::RotatedRect& faceRect, bool sending, const JointVector& joints);
void setFrame(const cv::Mat& color, int format, const cv::Mat& depth, float midFaceDepth, float aspectRatio,
const cv::RotatedRect& faceRect, bool sending, const JointVector& joints, const KeyPointVector& keyPoints);
void setSkeletonTrackingOn(bool toggle) { _skeletonTrackingOn = toggle; };
private:
@ -88,6 +89,11 @@ private:
cv::RotatedRect _initialFaceRect;
float _initialFaceDepth;
JointVector _joints;
KeyPointVector _keyPoints;
glm::quat _initialLEDRotation;
glm::vec3 _initialLEDPosition;
float _initialLEDScale;
uint64_t _startTimestamp;
int _frameCount;
@ -101,6 +107,7 @@ private:
bool _skeletonTrackingOn;
};
/// Acquires and processes video frames in a dedicated thread.
class FrameGrabber : public QObject {
Q_OBJECT
@ -113,6 +120,7 @@ public slots:
void cycleVideoSendMode();
void setDepthOnly(bool depthOnly);
void setLEDTrackingOn(bool ledTrackingOn);
void reset();
void shutdown();
void grabFrame();
@ -124,10 +132,12 @@ private:
bool init();
void updateHSVFrame(const cv::Mat& frame, int format);
void destroyCodecs();
void configureCapture();
bool _initialized;
VideoSendMode _videoSendMode;
bool _depthOnly;
bool _ledTrackingOn;
CvCapture* _capture;
cv::CascadeClassifier _faceCascade;
cv::Mat _hsvFrame;
@ -147,6 +157,9 @@ private:
QByteArray _encodedFace;
cv::RotatedRect _smoothedFaceRect;
cv::SimpleBlobDetector _blobDetector;
cv::Mat _grayFrame;
#ifdef HAVE_OPENNI
xn::Context _xnContext;
xn::DepthGenerator _depthGenerator;
@ -158,6 +171,7 @@ private:
#endif
};
/// Contains the 3D transform and 2D projected position of a tracked joint.
class Joint {
public:
@ -171,6 +185,7 @@ public:
};
Q_DECLARE_METATYPE(JointVector)
Q_DECLARE_METATYPE(KeyPointVector)
Q_DECLARE_METATYPE(cv::Mat)
Q_DECLARE_METATYPE(cv::RotatedRect)

View file

@ -15,13 +15,28 @@
#include "ProgramObject.h"
#include "RenderUtil.h"
GlowEffect::GlowEffect() : _renderMode(DIFFUSE_ADD_MODE), _isOddFrame(false), _intensity(0.0f) {
GlowEffect::GlowEffect()
: _initialized(false),
_renderMode(DIFFUSE_ADD_MODE),
_isOddFrame(false),
_intensity(0.0f) {
}
GlowEffect::~GlowEffect() {
if (_initialized) {
delete _addProgram;
delete _horizontalBlurProgram;
delete _verticalBlurAddProgram;
delete _verticalBlurProgram;
delete _addSeparateProgram;
delete _diffuseProgram;
}
}
QOpenGLFramebufferObject* GlowEffect::getFreeFramebufferObject() const {
return (_renderMode == DIFFUSE_ADD_MODE && !_isOddFrame) ?
Application::getInstance()->getTextureCache()->getTertiaryFramebufferObject() :
Application::getInstance()->getTextureCache()->getSecondaryFramebufferObject();
Application::getInstance()->getTextureCache()->getTertiaryFramebufferObject() :
Application::getInstance()->getTextureCache()->getSecondaryFramebufferObject();
}
static ProgramObject* createProgram(const QString& name) {
@ -37,6 +52,11 @@ static ProgramObject* createProgram(const QString& name) {
}
void GlowEffect::init() {
if (_initialized) {
qDebug("[ERROR] GlowEffeect is already initialized.\n");
return;
}
switchToResourcesParentIfRequired();
_addProgram = createProgram("glow_add");
@ -59,6 +79,8 @@ void GlowEffect::init() {
_diffuseProgram->release();
_diffusionScaleLocation = _diffuseProgram->uniformLocation("diffusionScale");
_initialized = true;
}
void GlowEffect::prepare() {

View file

@ -21,8 +21,8 @@ class GlowEffect : public QObject {
Q_OBJECT
public:
GlowEffect();
~GlowEffect();
/// Returns a pointer to the framebuffer object that the glow effect is *not* using for persistent state
/// (either the secondary or the tertiary).
@ -53,6 +53,8 @@ private:
enum RenderMode { ADD_MODE, BLUR_ADD_MODE, BLUR_PERSIST_ADD_MODE, DIFFUSE_ADD_MODE, RENDER_MODE_COUNT };
bool _initialized;
RenderMode _renderMode;
ProgramObject* _addProgram;
ProgramObject* _horizontalBlurProgram;

View file

@ -49,9 +49,9 @@ private:
QOpenGLFramebufferObject* createFramebufferObject();
GLuint _permutationNormalTextureID;
QOpenGLFramebufferObject* _primaryFramebufferObject;
GLuint _primaryDepthTextureID;
QOpenGLFramebufferObject* _primaryFramebufferObject;
QOpenGLFramebufferObject* _secondaryFramebufferObject;
QOpenGLFramebufferObject* _tertiaryFramebufferObject;
};

View file

@ -70,6 +70,10 @@ namespace starfield {
_renderer(0l) {
}
~Controller() {
delete _renderer;
}
#if !STARFIELD_MULTITHREADING
#define lock
#define _(x)

View file

@ -38,6 +38,12 @@ BandwidthDialog::BandwidthDialog(QWidget* parent, BandwidthMeter* model) :
}
}
BandwidthDialog::~BandwidthDialog() {
for (int i = 0; i < BandwidthMeter::N_STREAMS; ++i) {
delete _labels[i];
}
}
void BandwidthDialog::paintEvent(QPaintEvent* event) {
// Update labels

View file

@ -18,9 +18,9 @@
class BandwidthDialog : public QDialog {
Q_OBJECT
public:
// Sets up the UI based on the configuration of the BandwidthMeter
BandwidthDialog(QWidget* parent, BandwidthMeter* model);
~BandwidthDialog();
signals:

View file

@ -30,7 +30,7 @@ VoxelStatsDialog::VoxelStatsDialog(QWidget* parent, VoxelSceneStats* model) :
this->QDialog::setLayout(form);
// Setup labels
for (int i = 0; i < (int)VoxelSceneStats::ITEM_COUNT; i++) {
for (int i = 0; i < VoxelSceneStats::ITEM_COUNT; i++) {
VoxelSceneStats::Item item = (VoxelSceneStats::Item)(i);
VoxelSceneStats::ItemInfo& itemInfo = _model->getItemInfo(item);
QLabel* label = _labels[item] = new QLabel();
@ -53,11 +53,17 @@ VoxelStatsDialog::VoxelStatsDialog(QWidget* parent, VoxelSceneStats* model) :
}
}
VoxelStatsDialog::~VoxelStatsDialog() {
for (int i = 0; i < VoxelSceneStats::ITEM_COUNT; ++i) {
delete _labels[i];
}
}
void VoxelStatsDialog::paintEvent(QPaintEvent* event) {
// Update labels
char strBuf[256];
for (int i = 0; i < (int)VoxelSceneStats::ITEM_COUNT; i++) {
for (int i = 0; i < VoxelSceneStats::ITEM_COUNT; i++) {
VoxelSceneStats::Item item = (VoxelSceneStats::Item)(i);
QLabel* label = _labels[item];
snprintf(strBuf, sizeof(strBuf), "%s", _model->getItemValue(item));

View file

@ -19,6 +19,7 @@ class VoxelStatsDialog : public QDialog {
public:
// Sets up the UI
VoxelStatsDialog(QWidget* parent, VoxelSceneStats* model);
~VoxelStatsDialog();
signals:
void closed();

View file

@ -165,6 +165,8 @@ int AvatarData::getBroadcastData(unsigned char* destinationBuffer) {
destinationBuffer += packFloatRatioToTwoByte(destinationBuffer, _cameraAspectRatio);
destinationBuffer += packClipValueToTwoByte(destinationBuffer, _cameraNearClip);
destinationBuffer += packClipValueToTwoByte(destinationBuffer, _cameraFarClip);
memcpy(destinationBuffer, &_cameraEyeOffsetPosition, sizeof(_cameraEyeOffsetPosition));
destinationBuffer += sizeof(_cameraEyeOffsetPosition);
// chat message
*destinationBuffer++ = _chatMessage.size();
@ -274,6 +276,8 @@ int AvatarData::parseData(unsigned char* sourceBuffer, int numBytes) {
sourceBuffer += unpackFloatRatioFromTwoByte(sourceBuffer,_cameraAspectRatio);
sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer,_cameraNearClip);
sourceBuffer += unpackClipValueFromTwoByte(sourceBuffer,_cameraFarClip);
memcpy(&_cameraEyeOffsetPosition, sourceBuffer, sizeof(_cameraEyeOffsetPosition));
sourceBuffer += sizeof(_cameraEyeOffsetPosition);
// the rest is a chat message
int chatMessageSize = *sourceBuffer++;
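The new eye-offset field must be appended and consumed in the same order on both ends, and because it changes the wire size of head data, the PACKET_TYPE_HEAD_DATA version is bumped from 4 to 5 in the PacketHeaders hunk below. A hypothetical helper pair capturing the memcpy-and-advance idiom used here:

    #include <cstring>

    template<typename T>
    unsigned char* packValue(unsigned char* buffer, const T& value) {
        memcpy(buffer, &value, sizeof(value)); // append the raw bytes
        return buffer + sizeof(value);
    }

    template<typename T>
    unsigned char* unpackValue(unsigned char* buffer, T& value) {
        memcpy(&value, buffer, sizeof(value)); // read them back in the same order
        return buffer + sizeof(value);
    }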

View file

@ -73,21 +73,22 @@ public:
void setBodyYaw(float bodyYaw) { _bodyYaw = bodyYaw; }
float getBodyPitch() const { return _bodyPitch; }
void setBodyPitch(float bodyPitch) { _bodyPitch = bodyPitch; }
float getBodyRoll() const {return _bodyRoll; }
float getBodyRoll() const { return _bodyRoll; }
void setBodyRoll(float bodyRoll) { _bodyRoll = bodyRoll; }
// Hand State
void setHandState(char s) { _handState = s; };
char getHandState() const {return _handState; };
void setHandState(char s) { _handState = s; }
char getHandState() const { return _handState; }
// getters for camera details
const glm::vec3& getCameraPosition() const { return _cameraPosition; };
const glm::vec3& getCameraPosition() const { return _cameraPosition; }
const glm::quat& getCameraOrientation() const { return _cameraOrientation; }
float getCameraFov() const { return _cameraFov; }
float getCameraAspectRatio() const { return _cameraAspectRatio; }
float getCameraNearClip() const { return _cameraNearClip; }
float getCameraFarClip() const { return _cameraFarClip; }
const glm::vec3& getCameraEyeOffsetPosition() const { return _cameraEyeOffsetPosition; }
glm::vec3 calculateCameraDirection() const;
@ -98,6 +99,7 @@ public:
void setCameraAspectRatio(float aspectRatio) { _cameraAspectRatio = aspectRatio; }
void setCameraNearClip(float nearClip) { _cameraNearClip = nearClip; }
void setCameraFarClip(float farClip) { _cameraFarClip = farClip; }
void setCameraEyeOffsetPosition(const glm::vec3& eyeOffsetPosition) { _cameraEyeOffsetPosition = eyeOffsetPosition; }
// key state
void setKeyState(KeyState s) { _keyState = s; }
@ -151,6 +153,7 @@ protected:
float _cameraAspectRatio;
float _cameraNearClip;
float _cameraFarClip;
glm::vec3 _cameraEyeOffsetPosition;
// key state
KeyState _keyState;

View file

@ -32,12 +32,12 @@ public:
NetworkPacket(sockaddr& address, unsigned char* packetData, ssize_t packetLength);
sockaddr& getAddress() { return _address; };
ssize_t getLength() const { return _packetLength; };
unsigned char* getData() { return &_packetData[0]; };
sockaddr& getAddress() { return _address; }
ssize_t getLength() const { return _packetLength; }
unsigned char* getData() { return &_packetData[0]; }
const sockaddr& getAddress() const { return _address; };
const unsigned char* getData() const { return &_packetData[0]; };
const sockaddr& getAddress() const { return _address; }
const unsigned char* getData() const { return &_packetData[0]; }
private:
void copyContents(const sockaddr& address, const unsigned char* packetData, ssize_t packetLength);

View file

@ -58,15 +58,15 @@ public:
NodeData* getLinkedData() const { return _linkedData; }
void setLinkedData(NodeData* linkedData) { _linkedData = linkedData; }
bool isAlive() const { return _isAlive; };
void setAlive(bool isAlive) { _isAlive = isAlive; };
bool isAlive() const { return _isAlive; }
void setAlive(bool isAlive) { _isAlive = isAlive; }
void recordBytesReceived(int bytesReceived);
float getAverageKilobitsPerSecond();
float getAveragePacketsPerSecond();
int getPingMs() const { return _pingMs; };
void setPingMs(int pingMs) { _pingMs = pingMs; };
int getPingMs() const { return _pingMs; }
void setPingMs(int pingMs) { _pingMs = pingMs; }
void lock() { pthread_mutex_lock(&_mutex); }
void unlock() { pthread_mutex_unlock(&_mutex); }

View file

@ -69,7 +69,7 @@ public:
NODE_TYPE getOwnerType() const { return _ownerType; }
void setOwnerType(NODE_TYPE ownerType) { _ownerType = ownerType; }
const char* getDomainHostname() const { return _domainHostname; };
const char* getDomainHostname() const { return _domainHostname; }
void setDomainHostname(const char* domainHostname);
void setDomainIP(const char* domainIP);
@ -83,7 +83,7 @@ public:
UDPSocket* getNodeSocket() { return &_nodeSocket; }
unsigned short int getSocketListenPort() const { return _nodeSocket.getListeningPort(); };
unsigned short int getSocketListenPort() const { return _nodeSocket.getListeningPort(); }
void(*linkedDataCreateCallback)(Node *);
@ -161,9 +161,8 @@ private:
class NodeListIterator : public std::iterator<std::input_iterator_tag, Node> {
public:
NodeListIterator(const NodeList* nodeList, int nodeIndex);
~NodeListIterator() {};
int getNodeIndex() { return _nodeIndex; };
int getNodeIndex() { return _nodeIndex; }
NodeListIterator& operator=(const NodeListIterator& otherValue);

View file

@ -20,7 +20,7 @@ PACKET_VERSION versionForPacketType(PACKET_TYPE type) {
return 1;
case PACKET_TYPE_HEAD_DATA:
return 4;
return 5;
case PACKET_TYPE_AVATAR_FACE_VIDEO:
return 1;

View file

@ -35,9 +35,9 @@ private:
public:
std::string group;
PerfStatHistory(): count(0), totalTime(0.0) {};
PerfStatHistory(): count(0), totalTime(0.0) {}
PerfStatHistory(std::string myGroup, double initialTime, long int initialCount) :
count(initialCount), totalTime(initialTime), group(myGroup) {};
count(initialCount), totalTime(initialTime), group(myGroup) {}
void recordTime(double thisTime) {
totalTime+=thisTime;
@ -94,7 +94,7 @@ public:
_start(usecTimestampNow()),
_message(message),
_renderWarningsOn(renderWarnings),
_alwaysDisplay(alwaysDisplay) { };
_alwaysDisplay(alwaysDisplay) { }
~PerformanceWarning();
};

View file

@ -1,42 +0,0 @@
//
// PointerStack.cpp
// hifi
//
// Created by Brad Hefta-Gaub on 5/11/2013
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//
#include "PointerStack.h"
#include <stdio.h>
PointerStack::~PointerStack() {
deleteAll();
}
void PointerStack::deleteAll() {
if (_elements) {
delete[] _elements;
}
_elements = NULL;
_elementsInUse = 0;
_sizeOfElementsArray = 0;
}
const int GROW_BY = 100;
void PointerStack::growAndPush(void* element) {
//printf("PointerStack::growAndPush() _sizeOfElementsArray=%d",_sizeOfElementsArray);
void** oldElements = _elements;
_elements = new void* [_sizeOfElementsArray + GROW_BY];
_sizeOfElementsArray += GROW_BY;
// If we had an old stack...
if (oldElements) {
// copy old elements into the new stack
memcpy(_elements, oldElements, _elementsInUse * sizeof(void*));
delete[] oldElements;
}
_elements[_elementsInUse] = element;
_elementsInUse++;
}

View file

@ -1,59 +0,0 @@
//
// PointerStack.h
// hifi
//
// Created by Brad Hefta-Gaub on 4/25/2013
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//
//
#ifndef __hifi__PointerStack__
#define __hifi__PointerStack__
#include <cstring> // for NULL
class PointerStack {
public:
PointerStack() :
_elements(NULL),
_elementsInUse(0),
_sizeOfElementsArray(0) {};
~PointerStack();
void push(void* element) {
if (_sizeOfElementsArray < _elementsInUse + 1) {
return growAndPush(element);
}
_elements[_elementsInUse] = element;
_elementsInUse++;
};
void* pop() {
if (_elementsInUse) {
// get the last element
void* element = _elements[_elementsInUse - 1];
// reduce the count
_elementsInUse--;
return element;
}
return NULL;
};
void* top() const { return (_elementsInUse) ? _elements[_elementsInUse - 1] : NULL; }
bool isEmpty() const { return (_elementsInUse == 0); };
bool empty() const { return (_elementsInUse == 0); };
int count() const { return _elementsInUse; };
int size() const { return _elementsInUse; };
private:
void growAndPush(void* element);
void deleteAll();
void** _elements;
int _elementsInUse;
int _sizeOfElementsArray;
};
#endif /* defined(__hifi__PointerStack__) */

View file

@ -106,7 +106,7 @@ int removeFromSortedArrays(void* value, void** valueArray, float* keyArray, int*
// Helper Class for debugging
class debug {
public:
static const char* valueOf(bool checkValue) { return checkValue ? "yes" : "no"; };
static const char* valueOf(bool checkValue) { return checkValue ? "yes" : "no"; }
};
#endif /* defined(__hifi__SharedUtil__) */

View file

@ -16,7 +16,7 @@ class StDev {
void addValue(float v);
float getAverage();
float getStDev();
int getSamples() {return sampleCount;};
int getSamples() const { return sampleCount; }
private:
float * data;
int sampleCount;

View file

@ -108,6 +108,13 @@ TagList::TagList(std::stringstream &ss) :
}
}
TagList::~TagList() {
while (!_data.empty()) {
delete _data.back();
_data.pop_back();
}
}
TagCompound::TagCompound(std::stringstream &ss) :
Tag(TAG_Compound, ss),
_size(0),
@ -145,6 +152,13 @@ TagCompound::TagCompound(std::stringstream &ss) :
}
}
TagCompound::~TagCompound() {
while (!_data.empty()) {
delete _data.back();
_data.pop_back();
}
}
TagIntArray::TagIntArray(std::stringstream &ss) : Tag(TAG_Int_Array, ss) {
_size = ss.get() << 24 | ss.get() << 16 | ss.get() << 8 | ss.get();

View file

@ -124,6 +124,7 @@ private:
class TagList : public Tag {
public:
TagList(std::stringstream &ss);
~TagList();
int getTagId() const {return _tagId;}
int getSize () const {return _size; }
@ -138,6 +139,7 @@ private:
class TagCompound : public Tag {
public:
TagCompound(std::stringstream &ss);
~TagCompound();
int getSize () const {return _size; }
std::list<Tag*> getData () const {return _data; }

View file

@ -34,8 +34,8 @@ public:
VoxelNode(unsigned char * octalCode); // regular constructor
~VoxelNode();
unsigned char* getOctalCode() const { return _octalCode; };
VoxelNode* getChildAtIndex(int childIndex) const { return _children[childIndex]; };
unsigned char* getOctalCode() const { return _octalCode; }
VoxelNode* getChildAtIndex(int childIndex) const { return _children[childIndex]; }
void deleteChildAtIndex(int childIndex);
VoxelNode* removeChildAtIndex(int childIndex);
VoxelNode* addChildAtIndex(int childIndex);
@ -45,15 +45,15 @@ public:
void setRandomColor(int minimumBrightness);
bool collapseIdenticalLeaves();
const AABox& getAABox() const { return _box; };
const glm::vec3& getCenter() const { return _box.getCenter(); };
const glm::vec3& getCorner() const { return _box.getCorner(); };
float getScale() const { return _box.getSize().x; /* voxelScale = (1 / powf(2, *node->getOctalCode())); */ };
int getLevel() const { return *_octalCode + 1; /* one based or zero based? this doesn't correctly handle 2 byte case */ };
const AABox& getAABox() const { return _box; }
const glm::vec3& getCenter() const { return _box.getCenter(); }
const glm::vec3& getCorner() const { return _box.getCorner(); }
float getScale() const { return _box.getSize().x; } // voxelScale = (1 / powf(2, *node->getOctalCode()));
int getLevel() const { return *_octalCode + 1; } // one-based or zero-based? this doesn't correctly handle the 2-byte case
float getEnclosingRadius() const;
bool isColored() const { return (_trueColor[3]==1); };
bool isColored() const { return _trueColor[3] == 1; }
bool isInView(const ViewFrustum& viewFrustum) const;
ViewFrustum::location inFrustum(const ViewFrustum& viewFrustum) const;
float distanceToCamera(const ViewFrustum& viewFrustum) const;
@ -68,18 +68,18 @@ public:
bool isLeaf() const { return _childCount == 0; }
int getChildCount() const { return _childCount; }
void printDebugDetails(const char* label) const;
bool isDirty() const { return _isDirty; };
void clearDirtyBit() { _isDirty = false; };
bool hasChangedSince(uint64_t time) const { return (_lastChanged > time); };
void markWithChangedTime() { _lastChanged = usecTimestampNow(); };
uint64_t getLastChanged() const { return _lastChanged; };
bool isDirty() const { return _isDirty; }
void clearDirtyBit() { _isDirty = false; }
bool hasChangedSince(uint64_t time) const { return (_lastChanged > time); }
void markWithChangedTime() { _lastChanged = usecTimestampNow(); }
uint64_t getLastChanged() const { return _lastChanged; }
void handleSubtreeChanged(VoxelTree* myTree);
glBufferIndex getBufferIndex() const { return _glBufferIndex; };
bool isKnownBufferIndex() const { return (_glBufferIndex != GLBUFFER_INDEX_UNKNOWN); };
void setBufferIndex(glBufferIndex index) { _glBufferIndex = index; };
VoxelSystem* getVoxelSystem() const { return _voxelSystem; };
void setVoxelSystem(VoxelSystem* voxelSystem) { _voxelSystem = voxelSystem; };
glBufferIndex getBufferIndex() const { return _glBufferIndex; }
bool isKnownBufferIndex() const { return (_glBufferIndex != GLBUFFER_INDEX_UNKNOWN); }
void setBufferIndex(glBufferIndex index) { _glBufferIndex = index; }
VoxelSystem* getVoxelSystem() const { return _voxelSystem; }
void setVoxelSystem(VoxelSystem* voxelSystem) { _voxelSystem = voxelSystem; }
// Used by VoxelSystem for rendering in/out of view and LOD
@ -89,10 +89,10 @@ public:
#ifndef NO_FALSE_COLOR // !NO_FALSE_COLOR means false color is supported
void setFalseColor(colorPart red, colorPart green, colorPart blue);
void setFalseColored(bool isFalseColored);
bool getFalseColored() { return _falseColored; };
bool getFalseColored() { return _falseColored; }
void setColor(const nodeColor& color);
const nodeColor& getTrueColor() const { return _trueColor; };
const nodeColor& getColor() const { return _currentColor; };
const nodeColor& getTrueColor() const { return _trueColor; }
const nodeColor& getColor() const { return _currentColor; }
#else
void setFalseColor(colorPart red, colorPart green, colorPart blue) { /* no op */ };
void setFalseColored(bool isFalseColored) { /* no op */ };
@ -103,18 +103,18 @@ public:
const nodeColor& getColor() const { return _trueColor; };
#endif
void setDensity(float density) { _density = density; };
float getDensity() const { return _density; };
void setSourceID(uint16_t sourceID) { _sourceID = sourceID; };
uint16_t getSourceID() const { return _sourceID; };
void setDensity(float density) { _density = density; }
float getDensity() const { return _density; }
void setSourceID(uint16_t sourceID) { _sourceID = sourceID; }
uint16_t getSourceID() const { return _sourceID; }
static void addDeleteHook(VoxelNodeDeleteHook* hook);
static void removeDeleteHook(VoxelNodeDeleteHook* hook);
void recalculateSubTreeNodeCount();
unsigned long getSubTreeNodeCount() const { return _subtreeNodeCount; };
unsigned long getSubTreeInternalNodeCount() const { return _subtreeNodeCount - _subtreeLeafNodeCount; };
unsigned long getSubTreeLeafNodeCount() const { return _subtreeLeafNodeCount; };
unsigned long getSubTreeNodeCount() const { return _subtreeNodeCount; }
unsigned long getSubTreeInternalNodeCount() const { return _subtreeNodeCount - _subtreeLeafNodeCount; }
unsigned long getSubTreeLeafNodeCount() const { return _subtreeLeafNodeCount; }
private:
void calculateAABox();
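
The dirty-bit and changed-time accessors above support change detection in the render path. A minimal sketch of the intended pattern (hypothetical caller; lastRenderTime is an assumed uint64_t tracked by the renderer):

// sketch: only regenerate nodes that changed since the last render pass
if (node->isDirty() || node->hasChangedSince(lastRenderTime)) {
    // ... re-upload this node's vertex data ...
    node->clearDirtyBit();
}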

View file

@ -27,8 +27,8 @@ public:
bool contains(VoxelNode* node); // is this node in the bag?
void remove(VoxelNode* node); // remove a specific item from the bag
bool isEmpty() const { return (_elementsInUse == 0); };
int count() const { return _elementsInUse; };
bool isEmpty() const { return (_elementsInUse == 0); }
int count() const { return _elementsInUse; }
void deleteAll();

View file

@ -19,19 +19,19 @@ class BoundingBox {
public:
enum { BOTTOM_LEFT, BOTTOM_RIGHT, TOP_RIGHT, TOP_LEFT, VERTEX_COUNT };
BoundingBox(glm::vec2 corner, glm::vec2 size) : corner(corner), size(size), _set(true) {};
BoundingBox() : _set(false) {};
BoundingBox(const glm::vec2 corner, const glm::vec2 size) : corner(corner), size(size), _set(true) {}
BoundingBox() : _set(false) {}
glm::vec2 corner;
glm::vec2 size;
bool contains(const BoundingBox& box) const;
bool contains(const glm::vec2& point) const;
bool pointInside(const glm::vec2& point) const { return contains(point); };
bool pointInside(const glm::vec2& point) const { return contains(point); }
void explandToInclude(const BoundingBox& box);
float area() const { return size.x * size.y; };
float area() const { return size.x * size.y; }
int getVertexCount() const { return VERTEX_COUNT; };
int getVertexCount() const { return VERTEX_COUNT; }
glm::vec2 getVertex(int vertexNumber) const;
BoundingBox topHalf() const;
@ -66,23 +66,23 @@ public:
_vertexCount(vertexCount),
_maxX(-FLT_MAX), _maxY(-FLT_MAX), _minX(FLT_MAX), _minY(FLT_MAX),
_distance(0)
{ };
{ }
~VoxelProjectedPolygon() { };
const ProjectedVertices& getVertices() const { return _vertices; };
const glm::vec2& getVertex(int i) const { return _vertices[i]; };
~VoxelProjectedPolygon() { }
const ProjectedVertices& getVertices() const { return _vertices; }
const glm::vec2& getVertex(int i) const { return _vertices[i]; }
void setVertex(int vertex, const glm::vec2& point);
int getVertexCount() const { return _vertexCount; };
void setVertexCount(int vertexCount) { _vertexCount = vertexCount; };
float getDistance() const { return _distance; }
void setDistance(float distance) { _distance = distance; }
bool getAnyInView() const { return _anyInView; };
void setAnyInView(bool anyInView) { _anyInView = anyInView; };
bool getAllInView() const { return _allInView; };
void setAllInView(bool allInView) { _allInView = allInView; };
void setProjectionType(unsigned char type) { _projectionType = type; };
unsigned char getProjectionType() const { return _projectionType; };
int getVertexCount() const { return _vertexCount; }
float getDistance() const { return _distance; }
bool getAnyInView() const { return _anyInView; }
bool getAllInView() const { return _allInView; }
unsigned char getProjectionType() const { return _projectionType; }
void setVertexCount(int vertexCount) { _vertexCount = vertexCount; }
void setDistance(float distance) { _distance = distance; }
void setAnyInView(bool anyInView) { _anyInView = anyInView; }
void setAllInView(bool allInView) { _allInView = allInView; }
void setProjectionType(unsigned char type) { _projectionType = type; }
bool pointInside(const glm::vec2& point, bool* matchesVertex = NULL) const;

View file

@ -120,7 +120,7 @@ public:
/// Returns details about items tracked by VoxelSceneStats
/// \param item The item from the stats you're interested in.
ItemInfo& getItemInfo(Item item) { return _ITEMS[item]; };
ItemInfo& getItemInfo(Item item) { return _ITEMS[item]; }
/// Returns a UI formatted value of an item tracked by VoxelSceneStats
/// \param item The item from the stats you're interested in.

View file

@ -70,61 +70,6 @@ VoxelTree::~VoxelTree() {
pthread_mutex_destroy(&_deletePendingSetLock);
}
void VoxelTree::recurseTreeWithOperationDistanceSortedTimed(PointerStack* stackOfNodes, long allowedTime,
RecurseVoxelTreeOperation operation,
const glm::vec3& point, void* extraData) {
long long start = usecTimestampNow();
// starting case: the stack is empty, so begin at the root
if (stackOfNodes->empty()) {
stackOfNodes->push(rootNode);
}
while (!stackOfNodes->empty()) {
VoxelNode* node = (VoxelNode*)stackOfNodes->top();
stackOfNodes->pop();
if (operation(node, extraData)) {
// sort the children by distance, CLOSEST to FURTHEST
VoxelNode* sortedChildren[NUMBER_OF_CHILDREN];
float distancesToChildren[NUMBER_OF_CHILDREN];
int indexOfChildren[NUMBER_OF_CHILDREN]; // not really needed
int currentCount = 0;
for (int i = 0; i < NUMBER_OF_CHILDREN; i++) {
VoxelNode* childNode = node->getChildAtIndex(i);
if (childNode) {
// use distance squared rather than actual distance to avoid a sqrt per child
float distanceSquared = childNode->distanceSquareToPoint(point);
currentCount = insertIntoSortedArrays((void*)childNode, distanceSquared, i,
(void**)&sortedChildren, (float*)&distancesToChildren,
(int*)&indexOfChildren, currentCount, NUMBER_OF_CHILDREN);
}
}
// push sorted children FURTHEST to CLOSEST, so the CLOSEST child is popped first
for (int i = currentCount-1; i >= 0; i--) {
VoxelNode* child = sortedChildren[i];
stackOfNodes->push(child);
}
}
// check whether we should bail for timing reasons; if we bail here and are
// called again later, the stack resumes processing exactly where we left off
long long now = usecTimestampNow();
long elapsedTime = now - start;
if (elapsedTime > allowedTime) {
return; // caller responsible for calling us again to finish the job!
}
}
}
// Recurses voxel tree calling the RecurseVoxelTreeOperation function for each node.
// stops recursion if operation function returns false.
void VoxelTree::recurseTreeWithOperation(RecurseVoxelTreeOperation operation, void* extraData) {
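
For context, the removed method was designed to be resumable: the caller keeps the stack alive between calls so traversal picks up where it stopped. Its intended call pattern looked roughly like this (a hypothetical sketch; tree, renderOperation, and cameraPosition are assumptions):

// keep the stack alive across frames so the traversal can resume
PointerStack stackOfNodes;
const long MAX_TRAVERSAL_TIME_USECS = 2000;
tree->recurseTreeWithOperationDistanceSortedTimed(&stackOfNodes, MAX_TRAVERSAL_TIME_USECS,
                                                  renderOperation, cameraPosition, NULL);
// if stackOfNodes is not empty, call again next frame to finish the job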

View file

@ -10,7 +10,6 @@
#define __hifi__VoxelTree__
#include <set>
#include <PointerStack.h>
#include <SimpleMovingAverage.h>
#include "CoverageMap.h"
@ -158,10 +157,10 @@ public:
int encodeTreeBitstream(VoxelNode* node, unsigned char* outputBuffer, int availableBytes, VoxelNodeBag& bag,
EncodeBitstreamParams& params) ;
bool isDirty() const { return _isDirty; };
void clearDirtyBit() { _isDirty = false; };
void setDirtyBit() { _isDirty = true; };
unsigned long int getNodesChangedFromBitstream() const { return _nodesChangedFromBitstream; };
bool isDirty() const { return _isDirty; }
void clearDirtyBit() { _isDirty = false; }
void setDirtyBit() { _isDirty = true; }
unsigned long int getNodesChangedFromBitstream() const { return _nodesChangedFromBitstream; }
bool findRayIntersection(const glm::vec3& origin, const glm::vec3& direction,
VoxelNode*& node, float& distance, BoxFace& face);
@ -189,12 +188,6 @@ public:
void recurseNodeWithOperation(VoxelNode* node, RecurseVoxelTreeOperation operation, void* extraData);
void recurseNodeWithOperationDistanceSorted(VoxelNode* node, RecurseVoxelTreeOperation operation,
const glm::vec3& point, void* extraData);
void recurseTreeWithOperationDistanceSortedTimed(PointerStack* stackOfNodes, long allowedTime,
RecurseVoxelTreeOperation operation,
const glm::vec3& point, void* extraData);
signals:
void importSize(float x, float y, float z);
void importProgress(int progress);

View file

@ -73,6 +73,7 @@ bool VoxelNodeData::updateCurrentViewFrustum() {
newestViewFrustum.setAspectRatio(getCameraAspectRatio());
newestViewFrustum.setNearClip(getCameraNearClip());
newestViewFrustum.setFarClip(getCameraFarClip());
newestViewFrustum.setEyeOffsetPosition(getCameraEyeOffsetPosition());
// if there has been a change, then recalculate
if (!newestViewFrustum.matches(_currentViewFrustum)) {