Fix bad merge

This commit is contained in:
stojce 2014-01-17 20:01:49 +01:00
parent 36d21e812d
commit 4c77d9f1d2
24 changed files with 0 additions and 1916 deletions

View file

@@ -1,50 +0,0 @@
# Find the OpenCV library
#
# You must provide an OPENCV_ROOT_DIR which contains lib and include directories
#
# Once done this will define
#
# OPENCV_FOUND - system found OpenCV
# OPENCV_INCLUDE_DIRS - the OpenCV include directory
# OPENCV_LIBRARIES - Link this to use OpenCV
#
# Created on 6/13/2013 by Andrzej Kapolka
# Copyright (c) 2013 High Fidelity
#
if (OPENCV_LIBRARIES AND OPENCV_INCLUDE_DIRS)
    # in cache already
    set(OPENCV_FOUND TRUE)
else (OPENCV_LIBRARIES AND OPENCV_INCLUDE_DIRS)
    find_path(OPENCV_INCLUDE_DIRS opencv2/opencv.hpp ${OPENCV_ROOT_DIR}/include)

    # Locate each required OpenCV module as a prebuilt static library; the
    # expected filename and lib subdirectory depend on the host platform.
    foreach (MODULE core flann imgproc photo video features2d objdetect calib3d ml highgui contrib)
        if (APPLE)
            find_library(OPENCV_LIBRARY_${MODULE} libopencv_${MODULE}.a ${OPENCV_ROOT_DIR}/lib/MacOS/)
        elseif (UNIX)
            find_library(OPENCV_LIBRARY_${MODULE} libopencv_${MODULE}.a ${OPENCV_ROOT_DIR}/lib/UNIX/)
        elseif (WIN32)
            find_library(OPENCV_LIBRARY_${MODULE} opencv_${MODULE}.lib ${OPENCV_ROOT_DIR}/lib/Win32/)
        endif ()
        # prepend, so the final list is in reverse discovery order
        # (presumably to satisfy static link-order dependencies -- confirm)
        set(MODULE_LIBRARIES ${OPENCV_LIBRARY_${MODULE}} ${MODULE_LIBRARIES})
    endforeach (MODULE)
    set(OPENCV_LIBRARIES ${MODULE_LIBRARIES} CACHE STRING "OpenCV library paths")

    # report success only if both the headers and the libraries were found
    if (OPENCV_INCLUDE_DIRS AND OPENCV_LIBRARIES)
        set(OPENCV_FOUND TRUE)
    endif (OPENCV_INCLUDE_DIRS AND OPENCV_LIBRARIES)
    if (OPENCV_FOUND)
        if (NOT OPENCV_FIND_QUIETLY)
            message(STATUS "Found OpenCV: ${OPENCV_LIBRARIES}")
        endif (NOT OPENCV_FIND_QUIETLY)
    else (OPENCV_FOUND)
        if (OPENCV_FIND_REQUIRED)
            message(FATAL_ERROR "Could not find OpenCV")
        endif (OPENCV_FIND_REQUIRED)
    endif (OPENCV_FOUND)

    # show the OPENCV_INCLUDE_DIRS and OPENCV_LIBRARIES variables only in the advanced view
    mark_as_advanced(OPENCV_INCLUDE_DIRS OPENCV_LIBRARIES)
endif (OPENCV_LIBRARIES AND OPENCV_INCLUDE_DIRS)

Binary file not shown.

Binary file not shown.

View file

@@ -1,501 +0,0 @@
//
// VideoFace.cpp
// interface
//
// Created by Andrzej Kapolka on 7/11/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//
#include <glm/gtx/quaternion.hpp>
#ifdef HAVE_LIBVPX
#include <vpx_decoder.h>
#include <vp8dx.h>
#endif
#include <PacketHeaders.h>
#include "Application.h"
#include "Avatar.h"
#include "Head.h"
#include "VideoFace.h"
#include "renderer/ProgramObject.h"
using namespace cv;
bool VideoFace::_initialized = false;
ProgramObject VideoFace::_videoProgram;
VideoFace::Locations VideoFace::_videoProgramLocations;
ProgramObject VideoFace::_texturedProgram;
VideoFace::Locations VideoFace::_texturedProgramLocations;
GLuint VideoFace::_vboID;
GLuint VideoFace::_iboID;
// Creates a face for the given head with no frame data.  Zero texture IDs
// mean "no frame" (see isActive() in the header), and value-initializing the
// codec contexts leaves their name fields zero, which the rest of this file
// uses to mean "codec not initialized".  The mode-tracking and byte-count
// members are initialized here because processVideoMessage() reads them
// before ever assigning them (previously they were read uninitialized).
VideoFace::VideoFace(Head* owningHead) : _owningHead(owningHead), _renderMode(MESH),
    _colorTextureID(0), _depthTextureID(0), _aspectRatio(0.0f),
#ifdef HAVE_LIBVPX
    _colorCodec(), _depthCodec(),
#endif
    _lastFullFrame(false), _lastDepthOnly(false), _frameCount(0), _frameBytesRemaining(0) {

    // we may have been created in the network thread, but we live in the main thread
    moveToThread(Application::getInstance()->thread());
}
// Destroys any codec contexts we created, along with the textures they imply
// we own.  A texture is deleted only when the matching codec was initialized:
// in that case the texture was generated by setFrame() for decoded network
// video, whereas otherwise the IDs were copied from the webcam in
// setFrameFromWebcam() and belong to it, not to us.
VideoFace::~VideoFace() {
#ifdef HAVE_LIBVPX
    if (_colorCodec.name != 0) {
        vpx_codec_destroy(&_colorCodec);

        // delete our texture, since we know that we own it
        if (_colorTextureID != 0) {
            glDeleteTextures(1, &_colorTextureID);
        }
    }
    if (_depthCodec.name != 0) {
        vpx_codec_destroy(&_depthCodec);

        // delete our texture, since we know that we own it
        if (_depthTextureID != 0) {
            glDeleteTextures(1, &_depthTextureID);
        }
    }
#endif
}
// Mirrors the local webcam's current frame: adopts its texture IDs and frame
// metrics when it is sending, and clears our frame state otherwise.  The
// textures remain owned by the webcam.
void VideoFace::setFrameFromWebcam() {
    Webcam* webcam = Application::getInstance()->getWebcam();
    if (!webcam->isSending()) {
        clearFrame();
        return;
    }
    _colorTextureID = webcam->getColorTextureID();
    _depthTextureID = webcam->getDepthTextureID();
    _textureSize = webcam->getTextureSize();
    _textureRect = webcam->getFaceRect();
    _aspectRatio = webcam->getAspectRatio();
}
// Drops our references to the current frame's textures so isActive() returns
// false.  The texture objects themselves are not deleted here (when sourced
// from the webcam they are owned by it).
void VideoFace::clearFrame() {
    _colorTextureID = 0;
    _depthTextureID = 0;
}
// Handles one network packet of face video for this avatar.  Each packet
// starts with three uint32 fields -- frame number, total encoded frame size,
// and the byte offset of this packet's payload within that frame -- followed
// by the payload slice.  Slices accumulate in _arrivingFrame; once complete,
// the frame (aspect ratio + VP8 color stream + VP8-coded depth stream) is
// decoded and forwarded to setFrame() on the main thread.  Returns the
// number of bytes consumed, which is always dataBytes.
int VideoFace::processVideoMessage(unsigned char* packetData, size_t dataBytes) {
#ifdef HAVE_LIBVPX
    unsigned char* packetPosition = packetData;

    // NOTE(review): these reads assume the packet holds at least three
    // uint32s and that unaligned 32-bit loads are acceptable on the target.
    int frameCount = *(uint32_t*)packetPosition;
    packetPosition += sizeof(uint32_t);

    int frameSize = *(uint32_t*)packetPosition;
    packetPosition += sizeof(uint32_t);

    int frameOffset = *(uint32_t*)packetPosition;
    packetPosition += sizeof(uint32_t);

    if (frameCount < _frameCount) { // old frame; ignore
        return dataBytes;

    } else if (frameCount > _frameCount) { // new frame; reset
        _frameCount = frameCount;
        _frameBytesRemaining = frameSize;
        _arrivingFrame.resize(frameSize);
    }

    // copy this packet's slice into place within the assembling frame
    // NOTE(review): frameOffset and payloadSize are not validated against
    // frameSize, so a malformed packet could write past _arrivingFrame.
    int payloadSize = dataBytes - (packetPosition - packetData);
    memcpy(_arrivingFrame.data() + frameOffset, packetPosition, payloadSize);
    if ((_frameBytesRemaining -= payloadSize) > 0) {
        return dataBytes; // wait for the rest of the frame
    }

    if (frameSize == 0) {
        // destroy the codecs, if we have any
        destroyCodecs();

        // disables video data
        QMetaObject::invokeMethod(this, "setFrame", Q_ARG(cv::Mat, Mat()),
            Q_ARG(cv::Mat, Mat()), Q_ARG(float, 0.0f));
        return dataBytes;
    }

    // a complete frame begins with its aspect ratio and the size of the
    // encoded color stream; the switch between full frame or depth only
    // modes requires us to reinit the codecs
    float aspectRatio = *(const float*)_arrivingFrame.constData();
    size_t colorSize = *(const size_t*)(_arrivingFrame.constData() + sizeof(float));
    bool fullFrame = (aspectRatio == FULL_FRAME_ASPECT);
    bool depthOnly = (colorSize == 0);
    if (fullFrame != _lastFullFrame || depthOnly != _lastDepthOnly) {
        destroyCodecs();
        _lastFullFrame = fullFrame;
        _lastDepthOnly = depthOnly;
    }

    // read the color data, if non-empty
    Mat color;
    const uint8_t* colorData = (const uint8_t*)(_arrivingFrame.constData() + sizeof(float) + sizeof(size_t));
    if (colorSize > 0) {
        if (_colorCodec.name == 0) {
            // initialize decoder context
            vpx_codec_dec_init(&_colorCodec, vpx_codec_vp8_dx(), 0, 0);
        }
        vpx_codec_decode(&_colorCodec, colorData, colorSize, 0, 0);
        vpx_codec_iter_t iterator = 0;
        vpx_image_t* image;
        while ((image = vpx_codec_get_frame(&_colorCodec, &iterator)) != 0) {
            // convert from YV12 to RGB: see http://www.fourcc.org/yuv.php and
            // http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
            color.create(image->d_h, image->d_w, CV_8UC3);
            uchar* yline = image->planes[0];
            uchar* vline = image->planes[1];
            uchar* uline = image->planes[2];

            // fixed-point (x256) YUV-to-RGB conversion weights
            const int RED_V_WEIGHT = (int)(1.403 * 256);
            const int GREEN_V_WEIGHT = (int)(0.714 * 256);
            const int GREEN_U_WEIGHT = (int)(0.344 * 256);
            const int BLUE_U_WEIGHT = (int)(1.773 * 256);

            // walk the image in 2x2 blocks: the four luma samples of a block
            // share a single (u, v) chroma sample
            for (int i = 0; i < image->d_h; i += 2) {
                uchar* ysrc = yline;
                uchar* vsrc = vline;
                uchar* usrc = uline;
                for (int j = 0; j < image->d_w; j += 2) {
                    uchar* tl = color.ptr(i, j);
                    uchar* tr = color.ptr(i, j + 1);
                    uchar* bl = color.ptr(i + 1, j);
                    uchar* br = color.ptr(i + 1, j + 1);

                    // chroma contributions, shared by the whole block
                    int v = *vsrc++ - 128;
                    int u = *usrc++ - 128;
                    int redOffset = (RED_V_WEIGHT * v) >> 8;
                    int greenOffset = (GREEN_V_WEIGHT * v + GREEN_U_WEIGHT * u) >> 8;
                    int blueOffset = (BLUE_U_WEIGHT * u) >> 8;

                    // the block's four luma samples (image->w is the plane stride)
                    int ytl = ysrc[0];
                    int ytr = ysrc[1];
                    int ybl = ysrc[image->w];
                    int ybr = ysrc[image->w + 1];
                    ysrc += 2;

                    tl[0] = saturate_cast<uchar>(ytl + redOffset);
                    tl[1] = saturate_cast<uchar>(ytl - greenOffset);
                    tl[2] = saturate_cast<uchar>(ytl + blueOffset);

                    tr[0] = saturate_cast<uchar>(ytr + redOffset);
                    tr[1] = saturate_cast<uchar>(ytr - greenOffset);
                    tr[2] = saturate_cast<uchar>(ytr + blueOffset);

                    bl[0] = saturate_cast<uchar>(ybl + redOffset);
                    bl[1] = saturate_cast<uchar>(ybl - greenOffset);
                    bl[2] = saturate_cast<uchar>(ybl + blueOffset);

                    br[0] = saturate_cast<uchar>(ybr + redOffset);
                    br[1] = saturate_cast<uchar>(ybr - greenOffset);
                    br[2] = saturate_cast<uchar>(ybr + blueOffset);
                }
                yline += image->stride[0] * 2;
                vline += image->stride[1];
                uline += image->stride[2];
            }
        }
    } else if (_colorCodec.name != 0) {
        // color stream stopped; release its codec
        vpx_codec_destroy(&_colorCodec);
        _colorCodec.name = 0;
    }

    // read the depth data, if non-empty; depth values come from the luma
    // plane, while the v plane acts as a validity mask (below the threshold,
    // the depth is forced to the maximum)
    Mat depth;
    const uint8_t* depthData = colorData + colorSize;
    int depthSize = _arrivingFrame.size() - ((const char*)depthData - _arrivingFrame.constData());
    if (depthSize > 0) {
        if (_depthCodec.name == 0) {
            // initialize decoder context
            vpx_codec_dec_init(&_depthCodec, vpx_codec_vp8_dx(), 0, 0);
        }
        vpx_codec_decode(&_depthCodec, depthData, depthSize, 0, 0);
        vpx_codec_iter_t iterator = 0;
        vpx_image_t* image;
        while ((image = vpx_codec_get_frame(&_depthCodec, &iterator)) != 0) {
            depth.create(image->d_h, image->d_w, CV_8UC1);
            uchar* yline = image->planes[0];
            uchar* vline = image->planes[1];
            const uchar EIGHT_BIT_MAXIMUM = 255;
            const uchar MASK_THRESHOLD = 192;
            for (int i = 0; i < image->d_h; i += 2) {
                uchar* ysrc = yline;
                uchar* vsrc = vline;
                for (int j = 0; j < image->d_w; j += 2) {
                    if (*vsrc++ < MASK_THRESHOLD) {
                        // masked out: saturate the whole 2x2 block
                        *depth.ptr(i, j) = EIGHT_BIT_MAXIMUM;
                        *depth.ptr(i, j + 1) = EIGHT_BIT_MAXIMUM;
                        *depth.ptr(i + 1, j) = EIGHT_BIT_MAXIMUM;
                        *depth.ptr(i + 1, j + 1) = EIGHT_BIT_MAXIMUM;

                    } else {
                        *depth.ptr(i, j) = ysrc[0];
                        *depth.ptr(i, j + 1) = ysrc[1];
                        *depth.ptr(i + 1, j) = ysrc[image->stride[0]];
                        *depth.ptr(i + 1, j + 1) = ysrc[image->stride[0] + 1];
                    }
                    ysrc += 2;
                }
                yline += image->stride[0] * 2;
                vline += image->stride[1];
            }
        }
    } else if (_depthCodec.name != 0) {
        // depth stream stopped; release its codec
        vpx_codec_destroy(&_depthCodec);
        _depthCodec.name = 0;
    }

    // hand the decoded frame to setFrame() via a queued invocation, since we
    // may be called from the network thread while the object lives on the
    // main thread (see the constructor)
    QMetaObject::invokeMethod(this, "setFrame", Q_ARG(cv::Mat, color),
        Q_ARG(cv::Mat, depth), Q_ARG(float, aspectRatio));
#endif
    return dataBytes;
}
// Renders the face at the owning head's position with the given opacity.
// Returns false (drawing nothing) when video support is compiled out or no
// frame is active.  When a depth texture exists, a 100x100 vertex grid is
// drawn through one of the shared shader programs; otherwise the color
// texture is drawn on a simple quad.
bool VideoFace::render(float alpha) {
#ifndef HAVE_LIBVPX
    return false;
#else
    if (!isActive()) {
        return false;
    }

    // position and orient ourselves with the owning head
    glPushMatrix();
    glTranslatef(_owningHead->getPosition().x, _owningHead->getPosition().y, _owningHead->getPosition().z);
    glm::quat orientation = _owningHead->getOrientation();
    glm::vec3 axis = glm::axis(orientation);
    glRotatef(glm::angle(orientation), axis.x, axis.y, axis.z);

    float aspect, xScale, zScale;
    if (_aspectRatio == FULL_FRAME_ASPECT) {
        // full-frame video: derive the aspect from the texture itself and
        // draw a half cylinder behind the frame
        aspect = _textureSize.width / _textureSize.height;
        const float FULL_FRAME_SCALE = 0.5f;
        xScale = FULL_FRAME_SCALE * _owningHead->getScale();
        zScale = xScale * 0.3f;
        glPushMatrix();
        glTranslatef(0.0f, -0.2f, 0.0f);
        glScalef(0.5f * xScale, xScale / aspect, zScale);
        glColor4f(1.0f, 1.0f, 1.0f, alpha);
        Application::getInstance()->getGeometryCache()->renderHalfCylinder(25, 20);
        glPopMatrix();
    } else {
        // face-only video: scale relative to the head and offset down/back
        aspect = _aspectRatio;
        xScale = BODY_BALL_RADIUS_HEAD_BASE * _owningHead->getScale();
        zScale = xScale * 1.5f;
        glTranslatef(0.0f, -xScale * 0.75f, -xScale);
    }
    glScalef(xScale, xScale / aspect, zScale);
    glColor4f(1.0f, 1.0f, 1.0f, alpha);

    // corners of the (possibly rotated) frame rectangle within the texture
    Point2f points[4];
    _textureRect.points(points);

    if (_depthTextureID != 0) {
        // grid dimensions and the buffer sizes derived from them
        const int VERTEX_WIDTH = 100;
        const int VERTEX_HEIGHT = 100;
        const int VERTEX_COUNT = VERTEX_WIDTH * VERTEX_HEIGHT;
        const int ELEMENTS_PER_VERTEX = 2;
        const int BUFFER_ELEMENTS = VERTEX_COUNT * ELEMENTS_PER_VERTEX;
        const int QUAD_WIDTH = VERTEX_WIDTH - 1;
        const int QUAD_HEIGHT = VERTEX_HEIGHT - 1;
        const int QUAD_COUNT = QUAD_WIDTH * QUAD_HEIGHT;
        const int TRIANGLES_PER_QUAD = 2;
        const int INDICES_PER_TRIANGLE = 3;
        const int INDEX_COUNT = QUAD_COUNT * TRIANGLES_PER_QUAD * INDICES_PER_TRIANGLE;

        if (!_initialized) {
            // lazily create the shared shader programs and the static grid
            // vertex/index buffers on first use (shared across all instances)
            loadProgram(_videoProgram, QString(), "colorTexture", _videoProgramLocations);
            loadProgram(_texturedProgram, "_textured", "permutationNormalTexture", _texturedProgramLocations);

            // vertices are (x, y) positions normalized to [0, 1] over the grid
            glGenBuffers(1, &_vboID);
            glBindBuffer(GL_ARRAY_BUFFER, _vboID);
            float* vertices = new float[BUFFER_ELEMENTS];
            float* vertexPosition = vertices;
            for (int i = 0; i < VERTEX_HEIGHT; i++) {
                for (int j = 0; j < VERTEX_WIDTH; j++) {
                    *vertexPosition++ = j / (float)(VERTEX_WIDTH - 1);
                    *vertexPosition++ = i / (float)(VERTEX_HEIGHT - 1);
                }
            }
            glBufferData(GL_ARRAY_BUFFER, BUFFER_ELEMENTS * sizeof(float), vertices, GL_STATIC_DRAW);
            delete[] vertices;

            // two triangles per grid quad
            glGenBuffers(1, &_iboID);
            glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _iboID);
            int* indices = new int[INDEX_COUNT];
            int* indexPosition = indices;
            for (int i = 0; i < QUAD_HEIGHT; i++) {
                for (int j = 0; j < QUAD_WIDTH; j++) {
                    *indexPosition++ = i * VERTEX_WIDTH + j;
                    *indexPosition++ = (i + 1) * VERTEX_WIDTH + j;
                    *indexPosition++ = i * VERTEX_WIDTH + j + 1;

                    *indexPosition++ = i * VERTEX_WIDTH + j + 1;
                    *indexPosition++ = (i + 1) * VERTEX_WIDTH + j;
                    *indexPosition++ = (i + 1) * VERTEX_WIDTH + j + 1;
                }
            }
            glBufferData(GL_ELEMENT_ARRAY_BUFFER, INDEX_COUNT * sizeof(int), indices, GL_STATIC_DRAW);
            delete[] indices;

            _initialized = true;

        } else {
            glBindBuffer(GL_ARRAY_BUFFER, _vboID);
            glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _iboID);
        }

        // depth texture on unit 0; color (or permutation-normal fallback,
        // when no color frame exists) on unit 1
        glBindTexture(GL_TEXTURE_2D, _depthTextureID);
        glActiveTexture(GL_TEXTURE1);
        ProgramObject* program = &_videoProgram;
        Locations* locations = &_videoProgramLocations;
        if (_colorTextureID != 0) {
            glBindTexture(GL_TEXTURE_2D, _colorTextureID);

        } else {
            glBindTexture(GL_TEXTURE_2D, Application::getInstance()->getTextureCache()->getPermutationNormalTextureID());
            program = &_texturedProgram;
            locations = &_texturedProgramLocations;
        }
        program->bind();

        // describe the face rectangle to the shader as a corner plus two
        // edge vectors, all in normalized texture coordinates
        program->setUniformValue(locations->texCoordCorner,
            points[0].x / _textureSize.width, points[0].y / _textureSize.height);
        program->setUniformValue(locations->texCoordRight,
            (points[3].x - points[0].x) / _textureSize.width, (points[3].y - points[0].y) / _textureSize.height);
        program->setUniformValue(locations->texCoordUp,
            (points[1].x - points[0].x) / _textureSize.width, (points[1].y - points[0].y) / _textureSize.height);
        glEnableClientState(GL_VERTEX_ARRAY);
        glVertexPointer(2, GL_FLOAT, 0, 0);

        // NOTE(review): the alpha test discards fragments whose alpha != 1;
        // presumably the face shaders zero alpha for masked-out depth texels
        // -- confirm against resources/shaders/face*.frag
        glEnable(GL_ALPHA_TEST);
        glAlphaFunc(GL_EQUAL, 1.0f);

        if (_renderMode == MESH) {
            glDrawRangeElementsEXT(GL_TRIANGLES, 0, VERTEX_COUNT - 1, INDEX_COUNT, GL_UNSIGNED_INT, 0);
            glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

        } else { // _renderMode == POINTS
            glPointSize(5.0f);
            glDrawArrays(GL_POINTS, 0, VERTEX_COUNT);
            glPointSize(1.0f);
        }

        // restore state
        glDisable(GL_ALPHA_TEST);
        glDisableClientState(GL_VERTEX_ARRAY);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        program->release();
        glBindTexture(GL_TEXTURE_2D, 0);
        glActiveTexture(GL_TEXTURE0);

    } else {
        // no depth data: draw the color texture on a fixed-function quad
        glBindTexture(GL_TEXTURE_2D, _colorTextureID);
        glEnable(GL_TEXTURE_2D);
        glBegin(GL_QUADS);
        glTexCoord2f(points[0].x / _textureSize.width, points[0].y / _textureSize.height);
        glVertex3f(0.5f, -0.5f, 0.0f);
        glTexCoord2f(points[1].x / _textureSize.width, points[1].y / _textureSize.height);
        glVertex3f(0.5f, 0.5f, 0.0f);
        glTexCoord2f(points[2].x / _textureSize.width, points[2].y / _textureSize.height);
        glVertex3f(-0.5f, 0.5f, 0.0f);
        glTexCoord2f(points[3].x / _textureSize.width, points[3].y / _textureSize.height);
        glVertex3f(-0.5f, -0.5f, 0.0f);
        glEnd();
        glDisable(GL_TEXTURE_2D);
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    glPopMatrix();
    return true;
#endif
}
// Advances to the next render mode in declaration order, wrapping around
// (MESH -> POINTS -> MESH -> ...).
void VideoFace::cycleRenderMode() {
    _renderMode = (RenderMode)((_renderMode + 1) % RENDER_MODE_COUNT);
}
// Slot receiving a newly decoded frame (queued from processVideoMessage so
// that it runs on the main thread).  Uploads the color and depth images into
// our GL textures, reallocating texture storage only when the size changes;
// an empty matrix deletes the corresponding texture.
// NOTE(review): _textureRect is reset by whichever of color/depth triggers a
// reallocation last -- if they ever had different sizes, depth would win.
void VideoFace::setFrame(const cv::Mat& color, const cv::Mat& depth, float aspectRatio) {
    Size2f textureSize = _textureSize;
    if (!color.empty()) {
        bool generate = (_colorTextureID == 0);
        if (generate) {
            glGenTextures(1, &_colorTextureID);
        }
        glBindTexture(GL_TEXTURE_2D, _colorTextureID);
        if (_textureSize.width != color.cols || _textureSize.height != color.rows || generate) {
            // new texture or new size: allocate storage and reset the
            // texture rectangle to cover the whole image
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, color.cols, color.rows, 0, GL_RGB, GL_UNSIGNED_BYTE, color.ptr());
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            textureSize = color.size();
            _textureRect = RotatedRect(Point2f(color.cols * 0.5f, color.rows * 0.5f), textureSize, 0.0f);

        } else {
            // same size: update the existing storage in place
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, color.cols, color.rows, GL_RGB, GL_UNSIGNED_BYTE, color.ptr());
        }
    } else if (_colorTextureID != 0) {
        glDeleteTextures(1, &_colorTextureID);
        _colorTextureID = 0;
    }

    if (!depth.empty()) {
        bool generate = (_depthTextureID == 0);
        if (generate) {
            glGenTextures(1, &_depthTextureID);
        }
        glBindTexture(GL_TEXTURE_2D, _depthTextureID);
        if (_textureSize.width != depth.cols || _textureSize.height != depth.rows || generate) {
            // nearest filtering here -- presumably to avoid interpolating
            // across the masked-out (saturated) depth regions; confirm
            glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, depth.cols, depth.rows, 0,
                GL_LUMINANCE, GL_UNSIGNED_BYTE, depth.ptr());
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
            textureSize = depth.size();
            _textureRect = RotatedRect(Point2f(depth.cols * 0.5f, depth.rows * 0.5f), textureSize, 0.0f);

        } else {
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, depth.cols, depth.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, depth.ptr());
        }
    } else if (_depthTextureID != 0) {
        glDeleteTextures(1, &_depthTextureID);
        _depthTextureID = 0;
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    _aspectRatio = aspectRatio;
    _textureSize = textureSize;
}
// Releases both VPX decoder contexts if they were initialized.  A zero name
// field marks a context as uninitialized, and is restored after destruction
// so the contexts can be lazily re-created later.
void VideoFace::destroyCodecs() {
#ifdef HAVE_LIBVPX
    if (_depthCodec.name != 0) {
        vpx_codec_destroy(&_depthCodec);
        _depthCodec.name = 0;
    }
    if (_colorCodec.name != 0) {
        vpx_codec_destroy(&_colorCodec);
        _colorCodec.name = 0;
    }
#endif
}
// Builds one of the shared face shader programs.  The suffix selects the
// shader pair resources/shaders/face<suffix>.{vert,frag};
// secondTextureUniform is the name of the sampler bound to texture unit 1
// (the depth texture is always on unit 0).  The texture-coordinate uniform
// locations are cached in the given Locations struct for use at render time.
void VideoFace::loadProgram(ProgramObject& program, const QString& suffix, const char* secondTextureUniform, Locations& locations) {
    program.addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/face" + suffix + ".vert");
    program.addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/face" + suffix + ".frag");
    program.link();

    // assign the fixed texture units while the program is bound
    program.bind();
    program.setUniformValue("depthTexture", 0);
    program.setUniformValue(secondTextureUniform, 1);
    program.release();

    locations.texCoordCorner = program.uniformLocation("texCoordCorner");
    locations.texCoordRight = program.uniformLocation("texCoordRight");
    locations.texCoordUp = program.uniformLocation("texCoordUp");
}

View file

@@ -1,98 +0,0 @@
//
// VideoFace.h
// interface
//
// Created by Andrzej Kapolka on 7/11/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//
#ifndef __interface__VideoFace__
#define __interface__VideoFace__
#include <QObject>
#include <opencv2/opencv.hpp>
#ifdef HAVE_LIBVPX
#include <vpx_codec.h>
#endif
#include "InterfaceConfig.h"
class Head;
class ProgramObject;
const float FULL_FRAME_ASPECT = 0.0f;
/// Renders incoming (or locally captured) face video for an avatar's head.
class VideoFace : public QObject {
    Q_OBJECT

public:

    VideoFace(Head* owningHead);
    ~VideoFace();

    /// Returns true if there is any frame data (color or depth) to render.
    bool isActive() const { return _colorTextureID != 0 || _depthTextureID != 0; }

    /// Returns true if the active frame carries the full-frame aspect sentinel.
    bool isFullFrame() const { return isActive() && _aspectRatio == FULL_FRAME_ASPECT; }

    /// Adopts the local webcam's current textures and frame metrics.
    void setFrameFromWebcam();

    /// Drops our references to the current frame's textures.
    void clearFrame();

    /// Processes one face video packet; returns the number of bytes consumed.
    int processVideoMessage(unsigned char* packetData, size_t dataBytes);

    /// Renders the face at the given opacity; returns true if anything was drawn.
    bool render(float alpha);

public slots:

    void cycleRenderMode();

private slots:

    void setFrame(const cv::Mat& color, const cv::Mat& depth, float aspectRatio);

private:

    enum RenderMode { MESH, POINTS, RENDER_MODE_COUNT };

    void destroyCodecs();

    Head* _owningHead;
    RenderMode _renderMode;

    // zero means "no texture"; ownership depends on whether the textures
    // came from the webcam or were generated by setFrame()
    GLuint _colorTextureID;
    GLuint _depthTextureID;
    cv::Size2f _textureSize;
    cv::RotatedRect _textureRect;
    float _aspectRatio;

#ifdef HAVE_LIBVPX
    // a zero name field marks a context as uninitialized
    vpx_codec_ctx_t _colorCodec;
    vpx_codec_ctx_t _depthCodec;
#endif

    // last seen stream mode; a mode switch forces a codec reinit
    bool _lastFullFrame;
    bool _lastDepthOnly;

    // frame reassembly state for processVideoMessage()
    QByteArray _arrivingFrame;
    int _frameCount;
    int _frameBytesRemaining;

    /// Cached uniform locations for one of the shared shader programs.
    struct Locations {
        int texCoordCorner;
        int texCoordRight;
        int texCoordUp;
    };

    // note: parameter renamed from the misspelled "progam" to match the definition
    static void loadProgram(ProgramObject& program, const QString& suffix, const char* secondTextureUniform, Locations& locations);

    // shader programs and grid buffers shared by all instances, created lazily
    static bool _initialized;
    static ProgramObject _videoProgram;
    static Locations _videoProgramLocations;
    static ProgramObject _texturedProgram;
    static Locations _texturedProgramLocations;
    static GLuint _vboID;
    static GLuint _iboID;
};
#endif /* defined(__interface__VideoFace__) */

File diff suppressed because it is too large Load diff

View file

@@ -1,207 +0,0 @@
//
// Webcam.h
// interface
//
// Created by Andrzej Kapolka on 6/17/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//
#ifndef __interface__Webcam__
#define __interface__Webcam__
#include <QMetaType>
#include <QThread>
#include <QVector>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <opencv2/opencv.hpp>
#if defined(HAVE_OPENNI) && !defined(Q_MOC_RUN)
#include <XnCppWrapper.h>
#endif
#ifdef HAVE_LIBVPX
#include <vpx_codec.h>
#endif
#include "InterfaceConfig.h"
class QImage;
struct CvCapture;
class FrameGrabber;
class Joint;
typedef QVector<Joint> JointVector;
typedef std::vector<cv::KeyPoint> KeyPointVector;
/// Handles interaction with the webcam (including depth cameras such as the Kinect).
class Webcam : public QObject {
    Q_OBJECT

public:

    Webcam();
    ~Webcam();

    FrameGrabber* getGrabber() { return _grabber; }

    // whether the webcam is producing frames / sending them over the network
    bool isActive() const { return _active; }
    bool isSending() const { return _sending; }

    // current frame textures and metrics (mirrored by VideoFace when the
    // local avatar's face is driven by the webcam)
    GLuint getColorTextureID() const { return _colorTextureID; }
    GLuint getDepthTextureID() const { return _depthTextureID; }
    const cv::Size2f& getTextureSize() const { return _textureSize; }
    float getAspectRatio() const { return _aspectRatio; }
    const cv::RotatedRect& getFaceRect() const { return _faceRect; }

    // pose estimates derived from the captured frames -- see Webcam.cpp
    const glm::vec3& getEstimatedPosition() const { return _estimatedPosition; }
    const glm::vec3& getEstimatedRotation() const { return _estimatedRotation; }
    const JointVector& getEstimatedJoints() const { return _estimatedJoints; }

    void reset();
    void renderPreview(int screenWidth, int screenHeight);

public slots:

    void setEnabled(bool enabled);
    void setFrame(const cv::Mat& color, int format, const cv::Mat& depth, float midFaceDepth, float aspectRatio,
        const cv::RotatedRect& faceRect, bool sending, const JointVector& joints, const KeyPointVector& keyPoints);
    void setSkeletonTrackingOn(bool toggle) { _skeletonTrackingOn = toggle; };

private:

    // the grabber does the actual capture work in its own thread
    QThread _grabberThread;
    FrameGrabber* _grabber;

    bool _enabled;
    bool _active;
    bool _sending;

    // current frame state, updated via the setFrame() slot
    GLuint _colorTextureID;
    GLuint _depthTextureID;
    cv::Size2f _textureSize;
    float _aspectRatio;
    cv::RotatedRect _faceRect;
    cv::RotatedRect _initialFaceRect;
    float _initialFaceDepth;
    JointVector _joints;
    KeyPointVector _keyPoints;

    // LED tracking reference state -- presumably captured on reset();
    // confirm in Webcam.cpp
    glm::quat _initialLEDRotation;
    glm::vec3 _initialLEDPosition;
    float _initialLEDScale;

    // frame-rate bookkeeping
    uint64_t _startTimestamp;
    int _frameCount;
    uint64_t _lastFrameTimestamp;

    glm::vec3 _estimatedPosition;
    glm::vec3 _estimatedRotation;
    JointVector _estimatedJoints;

    bool _skeletonTrackingOn;
};
#ifdef HAVE_LIBVPX
/// Acquires and processes video frames in a dedicated thread.
class FrameGrabber : public QObject {
    Q_OBJECT

public:

    FrameGrabber();
    virtual ~FrameGrabber();

public slots:

    // slots, so the owning thread can invoke them across the thread boundary
    void cycleVideoSendMode();
    void setDepthOnly(bool depthOnly);
    void setLEDTrackingOn(bool ledTrackingOn);
    void reset();
    void shutdown();
    void grabFrame();

private:

    enum VideoSendMode { NO_VIDEO, FACE_VIDEO, FULL_FRAME_VIDEO, VIDEO_SEND_MODE_COUNT };

    bool init();
    void updateHSVFrame(const cv::Mat& frame, int format);
    void destroyCodecs();
    void configureCapture();

    bool _initialized;
    VideoSendMode _videoSendMode;
    bool _depthOnly;
    bool _ledTrackingOn;
    CvCapture* _capture;

    // face detection/tracking working buffers (cascade detection plus an
    // HSV back-projection search window -- presumably CamShift-style;
    // confirm in Webcam.cpp)
    cv::CascadeClassifier _faceCascade;
    cv::Mat _hsvFrame;
    cv::Mat _mask;
    cv::SparseMat _histogram;
    cv::Mat _backProject;
    cv::Rect _searchWindow;
    cv::Mat _grayDepthFrame;
    float _smoothedMidFaceDepth;

#ifdef HAVE_LIBVPX
    // NOTE(review): this inner guard is redundant -- the entire class is
    // already wrapped in #ifdef HAVE_LIBVPX (see above)
    vpx_codec_ctx_t _colorCodec;
    vpx_codec_ctx_t _depthCodec;
#endif

    // per-frame encoding state
    int _frameCount;
    cv::Mat _faceColor;
    cv::Mat _faceDepth;
    cv::Mat _smoothedFaceDepth;
    QByteArray _encodedFace;
    cv::RotatedRect _smoothedFaceRect;

    cv::SimpleBlobDetector _blobDetector;
    cv::Mat _grayFrame;

#ifdef HAVE_OPENNI
    // OpenNI (depth camera) capture state
    xn::Context _xnContext;
    xn::DepthGenerator _depthGenerator;
    xn::ImageGenerator _imageGenerator;
    xn::UserGenerator _userGenerator;
    xn::DepthMetaData _depthMetaData;
    xn::ImageMetaData _imageMetaData;
    XnUserID _userID;
#endif
};
#endif //def HAVE_LIBVPX
/// Contains the 3D transform and 2D projected position of a tracked joint.
class Joint {
public:

    Joint(const glm::vec3& position, const glm::quat& rotation, const glm::vec3& projected);
    Joint(); // presumably constructs an invalid joint -- confirm in Webcam.cpp

    bool isValid;        // whether this joint was tracked -- TODO confirm setter
    glm::vec3 position;  // 3D position
    glm::quat rotation;  // 3D orientation
    glm::vec3 projected; // 2D projected position -- TODO confirm coordinate space
};
Q_DECLARE_METATYPE(JointVector)
Q_DECLARE_METATYPE(KeyPointVector)
#ifdef HAVE_LIBVPX
Q_DECLARE_METATYPE(cv::Mat)
Q_DECLARE_METATYPE(cv::RotatedRect)
#endif //def HAVE_LIBVPX
#endif /* defined(__interface__Webcam__) */