More work on full-frame sending.

Andrzej Kapolka 2013-08-01 13:00:01 -07:00
parent a86ceb2759
commit ba9b5888d2
6 changed files with 57 additions and 40 deletions

View file

@@ -17,9 +17,6 @@ uniform vec2 texCoordRight;
// the texture coordinate vector from bottom to the top
uniform vec2 texCoordUp;
// the aspect ratio of the image
uniform float aspectRatio;
// the depth texture
uniform sampler2D depthTexture;
@@ -31,6 +28,5 @@ void main(void) {
const float MIN_VISIBLE_DEPTH = 1.0 / 255.0;
const float MAX_VISIBLE_DEPTH = 254.0 / 255.0;
gl_FrontColor = vec4(1.0, 1.0, 1.0, step(MIN_VISIBLE_DEPTH, depth) * (1.0 - step(MAX_VISIBLE_DEPTH, depth)));
gl_Position = gl_ModelViewProjectionMatrix * vec4(0.5 - gl_Vertex.x,
(gl_Vertex.y - 0.5) / aspectRatio, depth * 2.0 - 2.0, 1.0);
gl_Position = gl_ModelViewProjectionMatrix * vec4(0.5 - gl_Vertex.x, gl_Vertex.y - 0.5, depth - 0.5, 1.0);
}

View file

@@ -19,6 +19,7 @@
#include "Application.h"
#include "Webcam.h"
#include "avatar/Face.h"
using namespace cv;
using namespace std;
@@ -155,7 +156,7 @@ Webcam::~Webcam() {
const float METERS_PER_MM = 1.0f / 1000.0f;
void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midFaceDepth,
const RotatedRect& faceRect, const JointVector& joints) {
float aspectRatio, const RotatedRect& faceRect, const JointVector& joints) {
IplImage colorImage = color;
glPixelStorei(GL_UNPACK_ROW_LENGTH, colorImage.widthStep / 3);
if (_colorTextureID == 0) {
@@ -192,7 +193,8 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
glBindTexture(GL_TEXTURE_2D, 0);
// store our face rect and joints, update our frame count for fps computation
// store our various data, update our frame count for fps computation
_aspectRatio = aspectRatio;
_faceRect = faceRect;
_joints = joints;
_frameCount++;
@@ -538,6 +540,10 @@ void FrameGrabber::grabFrame() {
_smoothedFaceRect.angle = glm::mix(faceRect.angle, _smoothedFaceRect.angle, FACE_RECT_SMOOTHING);
}
// the aspect ratio is derived from the face rect dimensions unless we're full-frame
float aspectRatio = (_videoSendMode == FULL_FRAME_VIDEO) ? FULL_FRAME_ASPECT :
(_smoothedFaceRect.size.width / _smoothedFaceRect.size.height);
if (_videoSendMode != NO_VIDEO) {
if (_colorCodec.name == 0) {
// initialize encoder context(s)
@@ -628,10 +634,9 @@ void FrameGrabber::grabFrame() {
// encode the frame
vpx_codec_encode(&_colorCodec, &vpxImage, ++_frameCount, 1, 0, VPX_DL_REALTIME);
// start the payload off with the aspect ratio (zero for no face)
// start the payload off with the aspect ratio (zero for full frame)
QByteArray payload(sizeof(float), 0);
*(float*)payload.data() = (_videoSendMode == FACE_VIDEO) ?
(_smoothedFaceRect.size.width / _smoothedFaceRect.size.height) : 0.0f;
*(float*)payload.data() = aspectRatio;
// extract the encoded frame
vpx_codec_iter_t iterator = 0;
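For context, a minimal receive-side sketch of the payload layout this hunk settles on: one leading float (0.0f now meaning full frame rather than no face), with the encoded frame data following. The handler name is hypothetical, and everything after the float is treated as opaque encoded data here since the rest of the payload layout is not shown in this hunk.

    #include <cstdint>
    #include <QByteArray>

    // Hypothetical sketch only: read the leading aspect-ratio float, then hand the
    // remaining bytes to the video decoder.
    void handleVideoPayload(const QByteArray& payload) {
        float aspectRatio = *(const float*)payload.constData();
        const uint8_t* encoded = (const uint8_t*)payload.constData() + sizeof(float);
        int encodedBytes = payload.size() - (int)sizeof(float);
        // aspectRatio == 0.0f (FULL_FRAME_ASPECT) selects the full-frame path;
        // any other value is the smoothed face rect's width/height ratio
        (void)encoded;
        (void)encodedBytes;
        (void)aspectRatio;  // decoder call intentionally omitted
    }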
@@ -740,7 +745,7 @@ void FrameGrabber::grabFrame() {
QMetaObject::invokeMethod(Application::getInstance()->getWebcam(), "setFrame",
Q_ARG(cv::Mat, color), Q_ARG(int, format), Q_ARG(cv::Mat, _grayDepthFrame), Q_ARG(float, _smoothedMidFaceDepth),
Q_ARG(cv::RotatedRect, _smoothedFaceRect), Q_ARG(JointVector, joints));
Q_ARG(float, aspectRatio), Q_ARG(cv::RotatedRect, _smoothedFaceRect), Q_ARG(JointVector, joints));
}
bool FrameGrabber::init() {

View file

@@ -50,6 +50,8 @@ public:
GLuint getDepthTextureID() const { return _depthTextureID; }
const cv::Size2f& getTextureSize() const { return _textureSize; }
float getAspectRatio() const { return _aspectRatio; }
const cv::RotatedRect& getFaceRect() const { return _faceRect; }
const glm::vec3& getEstimatedPosition() const { return _estimatedPosition; }
@@ -63,7 +65,7 @@ public slots:
void setEnabled(bool enabled);
void setFrame(const cv::Mat& color, int format, const cv::Mat& depth, float midFaceDepth,
const cv::RotatedRect& faceRect, const JointVector& joints);
float aspectRatio, const cv::RotatedRect& faceRect, const JointVector& joints);
private:
@@ -75,6 +77,7 @@ private:
GLuint _colorTextureID;
GLuint _depthTextureID;
cv::Size2f _textureSize;
float _aspectRatio;
cv::RotatedRect _faceRect;
cv::RotatedRect _initialFaceRect;
float _initialFaceDepth;

View file

@@ -314,10 +314,7 @@ void Avatar::updateFromGyrosAndOrWebcam(bool gyroLook,
estimatedPosition = webcam->getEstimatedPosition();
// apply face data
_head.getFace().setColorTextureID(webcam->getColorTextureID());
_head.getFace().setDepthTextureID(webcam->getDepthTextureID());
_head.getFace().setTextureSize(webcam->getTextureSize());
_head.getFace().setTextureRect(webcam->getFaceRect());
_head.getFace().setFrameFromWebcam();
// compute and store the joint rotations
const JointVector& joints = webcam->getEstimatedJoints();
@@ -334,7 +331,7 @@
}
}
} else {
_head.getFace().setColorTextureID(0);
_head.getFace().clearFrame();
}
_head.setPitch(estimatedRotation.x * amplifyAngle.x + pitchFromTouch);
_head.setYaw(estimatedRotation.y * amplifyAngle.y + yawFromTouch);
@@ -1300,9 +1297,15 @@ float Avatar::getBallRenderAlpha(int ball, bool lookingInMirror) const {
}
void Avatar::renderBody(bool lookingInMirror, bool renderAvatarBalls) {
// Render the body as balls and cones
if (renderAvatarBalls || !_voxels.getVoxelURL().isValid()) {
if (_head.getFace().isFullFrame()) {
// Render the full-frame video
float alpha = getBallRenderAlpha(BODY_BALL_HEAD_BASE, lookingInMirror);
if (alpha > 0.0f) {
_head.getFace().render(1.0f);
}
} else if (renderAvatarBalls || !_voxels.getVoxelURL().isValid()) {
// Render the body as balls and cones
for (int b = 0; b < NUM_AVATAR_BODY_BALLS; b++) {
float alpha = getBallRenderAlpha(b, lookingInMirror);
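A hedged, standalone restatement of the dispatch added above; the stub type below is illustrative, not the real Face class, and the voxel fallback branch is simplified away.

    #include <cstdio>

    struct FaceStub {
        bool fullFrame;
        bool isFullFrame() const { return fullFrame; }
        void render(float alpha) { printf("full-frame video quad, alpha %.1f\n", alpha); }
    };

    void renderBodySketch(FaceStub& face, float headBaseAlpha, bool renderAvatarBalls) {
        if (face.isFullFrame()) {
            // the head-base ball's alpha acts only as a visibility gate here; when
            // visible, the video quad itself is rendered fully opaque (alpha 1.0)
            if (headBaseAlpha > 0.0f) {
                face.render(1.0f);
            }
        } else if (renderAvatarBalls) {
            printf("ball-and-cone body\n");  // per-ball alpha handled in the real loop
        }
    }

    int main() {
        FaceStub face = { true };
        renderBodySketch(face, 1.0f, false);
        return 0;
    }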

View file

@@ -17,6 +17,7 @@
#include "Avatar.h"
#include "Head.h"
#include "Face.h"
#include "Webcam.h"
#include "renderer/ProgramObject.h"
using namespace cv;
@@ -25,7 +26,6 @@ ProgramObject* Face::_program = 0;
int Face::_texCoordCornerLocation;
int Face::_texCoordRightLocation;
int Face::_texCoordUpLocation;
int Face::_aspectRatioLocation;
GLuint Face::_vboID;
GLuint Face::_iboID;
@@ -55,11 +55,19 @@ Face::~Face() {
}
}
void Face::setTextureRect(const cv::RotatedRect& textureRect) {
_textureRect = textureRect;
_aspectRatio = _textureRect.size.width / _textureRect.size.height;
void Face::setFrameFromWebcam() {
Webcam* webcam = Application::getInstance()->getWebcam();
_colorTextureID = webcam->getColorTextureID();
_depthTextureID = webcam->getDepthTextureID();
_textureSize = webcam->getTextureSize();
_textureRect = webcam->getFaceRect();
_aspectRatio = webcam->getAspectRatio();
}
void Face::clearFrame() {
_colorTextureID = 0;
}
int Face::processVideoMessage(unsigned char* packetData, size_t dataBytes) {
if (_colorCodec.name == 0) {
// initialize decoder context
@@ -210,17 +218,20 @@ bool Face::render(float alpha) {
glm::vec3 axis = glm::axis(orientation);
glRotatef(glm::angle(orientation), axis.x, axis.y, axis.z);
float aspect, scale;
if (_aspectRatio == 0.0f) {
float aspect, xScale, zScale;
if (_aspectRatio == FULL_FRAME_ASPECT) {
aspect = _textureSize.width / _textureSize.height;
const float FULL_FRAME_SCALE = 0.5f;
scale = FULL_FRAME_SCALE * _owningHead->getScale();
xScale = FULL_FRAME_SCALE * _owningHead->getScale();
zScale = xScale * 0.3f;
} else {
aspect = _aspectRatio;
scale = BODY_BALL_RADIUS_HEAD_BASE * _owningHead->getScale();
xScale = BODY_BALL_RADIUS_HEAD_BASE * _owningHead->getScale();
zScale = xScale * 1.5f;
glTranslatef(0.0f, -xScale * 0.75f, -xScale);
}
glScalef(scale, scale, scale);
glScalef(xScale, xScale / aspect, zScale);
glColor4f(1.0f, 1.0f, 1.0f, alpha);
@@ -254,7 +265,6 @@ bool Face::render(float alpha) {
_texCoordCornerLocation = _program->uniformLocation("texCoordCorner");
_texCoordRightLocation = _program->uniformLocation("texCoordRight");
_texCoordUpLocation = _program->uniformLocation("texCoordUp");
_aspectRatioLocation = _program->uniformLocation("aspectRatio");
glGenBuffers(1, &_vboID);
glBindBuffer(GL_ARRAY_BUFFER, _vboID);
@@ -303,7 +313,6 @@ bool Face::render(float alpha) {
(points[3].x - points[0].x) / _textureSize.width, (points[3].y - points[0].y) / _textureSize.height);
_program->setUniformValue(_texCoordUpLocation,
(points[1].x - points[0].x) / _textureSize.width, (points[1].y - points[0].y) / _textureSize.height);
_program->setUniformValue(_aspectRatioLocation, aspect);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, 0);
@@ -335,13 +344,13 @@ bool Face::render(float alpha) {
glBegin(GL_QUADS);
glTexCoord2f(points[0].x / _textureSize.width, points[0].y / _textureSize.height);
glVertex3f(0.5f, -0.5f / aspect, -0.5f);
glVertex3f(0.5f, -0.5f, 0.0f);
glTexCoord2f(points[1].x / _textureSize.width, points[1].y / _textureSize.height);
glVertex3f(0.5f, 0.5f / aspect, -0.5f);
glVertex3f(0.5f, 0.5f, 0.0f);
glTexCoord2f(points[2].x / _textureSize.width, points[2].y / _textureSize.height);
glVertex3f(-0.5f, 0.5f / aspect, -0.5f);
glVertex3f(-0.5f, 0.5f, 0.0f);
glTexCoord2f(points[3].x / _textureSize.width, points[3].y / _textureSize.height);
glVertex3f(-0.5f, -0.5f / aspect, -0.5f);
glVertex3f(-0.5f, -0.5f, 0.0f);
glEnd();
glDisable(GL_TEXTURE_2D);
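To make the geometry change in this file concrete: the quad is now a unit square at z = 0, and the per-vertex division by the removed aspectRatio uniform is folded into the non-uniform glScalef(xScale, xScale / aspect, zScale) earlier in render(). A rough worked example, assuming a 640x480 full-frame texture and a head scale of 1.0 (both values are illustrative, not taken from the code):

    #include <cstdio>

    int main() {
        float aspect = 640.0f / 480.0f;   // ~1.333; the _textureSize ratio in the full-frame case
        float xScale = 0.5f * 1.0f;       // FULL_FRAME_SCALE * assumed head scale
        float yScale = xScale / aspect;   // ~0.375; restores the image's proportions
        float zScale = xScale * 0.3f;     // flattens the depth-derived displacement
        // the +/-0.5 unit quad thus ends up about 0.5 wide by 0.375 tall in model
        // units, the same proportions the old shader-side division produced
        printf("scale: %.3f x %.3f x %.3f\n", xScale, yScale, zScale);
        return 0;
    }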

View file

@@ -20,6 +20,8 @@
class Head;
class ProgramObject;
const float FULL_FRAME_ASPECT = 0.0f;
class Face : public QObject {
Q_OBJECT
@@ -28,10 +30,10 @@ public:
Face(Head* owningHead);
~Face();
void setColorTextureID(GLuint colorTextureID) { _colorTextureID = colorTextureID; }
void setDepthTextureID(GLuint depthTextureID) { _depthTextureID = depthTextureID; }
void setTextureSize(const cv::Size2f& textureSize) { _textureSize = textureSize; }
void setTextureRect(const cv::RotatedRect& textureRect);
bool isFullFrame() const { return _colorTextureID != 0 && _aspectRatio == FULL_FRAME_ASPECT; }
void setFrameFromWebcam();
void clearFrame();
int processVideoMessage(unsigned char* packetData, size_t dataBytes);
@@ -68,7 +70,6 @@ private:
static int _texCoordCornerLocation;
static int _texCoordRightLocation;
static int _texCoordUpLocation;
static int _aspectRatioLocation;
static GLuint _vboID;
static GLuint _iboID;
};