Mirror of https://github.com/overte-org/overte.git (synced 2025-04-23 01:13:32 +02:00)
Merge pull request #802 from ey6es/master

Added a textured rendering mode for depth data that doesn't use video input; rather, it computes normals from the depth and uses local lighting.

Commit 7583e1cd14: 12 changed files with 565 additions and 315 deletions.
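The heart of the change is the pair of face_textured shaders added below: instead of sampling webcam video, the fragment shader estimates a surface normal from neighboring depth-texture samples and lights a procedural marble surface with the standard OpenGL light. As a reading aid, here is a minimal C++/glm sketch of that central-difference idea; the function and its parameters are illustrative only, not part of the commit.

    #include <glm/glm.hpp>

    // Sketch: build a normal from four neighboring depth samples, mirroring what
    // face_textured.frag below does per fragment. The small constant z term plays
    // the same role as the -0.05 in the shader, controlling how strongly depth
    // differences tilt the normal away from the viewer.
    glm::vec3 normalFromDepthSamples(float left, float right, float bottom, float top) {
        return glm::normalize(glm::vec3(left - right, top - bottom, -0.05f));
    }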
interface/resources/shaders/face_textured.frag (new file, 83 lines)
@@ -0,0 +1,83 @@
#version 120

//
// face_textured.frag
// fragment shader
//
// Created by Andrzej Kapolka on 8/6/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//

// the texture coordinate vector from left to right
uniform vec2 texCoordRight;

// the texture coordinate vector from bottom to the top
uniform vec2 texCoordUp;

// the permutation/normal texture
uniform sampler2D permutationNormalTexture;

// the depth texture
uniform sampler2D depthTexture;

// the position in model space
varying vec3 position;

// returns the gradient at a single corner of our sampling cube
vec3 grad(vec3 location) {
    float p1 = texture2D(permutationNormalTexture, vec2(location.x / 256.0, 0.25)).r;
    float p2 = texture2D(permutationNormalTexture, vec2(p1 + location.y / 256.0, 0.25)).r;
    return texture2D(permutationNormalTexture, vec2(p2 + location.z / 256.0, 0.75)).xyz * 2.0 - vec3(1.0, 1.0, 1.0);
}

// returns the perlin noise value for the specified location
float perlin(vec3 location) {
    vec3 floors = floor(location);
    vec3 ceils = ceil(location);
    vec3 fff = grad(floors);
    vec3 ffc = grad(vec3(floors.x, floors.y, ceils.z));
    vec3 fcf = grad(vec3(floors.x, ceils.y, floors.z));
    vec3 fcc = grad(vec3(floors.x, ceils.y, ceils.z));
    vec3 cff = grad(vec3(ceils.x, floors.y, floors.z));
    vec3 cfc = grad(vec3(ceils.x, floors.y, ceils.z));
    vec3 ccf = grad(vec3(ceils.x, ceils.y, floors.z));
    vec3 ccc = grad(ceils);
    vec3 ffracts = fract(location);
    vec3 cfracts = ffracts - vec3(1.0, 1.0, 1.0);
    vec3 params = ffracts*ffracts*(3.0 - 2.0*ffracts);

    float fffv = dot(fff, ffracts);
    float ffcv = dot(ffc, vec3(ffracts.x, ffracts.y, cfracts.z));
    float fcfv = dot(fcf, vec3(ffracts.x, cfracts.y, ffracts.z));
    float fccv = dot(fcc, vec3(ffracts.x, cfracts.y, cfracts.z));
    float cffv = dot(cff, vec3(cfracts.x, ffracts.y, ffracts.z));
    float cfcv = dot(cfc, vec3(cfracts.x, ffracts.y, cfracts.z));
    float ccfv = dot(ccf, vec3(cfracts.x, cfracts.y, ffracts.z));
    float cccv = dot(ccc, cfracts);

    return mix(
        mix(mix(fffv, cffv, params.x), mix(fcfv, ccfv, params.x), params.y),
        mix(mix(ffcv, cfcv, params.x), mix(fccv, cccv, params.x), params.y),
        params.z);
}

void main(void) {
    // compute normal from adjacent depth values
    float left = texture2D(depthTexture, gl_TexCoord[0].st - texCoordRight * 0.01).r;
    float right = texture2D(depthTexture, gl_TexCoord[0].st + texCoordRight * 0.01).r;
    float bottom = texture2D(depthTexture, gl_TexCoord[0].st - texCoordUp * 0.01).r;
    float top = texture2D(depthTexture, gl_TexCoord[0].st + texCoordUp * 0.01).r;
    vec3 normal = normalize(gl_NormalMatrix * vec3(left - right, top - bottom, -0.05));

    // compute the specular component (sans exponent) based on the normal OpenGL lighting model
    float specular = max(0.0, dot(normalize(gl_LightSource[0].position.xyz + vec3(0.0, 0.0, 1.0)), normal));

    // the base color is a subtle marble texture produced by modulating the phase of a sine wave by perlin noise
    vec3 color = mix(vec3(1.0, 1.0, 1.0), vec3(0.75, 0.75, 0.75),
        sin(dot(position, vec3(25.0, 25.0, 25.0)) + 2.0 * perlin(position * 10.0)));

    // standard lighting
    gl_FragColor = vec4(color * (gl_LightModel.ambient.rgb + /* gl_LightSource[0].ambient.rgb + */
        gl_LightSource[0].diffuse.rgb * max(0.0, dot(normal, gl_LightSource[0].position.xyz))) +
        pow(specular, gl_FrontMaterial.shininess) * gl_FrontLightProduct[0].specular.rgb, gl_Color.a);
}
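For context, the permutation/normal texture that grad() samples is a 256x2 RGB texture created elsewhere in this commit (see TextureCache.cpp near the end of the diff): the first row holds random permutation offsets and is sampled at y = 0.25, and the second row holds random unit normals packed into bytes, sampled at y = 0.75 and unpacked with * 2.0 - 1.0. A minimal sketch of that byte packing, matching the creation code later in the diff; the helper name is illustrative.

    #include <glm/glm.hpp>
    #include <glm/gtc/random.hpp>

    // Pack one random unit normal into three bytes, the [0, 255] encoding used by
    // the second row of the permutation/normal texture.
    void packRandomNormal(unsigned char* dest) {
        glm::vec3 n = glm::sphericalRand(1.0f);
        dest[0] = (unsigned char)(((n.x + 1.0f) / 2.0f) * 255.0f);
        dest[1] = (unsigned char)(((n.y + 1.0f) / 2.0f) * 255.0f);
        dest[2] = (unsigned char)(((n.z + 1.0f) / 2.0f) * 255.0f);
    }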
interface/resources/shaders/face_textured.vert (new file, 38 lines)
@@ -0,0 +1,38 @@
#version 120

//
// face_textured.vert
// vertex shader
//
// Created by Andrzej Kapolka on 8/6/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//

// the lower left texture coordinate
uniform vec2 texCoordCorner;

// the texture coordinate vector from left to right
uniform vec2 texCoordRight;

// the texture coordinate vector from bottom to the top
uniform vec2 texCoordUp;

// the depth texture
uniform sampler2D depthTexture;

// the position in model space
varying vec3 position;

void main(void) {
    gl_TexCoord[0] = vec4(texCoordCorner + gl_Vertex.x * texCoordRight + gl_Vertex.y * texCoordUp, 0.0, 1.0);
    float depth = texture2D(depthTexture, gl_TexCoord[0].st).r;

    // store the model space vertex
    position = gl_Vertex.xyz;

    // set alpha to zero for invalid depth values
    const float MIN_VISIBLE_DEPTH = 1.0 / 255.0;
    const float MAX_VISIBLE_DEPTH = 254.0 / 255.0;
    gl_FrontColor = vec4(1.0, 1.0, 1.0, step(MIN_VISIBLE_DEPTH, depth) * (1.0 - step(MAX_VISIBLE_DEPTH, depth)));
    gl_Position = gl_ModelViewProjectionMatrix * vec4(0.5 - gl_Vertex.x, gl_Vertex.y - 0.5, depth - 0.5, 1.0);
}
Application.cpp

@@ -1952,10 +1952,11 @@ void Application::initMenu() {
    _testPing->setChecked(true);
    (_fullScreenMode = optionsMenu->addAction("Fullscreen", this, SLOT(setFullscreen(bool)), Qt::Key_F))->setCheckable(true);
    optionsMenu->addAction("Webcam", &_webcam, SLOT(setEnabled(bool)))->setCheckable(true);
    optionsMenu->addAction("Toggle Skeleton Tracking", &_webcam, SLOT(setSkeletonTrackingOn(bool)))->setCheckable(true);
    optionsMenu->addAction("Skeleton Tracking", &_webcam, SLOT(setSkeletonTrackingOn(bool)))->setCheckable(true);
    (_wantCollisionsOn = optionsMenu->addAction("Turn collisions On", this, SLOT(toggleWantCollisionsOn())))->setCheckable(true);
    _wantCollisionsOn->setChecked(true);
    optionsMenu->addAction("Cycle Webcam Send Mode", _webcam.getGrabber(), SLOT(cycleVideoSendMode()));
    optionsMenu->addAction("Webcam Texture", _webcam.getGrabber(), SLOT(setDepthOnly(bool)))->setCheckable(true);
    optionsMenu->addAction("Go Home", this, SLOT(goHome()), Qt::CTRL | Qt::Key_G);

    QMenu* audioMenu = menuBar->addMenu("Audio");
Application.h

@@ -31,7 +31,6 @@
#include "Environment.h"
#include "PacketHeaders.h"
#include "ParticleSystem.h"
#include "renderer/GeometryCache.h"
#include "SerialInterface.h"
#include "Stars.h"
#include "Swatch.h"

@@ -43,6 +42,8 @@
#include "PieMenu.h"
#include "avatar/Avatar.h"
#include "avatar/HandControl.h"
#include "renderer/GeometryCache.h"
#include "renderer/TextureCache.h"
#include "ui/BandwidthDialog.h"
#include "ui/ChatEntry.h"
#include "ui/VoxelStatsDialog.h"

@@ -116,6 +117,7 @@ public:

    QNetworkAccessManager* getNetworkAccessManager() { return _networkAccessManager; }
    GeometryCache* getGeometryCache() { return &_geometryCache; }
    TextureCache* getTextureCache() { return &_textureCache; }

    void resetSongMixMenuItem();
    void setupWorldLight(Camera& whichCamera);

@@ -438,6 +440,7 @@ private:
    int _hmdWarpParamLocation;

    GeometryCache _geometryCache;
    TextureCache _textureCache;

    ParticleSystem _particleSystem;
VoxelSystem.cpp

@@ -17,8 +17,6 @@
#include <fstream> // to load voxels from file
#include <pthread.h>

#include <glm/gtc/random.hpp>

#include <OctalCode.h>
#include <PacketHeaders.h>
#include <PerfStat.h>

@@ -493,7 +491,6 @@ glm::vec3 VoxelSystem::computeVoxelVertex(const glm::vec3& startVertex, float vo
}

ProgramObject* VoxelSystem::_perlinModulateProgram = 0;
GLuint VoxelSystem::_permutationNormalTextureID = 0;

void VoxelSystem::init() {

@@ -585,29 +582,9 @@ void VoxelSystem::init() {
    _perlinModulateProgram->addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/perlin_modulate.frag");
    _perlinModulateProgram->link();

    _perlinModulateProgram->bind();
    _perlinModulateProgram->setUniformValue("permutationNormalTexture", 0);

    // create the permutation/normal texture
    glGenTextures(1, &_permutationNormalTextureID);
    glBindTexture(GL_TEXTURE_2D, _permutationNormalTextureID);

    // the first line consists of random permutation offsets
    unsigned char data[256 * 2 * 3];
    for (int i = 0; i < 256 * 3; i++) {
        data[i] = rand() % 256;
    }
    // the next, random unit normals
    for (int i = 256 * 3; i < 256 * 3 * 2; i += 3) {
        glm::vec3 randvec = glm::sphericalRand(1.0f);
        data[i] = ((randvec.x + 1.0f) / 2.0f) * 255.0f;
        data[i + 1] = ((randvec.y + 1.0f) / 2.0f) * 255.0f;
        data[i + 2] = ((randvec.z + 1.0f) / 2.0f) * 255.0f;
    }
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 256, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

    glBindTexture(GL_TEXTURE_2D, 0);
    _perlinModulateProgram->release();
}

void VoxelSystem::updateFullVBOs() {

@@ -734,7 +711,7 @@ void VoxelSystem::applyScaleAndBindProgram(bool texture) {

    if (texture) {
        _perlinModulateProgram->bind();
        glBindTexture(GL_TEXTURE_2D, _permutationNormalTextureID);
        glBindTexture(GL_TEXTURE_2D, Application::getInstance()->getTextureCache()->getPermutationNormalTextureID());
    }
}
VoxelSystem.h

@@ -196,7 +196,6 @@ private:
    bool _voxelsDirty;

    static ProgramObject* _perlinModulateProgram;
    static GLuint _permutationNormalTextureID;

    int _hookID;
    std::vector<glBufferIndex> _freeIndexes;
Webcam.cpp

@@ -71,26 +71,28 @@ void Webcam::reset() {
}

void Webcam::renderPreview(int screenWidth, int screenHeight) {
    if (_enabled && _colorTextureID != 0) {
        glBindTexture(GL_TEXTURE_2D, _colorTextureID);
    if (_enabled) {
        glEnable(GL_TEXTURE_2D);
        glColor3f(1.0f, 1.0f, 1.0f);
        glBegin(GL_QUADS);
        const int PREVIEW_HEIGHT = 200;
        int previewWidth = _textureSize.width * PREVIEW_HEIGHT / _textureSize.height;
        int top = screenHeight - 600;
        int left = screenWidth - previewWidth - 10;

        glTexCoord2f(0, 0);
        glVertex2f(left, top);
        glTexCoord2f(1, 0);
        glVertex2f(left + previewWidth, top);
        glTexCoord2f(1, 1);
        glVertex2f(left + previewWidth, top + PREVIEW_HEIGHT);
        glTexCoord2f(0, 1);
        glVertex2f(left, top + PREVIEW_HEIGHT);
        glEnd();

        const int PREVIEW_HEIGHT = 200;
        int previewWidth = _textureSize.width * PREVIEW_HEIGHT / _textureSize.height;
        int top = screenHeight - 600;
        int left = screenWidth - previewWidth - 10;
        if (_colorTextureID != 0) {
            glBindTexture(GL_TEXTURE_2D, _colorTextureID);
            glBegin(GL_QUADS);
            glTexCoord2f(0, 0);
            glVertex2f(left, top);
            glTexCoord2f(1, 0);
            glVertex2f(left + previewWidth, top);
            glTexCoord2f(1, 1);
            glVertex2f(left + previewWidth, top + PREVIEW_HEIGHT);
            glTexCoord2f(0, 1);
            glVertex2f(left, top + PREVIEW_HEIGHT);
            glEnd();
        }

        if (_depthTextureID != 0) {
            glBindTexture(GL_TEXTURE_2D, _depthTextureID);
            glBegin(GL_QUADS);
@@ -157,22 +159,26 @@ const float METERS_PER_MM = 1.0f / 1000.0f;

void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midFaceDepth,
        float aspectRatio, const RotatedRect& faceRect, bool sending, const JointVector& joints) {
    IplImage colorImage = color;
    glPixelStorei(GL_UNPACK_ROW_LENGTH, colorImage.widthStep / 3);
    if (_colorTextureID == 0) {
        glGenTextures(1, &_colorTextureID);
        glBindTexture(GL_TEXTURE_2D, _colorTextureID);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, _textureSize.width = colorImage.width, _textureSize.height = colorImage.height,
            0, format, GL_UNSIGNED_BYTE, colorImage.imageData);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        qDebug("Capturing video at %gx%g.\n", _textureSize.width, _textureSize.height);

    } else {
        glBindTexture(GL_TEXTURE_2D, _colorTextureID);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, _textureSize.width, _textureSize.height, format,
            GL_UNSIGNED_BYTE, colorImage.imageData);
    if (!color.empty()) {
        IplImage colorImage = color;
        glPixelStorei(GL_UNPACK_ROW_LENGTH, colorImage.widthStep / 3);
        if (_colorTextureID == 0) {
            glGenTextures(1, &_colorTextureID);
            glBindTexture(GL_TEXTURE_2D, _colorTextureID);
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, _textureSize.width = colorImage.width, _textureSize.height = colorImage.height,
                0, format, GL_UNSIGNED_BYTE, colorImage.imageData);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

        } else {
            glBindTexture(GL_TEXTURE_2D, _colorTextureID);
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, _textureSize.width, _textureSize.height, format,
                GL_UNSIGNED_BYTE, colorImage.imageData);
        }
    } else if (_colorTextureID != 0) {
        glDeleteTextures(1, &_colorTextureID);
        _colorTextureID = 0;
    }

    if (!depth.empty()) {
        IplImage depthImage = depth;
        glPixelStorei(GL_UNPACK_ROW_LENGTH, depthImage.widthStep);

@@ -189,6 +195,9 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, _textureSize.width, _textureSize.height, GL_LUMINANCE,
                GL_UNSIGNED_BYTE, depthImage.imageData);
        }
    } else if (_depthTextureID != 0) {
        glDeleteTextures(1, &_depthTextureID);
        _depthTextureID = 0;
    }
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
@@ -273,8 +282,8 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
    QTimer::singleShot(qMax((int)remaining / 1000, 0), _grabber, SLOT(grabFrame()));
}

FrameGrabber::FrameGrabber() : _initialized(false), _videoSendMode(FULL_FRAME_VIDEO), _capture(0), _searchWindow(0, 0, 0, 0),
    _smoothedMidFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(), _frameCount(0) {
FrameGrabber::FrameGrabber() : _initialized(false), _videoSendMode(FULL_FRAME_VIDEO), _depthOnly(false), _capture(0),
    _searchWindow(0, 0, 0, 0), _smoothedMidFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(), _frameCount(0) {
}

FrameGrabber::~FrameGrabber() {

@@ -374,6 +383,11 @@ void FrameGrabber::cycleVideoSendMode() {
    destroyCodecs();
}

void FrameGrabber::setDepthOnly(bool depthOnly) {
    _depthOnly = depthOnly;
    destroyCodecs();
}

void FrameGrabber::reset() {
    _searchWindow = cv::Rect(0, 0, 0, 0);
@@ -479,7 +493,7 @@ void FrameGrabber::grabFrame() {
        encodedWidth = color.cols;
        encodedHeight = color.rows;
        aspectRatio = FULL_FRAME_ASPECT;
        colorBitrateMultiplier = 4.0f;
        colorBitrateMultiplier = depthBitrateMultiplier = 4.0f;

    } else {
        // if we don't have a search window (yet), try using the face cascade
@@ -591,108 +605,129 @@ void FrameGrabber::grabFrame() {
        depth.convertTo(_grayDepthFrame, CV_8UC1, 1.0, depthOffset);
    }

    QByteArray payload;
    // increment the frame count that identifies frames
    _frameCount++;

    QByteArray payload;
    if (_videoSendMode != NO_VIDEO) {
        if (_colorCodec.name == 0) {
            // initialize encoder context(s)
            vpx_codec_enc_cfg_t codecConfig;
            vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &codecConfig, 0);
            codecConfig.rc_target_bitrate = ENCODED_FACE_WIDTH * ENCODED_FACE_HEIGHT * colorBitrateMultiplier *
                codecConfig.rc_target_bitrate / codecConfig.g_w / codecConfig.g_h;
            codecConfig.g_w = encodedWidth;
            codecConfig.g_h = encodedHeight;
            vpx_codec_enc_init(&_colorCodec, vpx_codec_vp8_cx(), &codecConfig, 0);

            if (!depth.empty()) {
                codecConfig.rc_target_bitrate *= depthBitrateMultiplier;
                vpx_codec_enc_init(&_depthCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
            }
        }

        Mat transform;
        if (_videoSendMode == FACE_VIDEO) {
            // resize/rotate face into encoding rectangle
            _faceColor.create(encodedHeight, encodedWidth, CV_8UC3);
            warpAffine(color, _faceColor, faceTransform, _faceColor.size());

        } else {
            _faceColor = color;
        }

        // convert from RGB to YV12: see http://www.fourcc.org/yuv.php and
        // http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
        // start the payload off with the aspect ratio (zero for full frame)
        payload.append((const char*)&aspectRatio, sizeof(float));

        // prepare the image in which we'll store the data
        const int ENCODED_BITS_PER_Y = 8;
        const int ENCODED_BITS_PER_VU = 2;
        const int ENCODED_BITS_PER_PIXEL = ENCODED_BITS_PER_Y + 2 * ENCODED_BITS_PER_VU;
        const int BITS_PER_BYTE = 8;
        _encodedFace.resize(encodedWidth * encodedHeight * ENCODED_BITS_PER_PIXEL / BITS_PER_BYTE);
        vpx_image_t vpxImage;
        vpx_img_wrap(&vpxImage, VPX_IMG_FMT_YV12, encodedWidth, encodedHeight, 1,
            (unsigned char*)_encodedFace.data());
        uchar* yline = vpxImage.planes[0];
        uchar* vline = vpxImage.planes[1];
        uchar* uline = vpxImage.planes[2];
        const int Y_RED_WEIGHT = (int)(0.299 * 256);
        const int Y_GREEN_WEIGHT = (int)(0.587 * 256);
        const int Y_BLUE_WEIGHT = (int)(0.114 * 256);
        const int V_RED_WEIGHT = (int)(0.713 * 256);
        const int U_BLUE_WEIGHT = (int)(0.564 * 256);
        int redIndex = 0;
        int greenIndex = 1;
        int blueIndex = 2;
        if (format == GL_BGR) {
            redIndex = 2;
            blueIndex = 0;
        }
        for (int i = 0; i < encodedHeight; i += 2) {
            uchar* ydest = yline;
            uchar* vdest = vline;
            uchar* udest = uline;
            for (int j = 0; j < encodedWidth; j += 2) {
                uchar* tl = _faceColor.ptr(i, j);
                uchar* tr = _faceColor.ptr(i, j + 1);
                uchar* bl = _faceColor.ptr(i + 1, j);
                uchar* br = _faceColor.ptr(i + 1, j + 1);

                ydest[0] = (tl[redIndex] * Y_RED_WEIGHT + tl[1] * Y_GREEN_WEIGHT + tl[blueIndex] * Y_BLUE_WEIGHT) >> 8;
                ydest[1] = (tr[redIndex] * Y_RED_WEIGHT + tr[1] * Y_GREEN_WEIGHT + tr[blueIndex] * Y_BLUE_WEIGHT) >> 8;
                ydest[vpxImage.stride[0]] = (bl[redIndex] * Y_RED_WEIGHT + bl[greenIndex] *
                    Y_GREEN_WEIGHT + bl[blueIndex] * Y_BLUE_WEIGHT) >> 8;
                ydest[vpxImage.stride[0] + 1] = (br[redIndex] * Y_RED_WEIGHT + br[greenIndex] *
                    Y_GREEN_WEIGHT + br[blueIndex] * Y_BLUE_WEIGHT) >> 8;
                ydest += 2;

                int totalRed = tl[redIndex] + tr[redIndex] + bl[redIndex] + br[redIndex];
                int totalGreen = tl[greenIndex] + tr[greenIndex] + bl[greenIndex] + br[greenIndex];
                int totalBlue = tl[blueIndex] + tr[blueIndex] + bl[blueIndex] + br[blueIndex];
                int totalY = (totalRed * Y_RED_WEIGHT + totalGreen * Y_GREEN_WEIGHT + totalBlue * Y_BLUE_WEIGHT) >> 8;

                *vdest++ = (((totalRed - totalY) * V_RED_WEIGHT) >> 10) + 128;
                *udest++ = (((totalBlue - totalY) * U_BLUE_WEIGHT) >> 10) + 128;
        vpx_img_wrap(&vpxImage, VPX_IMG_FMT_YV12, encodedWidth, encodedHeight, 1, (unsigned char*)_encodedFace.data());

        if (!_depthOnly || depth.empty()) {
            if (_colorCodec.name == 0) {
                // initialize encoder context
                vpx_codec_enc_cfg_t codecConfig;
                vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &codecConfig, 0);
                codecConfig.rc_target_bitrate = ENCODED_FACE_WIDTH * ENCODED_FACE_HEIGHT * colorBitrateMultiplier *
                    codecConfig.rc_target_bitrate / codecConfig.g_w / codecConfig.g_h;
                codecConfig.g_w = encodedWidth;
                codecConfig.g_h = encodedHeight;
                vpx_codec_enc_init(&_colorCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
            }
            yline += vpxImage.stride[0] * 2;
            vline += vpxImage.stride[1];
            uline += vpxImage.stride[2];
        }

        // encode the frame
        vpx_codec_encode(&_colorCodec, &vpxImage, ++_frameCount, 1, 0, VPX_DL_REALTIME);
            if (_videoSendMode == FACE_VIDEO) {
                // resize/rotate face into encoding rectangle
                _faceColor.create(encodedHeight, encodedWidth, CV_8UC3);
                warpAffine(color, _faceColor, faceTransform, _faceColor.size());

            // start the payload off with the aspect ratio (zero for full frame)
            payload.append((const char*)&aspectRatio, sizeof(float));

            // extract the encoded frame
            vpx_codec_iter_t iterator = 0;
            const vpx_codec_cx_pkt_t* packet;
            while ((packet = vpx_codec_get_cx_data(&_colorCodec, &iterator)) != 0) {
                if (packet->kind == VPX_CODEC_CX_FRAME_PKT) {
                    // prepend the length, which will indicate whether there's a depth frame too
                    payload.append((const char*)&packet->data.frame.sz, sizeof(packet->data.frame.sz));
                    payload.append((const char*)packet->data.frame.buf, packet->data.frame.sz);
            } else {
                _faceColor = color;
            }
        }

            // convert from RGB to YV12: see http://www.fourcc.org/yuv.php and
            // http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
            uchar* yline = vpxImage.planes[0];
            uchar* vline = vpxImage.planes[1];
            uchar* uline = vpxImage.planes[2];
            const int Y_RED_WEIGHT = (int)(0.299 * 256);
            const int Y_GREEN_WEIGHT = (int)(0.587 * 256);
            const int Y_BLUE_WEIGHT = (int)(0.114 * 256);
            const int V_RED_WEIGHT = (int)(0.713 * 256);
            const int U_BLUE_WEIGHT = (int)(0.564 * 256);
            int redIndex = 0;
            int greenIndex = 1;
            int blueIndex = 2;
            if (format == GL_BGR) {
                redIndex = 2;
                blueIndex = 0;
            }
            for (int i = 0; i < encodedHeight; i += 2) {
                uchar* ydest = yline;
                uchar* vdest = vline;
                uchar* udest = uline;
                for (int j = 0; j < encodedWidth; j += 2) {
                    uchar* tl = _faceColor.ptr(i, j);
                    uchar* tr = _faceColor.ptr(i, j + 1);
                    uchar* bl = _faceColor.ptr(i + 1, j);
                    uchar* br = _faceColor.ptr(i + 1, j + 1);

                    ydest[0] = (tl[redIndex] * Y_RED_WEIGHT + tl[1] * Y_GREEN_WEIGHT + tl[blueIndex] * Y_BLUE_WEIGHT) >> 8;
                    ydest[1] = (tr[redIndex] * Y_RED_WEIGHT + tr[1] * Y_GREEN_WEIGHT + tr[blueIndex] * Y_BLUE_WEIGHT) >> 8;
                    ydest[vpxImage.stride[0]] = (bl[redIndex] * Y_RED_WEIGHT + bl[greenIndex] *
                        Y_GREEN_WEIGHT + bl[blueIndex] * Y_BLUE_WEIGHT) >> 8;
                    ydest[vpxImage.stride[0] + 1] = (br[redIndex] * Y_RED_WEIGHT + br[greenIndex] *
                        Y_GREEN_WEIGHT + br[blueIndex] * Y_BLUE_WEIGHT) >> 8;
                    ydest += 2;

                    int totalRed = tl[redIndex] + tr[redIndex] + bl[redIndex] + br[redIndex];
                    int totalGreen = tl[greenIndex] + tr[greenIndex] + bl[greenIndex] + br[greenIndex];
                    int totalBlue = tl[blueIndex] + tr[blueIndex] + bl[blueIndex] + br[blueIndex];
                    int totalY = (totalRed * Y_RED_WEIGHT + totalGreen * Y_GREEN_WEIGHT + totalBlue * Y_BLUE_WEIGHT) >> 8;

                    *vdest++ = (((totalRed - totalY) * V_RED_WEIGHT) >> 10) + 128;
                    *udest++ = (((totalBlue - totalY) * U_BLUE_WEIGHT) >> 10) + 128;
                }
                yline += vpxImage.stride[0] * 2;
                vline += vpxImage.stride[1];
                uline += vpxImage.stride[2];
            }

            // encode the frame
            vpx_codec_encode(&_colorCodec, &vpxImage, _frameCount, 1, 0, VPX_DL_REALTIME);

            // extract the encoded frame
            vpx_codec_iter_t iterator = 0;
            const vpx_codec_cx_pkt_t* packet;
            while ((packet = vpx_codec_get_cx_data(&_colorCodec, &iterator)) != 0) {
                if (packet->kind == VPX_CODEC_CX_FRAME_PKT) {
                    // prepend the length, which will indicate whether there's a depth frame too
                    payload.append((const char*)&packet->data.frame.sz, sizeof(packet->data.frame.sz));
                    payload.append((const char*)packet->data.frame.buf, packet->data.frame.sz);
                }
            }
        } else {
            // zero length indicates no color info
            const size_t ZERO_SIZE = 0;
            payload.append((const char*)&ZERO_SIZE, sizeof(size_t));

            // we can use more bits for depth
            depthBitrateMultiplier *= 2.0f;

            // don't bother reporting the color
            color = Mat();
        }

        if (!depth.empty()) {
            if (_depthCodec.name == 0) {
                // initialize encoder context
                vpx_codec_enc_cfg_t codecConfig;
                vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &codecConfig, 0);
                codecConfig.rc_target_bitrate = ENCODED_FACE_WIDTH * ENCODED_FACE_HEIGHT * depthBitrateMultiplier *
                    codecConfig.rc_target_bitrate / codecConfig.g_w / codecConfig.g_h;
                codecConfig.g_w = encodedWidth;
                codecConfig.g_h = encodedHeight;
                vpx_codec_enc_init(&_depthCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
            }

            // convert with mask
            uchar* yline = vpxImage.planes[0];
            uchar* vline = vpxImage.planes[1];
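Taken together, the encoding changes above mean each video payload now starts with the aspect ratio (zero for a full frame), followed by the size of the encoded color frame (written as zero in the new depth-only mode) and the VP8 color data, with any remaining bytes holding the VP8 depth frame; Face::processVideoMessage later in this diff reads the fields back in the same order. A condensed sketch of that layout, with placeholder buffers standing in for the codec output:

    #include <QByteArray>

    // Sketch of the payload layout FrameGrabber::grabFrame() appears to assemble;
    // colorFrame and depthFrame stand in for the VP8-encoded packets.
    QByteArray makePayloadSketch(float aspectRatio, const QByteArray& colorFrame, const QByteArray& depthFrame) {
        QByteArray payload;
        payload.append((const char*)&aspectRatio, sizeof(float));  // zero means full frame
        size_t colorSize = colorFrame.size();                      // zero means depth only
        payload.append((const char*)&colorSize, sizeof(size_t));
        payload.append(colorFrame);                                // may be empty
        payload.append(depthFrame);                                // depth fills the remainder
        return payload;
    }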
Webcam.h

@@ -112,6 +112,7 @@ public:
public slots:

    void cycleVideoSendMode();
    void setDepthOnly(bool depthOnly);
    void reset();
    void shutdown();
    void grabFrame();

@@ -126,6 +127,7 @@ private:

    bool _initialized;
    VideoSendMode _videoSendMode;
    bool _depthOnly;
    CvCapture* _capture;
    cv::CascadeClassifier _faceCascade;
    cv::Mat _hsvFrame;
Face.cpp

@@ -22,10 +22,10 @@

using namespace cv;

ProgramObject* Face::_program = 0;
int Face::_texCoordCornerLocation;
int Face::_texCoordRightLocation;
int Face::_texCoordUpLocation;
ProgramObject* Face::_videoProgram = 0;
Face::Locations Face::_videoProgramLocations;
ProgramObject* Face::_texturedProgram = 0;
Face::Locations Face::_texturedProgramLocations;
GLuint Face::_vboID;
GLuint Face::_iboID;

@@ -71,6 +71,7 @@ void Face::setFrameFromWebcam() {

void Face::clearFrame() {
    _colorTextureID = 0;
    _depthTextureID = 0;
}

int Face::processVideoMessage(unsigned char* packetData, size_t dataBytes) {
@@ -111,128 +112,140 @@ int Face::processVideoMessage(unsigned char* packetData, size_t dataBytes) {
        return dataBytes;
    }

    // the switch from full frame to not (or vice versa) requires us to reinit the codecs
    // the switch between full frame or depth only modes requires us to reinit the codecs
    float aspectRatio = *(const float*)_arrivingFrame.constData();
    size_t colorSize = *(const size_t*)(_arrivingFrame.constData() + sizeof(float));
    bool fullFrame = (aspectRatio == FULL_FRAME_ASPECT);
    if (fullFrame != _lastFullFrame) {
    bool depthOnly = (colorSize == 0);
    if (fullFrame != _lastFullFrame || depthOnly != _lastDepthOnly) {
        destroyCodecs();
        _lastFullFrame = fullFrame;
        _lastDepthOnly = depthOnly;
    }

    if (_colorCodec.name == 0) {
        // initialize decoder context
        vpx_codec_dec_init(&_colorCodec, vpx_codec_vp8_dx(), 0, 0);
    }

    size_t colorSize = *(const size_t*)(_arrivingFrame.constData() + sizeof(float));
    // read the color data, if non-empty
    Mat color;
    const uint8_t* colorData = (const uint8_t*)(_arrivingFrame.constData() + sizeof(float) + sizeof(size_t));
    vpx_codec_decode(&_colorCodec, colorData, colorSize, 0, 0);
    vpx_codec_iter_t iterator = 0;
    vpx_image_t* image;
    while ((image = vpx_codec_get_frame(&_colorCodec, &iterator)) != 0) {
        // convert from YV12 to RGB: see http://www.fourcc.org/yuv.php and
        // http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
        Mat color(image->d_h, image->d_w, CV_8UC3);
        uchar* yline = image->planes[0];
        uchar* vline = image->planes[1];
        uchar* uline = image->planes[2];
        const int RED_V_WEIGHT = (int)(1.403 * 256);
        const int GREEN_V_WEIGHT = (int)(0.714 * 256);
        const int GREEN_U_WEIGHT = (int)(0.344 * 256);
        const int BLUE_U_WEIGHT = (int)(1.773 * 256);
        for (int i = 0; i < image->d_h; i += 2) {
            uchar* ysrc = yline;
            uchar* vsrc = vline;
            uchar* usrc = uline;
            for (int j = 0; j < image->d_w; j += 2) {
                uchar* tl = color.ptr(i, j);
                uchar* tr = color.ptr(i, j + 1);
                uchar* bl = color.ptr(i + 1, j);
                uchar* br = color.ptr(i + 1, j + 1);

                int v = *vsrc++ - 128;
                int u = *usrc++ - 128;

                int redOffset = (RED_V_WEIGHT * v) >> 8;
                int greenOffset = (GREEN_V_WEIGHT * v + GREEN_U_WEIGHT * u) >> 8;
                int blueOffset = (BLUE_U_WEIGHT * u) >> 8;

                int ytl = ysrc[0];
                int ytr = ysrc[1];
                int ybl = ysrc[image->w];
                int ybr = ysrc[image->w + 1];
                ysrc += 2;

                tl[0] = saturate_cast<uchar>(ytl + redOffset);
                tl[1] = saturate_cast<uchar>(ytl - greenOffset);
                tl[2] = saturate_cast<uchar>(ytl + blueOffset);

                tr[0] = saturate_cast<uchar>(ytr + redOffset);
                tr[1] = saturate_cast<uchar>(ytr - greenOffset);
                tr[2] = saturate_cast<uchar>(ytr + blueOffset);

                bl[0] = saturate_cast<uchar>(ybl + redOffset);
                bl[1] = saturate_cast<uchar>(ybl - greenOffset);
                bl[2] = saturate_cast<uchar>(ybl + blueOffset);

                br[0] = saturate_cast<uchar>(ybr + redOffset);
                br[1] = saturate_cast<uchar>(ybr - greenOffset);
                br[2] = saturate_cast<uchar>(ybr + blueOffset);
            }
            yline += image->stride[0] * 2;
            vline += image->stride[1];
            uline += image->stride[2];
    if (colorSize > 0) {
        if (_colorCodec.name == 0) {
            // initialize decoder context
            vpx_codec_dec_init(&_colorCodec, vpx_codec_vp8_dx(), 0, 0);
        }
    Mat depth;

    const uint8_t* depthData = colorData + colorSize;
    int depthSize = _arrivingFrame.size() - ((const char*)depthData - _arrivingFrame.constData());
    if (depthSize > 0) {
        if (_depthCodec.name == 0) {
            // initialize decoder context
            vpx_codec_dec_init(&_depthCodec, vpx_codec_vp8_dx(), 0, 0);
        }
        vpx_codec_decode(&_depthCodec, depthData, depthSize, 0, 0);
        vpx_codec_iter_t iterator = 0;
        vpx_image_t* image;
        while ((image = vpx_codec_get_frame(&_depthCodec, &iterator)) != 0) {
            depth.create(image->d_h, image->d_w, CV_8UC1);
            uchar* yline = image->planes[0];
            uchar* vline = image->planes[1];
            const uchar EIGHT_BIT_MAXIMUM = 255;
            const uchar MASK_THRESHOLD = 192;
            for (int i = 0; i < image->d_h; i += 2) {
                uchar* ysrc = yline;
                uchar* vsrc = vline;
                for (int j = 0; j < image->d_w; j += 2) {
                    if (*vsrc++ < MASK_THRESHOLD) {
                        *depth.ptr(i, j) = EIGHT_BIT_MAXIMUM;
                        *depth.ptr(i, j + 1) = EIGHT_BIT_MAXIMUM;
                        *depth.ptr(i + 1, j) = EIGHT_BIT_MAXIMUM;
                        *depth.ptr(i + 1, j + 1) = EIGHT_BIT_MAXIMUM;

                    } else {
                        *depth.ptr(i, j) = ysrc[0];
                        *depth.ptr(i, j + 1) = ysrc[1];
                        *depth.ptr(i + 1, j) = ysrc[image->stride[0]];
                        *depth.ptr(i + 1, j + 1) = ysrc[image->stride[0] + 1];
                    }
                    ysrc += 2;
                }
                yline += image->stride[0] * 2;
                vline += image->stride[1];
        vpx_codec_decode(&_colorCodec, colorData, colorSize, 0, 0);
        vpx_codec_iter_t iterator = 0;
        vpx_image_t* image;
        while ((image = vpx_codec_get_frame(&_colorCodec, &iterator)) != 0) {
            // convert from YV12 to RGB: see http://www.fourcc.org/yuv.php and
            // http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
            color.create(image->d_h, image->d_w, CV_8UC3);
            uchar* yline = image->planes[0];
            uchar* vline = image->planes[1];
            uchar* uline = image->planes[2];
            const int RED_V_WEIGHT = (int)(1.403 * 256);
            const int GREEN_V_WEIGHT = (int)(0.714 * 256);
            const int GREEN_U_WEIGHT = (int)(0.344 * 256);
            const int BLUE_U_WEIGHT = (int)(1.773 * 256);
            for (int i = 0; i < image->d_h; i += 2) {
                uchar* ysrc = yline;
                uchar* vsrc = vline;
                uchar* usrc = uline;
                for (int j = 0; j < image->d_w; j += 2) {
                    uchar* tl = color.ptr(i, j);
                    uchar* tr = color.ptr(i, j + 1);
                    uchar* bl = color.ptr(i + 1, j);
                    uchar* br = color.ptr(i + 1, j + 1);

                    int v = *vsrc++ - 128;
                    int u = *usrc++ - 128;

                    int redOffset = (RED_V_WEIGHT * v) >> 8;
                    int greenOffset = (GREEN_V_WEIGHT * v + GREEN_U_WEIGHT * u) >> 8;
                    int blueOffset = (BLUE_U_WEIGHT * u) >> 8;

                    int ytl = ysrc[0];
                    int ytr = ysrc[1];
                    int ybl = ysrc[image->w];
                    int ybr = ysrc[image->w + 1];
                    ysrc += 2;

                    tl[0] = saturate_cast<uchar>(ytl + redOffset);
                    tl[1] = saturate_cast<uchar>(ytl - greenOffset);
                    tl[2] = saturate_cast<uchar>(ytl + blueOffset);

                    tr[0] = saturate_cast<uchar>(ytr + redOffset);
                    tr[1] = saturate_cast<uchar>(ytr - greenOffset);
                    tr[2] = saturate_cast<uchar>(ytr + blueOffset);

                    bl[0] = saturate_cast<uchar>(ybl + redOffset);
                    bl[1] = saturate_cast<uchar>(ybl - greenOffset);
                    bl[2] = saturate_cast<uchar>(ybl + blueOffset);

                    br[0] = saturate_cast<uchar>(ybr + redOffset);
                    br[1] = saturate_cast<uchar>(ybr - greenOffset);
                    br[2] = saturate_cast<uchar>(ybr + blueOffset);
                }
                yline += image->stride[0] * 2;
                vline += image->stride[1];
                uline += image->stride[2];
            }
        }
        QMetaObject::invokeMethod(this, "setFrame", Q_ARG(cv::Mat, color),
            Q_ARG(cv::Mat, depth), Q_ARG(float, aspectRatio));
    } else if (_colorCodec.name != 0) {
        vpx_codec_destroy(&_colorCodec);
        _colorCodec.name = 0;
    }

    // read the depth data, if non-empty
    Mat depth;
    const uint8_t* depthData = colorData + colorSize;
    int depthSize = _arrivingFrame.size() - ((const char*)depthData - _arrivingFrame.constData());
    if (depthSize > 0) {
        if (_depthCodec.name == 0) {
            // initialize decoder context
            vpx_codec_dec_init(&_depthCodec, vpx_codec_vp8_dx(), 0, 0);
        }
        vpx_codec_decode(&_depthCodec, depthData, depthSize, 0, 0);
        vpx_codec_iter_t iterator = 0;
        vpx_image_t* image;
        while ((image = vpx_codec_get_frame(&_depthCodec, &iterator)) != 0) {
            depth.create(image->d_h, image->d_w, CV_8UC1);
            uchar* yline = image->planes[0];
            uchar* vline = image->planes[1];
            const uchar EIGHT_BIT_MAXIMUM = 255;
            const uchar MASK_THRESHOLD = 192;
            for (int i = 0; i < image->d_h; i += 2) {
                uchar* ysrc = yline;
                uchar* vsrc = vline;
                for (int j = 0; j < image->d_w; j += 2) {
                    if (*vsrc++ < MASK_THRESHOLD) {
                        *depth.ptr(i, j) = EIGHT_BIT_MAXIMUM;
                        *depth.ptr(i, j + 1) = EIGHT_BIT_MAXIMUM;
                        *depth.ptr(i + 1, j) = EIGHT_BIT_MAXIMUM;
                        *depth.ptr(i + 1, j + 1) = EIGHT_BIT_MAXIMUM;

                    } else {
                        *depth.ptr(i, j) = ysrc[0];
                        *depth.ptr(i, j + 1) = ysrc[1];
                        *depth.ptr(i + 1, j) = ysrc[image->stride[0]];
                        *depth.ptr(i + 1, j + 1) = ysrc[image->stride[0] + 1];
                    }
                    ysrc += 2;
                }
                yline += image->stride[0] * 2;
                vline += image->stride[1];
            }
        }
    } else if (_depthCodec.name != 0) {
        vpx_codec_destroy(&_depthCodec);
        _depthCodec.name = 0;
    }
    QMetaObject::invokeMethod(this, "setFrame", Q_ARG(cv::Mat, color),
        Q_ARG(cv::Mat, depth), Q_ARG(float, aspectRatio));

    return dataBytes;
}

bool Face::render(float alpha) {
    if (_colorTextureID == 0 || _textureRect.size.area() == 0) {
    if (!isActive()) {
        return false;
    }
    glPushMatrix();
@@ -275,20 +288,9 @@ bool Face::render(float alpha) {
    const int INDICES_PER_TRIANGLE = 3;
    const int INDEX_COUNT = QUAD_COUNT * TRIANGLES_PER_QUAD * INDICES_PER_TRIANGLE;

    if (_program == 0) {
        _program = new ProgramObject();
        _program->addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/face.vert");
        _program->addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/face.frag");
        _program->link();

        _program->bind();
        _program->setUniformValue("depthTexture", 0);
        _program->setUniformValue("colorTexture", 1);
        _program->release();

        _texCoordCornerLocation = _program->uniformLocation("texCoordCorner");
        _texCoordRightLocation = _program->uniformLocation("texCoordRight");
        _texCoordUpLocation = _program->uniformLocation("texCoordUp");
    if (_videoProgram == 0) {
        _videoProgram = loadProgram(QString(), "colorTexture", _videoProgramLocations);
        _texturedProgram = loadProgram("_textured", "permutationNormalTexture", _texturedProgramLocations);

        glGenBuffers(1, &_vboID);
        glBindBuffer(GL_ARRAY_BUFFER, _vboID);

@@ -328,14 +330,23 @@ bool Face::render(float alpha) {
    glBindTexture(GL_TEXTURE_2D, _depthTextureID);

    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, _colorTextureID);

    _program->bind();
    _program->setUniformValue(_texCoordCornerLocation,
    ProgramObject* program = _videoProgram;
    Locations* locations = &_videoProgramLocations;
    if (_colorTextureID != 0) {
        glBindTexture(GL_TEXTURE_2D, _colorTextureID);

    } else {
        glBindTexture(GL_TEXTURE_2D, Application::getInstance()->getTextureCache()->getPermutationNormalTextureID());
        program = _texturedProgram;
        locations = &_texturedProgramLocations;
    }
    program->bind();
    program->setUniformValue(locations->texCoordCorner,
        points[0].x / _textureSize.width, points[0].y / _textureSize.height);
    _program->setUniformValue(_texCoordRightLocation,
    program->setUniformValue(locations->texCoordRight,
        (points[3].x - points[0].x) / _textureSize.width, (points[3].y - points[0].y) / _textureSize.height);
    _program->setUniformValue(_texCoordUpLocation,
    program->setUniformValue(locations->texCoordUp,
        (points[1].x - points[0].x) / _textureSize.width, (points[1].y - points[0].y) / _textureSize.height);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(2, GL_FLOAT, 0, 0);

@@ -357,7 +368,7 @@ bool Face::render(float alpha) {

    glDisableClientState(GL_VERTEX_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    _program->release();
    program->release();

    glBindTexture(GL_TEXTURE_2D, 0);
    glActiveTexture(GL_TEXTURE0);
@@ -392,53 +403,52 @@ void Face::cycleRenderMode() {
}

void Face::setFrame(const cv::Mat& color, const cv::Mat& depth, float aspectRatio) {
    if (color.empty()) {
        // release our textures, if any; there's no more video
        if (_colorTextureID != 0) {
            glDeleteTextures(1, &_colorTextureID);
            _colorTextureID = 0;
    Size2f textureSize = _textureSize;
    if (!color.empty()) {
        bool generate = (_colorTextureID == 0);
        if (generate) {
            glGenTextures(1, &_colorTextureID);
        }
        if (_depthTextureID != 0) {
            glDeleteTextures(1, &_depthTextureID);
            _depthTextureID = 0;
        glBindTexture(GL_TEXTURE_2D, _colorTextureID);
        if (_textureSize.width != color.cols || _textureSize.height != color.rows || generate) {
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, color.cols, color.rows, 0, GL_RGB, GL_UNSIGNED_BYTE, color.ptr());
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            textureSize = color.size();
            _textureRect = RotatedRect(Point2f(color.cols * 0.5f, color.rows * 0.5f), textureSize, 0.0f);

        } else {
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, color.cols, color.rows, GL_RGB, GL_UNSIGNED_BYTE, color.ptr());
        }
        return;
    }

    if (_colorTextureID == 0) {
        glGenTextures(1, &_colorTextureID);
    }
    glBindTexture(GL_TEXTURE_2D, _colorTextureID);
    bool recreateTextures = (_textureSize.width != color.cols || _textureSize.height != color.rows);
    if (recreateTextures) {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, color.cols, color.rows, 0, GL_RGB, GL_UNSIGNED_BYTE, color.ptr());
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        _textureSize = color.size();
        _textureRect = RotatedRect(Point2f(color.cols * 0.5f, color.rows * 0.5f), _textureSize, 0.0f);

    } else {
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, color.cols, color.rows, GL_RGB, GL_UNSIGNED_BYTE, color.ptr());
    } else if (_colorTextureID != 0) {
        glDeleteTextures(1, &_colorTextureID);
        _colorTextureID = 0;
    }

    if (!depth.empty()) {
        if (_depthTextureID == 0) {
        bool generate = (_depthTextureID == 0);
        if (generate) {
            glGenTextures(1, &_depthTextureID);
        }
        glBindTexture(GL_TEXTURE_2D, _depthTextureID);
        if (recreateTextures) {
        if (_textureSize.width != depth.cols || _textureSize.height != depth.rows || generate) {
            glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, depth.cols, depth.rows, 0,
                GL_LUMINANCE, GL_UNSIGNED_BYTE, depth.ptr());
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
            textureSize = depth.size();
            _textureRect = RotatedRect(Point2f(depth.cols * 0.5f, depth.rows * 0.5f), textureSize, 0.0f);

        } else {
            glBindTexture(GL_TEXTURE_2D, _depthTextureID);
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, depth.cols, depth.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, depth.ptr());
        }
    } else if (_depthTextureID != 0) {
        glDeleteTextures(1, &_depthTextureID);
        _depthTextureID = 0;
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    _aspectRatio = aspectRatio;
    _textureSize = textureSize;
}

void Face::destroyCodecs() {

@@ -451,3 +461,21 @@ void Face::destroyCodecs() {
        _depthCodec.name = 0;
    }
}

ProgramObject* Face::loadProgram(const QString& suffix, const char* secondTextureUniform, Locations& locations) {
    ProgramObject* program = new ProgramObject();
    program->addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/face" + suffix + ".vert");
    program->addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/face" + suffix + ".frag");
    program->link();

    program->bind();
    program->setUniformValue("depthTexture", 0);
    program->setUniformValue(secondTextureUniform, 1);
    program->release();

    locations.texCoordCorner = program->uniformLocation("texCoordCorner");
    locations.texCoordRight = program->uniformLocation("texCoordRight");
    locations.texCoordUp = program->uniformLocation("texCoordUp");

    return program;
}
Face.h

@@ -30,7 +30,8 @@ public:
    Face(Head* owningHead);
    ~Face();

    bool isFullFrame() const { return _colorTextureID != 0 && _aspectRatio == FULL_FRAME_ASPECT; }
    bool isActive() const { return _colorTextureID != 0 || _depthTextureID != 0; }
    bool isFullFrame() const { return isActive() && _aspectRatio == FULL_FRAME_ASPECT; }

    void setFrameFromWebcam();
    void clearFrame();

@@ -64,15 +65,26 @@ private:
    vpx_codec_ctx_t _colorCodec;
    vpx_codec_ctx_t _depthCodec;
    bool _lastFullFrame;
    bool _lastDepthOnly;

    QByteArray _arrivingFrame;
    int _frameCount;
    int _frameBytesRemaining;

    static ProgramObject* _program;
    static int _texCoordCornerLocation;
    static int _texCoordRightLocation;
    static int _texCoordUpLocation;
    struct Locations {
        int texCoordCorner;
        int texCoordRight;
        int texCoordUp;
    };

    static ProgramObject* loadProgram(const QString& suffix, const char* secondTextureUniform, Locations& locations);

    static ProgramObject* _videoProgram;
    static Locations _videoProgramLocations;

    static ProgramObject* _texturedProgram;
    static Locations _texturedProgramLocations;

    static GLuint _vboID;
    static GLuint _iboID;
};
interface/src/renderer/TextureCache.cpp (new file, 45 lines)
@@ -0,0 +1,45 @@
//
// TextureCache.cpp
// interface
//
// Created by Andrzej Kapolka on 8/6/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.

#include <glm/gtc/random.hpp>

#include "TextureCache.h"

TextureCache::TextureCache() : _permutationNormalTextureID(0) {
}

TextureCache::~TextureCache() {
    if (_permutationNormalTextureID != 0) {
        glDeleteTextures(1, &_permutationNormalTextureID);
    }
}

GLuint TextureCache::getPermutationNormalTextureID() {
    if (_permutationNormalTextureID == 0) {
        glGenTextures(1, &_permutationNormalTextureID);
        glBindTexture(GL_TEXTURE_2D, _permutationNormalTextureID);

        // the first line consists of random permutation offsets
        unsigned char data[256 * 2 * 3];
        for (int i = 0; i < 256 * 3; i++) {
            data[i] = rand() % 256;
        }
        // the next, random unit normals
        for (int i = 256 * 3; i < 256 * 3 * 2; i += 3) {
            glm::vec3 randvec = glm::sphericalRand(1.0f);
            data[i] = ((randvec.x + 1.0f) / 2.0f) * 255.0f;
            data[i + 1] = ((randvec.y + 1.0f) / 2.0f) * 255.0f;
            data[i + 2] = ((randvec.z + 1.0f) / 2.0f) * 255.0f;
        }
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 256, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

        glBindTexture(GL_TEXTURE_2D, 0);
    }
    return _permutationNormalTextureID;
}
interface/src/renderer/TextureCache.h (new file, 27 lines)
@@ -0,0 +1,27 @@
//
// TextureCache.h
// interface
//
// Created by Andrzej Kapolka on 8/6/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//

#ifndef __interface__TextureCache__
#define __interface__TextureCache__

#include "InterfaceConfig.h"

class TextureCache {
public:

    TextureCache();
    ~TextureCache();

    GLuint getPermutationNormalTextureID();

private:

    GLuint _permutationNormalTextureID;
};

#endif /* defined(__interface__TextureCache__) */