Merge remote-tracking branch 'upstream/master' into particles

Jeffrey Ventrella 2013-08-02 11:00:10 -07:00
commit 6ebe33bb0b
17 changed files with 698 additions and 442 deletions

View file

@ -55,10 +55,10 @@ int main(int argc, const char * argv[])
// domain server
bool isLocalMode = cmdOptionExists(argc, argv, "--local");
if (isLocalMode) {
printf("NOTE: Running in Local Mode!\n");
printf("NOTE: Running in local mode!\n");
} else {
printf("--------------------------------------------------\n");
printf("NOTE: Running in EC2 Mode. \n");
printf("NOTE: Not running in local mode. \n");
printf("If you're a developer testing a local system, you\n");
printf("probably want to include --local on command line.\n");
printf("--------------------------------------------------\n");
@ -104,9 +104,10 @@ int main(int argc, const char * argv[])
// so hardcode the EC2 public address for now
if (nodePublicAddress.sin_addr.s_addr == serverLocalAddress) {
// If we're not running "local" then we do replace the IP
// with the EC2 IP. Otherwise, we use our normal public IP
// with 0. This designates to clients that the server is reachable
// at the same IP address
if (!isLocalMode) {
nodePublicAddress.sin_addr.s_addr = 895283510; // local IP in this format...
nodePublicAddress.sin_addr.s_addr = 0;
destinationSocket = (sockaddr*) &nodeLocalAddress;
}
}
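The zero-address scheme has two halves: this server-side hunk writes 0 into the node's public address, and the NodeList.cpp hunk later in this commit substitutes the domain server's IP whenever a client sees that sentinel. A minimal sketch of the round trip, using the same sockaddr_in/inet_aton calls as the diff (the domain IP below is hypothetical):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <cstdio>

// Server side: 0 means "reachable at the domain server's own IP".
void markSameHost(sockaddr_in& nodePublicAddress) {
    nodePublicAddress.sin_addr.s_addr = 0;
}

// Client side: replace the sentinel with the known domain server IP,
// mirroring NodeList::processDomainServerList() in this commit.
void resolveSentinel(sockaddr_in& nodePublicSocket, const char* domainIP) {
    if (nodePublicSocket.sin_addr.s_addr == 0) {
        inet_aton(domainIP, &nodePublicSocket.sin_addr);
    }
}

int main() {
    sockaddr_in addr = {};
    markSameHost(addr);
    resolveSentinel(addr, "192.168.1.10"); // hypothetical domain server IP
    printf("resolved to %s\n", inet_ntoa(addr.sin_addr));
    return 0;
}

As for the removed literal: 895283510 is 0x355CF136, and stored into s_addr on a little-endian host it reads back as the dotted quad 54.241.92.53, presumably the hardcoded EC2 public address the surrounding comments refer to.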

View file

@ -17,9 +17,6 @@ uniform vec2 texCoordRight;
// the texture coordinate vector from bottom to the top
uniform vec2 texCoordUp;
// the aspect ratio of the image
uniform float aspectRatio;
// the depth texture
uniform sampler2D depthTexture;
@ -31,6 +28,5 @@ void main(void) {
const float MIN_VISIBLE_DEPTH = 1.0 / 255.0;
const float MAX_VISIBLE_DEPTH = 254.0 / 255.0;
gl_FrontColor = vec4(1.0, 1.0, 1.0, step(MIN_VISIBLE_DEPTH, depth) * (1.0 - step(MAX_VISIBLE_DEPTH, depth)));
gl_Position = gl_ModelViewProjectionMatrix * vec4(0.5 - gl_Vertex.x,
(gl_Vertex.y - 0.5) / aspectRatio, depth * 2.0 - 2.0, 1.0);
gl_Position = gl_ModelViewProjectionMatrix * vec4(0.5 - gl_Vertex.x, gl_Vertex.y - 0.5, depth - 0.5, 1.0);
}
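Reading the two gl_Position lines side by side: the removed version divided y by the aspectRatio uniform and mapped depth into [-2, 0], while the replacement leaves the quad square and centers depth at zero. The aspect correction did not disappear; it moved to the CPU, where Face::render() (later in this commit) folds it into the modelview scale with glScalef(xScale, xScale / aspect, zScale). In symbols, with a the aspect ratio and d the depth sample:

\[ \text{old: } y' = \frac{y - 0.5}{a}, \quad z' = 2d - 2 \qquad\longrightarrow\qquad \text{new: } y' = y - 0.5, \quad z' = d - 0.5 \]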

View file

@ -965,14 +965,15 @@ void Application::sendAvatarFaceVideoMessage(int frameCount, const QByteArray& d
int headerSize = packetPosition - packet;
// break the data up into submessages of the maximum size
// break the data up into submessages of the maximum size (at least one, for zero-length packets)
*offsetPosition = 0;
while (*offsetPosition < data.size()) {
do {
int payloadSize = min(data.size() - (int)*offsetPosition, MAX_PACKET_SIZE - headerSize);
memcpy(packetPosition, data.constData() + *offsetPosition, payloadSize);
getInstance()->controlledBroadcastToNodes(packet, headerSize + payloadSize, &NODE_TYPE_AVATAR_MIXER, 1);
*offsetPosition += payloadSize;
}
} while (*offsetPosition < data.size());
}
// Every second, check the frame rates and other stuff
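The switch from while to do-while above guarantees at least one packet even when data.size() is zero, which is how an empty payload (video disabled) still reaches the avatar mixer. The chunking pattern in isolation, with hypothetical names:

#include <algorithm>
#include <cstdio>

// Split a buffer into payloads of at most maxPayload bytes, always emitting
// at least one (possibly empty) chunk -- the do/while form used above.
void chunkAndSend(const char* data, int size, int maxPayload) {
    int offset = 0;
    do {
        int payloadSize = std::min(size - offset, maxPayload);
        // stand-in for the header build + controlledBroadcastToNodes() call
        printf("sending %d bytes at offset %d: %.*s\n", payloadSize, offset, payloadSize, data + offset);
        offset += payloadSize;
    } while (offset < size);
}

int main() {
    chunkAndSend("example", 7, 3); // 3 + 3 + 1 bytes
    chunkAndSend("", 0, 3);        // still emits one zero-length chunk
    return 0;
}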
@ -1495,16 +1496,18 @@ bool Application::sendVoxelsOperation(VoxelNode* node, void* extraData) {
uint64_t now = usecTimestampNow();
// dynamically sleep until we need to fire off the next set of voxels
const uint64_t CLIENT_TO_SERVER_VOXEL_SEND_INTERVAL_USECS = 1000 * 5; // 1 packet every 5 milliseconds
uint64_t elapsed = now - args->lastSendTime;
int usecToSleep = CLIENT_TO_SERVER_VOXEL_SEND_INTERVAL_USECS - elapsed;
if (usecToSleep > 0) {
qDebug("sendVoxelsOperation: packet: %d bytes:%lld elapsed %lld usecs, sleeping for %d usecs!\n",
args->packetsSent, (long long int)args->bytesSent, (long long int)elapsed, usecToSleep);
//qDebug("sendVoxelsOperation: packet: %d bytes:%lld elapsed %lld usecs, sleeping for %d usecs!\n",
// args->packetsSent, (long long int)args->bytesSent, (long long int)elapsed, usecToSleep);
Application::getInstance()->timer();
usleep(usecToSleep);
} else {
qDebug("sendVoxelsOperation: packet: %d bytes:%lld elapsed %lld usecs, no need to sleep!\n",
args->packetsSent, (long long int)args->bytesSent, (long long int)elapsed);
//qDebug("sendVoxelsOperation: packet: %d bytes:%lld elapsed %lld usecs, no need to sleep!\n",
// args->packetsSent, (long long int)args->bytesSent, (long long int)elapsed);
}
args->lastSendTime = now;
}
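The branch above is a fixed-rate sender: with the interval at 1000 * 5 microseconds, voxel edit packets leave at most once every 5 ms (about 200 per second), and the qDebug chatter is now commented out on both paths. A minimal sketch of the same throttle, assuming only POSIX gettimeofday and usleep:

#include <sys/time.h>
#include <unistd.h>
#include <cstdint>

const uint64_t SEND_INTERVAL_USECS = 1000 * 5; // one packet every 5 milliseconds

uint64_t usecNow() {
    timeval tv;
    gettimeofday(&tv, 0);
    return (uint64_t)tv.tv_sec * 1000000 + tv.tv_usec;
}

// Sleep just long enough that consecutive sends are at least
// SEND_INTERVAL_USECS apart, as sendVoxelsOperation() does above.
void throttleSend(uint64_t& lastSendTime) {
    uint64_t now = usecNow();
    uint64_t elapsed = now - lastSendTime;
    if (elapsed < SEND_INTERVAL_USECS) {
        usleep((int)(SEND_INTERVAL_USECS - elapsed));
    }
    lastSendTime = now; // the pre-sleep timestamp, matching the code above
}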
@ -1572,15 +1575,29 @@ void Application::importVoxelsToClipboard() {
void Application::importVoxels() {
QString desktopLocation = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation);
QString fileNameString = QFileDialog::getOpenFileName(_glWidget, tr("Import Voxels"), desktopLocation,
QStringList fileNameStringList = QFileDialog::getOpenFileNames(_glWidget, tr("Import Voxels"), desktopLocation,
tr(IMPORT_FILE_TYPES));
// remember the "selected" voxel point before we do any importing...
float originalX = _mouseVoxel.x;
float originalZ = _mouseVoxel.z;
const int PNG_TYPE_NAME_LENGTH = 4;
const int SVO_TYPE_NAME_LENGTH = 4;
const int SCH_TYPE_NAME_LENGTH = 10;
for (int i = 0; i < fileNameStringList.size(); i++) {
QString fileNameString = fileNameStringList.at(i);
QByteArray fileNameAscii = fileNameString.toLocal8Bit();
const char* fileName = fileNameAscii.data();
int fileTypeNameLength = 0;
VoxelTree importVoxels;
if (fileNameString.endsWith(".png", Qt::CaseInsensitive)) {
QImage pngImage = QImage(fileName);
fileTypeNameLength = PNG_TYPE_NAME_LENGTH;
if (pngImage.height() != pngImage.width()) {
qDebug("ERROR: Bad PNG size: height != width.\n");
return;
@ -1597,10 +1614,29 @@ void Application::importVoxels() {
importVoxels.readFromSquareARGB32Pixels(pixels, pngImage.height());
} else if (fileNameString.endsWith(".svo", Qt::CaseInsensitive)) {
importVoxels.readFromSVOFile(fileName);
fileTypeNameLength = SVO_TYPE_NAME_LENGTH;
} else if (fileNameString.endsWith(".schematic", Qt::CaseInsensitive)) {
importVoxels.readFromSchematicFile(fileName);
fileTypeNameLength = SCH_TYPE_NAME_LENGTH;
}
int indexOfFirstPeriod = fileNameString.indexOf('.');
QString fileCoord = fileNameString.mid(indexOfFirstPeriod + 1,
fileNameString.length() - indexOfFirstPeriod - fileTypeNameLength - 1);
indexOfFirstPeriod = fileCoord.indexOf('.');
QString columnNumString = fileCoord.right(fileCoord.length() - indexOfFirstPeriod - 1);
QString rowNumString = fileCoord.left(indexOfFirstPeriod);
int columnNum = columnNumString.toFloat();
int rowNum = rowNumString.toFloat();
qDebug("columnNum: %d\t rowNum: %d\n", columnNum, rowNum);
_mouseVoxel.x = originalX + (columnNum - 1) * _mouseVoxel.s;
_mouseVoxel.z = originalZ + (rowNum - 1) * _mouseVoxel.s;
VoxelNode* selectedNode = _voxels.getVoxelAt(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
// Recurse the Import Voxels tree, where everything is root relative, and send all the colored voxels to
@ -1625,17 +1661,41 @@ void Application::importVoxels() {
args.newBaseOctCode = calculatedOctCode = pointToVoxel(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
}
qDebug("column:%d, row:%d, voxel:%f,%f,%f,%f\n", columnNum, rowNum, _mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s );
// send the insert/paste of these voxels
importVoxels.recurseTreeWithOperation(sendVoxelsOperation, &args);
// If we have voxels left in the packet, then send the packet
if (args.bufferInUse > (numBytesPacketHeader + sizeof(unsigned short int))) {
controlledBroadcastToNodes(args.messageBuffer, args.bufferInUse, &NODE_TYPE_VOXEL_SERVER, 1);
args.packetsSent++;
args.bytesSent += args.bufferInUse;
uint64_t now = usecTimestampNow();
// dynamically sleep until we need to fire off the next set of voxels
uint64_t elapsed = now - args.lastSendTime;
int usecToSleep = CLIENT_TO_SERVER_VOXEL_SEND_INTERVAL_USECS - elapsed;
if (usecToSleep > 0) {
//qDebug("after sendVoxelsOperation: packet: %d bytes:%lld elapsed %lld usecs, sleeping for %d usecs!\n",
// args.packetsSent, (long long int)args.bytesSent, (long long int)elapsed, usecToSleep);
usleep(usecToSleep);
} else {
//qDebug("after sendVoxelsOperation: packet: %d bytes:%lld elapsed %lld usecs, no need to sleep!\n",
// args.packetsSent, (long long int)args.bytesSent, (long long int)elapsed);
}
args.lastSendTime = now;
}
if (calculatedOctCode) {
delete[] calculatedOctCode;
}
}
// restore the main window's active state
_window->activateWindow();
}
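Each imported file encodes its own grid cell in its name: the text between the first period and the type suffix is read as row.column, and the voxels land (column - 1, row - 1) voxel widths away from the originally selected point. A self-contained sketch of that slicing with the same Qt calls (the file name below is hypothetical):

#include <QString>

// Parse "name.row.column.svo" into (row, column), following the same
// string slicing as Application::importVoxels() above.
bool parseGridCoords(const QString& fileName, int typeSuffixLength, int& row, int& column) {
    int firstPeriod = fileName.indexOf('.');
    if (firstPeriod < 0) {
        return false;
    }
    QString coords = fileName.mid(firstPeriod + 1,
        fileName.length() - firstPeriod - typeSuffixLength - 1);
    int split = coords.indexOf('.');
    if (split < 0) {
        return false;
    }
    row = coords.left(split).toInt();
    column = coords.mid(split + 1).toInt();
    return true;
}

// parseGridCoords("terrain.2.3.svo", 4, row, column) yields row 2, column 3;
// the loop itself converts via toFloat() before assigning to int.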
@ -1728,6 +1788,7 @@ void Application::initMenu() {
_testPing->setChecked(true);
(_fullScreenMode = optionsMenu->addAction("Fullscreen", this, SLOT(setFullscreen(bool)), Qt::Key_F))->setCheckable(true);
optionsMenu->addAction("Webcam", &_webcam, SLOT(setEnabled(bool)))->setCheckable(true);
optionsMenu->addAction("Cycle Webcam Send Mode", _webcam.getGrabber(), SLOT(cycleVideoSendMode()));
optionsMenu->addAction("Go Home", this, SLOT(goHome()));
QMenu* renderMenu = menuBar->addMenu("Render");
@ -1755,7 +1816,6 @@ void Application::initMenu() {
(_renderLookatIndicatorOn = renderMenu->addAction("Lookat Indicator"))->setCheckable(true);
_renderLookatIndicatorOn->setChecked(true);
(_renderParticleSystemOn = renderMenu->addAction("Particle System"))->setCheckable(true);
_renderParticleSystemOn->setChecked(true);
(_manualFirstPerson = renderMenu->addAction(
"First Person", this, SLOT(setRenderFirstPerson(bool)), Qt::Key_P))->setCheckable(true);
(_manualThirdPerson = renderMenu->addAction(
@ -2677,13 +2737,14 @@ void Application::displaySide(Camera& whichCamera) {
if (_mouseVoxel.s != 0) {
glDisable(GL_LIGHTING);
glPushMatrix();
glScalef(TREE_SCALE, TREE_SCALE, TREE_SCALE);
renderMouseVoxelGrid(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
if (_addVoxelMode->isChecked()) {
// use a contrasting color so that we can see what we're doing
glColor3ub(_mouseVoxel.red + 128, _mouseVoxel.green + 128, _mouseVoxel.blue + 128);
} else {
glColor3ub(_mouseVoxel.red, _mouseVoxel.green, _mouseVoxel.blue);
}
glScalef(TREE_SCALE, TREE_SCALE, TREE_SCALE);
glTranslatef(_mouseVoxel.x + _mouseVoxel.s*0.5f,
_mouseVoxel.y + _mouseVoxel.s*0.5f,
_mouseVoxel.z + _mouseVoxel.s*0.5f);
@ -3462,6 +3523,7 @@ void* Application::networkReceive(void* args) {
}
if (NodeList::getInstance()->getNodeSocket()->receive(&senderAddress, app->_incomingPacket, &bytesReceived)) {
app->_packetCount++;
app->_bytesCount += bytesReceived;
@ -3506,13 +3568,11 @@ void* Application::networkReceive(void* args) {
Node* voxelServer = NodeList::getInstance()->soloNodeOfType(NODE_TYPE_VOXEL_SERVER);
if (voxelServer && socketMatch(voxelServer->getActiveSocket(), &senderAddress)) {
voxelServer->lock();
if (messageData[0] == PACKET_TYPE_ENVIRONMENT_DATA) {
app->_environment.parseData(&senderAddress, messageData, messageLength);
} else {
app->_voxels.parseData(messageData, messageLength);
}
voxelServer->unlock();
}
}

View file

@ -365,7 +365,33 @@ void renderGroundPlaneGrid(float size, float impact) {
glEnd();
}
void renderMouseVoxelGrid(const float& mouseVoxelX, const float& mouseVoxelY, const float& mouseVoxelZ, const float& mouseVoxelS) {
glm::vec3 origin = glm::vec3(mouseVoxelX, mouseVoxelY, mouseVoxelZ);
glLineWidth(3.0);
const int HALF_GRID_DIMENSIONS = 4;
glBegin(GL_LINES);
glm::vec3 xColor(0.0, 0.6, 0.0);
glColor3fv(&xColor.x);
glVertex3f(origin.x + HALF_GRID_DIMENSIONS * mouseVoxelS, 0, origin.z);
glVertex3f(origin.x - HALF_GRID_DIMENSIONS * mouseVoxelS, 0, origin.z);
glm::vec3 zColor(0.0, 0.0, 0.6);
glColor3fv(&zColor.x);
glVertex3f(origin.x, 0, origin.z + HALF_GRID_DIMENSIONS * mouseVoxelS);
glVertex3f(origin.x, 0, origin.z - HALF_GRID_DIMENSIONS * mouseVoxelS);
glm::vec3 yColor(0.6, 0.0, 0.0);
glColor3fv(&yColor.x);
glVertex3f(origin.x, 0, origin.z);
glVertex3f(origin.x, origin.y, origin.z);
glEnd();
}
void renderDiskShadow(glm::vec3 position, glm::vec3 upDirection, float radius, float darkness) {

View file

@ -59,8 +59,9 @@ double diffclock(timeval *clock1,timeval *clock2);
void renderGroundPlaneGrid(float size, float impact);
void renderCollisionOverlay(int width, int height, float magnitude);
void renderMouseVoxelGrid(const float& mouseVoxelX, const float& mouseVoxelY, const float& mouseVoxelZ, const float& mouseVoxelS);
void renderCollisionOverlay(int width, int height, float magnitude);
void renderDiskShadow(glm::vec3 position, glm::vec3 upDirection, float radius, float darkness);

View file

@ -19,6 +19,7 @@
#include "Application.h"
#include "Webcam.h"
#include "avatar/Face.h"
using namespace cv;
using namespace std;
@ -154,8 +155,8 @@ Webcam::~Webcam() {
const float METERS_PER_MM = 1.0f / 1000.0f;
void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float meanFaceDepth,
const RotatedRect& faceRect, const JointVector& joints) {
void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midFaceDepth,
float aspectRatio, const RotatedRect& faceRect, bool sending, const JointVector& joints) {
IplImage colorImage = color;
glPixelStorei(GL_UNPACK_ROW_LENGTH, colorImage.widthStep / 3);
if (_colorTextureID == 0) {
@ -192,8 +193,10 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float mean
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
glBindTexture(GL_TEXTURE_2D, 0);
// store our face rect and joints, update our frame count for fps computation
// store our various data, update our frame count for fps computation
_aspectRatio = aspectRatio;
_faceRect = faceRect;
_sending = sending;
_joints = joints;
_frameCount++;
@ -242,18 +245,18 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float mean
if (_initialFaceRect.size.area() == 0) {
_initialFaceRect = _faceRect;
_estimatedPosition = glm::vec3();
_initialFaceDepth = meanFaceDepth;
_initialFaceDepth = midFaceDepth;
} else {
float proportion, z;
if (meanFaceDepth == UNINITIALIZED_FACE_DEPTH) {
if (midFaceDepth == UNINITIALIZED_FACE_DEPTH) {
proportion = sqrtf(_initialFaceRect.size.area() / (float)_faceRect.size.area());
const float INITIAL_DISTANCE_TO_CAMERA = 0.333f;
z = INITIAL_DISTANCE_TO_CAMERA * proportion - INITIAL_DISTANCE_TO_CAMERA;
} else {
z = (meanFaceDepth - _initialFaceDepth) * METERS_PER_MM;
proportion = meanFaceDepth / _initialFaceDepth;
z = (midFaceDepth - _initialFaceDepth) * METERS_PER_MM;
proportion = midFaceDepth / _initialFaceDepth;
}
const float POSITION_SCALE = 0.5f;
_estimatedPosition = glm::vec3(
@ -270,8 +273,8 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float mean
QTimer::singleShot(qMax((int)remaining / 1000, 0), _grabber, SLOT(grabFrame()));
}
FrameGrabber::FrameGrabber() : _initialized(false), _capture(0), _searchWindow(0, 0, 0, 0),
_smoothedMeanFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(), _frameCount(0) {
FrameGrabber::FrameGrabber() : _initialized(false), _videoSendMode(FULL_FRAME_VIDEO), _capture(0), _searchWindow(0, 0, 0, 0),
_smoothedMidFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(), _frameCount(0) {
}
FrameGrabber::~FrameGrabber() {
@ -364,6 +367,13 @@ static void XN_CALLBACK_TYPE calibrationCompleted(SkeletonCapability& capability
}
#endif
void FrameGrabber::cycleVideoSendMode() {
_videoSendMode = (VideoSendMode)((_videoSendMode + 1) % VIDEO_SEND_MODE_COUNT);
_searchWindow = cv::Rect(0, 0, 0, 0);
destroyCodecs();
}
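cycleVideoSendMode() steps through the ring NO_VIDEO -> FACE_VIDEO -> FULL_FRAME_VIDEO and back by incrementing modulo the COUNT sentinel, then clears the face-tracking window and destroys the codecs so both tracker and encoder restart under the new mode. The enum-cycling idiom in isolation:

#include <cstdio>

// Keeping a COUNT sentinel as the last enumerator makes cycling a one-liner.
enum VideoSendMode { NO_VIDEO, FACE_VIDEO, FULL_FRAME_VIDEO, VIDEO_SEND_MODE_COUNT };

VideoSendMode cycle(VideoSendMode mode) {
    return (VideoSendMode)((mode + 1) % VIDEO_SEND_MODE_COUNT);
}

int main() {
    VideoSendMode mode = NO_VIDEO;
    for (int i = 0; i < 4; i++) {
        mode = cycle(mode);
        printf("mode = %d\n", mode); // prints 1, 2, 0, 1
    }
    return 0;
}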
void FrameGrabber::reset() {
_searchWindow = cv::Rect(0, 0, 0, 0);
@ -379,14 +389,7 @@ void FrameGrabber::shutdown() {
cvReleaseCapture(&_capture);
_capture = 0;
}
if (_colorCodec.name != 0) {
vpx_codec_destroy(&_colorCodec);
_colorCodec.name = 0;
}
if (_depthCodec.name != 0) {
vpx_codec_destroy(&_depthCodec);
_depthCodec.name = 0;
}
destroyCodecs();
_initialized = false;
thread()->quit();
@ -462,6 +465,19 @@ void FrameGrabber::grabFrame() {
color = image;
}
int encodedWidth;
int encodedHeight;
int depthBitrateMultiplier = 1;
Mat faceTransform;
float aspectRatio;
if (_videoSendMode == FULL_FRAME_VIDEO) {
// no need to find the face if we're sending full frame video
_smoothedFaceRect = RotatedRect(Point2f(color.cols / 2.0f, color.rows / 2.0f), Size2f(color.cols, color.rows), 0.0f);
encodedWidth = color.cols;
encodedHeight = color.rows;
aspectRatio = FULL_FRAME_ASPECT;
} else {
// if we don't have a search window (yet), try using the face cascade
int channels = 0;
float ranges[] = { 0, 180 };
@ -494,25 +510,11 @@ void FrameGrabber::grabFrame() {
Rect imageBounds(0, 0, color.cols, color.rows);
_searchWindow = Rect(clip(faceBounds.tl(), imageBounds), clip(faceBounds.br(), imageBounds));
}
const int ENCODED_FACE_WIDTH = 128;
const int ENCODED_FACE_HEIGHT = 128;
if (_colorCodec.name == 0) {
// initialize encoder context(s)
vpx_codec_enc_cfg_t codecConfig;
vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &codecConfig, 0);
codecConfig.rc_target_bitrate = ENCODED_FACE_WIDTH * ENCODED_FACE_HEIGHT *
codecConfig.rc_target_bitrate / codecConfig.g_w / codecConfig.g_h;
codecConfig.g_w = ENCODED_FACE_WIDTH;
codecConfig.g_h = ENCODED_FACE_HEIGHT;
vpx_codec_enc_init(&_colorCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
if (!depth.empty()) {
int DEPTH_BITRATE_MULTIPLIER = 2;
codecConfig.rc_target_bitrate *= 2;
vpx_codec_enc_init(&_depthCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
}
}
encodedWidth = ENCODED_FACE_WIDTH;
encodedHeight = ENCODED_FACE_HEIGHT;
depthBitrateMultiplier = 2;
// correct for 180 degree rotations
if (faceRect.angle < -90.0f) {
@ -535,22 +537,96 @@ void FrameGrabber::grabFrame() {
_smoothedFaceRect.angle = glm::mix(faceRect.angle, _smoothedFaceRect.angle, FACE_RECT_SMOOTHING);
}
// resize/rotate face into encoding rectangle
_faceColor.create(ENCODED_FACE_WIDTH, ENCODED_FACE_HEIGHT, CV_8UC3);
// use the face rect to compute the face transform, aspect ratio
Point2f sourcePoints[4];
_smoothedFaceRect.points(sourcePoints);
Point2f destPoints[] = { Point2f(0, ENCODED_FACE_HEIGHT), Point2f(0, 0), Point2f(ENCODED_FACE_WIDTH, 0) };
Mat transform = getAffineTransform(sourcePoints, destPoints);
warpAffine(color, _faceColor, transform, _faceColor.size());
Point2f destPoints[] = { Point2f(0, encodedHeight), Point2f(0, 0), Point2f(encodedWidth, 0) };
faceTransform = getAffineTransform(sourcePoints, destPoints);
aspectRatio = _smoothedFaceRect.size.width / _smoothedFaceRect.size.height;
}
// convert from RGB to YV12
const ushort ELEVEN_BIT_MINIMUM = 0;
const uchar EIGHT_BIT_MIDPOINT = 128;
double depthOffset;
if (!depth.empty()) {
if (_videoSendMode == FACE_VIDEO) {
// warp the face depth without interpolation (because it will contain invalid zero values)
_faceDepth.create(encodedHeight, encodedWidth, CV_16UC1);
warpAffine(depth, _faceDepth, faceTransform, _faceDepth.size(), INTER_NEAREST);
} else {
_faceDepth = depth;
}
_smoothedFaceDepth.create(encodedHeight, encodedWidth, CV_16UC1);
// smooth the depth over time
const ushort ELEVEN_BIT_MAXIMUM = 2047;
const float DEPTH_SMOOTHING = 0.25f;
ushort* src = _faceDepth.ptr<ushort>();
ushort* dest = _smoothedFaceDepth.ptr<ushort>();
ushort minimumDepth = numeric_limits<ushort>::max();
for (int i = 0; i < encodedHeight; i++) {
for (int j = 0; j < encodedWidth; j++) {
ushort depth = *src++;
if (depth != ELEVEN_BIT_MINIMUM && depth != ELEVEN_BIT_MAXIMUM) {
minimumDepth = min(minimumDepth, depth);
*dest = (*dest == ELEVEN_BIT_MINIMUM) ? depth : (ushort)glm::mix(depth, *dest, DEPTH_SMOOTHING);
}
dest++;
}
}
const ushort MINIMUM_DEPTH_OFFSET = 64;
const float FIXED_MID_DEPTH = 640.0f;
float midFaceDepth = (_videoSendMode == FACE_VIDEO) ? (minimumDepth + MINIMUM_DEPTH_OFFSET) : FIXED_MID_DEPTH;
// smooth the mid face depth over time
const float MID_FACE_DEPTH_SMOOTHING = 0.5f;
_smoothedMidFaceDepth = (_smoothedMidFaceDepth == UNINITIALIZED_FACE_DEPTH) ? midFaceDepth :
glm::mix(midFaceDepth, _smoothedMidFaceDepth, MID_FACE_DEPTH_SMOOTHING);
// convert from 11 to 8 bits for preview/local display
depthOffset = EIGHT_BIT_MIDPOINT - _smoothedMidFaceDepth;
depth.convertTo(_grayDepthFrame, CV_8UC1, 1.0, depthOffset);
}
QByteArray payload;
if (_videoSendMode != NO_VIDEO) {
if (_colorCodec.name == 0) {
// initialize encoder context(s)
vpx_codec_enc_cfg_t codecConfig;
vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &codecConfig, 0);
codecConfig.rc_target_bitrate = encodedWidth * encodedHeight *
codecConfig.rc_target_bitrate / codecConfig.g_w / codecConfig.g_h;
codecConfig.g_w = encodedWidth;
codecConfig.g_h = encodedHeight;
vpx_codec_enc_init(&_colorCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
if (!depth.empty()) {
codecConfig.rc_target_bitrate *= depthBitrateMultiplier;
vpx_codec_enc_init(&_depthCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
}
}
Mat transform;
if (_videoSendMode == FACE_VIDEO) {
// resize/rotate face into encoding rectangle
_faceColor.create(encodedHeight, encodedWidth, CV_8UC3);
warpAffine(color, _faceColor, faceTransform, _faceColor.size());
} else {
_faceColor = color;
}
// convert from RGB to YV12: see http://www.fourcc.org/yuv.php and
// http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
const int ENCODED_BITS_PER_Y = 8;
const int ENCODED_BITS_PER_VU = 2;
const int ENCODED_BITS_PER_PIXEL = ENCODED_BITS_PER_Y + 2 * ENCODED_BITS_PER_VU;
const int BITS_PER_BYTE = 8;
_encodedFace.resize(ENCODED_FACE_WIDTH * ENCODED_FACE_HEIGHT * ENCODED_BITS_PER_PIXEL / BITS_PER_BYTE);
_encodedFace.resize(encodedWidth * encodedHeight * ENCODED_BITS_PER_PIXEL / BITS_PER_BYTE);
vpx_image_t vpxImage;
vpx_img_wrap(&vpxImage, VPX_IMG_FMT_YV12, ENCODED_FACE_WIDTH, ENCODED_FACE_HEIGHT, 1, (unsigned char*)_encodedFace.data());
vpx_img_wrap(&vpxImage, VPX_IMG_FMT_YV12, encodedWidth, encodedHeight, 1,
(unsigned char*)_encodedFace.data());
uchar* yline = vpxImage.planes[0];
uchar* vline = vpxImage.planes[1];
uchar* uline = vpxImage.planes[2];
@ -566,11 +642,11 @@ void FrameGrabber::grabFrame() {
redIndex = 2;
blueIndex = 0;
}
for (int i = 0; i < ENCODED_FACE_HEIGHT; i += 2) {
for (int i = 0; i < encodedHeight; i += 2) {
uchar* ydest = yline;
uchar* vdest = vline;
uchar* udest = uline;
for (int j = 0; j < ENCODED_FACE_WIDTH; j += 2) {
for (int j = 0; j < encodedWidth; j += 2) {
uchar* tl = _faceColor.ptr(i, j);
uchar* tr = _faceColor.ptr(i, j + 1);
uchar* bl = _faceColor.ptr(i + 1, j);
@ -600,9 +676,8 @@ void FrameGrabber::grabFrame() {
// encode the frame
vpx_codec_encode(&_colorCodec, &vpxImage, ++_frameCount, 1, 0, VPX_DL_REALTIME);
// start the payload off with the aspect ratio
QByteArray payload(sizeof(float), 0);
*(float*)payload.data() = _smoothedFaceRect.size.width / _smoothedFaceRect.size.height;
// start the payload off with the aspect ratio (zero for full frame)
payload.append((const char*)&aspectRatio, sizeof(float));
// extract the encoded frame
vpx_codec_iter_t iterator = 0;
@ -616,60 +691,31 @@ void FrameGrabber::grabFrame() {
}
if (!depth.empty()) {
// warp the face depth without interpolation (because it will contain invalid zero values)
_faceDepth.create(ENCODED_FACE_WIDTH, ENCODED_FACE_HEIGHT, CV_16UC1);
warpAffine(depth, _faceDepth, transform, _faceDepth.size(), INTER_NEAREST);
// find the mean of the valid values
qint64 depthTotal = 0;
qint64 depthSamples = 0;
ushort* src = _faceDepth.ptr<ushort>();
const ushort ELEVEN_BIT_MINIMUM = 0;
const ushort ELEVEN_BIT_MAXIMUM = 2047;
for (int i = 0; i < ENCODED_FACE_HEIGHT; i++) {
for (int j = 0; j < ENCODED_FACE_WIDTH; j++) {
ushort depth = *src++;
if (depth != ELEVEN_BIT_MINIMUM && depth != ELEVEN_BIT_MAXIMUM) {
depthTotal += depth;
depthSamples++;
}
}
}
float mean = (depthSamples == 0) ? UNINITIALIZED_FACE_DEPTH : depthTotal / (float)depthSamples;
// smooth the mean over time
const float DEPTH_OFFSET_SMOOTHING = 0.95f;
_smoothedMeanFaceDepth = (_smoothedMeanFaceDepth == UNINITIALIZED_FACE_DEPTH) ? mean :
glm::mix(mean, _smoothedMeanFaceDepth, DEPTH_OFFSET_SMOOTHING);
// convert from 11 to 8 bits for preview/local display
const uchar EIGHT_BIT_MIDPOINT = 128;
double depthOffset = EIGHT_BIT_MIDPOINT - _smoothedMeanFaceDepth;
depth.convertTo(_grayDepthFrame, CV_8UC1, 1.0, depthOffset);
// likewise for the encoded representation
// convert with mask
uchar* yline = vpxImage.planes[0];
uchar* vline = vpxImage.planes[1];
uchar* uline = vpxImage.planes[2];
const uchar EIGHT_BIT_MAXIMUM = 255;
for (int i = 0; i < ENCODED_FACE_HEIGHT; i += 2) {
for (int i = 0; i < encodedHeight; i += 2) {
uchar* ydest = yline;
uchar* vdest = vline;
uchar* udest = uline;
for (int j = 0; j < ENCODED_FACE_WIDTH; j += 2) {
ushort tl = *_faceDepth.ptr<ushort>(i, j);
ushort tr = *_faceDepth.ptr<ushort>(i, j + 1);
ushort bl = *_faceDepth.ptr<ushort>(i + 1, j);
ushort br = *_faceDepth.ptr<ushort>(i + 1, j + 1);
for (int j = 0; j < encodedWidth; j += 2) {
ushort tl = *_smoothedFaceDepth.ptr<ushort>(i, j);
ushort tr = *_smoothedFaceDepth.ptr<ushort>(i, j + 1);
ushort bl = *_smoothedFaceDepth.ptr<ushort>(i + 1, j);
ushort br = *_smoothedFaceDepth.ptr<ushort>(i + 1, j + 1);
uchar mask = EIGHT_BIT_MAXIMUM;
ydest[0] = (tl == ELEVEN_BIT_MINIMUM) ? (mask = EIGHT_BIT_MIDPOINT) : saturate_cast<uchar>(tl + depthOffset);
ydest[1] = (tr == ELEVEN_BIT_MINIMUM) ? (mask = EIGHT_BIT_MIDPOINT) : saturate_cast<uchar>(tr + depthOffset);
ydest[vpxImage.stride[0]] = (bl == ELEVEN_BIT_MINIMUM) ?
(mask = EIGHT_BIT_MIDPOINT) : saturate_cast<uchar>(bl + depthOffset);
ydest[vpxImage.stride[0] + 1] = (br == ELEVEN_BIT_MINIMUM) ?
(mask = EIGHT_BIT_MIDPOINT) : saturate_cast<uchar>(br + depthOffset);
ydest[0] = (tl == ELEVEN_BIT_MINIMUM) ? (mask = EIGHT_BIT_MIDPOINT) :
saturate_cast<uchar>(tl + depthOffset);
ydest[1] = (tr == ELEVEN_BIT_MINIMUM) ? (mask = EIGHT_BIT_MIDPOINT) :
saturate_cast<uchar>(tr + depthOffset);
ydest[vpxImage.stride[0]] = (bl == ELEVEN_BIT_MINIMUM) ? (mask = EIGHT_BIT_MIDPOINT) :
saturate_cast<uchar>(bl + depthOffset);
ydest[vpxImage.stride[0] + 1] = (br == ELEVEN_BIT_MINIMUM) ? (mask = EIGHT_BIT_MIDPOINT) :
saturate_cast<uchar>(br + depthOffset);
ydest += 2;
*vdest++ = mask;
@ -692,13 +738,15 @@ void FrameGrabber::grabFrame() {
}
}
}
}
QMetaObject::invokeMethod(Application::getInstance(), "sendAvatarFaceVideoMessage",
Q_ARG(int, _frameCount), Q_ARG(QByteArray, payload));
QMetaObject::invokeMethod(Application::getInstance()->getWebcam(), "setFrame",
Q_ARG(cv::Mat, color), Q_ARG(int, format), Q_ARG(cv::Mat, _grayDepthFrame), Q_ARG(float, _smoothedMeanFaceDepth),
Q_ARG(cv::RotatedRect, _smoothedFaceRect), Q_ARG(JointVector, joints));
Q_ARG(cv::Mat, color), Q_ARG(int, format), Q_ARG(cv::Mat, _grayDepthFrame), Q_ARG(float, _smoothedMidFaceDepth),
Q_ARG(float, aspectRatio), Q_ARG(cv::RotatedRect, _smoothedFaceRect), Q_ARG(bool, !payload.isEmpty()),
Q_ARG(JointVector, joints));
}
bool FrameGrabber::init() {
@ -767,6 +815,17 @@ void FrameGrabber::updateHSVFrame(const Mat& frame, int format) {
inRange(_hsvFrame, Scalar(0, 55, 65), Scalar(180, 256, 256), _mask);
}
void FrameGrabber::destroyCodecs() {
if (_colorCodec.name != 0) {
vpx_codec_destroy(&_colorCodec);
_colorCodec.name = 0;
}
if (_depthCodec.name != 0) {
vpx_codec_destroy(&_depthCodec);
_depthCodec.name = 0;
}
}
Joint::Joint(const glm::vec3& position, const glm::quat& rotation, const glm::vec3& projected) :
isValid(true), position(position), rotation(rotation), projected(projected) {
}

View file

@ -44,12 +44,17 @@ public:
Webcam();
~Webcam();
FrameGrabber* getGrabber() { return _grabber; }
bool isActive() const { return _active; }
bool isSending() const { return _sending; }
GLuint getColorTextureID() const { return _colorTextureID; }
GLuint getDepthTextureID() const { return _depthTextureID; }
const cv::Size2f& getTextureSize() const { return _textureSize; }
float getAspectRatio() const { return _aspectRatio; }
const cv::RotatedRect& getFaceRect() const { return _faceRect; }
const glm::vec3& getEstimatedPosition() const { return _estimatedPosition; }
@ -62,8 +67,8 @@ public:
public slots:
void setEnabled(bool enabled);
void setFrame(const cv::Mat& color, int format, const cv::Mat& depth, float meanFaceDepth,
const cv::RotatedRect& faceRect, const JointVector& joints);
void setFrame(const cv::Mat& color, int format, const cv::Mat& depth, float midFaceDepth,
float aspectRatio, const cv::RotatedRect& faceRect, bool sending, const JointVector& joints);
private:
@ -72,9 +77,11 @@ private:
bool _enabled;
bool _active;
bool _sending;
GLuint _colorTextureID;
GLuint _depthTextureID;
cv::Size2f _textureSize;
float _aspectRatio;
cv::RotatedRect _faceRect;
cv::RotatedRect _initialFaceRect;
float _initialFaceDepth;
@ -100,16 +107,21 @@ public:
public slots:
void cycleVideoSendMode();
void reset();
void shutdown();
void grabFrame();
private:
enum VideoSendMode { NO_VIDEO, FACE_VIDEO, FULL_FRAME_VIDEO, VIDEO_SEND_MODE_COUNT };
bool init();
void updateHSVFrame(const cv::Mat& frame, int format);
void destroyCodecs();
bool _initialized;
VideoSendMode _videoSendMode;
CvCapture* _capture;
cv::CascadeClassifier _faceCascade;
cv::Mat _hsvFrame;
@ -118,13 +130,14 @@ private:
cv::Mat _backProject;
cv::Rect _searchWindow;
cv::Mat _grayDepthFrame;
float _smoothedMeanFaceDepth;
float _smoothedMidFaceDepth;
vpx_codec_ctx_t _colorCodec;
vpx_codec_ctx_t _depthCodec;
int _frameCount;
cv::Mat _faceColor;
cv::Mat _faceDepth;
cv::Mat _smoothedFaceDepth;
QByteArray _encodedFace;
cv::RotatedRect _smoothedFaceRect;

View file

@ -314,10 +314,7 @@ void Avatar::updateFromGyrosAndOrWebcam(bool gyroLook,
estimatedPosition = webcam->getEstimatedPosition();
// apply face data
_head.getFace().setColorTextureID(webcam->getColorTextureID());
_head.getFace().setDepthTextureID(webcam->getDepthTextureID());
_head.getFace().setTextureSize(webcam->getTextureSize());
_head.getFace().setTextureRect(webcam->getFaceRect());
_head.getFace().setFrameFromWebcam();
// compute and store the joint rotations
const JointVector& joints = webcam->getEstimatedJoints();
@ -334,7 +331,7 @@ void Avatar::updateFromGyrosAndOrWebcam(bool gyroLook,
}
}
} else {
_head.getFace().setColorTextureID(0);
_head.getFace().clearFrame();
}
_head.setPitch(estimatedRotation.x * amplifyAngle.x + pitchFromTouch);
_head.setYaw(estimatedRotation.y * amplifyAngle.y + yawFromTouch);
@ -1301,8 +1298,14 @@ float Avatar::getBallRenderAlpha(int ball, bool lookingInMirror) const {
void Avatar::renderBody(bool lookingInMirror, bool renderAvatarBalls) {
if (_head.getFace().isFullFrame()) {
// Render the full-frame video
float alpha = getBallRenderAlpha(BODY_BALL_HEAD_BASE, lookingInMirror);
if (alpha > 0.0f) {
_head.getFace().render(1.0f);
}
} else if (renderAvatarBalls || !_voxels.getVoxelURL().isValid()) {
// Render the body as balls and cones
if (renderAvatarBalls || !_voxels.getVoxelURL().isValid()) {
for (int b = 0; b < NUM_AVATAR_BODY_BALLS; b++) {
float alpha = getBallRenderAlpha(b, lookingInMirror);

View file

@ -17,6 +17,7 @@
#include "Avatar.h"
#include "Head.h"
#include "Face.h"
#include "Webcam.h"
#include "renderer/ProgramObject.h"
using namespace cv;
@ -25,7 +26,6 @@ ProgramObject* Face::_program = 0;
int Face::_texCoordCornerLocation;
int Face::_texCoordRightLocation;
int Face::_texCoordUpLocation;
int Face::_aspectRatioLocation;
GLuint Face::_vboID;
GLuint Face::_iboID;
@ -55,17 +55,25 @@ Face::~Face() {
}
}
void Face::setTextureRect(const cv::RotatedRect& textureRect) {
_textureRect = textureRect;
_aspectRatio = _textureRect.size.width / _textureRect.size.height;
void Face::setFrameFromWebcam() {
Webcam* webcam = Application::getInstance()->getWebcam();
if (webcam->isSending()) {
_colorTextureID = webcam->getColorTextureID();
_depthTextureID = webcam->getDepthTextureID();
_textureSize = webcam->getTextureSize();
_textureRect = webcam->getFaceRect();
_aspectRatio = webcam->getAspectRatio();
} else {
clearFrame();
}
}
void Face::clearFrame() {
_colorTextureID = 0;
}
int Face::processVideoMessage(unsigned char* packetData, size_t dataBytes) {
if (_colorCodec.name == 0) {
// initialize decoder context
vpx_codec_dec_init(&_colorCodec, vpx_codec_vp8_dx(), 0, 0);
}
// skip the header
unsigned char* packetPosition = packetData;
int frameCount = *(uint32_t*)packetPosition;
@ -89,15 +97,41 @@ int Face::processVideoMessage(unsigned char* packetData, size_t dataBytes) {
int payloadSize = dataBytes - (packetPosition - packetData);
memcpy(_arrivingFrame.data() + frameOffset, packetPosition, payloadSize);
if ((_frameBytesRemaining -= payloadSize) <= 0) {
if ((_frameBytesRemaining -= payloadSize) > 0) {
return dataBytes; // wait for the rest of the frame
}
if (frameSize == 0) {
// destroy the codecs, if we have any
destroyCodecs();
// disables video data
QMetaObject::invokeMethod(this, "setFrame", Q_ARG(cv::Mat, Mat()),
Q_ARG(cv::Mat, Mat()), Q_ARG(float, 0.0f));
return dataBytes;
}
// the switch from full frame to not (or vice versa) requires us to reinit the codecs
float aspectRatio = *(const float*)_arrivingFrame.constData();
bool fullFrame = (aspectRatio == FULL_FRAME_ASPECT);
if (fullFrame != _lastFullFrame) {
destroyCodecs();
_lastFullFrame = fullFrame;
}
if (_colorCodec.name == 0) {
// initialize decoder context
vpx_codec_dec_init(&_colorCodec, vpx_codec_vp8_dx(), 0, 0);
}
size_t colorSize = *(const size_t*)(_arrivingFrame.constData() + sizeof(float));
const uint8_t* colorData = (const uint8_t*)(_arrivingFrame.constData() + sizeof(float) + sizeof(size_t));
vpx_codec_decode(&_colorCodec, colorData, colorSize, 0, 0);
vpx_codec_iter_t iterator = 0;
vpx_image_t* image;
while ((image = vpx_codec_get_frame(&_colorCodec, &iterator)) != 0) {
// convert from YV12 to RGB
// convert from YV12 to RGB: see http://www.fourcc.org/yuv.php and
// http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
Mat color(image->d_h, image->d_w, CV_8UC3);
uchar* yline = image->planes[0];
uchar* vline = image->planes[1];
@ -193,7 +227,6 @@ int Face::processVideoMessage(unsigned char* packetData, size_t dataBytes) {
QMetaObject::invokeMethod(this, "setFrame", Q_ARG(cv::Mat, color),
Q_ARG(cv::Mat, depth), Q_ARG(float, aspectRatio));
}
}
return dataBytes;
}
@ -208,8 +241,21 @@ bool Face::render(float alpha) {
glm::quat orientation = _owningHead->getOrientation();
glm::vec3 axis = glm::axis(orientation);
glRotatef(glm::angle(orientation), axis.x, axis.y, axis.z);
float scale = BODY_BALL_RADIUS_HEAD_BASE * _owningHead->getScale();
glScalef(scale, scale, scale);
float aspect, xScale, zScale;
if (_aspectRatio == FULL_FRAME_ASPECT) {
aspect = _textureSize.width / _textureSize.height;
const float FULL_FRAME_SCALE = 0.5f;
xScale = FULL_FRAME_SCALE * _owningHead->getScale();
zScale = xScale * 0.3f;
} else {
aspect = _aspectRatio;
xScale = BODY_BALL_RADIUS_HEAD_BASE * _owningHead->getScale();
zScale = xScale * 1.5f;
glTranslatef(0.0f, -xScale * 0.75f, -xScale);
}
glScalef(xScale, xScale / aspect, zScale);
glColor4f(1.0f, 1.0f, 1.0f, alpha);
@ -243,7 +289,6 @@ bool Face::render(float alpha) {
_texCoordCornerLocation = _program->uniformLocation("texCoordCorner");
_texCoordRightLocation = _program->uniformLocation("texCoordRight");
_texCoordUpLocation = _program->uniformLocation("texCoordUp");
_aspectRatioLocation = _program->uniformLocation("aspectRatio");
glGenBuffers(1, &_vboID);
glBindBuffer(GL_ARRAY_BUFFER, _vboID);
@ -292,7 +337,6 @@ bool Face::render(float alpha) {
(points[3].x - points[0].x) / _textureSize.width, (points[3].y - points[0].y) / _textureSize.height);
_program->setUniformValue(_texCoordUpLocation,
(points[1].x - points[0].x) / _textureSize.width, (points[1].y - points[0].y) / _textureSize.height);
_program->setUniformValue(_aspectRatioLocation, _aspectRatio);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, 0);
@ -324,13 +368,13 @@ bool Face::render(float alpha) {
glBegin(GL_QUADS);
glTexCoord2f(points[0].x / _textureSize.width, points[0].y / _textureSize.height);
glVertex3f(0.5f, -0.5f / _aspectRatio, -0.5f);
glVertex3f(0.5f, -0.5f, 0.0f);
glTexCoord2f(points[1].x / _textureSize.width, points[1].y / _textureSize.height);
glVertex3f(0.5f, 0.5f / _aspectRatio, -0.5f);
glVertex3f(0.5f, 0.5f, 0.0f);
glTexCoord2f(points[2].x / _textureSize.width, points[2].y / _textureSize.height);
glVertex3f(-0.5f, 0.5f / _aspectRatio, -0.5f);
glVertex3f(-0.5f, 0.5f, 0.0f);
glTexCoord2f(points[3].x / _textureSize.width, points[3].y / _textureSize.height);
glVertex3f(-0.5f, -0.5f / _aspectRatio, -0.5f);
glVertex3f(-0.5f, -0.5f, 0.0f);
glEnd();
glDisable(GL_TEXTURE_2D);
@ -348,23 +392,40 @@ void Face::cycleRenderMode() {
}
void Face::setFrame(const cv::Mat& color, const cv::Mat& depth, float aspectRatio) {
if (color.empty()) {
// release our textures, if any; there's no more video
if (_colorTextureID != 0) {
glDeleteTextures(1, &_colorTextureID);
_colorTextureID = 0;
}
if (_depthTextureID != 0) {
glDeleteTextures(1, &_depthTextureID);
_depthTextureID = 0;
}
return;
}
if (_colorTextureID == 0) {
glGenTextures(1, &_colorTextureID);
}
glBindTexture(GL_TEXTURE_2D, _colorTextureID);
bool recreateTextures = (_textureSize.width != color.cols || _textureSize.height != color.rows);
if (recreateTextures) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, color.cols, color.rows, 0, GL_RGB, GL_UNSIGNED_BYTE, color.ptr());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
_textureSize = color.size();
_textureRect = RotatedRect(Point2f(color.cols * 0.5f, color.rows * 0.5f), _textureSize, 0.0f);
} else {
glBindTexture(GL_TEXTURE_2D, _colorTextureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, color.cols, color.rows, GL_RGB, GL_UNSIGNED_BYTE, color.ptr());
}
if (!depth.empty()) {
if (_depthTextureID == 0) {
glGenTextures(1, &_depthTextureID);
}
glBindTexture(GL_TEXTURE_2D, _depthTextureID);
if (recreateTextures) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, depth.cols, depth.rows, 0,
GL_LUMINANCE, GL_UNSIGNED_BYTE, depth.ptr());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
@ -380,3 +441,13 @@ void Face::setFrame(const cv::Mat& color, const cv::Mat& depth, float aspectRati
_aspectRatio = aspectRatio;
}
void Face::destroyCodecs() {
if (_colorCodec.name != 0) {
vpx_codec_destroy(&_colorCodec);
_colorCodec.name = 0;
}
if (_depthCodec.name != 0) {
vpx_codec_destroy(&_depthCodec);
_depthCodec.name = 0;
}
}
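Webcam.cpp and Face.cpp now carry the same destroyCodecs() helper, and both rely on the ctx.name field doubling as an initialized flag: init sets it, destroy nulls it, so teardown is idempotent and the "if (name == 0) init" guards elsewhere in the diff do lazy setup. A sketch of the pairing for the decoder side, assuming the libvpx VP8 API used throughout this commit:

#include <vpx/vpx_decoder.h>
#include <vpx/vp8dx.h>

// Lazy init: vpx_codec_dec_init() fills in ctx.name on success.
void ensureDecoder(vpx_codec_ctx_t& codec) {
    if (codec.name == 0) {
        vpx_codec_dec_init(&codec, vpx_codec_vp8_dx(), 0, 0);
    }
}

// Idempotent teardown: nulling name lets this run any number of times.
void destroyDecoder(vpx_codec_ctx_t& codec) {
    if (codec.name != 0) {
        vpx_codec_destroy(&codec);
        codec.name = 0;
    }
}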

View file

@ -20,6 +20,8 @@
class Head;
class ProgramObject;
const float FULL_FRAME_ASPECT = 0.0f;
class Face : public QObject {
Q_OBJECT
@ -28,10 +30,10 @@ public:
Face(Head* owningHead);
~Face();
void setColorTextureID(GLuint colorTextureID) { _colorTextureID = colorTextureID; }
void setDepthTextureID(GLuint depthTextureID) { _depthTextureID = depthTextureID; }
void setTextureSize(const cv::Size2f& textureSize) { _textureSize = textureSize; }
void setTextureRect(const cv::RotatedRect& textureRect);
bool isFullFrame() const { return _colorTextureID != 0 && _aspectRatio == FULL_FRAME_ASPECT; }
void setFrameFromWebcam();
void clearFrame();
int processVideoMessage(unsigned char* packetData, size_t dataBytes);
@ -49,6 +51,8 @@ private:
enum RenderMode { MESH, POINTS, RENDER_MODE_COUNT };
void destroyCodecs();
Head* _owningHead;
RenderMode _renderMode;
GLuint _colorTextureID;
@ -59,6 +63,7 @@ private:
vpx_codec_ctx_t _colorCodec;
vpx_codec_ctx_t _depthCodec;
bool _lastFullFrame;
QByteArray _arrivingFrame;
int _frameCount;
@ -68,7 +73,6 @@ private:
static int _texCoordCornerLocation;
static int _texCoordRightLocation;
static int _texCoordUpLocation;
static int _aspectRatioLocation;
static GLuint _vboID;
static GLuint _iboID;
};

View file

@ -8,13 +8,9 @@ set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../cm
set(TARGET_NAME avatars)
find_package(Qt5Core)
include(${MACRO_DIR}/SetupHifiLibrary.cmake)
setup_hifi_library(${TARGET_NAME})
qt5_use_modules(${TARGET_NAME} Core)
include(${MACRO_DIR}/IncludeGLM.cmake)
include_glm(${TARGET_NAME} ${ROOT_DIR})

View file

@ -55,6 +55,9 @@ void HandData::encodeRemoteData(std::vector<glm::vec3>& fingerVectors) {
for (size_t i = 0; i < getNumPalms(); ++i) {
PalmData& palm = getPalms()[i];
if (!palm.isActive()) {
continue;
}
fingerVectors.push_back(palm.getRawPosition());
fingerVectors.push_back(palm.getRawNormal());
for (size_t f = 0; f < palm.getNumFingers(); ++f) {

View file

@ -6,13 +6,9 @@ set(MACRO_DIR ${ROOT_DIR}/cmake/macros)
set(TARGET_NAME shared)
project(${TARGET_NAME})
find_package(Qt5Core REQUIRED)
include(${MACRO_DIR}/SetupHifiLibrary.cmake)
setup_hifi_library(${TARGET_NAME})
qt5_use_modules(${TARGET_NAME} Core)
set(EXTERNAL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/external)
if (WIN32)

View file

@ -352,6 +352,12 @@ int NodeList::processDomainServerList(unsigned char* packetData, size_t dataByte
readPtr += unpackSocket(readPtr, (sockaddr*) &nodePublicSocket);
readPtr += unpackSocket(readPtr, (sockaddr*) &nodeLocalSocket);
// if the public socket address is 0 then it's reachable at the same IP
// as the domain server
if (nodePublicSocket.sin_addr.s_addr == 0) {
inet_aton(_domainIP, &nodePublicSocket.sin_addr);
}
addOrUpdateNode((sockaddr*) &nodePublicSocket, (sockaddr*) &nodeLocalSocket, nodeType, nodeId);
}

View file

@ -8,9 +8,13 @@ set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../cm
set(TARGET_NAME voxels)
find_package(Qt5Widgets REQUIRED)
include(${MACRO_DIR}/SetupHifiLibrary.cmake)
setup_hifi_library(${TARGET_NAME})
qt5_use_modules(${TARGET_NAME} Widgets)
include(${MACRO_DIR}/IncludeGLM.cmake)
include_glm(${TARGET_NAME} ${ROOT_DIR})

View file

@ -41,4 +41,6 @@ const glBufferIndex GLBUFFER_INDEX_UNKNOWN = ULONG_MAX;
const float SIXTY_FPS_IN_MILLISECONDS = 1000.0f / 60.0f;
const float VIEW_CULLING_RATE_IN_MILLISECONDS = 1000.0f; // once a second is fine
const uint64_t CLIENT_TO_SERVER_VOXEL_SEND_INTERVAL_USECS = 1000 * 5; // 1 packet every 5 milliseconds
#endif

View file

@ -61,6 +61,7 @@ bool wantColorRandomizer = false;
bool debugVoxelSending = false;
bool shouldShowAnimationDebug = false;
bool displayVoxelStats = false;
bool debugVoxelReceiving = false;
EnvironmentData environmentData[3];
@ -426,6 +427,8 @@ void attachVoxelNodeDataToNode(Node* newNode) {
}
}
int receivedPacketCount = 0;
int main(int argc, const char * argv[]) {
pthread_mutex_init(&::treeLock, NULL);
@ -455,6 +458,10 @@ int main(int argc, const char * argv[]) {
::debugVoxelSending = cmdOptionExists(argc, argv, DEBUG_VOXEL_SENDING);
printf("debugVoxelSending=%s\n", debug::valueOf(::debugVoxelSending));
const char* DEBUG_VOXEL_RECEIVING = "--debugVoxelReceiving";
::debugVoxelReceiving = cmdOptionExists(argc, argv, DEBUG_VOXEL_RECEIVING);
printf("debugVoxelReceiving=%s\n", debug::valueOf(::debugVoxelReceiving));
const char* WANT_ANIMATION_DEBUG = "--shouldShowAnimationDebug";
::shouldShowAnimationDebug = cmdOptionExists(argc, argv, WANT_ANIMATION_DEBUG);
printf("shouldShowAnimationDebug=%s\n", debug::valueOf(::shouldShowAnimationDebug));
@ -584,12 +591,20 @@ int main(int argc, const char * argv[]) {
destructive ? "PACKET_TYPE_SET_VOXEL_DESTRUCTIVE" : "PACKET_TYPE_SET_VOXEL",
::shouldShowAnimationDebug);
::receivedPacketCount++;
unsigned short int itemNumber = (*((unsigned short int*)(packetData + numBytesPacketHeader)));
if (::shouldShowAnimationDebug) {
printf("got %s - command from client receivedBytes=%ld itemNumber=%d\n",
destructive ? "PACKET_TYPE_SET_VOXEL_DESTRUCTIVE" : "PACKET_TYPE_SET_VOXEL",
receivedBytes, itemNumber);
}
if (::debugVoxelReceiving) {
printf("got %s - %d command from client receivedBytes=%ld itemNumber=%d\n",
destructive ? "PACKET_TYPE_SET_VOXEL_DESTRUCTIVE" : "PACKET_TYPE_SET_VOXEL",
::receivedPacketCount, receivedBytes, itemNumber);
}
int atByte = numBytesPacketHeader + sizeof(itemNumber);
unsigned char* voxelData = (unsigned char*)&packetData[atByte];
while (atByte < receivedBytes) {