From bd3c235fe4178d1e86c5b0cca27163164307dd99 Mon Sep 17 00:00:00 2001
From: Andrzej Kapolka
Date: Tue, 6 Aug 2013 16:23:58 -0700
Subject: [PATCH] Switching between textured and untextured modes.

---
 .../resources/shaders/face_textured.frag |   1 +
 interface/src/Application.cpp            |   3 +-
 interface/src/Webcam.cpp                 | 285 ++++++++++--------
 interface/src/Webcam.h                   |   2 +
 interface/src/avatar/Face.cpp            |  22 +-
 interface/src/avatar/Face.h              |   1 +
 6 files changed, 179 insertions(+), 135 deletions(-)

diff --git a/interface/resources/shaders/face_textured.frag b/interface/resources/shaders/face_textured.frag
index fefefc6817..2170074e25 100644
--- a/interface/resources/shaders/face_textured.frag
+++ b/interface/resources/shaders/face_textured.frag
@@ -72,6 +72,7 @@ void main(void) {
     // compute the specular component (sans exponent) based on the normal OpenGL lighting model
     float specular = max(0.0, dot(normalize(gl_LightSource[0].position.xyz + vec3(0.0, 0.0, 1.0)), normal));
 
+    // the base color is a subtle marble texture produced by modulating the phase of a sine wave by perlin noise
     vec3 color = mix(vec3(1.0, 1.0, 1.0), vec3(0.75, 0.75, 0.75),
         sin(dot(position, vec3(25.0, 25.0, 25.0)) + 2.0 * perlin(position * 10.0)));
 
diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp
index 66f414d38d..1077eda7d9 100644
--- a/interface/src/Application.cpp
+++ b/interface/src/Application.cpp
@@ -1943,8 +1943,9 @@ void Application::initMenu() {
     _testPing->setChecked(true);
     (_fullScreenMode = optionsMenu->addAction("Fullscreen", this, SLOT(setFullscreen(bool)), Qt::Key_F))->setCheckable(true);
     optionsMenu->addAction("Webcam", &_webcam, SLOT(setEnabled(bool)))->setCheckable(true);
-    optionsMenu->addAction("Toggle Skeleton Tracking", &_webcam, SLOT(setSkeletonTrackingOn(bool)))->setCheckable(true);
+    optionsMenu->addAction("Skeleton Tracking", &_webcam, SLOT(setSkeletonTrackingOn(bool)))->setCheckable(true);
     optionsMenu->addAction("Cycle Webcam Send Mode", _webcam.getGrabber(), SLOT(cycleVideoSendMode()));
+    optionsMenu->addAction("Webcam Texture", _webcam.getGrabber(), SLOT(setDepthOnly(bool)))->setCheckable(true);
     optionsMenu->addAction("Go Home", this, SLOT(goHome()), Qt::CTRL | Qt::Key_G);
 
     QMenu* audioMenu = menuBar->addMenu("Audio");
diff --git a/interface/src/Webcam.cpp b/interface/src/Webcam.cpp
index 4d2edcac37..0deae62e46 100644
--- a/interface/src/Webcam.cpp
+++ b/interface/src/Webcam.cpp
@@ -71,26 +71,28 @@ void Webcam::reset() {
 }
 
 void Webcam::renderPreview(int screenWidth, int screenHeight) {
-    if (_enabled && _colorTextureID != 0) {
-        glBindTexture(GL_TEXTURE_2D, _colorTextureID);
+    if (_enabled) {
         glEnable(GL_TEXTURE_2D);
         glColor3f(1.0f, 1.0f, 1.0f);
-        glBegin(GL_QUADS);
-            const int PREVIEW_HEIGHT = 200;
-            int previewWidth = _textureSize.width * PREVIEW_HEIGHT / _textureSize.height;
-            int top = screenHeight - 600;
-            int left = screenWidth - previewWidth - 10;
-
-            glTexCoord2f(0, 0);
-            glVertex2f(left, top);
-            glTexCoord2f(1, 0);
-            glVertex2f(left + previewWidth, top);
-            glTexCoord2f(1, 1);
-            glVertex2f(left + previewWidth, top + PREVIEW_HEIGHT);
-            glTexCoord2f(0, 1);
-            glVertex2f(left, top + PREVIEW_HEIGHT);
-        glEnd();
-
+
+        const int PREVIEW_HEIGHT = 200;
+        int previewWidth = _textureSize.width * PREVIEW_HEIGHT / _textureSize.height;
+        int top = screenHeight - 600;
+        int left = screenWidth - previewWidth - 10;
+        if (_colorTextureID != 0) {
+            glBindTexture(GL_TEXTURE_2D, _colorTextureID);
+            glBegin(GL_QUADS);
+                glTexCoord2f(0, 0);
+                glVertex2f(left, top);
+                glTexCoord2f(1, 0);
+                glVertex2f(left + previewWidth, top);
+                glTexCoord2f(1, 1);
+                glVertex2f(left + previewWidth, top + PREVIEW_HEIGHT);
+                glTexCoord2f(0, 1);
+                glVertex2f(left, top + PREVIEW_HEIGHT);
+            glEnd();
+        }
         if (_depthTextureID != 0) {
             glBindTexture(GL_TEXTURE_2D, _depthTextureID);
             glBegin(GL_QUADS);
@@ -157,22 +159,26 @@ const float METERS_PER_MM = 1.0f / 1000.0f;
 
 void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midFaceDepth,
         float aspectRatio, const RotatedRect& faceRect, bool sending, const JointVector& joints) {
-    IplImage colorImage = color;
-    glPixelStorei(GL_UNPACK_ROW_LENGTH, colorImage.widthStep / 3);
-    if (_colorTextureID == 0) {
-        glGenTextures(1, &_colorTextureID);
-        glBindTexture(GL_TEXTURE_2D, _colorTextureID);
-        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, _textureSize.width = colorImage.width, _textureSize.height = colorImage.height,
-            0, format, GL_UNSIGNED_BYTE, colorImage.imageData);
-        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
-        qDebug("Capturing video at %gx%g.\n", _textureSize.width, _textureSize.height);
-
-    } else {
-        glBindTexture(GL_TEXTURE_2D, _colorTextureID);
-        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, _textureSize.width, _textureSize.height, format,
-            GL_UNSIGNED_BYTE, colorImage.imageData);
+    if (!color.empty()) {
+        IplImage colorImage = color;
+        glPixelStorei(GL_UNPACK_ROW_LENGTH, colorImage.widthStep / 3);
+        if (_colorTextureID == 0) {
+            glGenTextures(1, &_colorTextureID);
+            glBindTexture(GL_TEXTURE_2D, _colorTextureID);
+            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, _textureSize.width = colorImage.width, _textureSize.height = colorImage.height,
+                0, format, GL_UNSIGNED_BYTE, colorImage.imageData);
+            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+
+        } else {
+            glBindTexture(GL_TEXTURE_2D, _colorTextureID);
+            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, _textureSize.width, _textureSize.height, format,
+                GL_UNSIGNED_BYTE, colorImage.imageData);
+        }
+    } else if (_colorTextureID != 0) {
+        glDeleteTextures(1, &_colorTextureID);
+        _colorTextureID = 0;
     }
-
+
     if (!depth.empty()) {
         IplImage depthImage = depth;
         glPixelStorei(GL_UNPACK_ROW_LENGTH, depthImage.widthStep);
@@ -189,6 +195,9 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
             glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, _textureSize.width, _textureSize.height, GL_LUMINANCE,
                 GL_UNSIGNED_BYTE, depthImage.imageData);
         }
+    } else if (_depthTextureID != 0) {
+        glDeleteTextures(1, &_depthTextureID);
+        _depthTextureID = 0;
     }
     glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
     glBindTexture(GL_TEXTURE_2D, 0);
@@ -273,8 +282,8 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF
     QTimer::singleShot(qMax((int)remaining / 1000, 0), _grabber, SLOT(grabFrame()));
 }
 
-FrameGrabber::FrameGrabber() : _initialized(false), _videoSendMode(FULL_FRAME_VIDEO), _capture(0), _searchWindow(0, 0, 0, 0),
-    _smoothedMidFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(), _frameCount(0) {
+FrameGrabber::FrameGrabber() : _initialized(false), _videoSendMode(FULL_FRAME_VIDEO), _depthOnly(false), _capture(0),
+    _searchWindow(0, 0, 0, 0), _smoothedMidFaceDepth(UNINITIALIZED_FACE_DEPTH), _colorCodec(), _depthCodec(), _frameCount(0) {
 }
 
 FrameGrabber::~FrameGrabber() {
@@ -374,6 +383,11 @@ void FrameGrabber::cycleVideoSendMode() {
     destroyCodecs();
 }
 
+void FrameGrabber::setDepthOnly(bool depthOnly) {
+    _depthOnly = depthOnly;
+    destroyCodecs();
+}
+
 void FrameGrabber::reset() {
     _searchWindow = cv::Rect(0, 0, 0, 0);
 
@@ -479,7 +493,7 @@ void FrameGrabber::grabFrame() {
         encodedWidth = color.cols;
         encodedHeight = color.rows;
         aspectRatio = FULL_FRAME_ASPECT;
-        colorBitrateMultiplier = 4.0f;
+        colorBitrateMultiplier = depthBitrateMultiplier = 4.0f;
 
     } else {
         // if we don't have a search window (yet), try using the face cascade
@@ -591,108 +605,129 @@ void FrameGrabber::grabFrame() {
         depth.convertTo(_grayDepthFrame, CV_8UC1, 1.0, depthOffset);
     }
 
-    QByteArray payload;
+    // increment the frame count that identifies frames
+    _frameCount++;
+
+    QByteArray payload;
     if (_videoSendMode != NO_VIDEO) {
-        if (_colorCodec.name == 0) {
-            // initialize encoder context(s)
-            vpx_codec_enc_cfg_t codecConfig;
-            vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &codecConfig, 0);
-            codecConfig.rc_target_bitrate = ENCODED_FACE_WIDTH * ENCODED_FACE_HEIGHT * colorBitrateMultiplier *
-                codecConfig.rc_target_bitrate / codecConfig.g_w / codecConfig.g_h;
-            codecConfig.g_w = encodedWidth;
-            codecConfig.g_h = encodedHeight;
-            vpx_codec_enc_init(&_colorCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
-
-            if (!depth.empty()) {
-                codecConfig.rc_target_bitrate *= depthBitrateMultiplier;
-                vpx_codec_enc_init(&_depthCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
-            }
-        }
-
-        Mat transform;
-        if (_videoSendMode == FACE_VIDEO) {
-            // resize/rotate face into encoding rectangle
-            _faceColor.create(encodedHeight, encodedWidth, CV_8UC3);
-            warpAffine(color, _faceColor, faceTransform, _faceColor.size());
-
-        } else {
-            _faceColor = color;
-        }
-
-        // convert from RGB to YV12: see http://www.fourcc.org/yuv.php and
-        // http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
+        // start the payload off with the aspect ratio (zero for full frame)
+        payload.append((const char*)&aspectRatio, sizeof(float));
+
+        // prepare the image in which we'll store the data
         const int ENCODED_BITS_PER_Y = 8;
         const int ENCODED_BITS_PER_VU = 2;
         const int ENCODED_BITS_PER_PIXEL = ENCODED_BITS_PER_Y + 2 * ENCODED_BITS_PER_VU;
         const int BITS_PER_BYTE = 8;
         _encodedFace.resize(encodedWidth * encodedHeight * ENCODED_BITS_PER_PIXEL / BITS_PER_BYTE);
         vpx_image_t vpxImage;
-        vpx_img_wrap(&vpxImage, VPX_IMG_FMT_YV12, encodedWidth, encodedHeight, 1,
-            (unsigned char*)_encodedFace.data());
-        uchar* yline = vpxImage.planes[0];
-        uchar* vline = vpxImage.planes[1];
-        uchar* uline = vpxImage.planes[2];
-        const int Y_RED_WEIGHT = (int)(0.299 * 256);
-        const int Y_GREEN_WEIGHT = (int)(0.587 * 256);
-        const int Y_BLUE_WEIGHT = (int)(0.114 * 256);
-        const int V_RED_WEIGHT = (int)(0.713 * 256);
-        const int U_BLUE_WEIGHT = (int)(0.564 * 256);
-        int redIndex = 0;
-        int greenIndex = 1;
-        int blueIndex = 2;
-        if (format == GL_BGR) {
-            redIndex = 2;
-            blueIndex = 0;
-        }
-        for (int i = 0; i < encodedHeight; i += 2) {
-            uchar* ydest = yline;
-            uchar* vdest = vline;
-            uchar* udest = uline;
-            for (int j = 0; j < encodedWidth; j += 2) {
-                uchar* tl = _faceColor.ptr(i, j);
-                uchar* tr = _faceColor.ptr(i, j + 1);
-                uchar* bl = _faceColor.ptr(i + 1, j);
-                uchar* br = _faceColor.ptr(i + 1, j + 1);
-
-                ydest[0] = (tl[redIndex] * Y_RED_WEIGHT + tl[1] * Y_GREEN_WEIGHT + tl[blueIndex] * Y_BLUE_WEIGHT) >> 8;
-                ydest[1] = (tr[redIndex] * Y_RED_WEIGHT + tr[1] * Y_GREEN_WEIGHT + tr[blueIndex] * Y_BLUE_WEIGHT) >> 8;
-                ydest[vpxImage.stride[0]] = (bl[redIndex] * Y_RED_WEIGHT + bl[greenIndex] *
-                    Y_GREEN_WEIGHT + bl[blueIndex] * Y_BLUE_WEIGHT) >> 8;
-                ydest[vpxImage.stride[0] + 1] = (br[redIndex] * Y_RED_WEIGHT + br[greenIndex] *
-                    Y_GREEN_WEIGHT + br[blueIndex] * Y_BLUE_WEIGHT) >> 8;
-                ydest += 2;
-
-                int totalRed = tl[redIndex] + tr[redIndex] + bl[redIndex] + br[redIndex];
-                int totalGreen = tl[greenIndex] + tr[greenIndex] + bl[greenIndex] + br[greenIndex];
-                int totalBlue = tl[blueIndex] + tr[blueIndex] + bl[blueIndex] + br[blueIndex];
-                int totalY = (totalRed * Y_RED_WEIGHT + totalGreen * Y_GREEN_WEIGHT + totalBlue * Y_BLUE_WEIGHT) >> 8;
-
-                *vdest++ = (((totalRed - totalY) * V_RED_WEIGHT) >> 10) + 128;
-                *udest++ = (((totalBlue - totalY) * U_BLUE_WEIGHT) >> 10) + 128;
+        vpx_img_wrap(&vpxImage, VPX_IMG_FMT_YV12, encodedWidth, encodedHeight, 1, (unsigned char*)_encodedFace.data());
+
+        if (!_depthOnly || depth.empty()) {
+            if (_colorCodec.name == 0) {
+                // initialize encoder context
+                vpx_codec_enc_cfg_t codecConfig;
+                vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &codecConfig, 0);
+                codecConfig.rc_target_bitrate = ENCODED_FACE_WIDTH * ENCODED_FACE_HEIGHT * colorBitrateMultiplier *
+                    codecConfig.rc_target_bitrate / codecConfig.g_w / codecConfig.g_h;
+                codecConfig.g_w = encodedWidth;
+                codecConfig.g_h = encodedHeight;
+                vpx_codec_enc_init(&_colorCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
             }
-            yline += vpxImage.stride[0] * 2;
-            vline += vpxImage.stride[1];
-            uline += vpxImage.stride[2];
-        }
-
-        // encode the frame
-        vpx_codec_encode(&_colorCodec, &vpxImage, ++_frameCount, 1, 0, VPX_DL_REALTIME);
+            if (_videoSendMode == FACE_VIDEO) {
+                // resize/rotate face into encoding rectangle
+                _faceColor.create(encodedHeight, encodedWidth, CV_8UC3);
+                warpAffine(color, _faceColor, faceTransform, _faceColor.size());
 
-        // start the payload off with the aspect ratio (zero for full frame)
-        payload.append((const char*)&aspectRatio, sizeof(float));
-
-        // extract the encoded frame
-        vpx_codec_iter_t iterator = 0;
-        const vpx_codec_cx_pkt_t* packet;
-        while ((packet = vpx_codec_get_cx_data(&_colorCodec, &iterator)) != 0) {
-            if (packet->kind == VPX_CODEC_CX_FRAME_PKT) {
-                // prepend the length, which will indicate whether there's a depth frame too
-                payload.append((const char*)&packet->data.frame.sz, sizeof(packet->data.frame.sz));
-                payload.append((const char*)packet->data.frame.buf, packet->data.frame.sz);
+            } else {
+                _faceColor = color;
             }
-        }
+            // convert from RGB to YV12: see http://www.fourcc.org/yuv.php and
+            // http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#cvtcolor
+            uchar* yline = vpxImage.planes[0];
+            uchar* vline = vpxImage.planes[1];
+            uchar* uline = vpxImage.planes[2];
+            const int Y_RED_WEIGHT = (int)(0.299 * 256);
+            const int Y_GREEN_WEIGHT = (int)(0.587 * 256);
+            const int Y_BLUE_WEIGHT = (int)(0.114 * 256);
+            const int V_RED_WEIGHT = (int)(0.713 * 256);
+            const int U_BLUE_WEIGHT = (int)(0.564 * 256);
+            int redIndex = 0;
+            int greenIndex = 1;
+            int blueIndex = 2;
+            if (format == GL_BGR) {
+                redIndex = 2;
+                blueIndex = 0;
+            }
+            for (int i = 0; i < encodedHeight; i += 2) {
+                uchar* ydest = yline;
+                uchar* vdest = vline;
+                uchar* udest = uline;
+                for (int j = 0; j < encodedWidth; j += 2) {
+                    uchar* tl = _faceColor.ptr(i, j);
+                    uchar* tr = _faceColor.ptr(i, j + 1);
+                    uchar* bl = _faceColor.ptr(i + 1, j);
+                    uchar* br = _faceColor.ptr(i + 1, j + 1);
+
+                    ydest[0] = (tl[redIndex] * Y_RED_WEIGHT + tl[1] * Y_GREEN_WEIGHT + tl[blueIndex] * Y_BLUE_WEIGHT) >> 8;
+                    ydest[1] = (tr[redIndex] * Y_RED_WEIGHT + tr[1] * Y_GREEN_WEIGHT + tr[blueIndex] * Y_BLUE_WEIGHT) >> 8;
+                    ydest[vpxImage.stride[0]] = (bl[redIndex] * Y_RED_WEIGHT + bl[greenIndex] *
+                        Y_GREEN_WEIGHT + bl[blueIndex] * Y_BLUE_WEIGHT) >> 8;
+                    ydest[vpxImage.stride[0] + 1] = (br[redIndex] * Y_RED_WEIGHT + br[greenIndex] *
+                        Y_GREEN_WEIGHT + br[blueIndex] * Y_BLUE_WEIGHT) >> 8;
+                    ydest += 2;
+
+                    int totalRed = tl[redIndex] + tr[redIndex] + bl[redIndex] + br[redIndex];
+                    int totalGreen = tl[greenIndex] + tr[greenIndex] + bl[greenIndex] + br[greenIndex];
+                    int totalBlue = tl[blueIndex] + tr[blueIndex] + bl[blueIndex] + br[blueIndex];
+                    int totalY = (totalRed * Y_RED_WEIGHT + totalGreen * Y_GREEN_WEIGHT + totalBlue * Y_BLUE_WEIGHT) >> 8;
+
+                    *vdest++ = (((totalRed - totalY) * V_RED_WEIGHT) >> 10) + 128;
+                    *udest++ = (((totalBlue - totalY) * U_BLUE_WEIGHT) >> 10) + 128;
+                }
+                yline += vpxImage.stride[0] * 2;
+                vline += vpxImage.stride[1];
+                uline += vpxImage.stride[2];
+            }
+
+            // encode the frame
+            vpx_codec_encode(&_colorCodec, &vpxImage, _frameCount, 1, 0, VPX_DL_REALTIME);
+
+            // extract the encoded frame
+            vpx_codec_iter_t iterator = 0;
+            const vpx_codec_cx_pkt_t* packet;
+            while ((packet = vpx_codec_get_cx_data(&_colorCodec, &iterator)) != 0) {
+                if (packet->kind == VPX_CODEC_CX_FRAME_PKT) {
+                    // prepend the length, which will indicate whether there's a depth frame too
+                    payload.append((const char*)&packet->data.frame.sz, sizeof(packet->data.frame.sz));
+                    payload.append((const char*)packet->data.frame.buf, packet->data.frame.sz);
+                }
+            }
+        } else {
+            // zero length indicates no color info
+            const size_t ZERO_SIZE = 0;
+            payload.append((const char*)&ZERO_SIZE, sizeof(size_t));
+
+            // we can use more bits for depth
+            depthBitrateMultiplier *= 2.0f;
+
+            // don't bother reporting the color
+            color = Mat();
+        }
+        if (!depth.empty()) {
+            if (_depthCodec.name == 0) {
+                // initialize encoder context
+                vpx_codec_enc_cfg_t codecConfig;
+                vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &codecConfig, 0);
+                codecConfig.rc_target_bitrate = ENCODED_FACE_WIDTH * ENCODED_FACE_HEIGHT * depthBitrateMultiplier *
+                    codecConfig.rc_target_bitrate / codecConfig.g_w / codecConfig.g_h;
+                codecConfig.g_w = encodedWidth;
+                codecConfig.g_h = encodedHeight;
+                vpx_codec_enc_init(&_depthCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
+            }
+
             // convert with mask
             uchar* yline = vpxImage.planes[0];
             uchar* vline = vpxImage.planes[1];
diff --git a/interface/src/Webcam.h b/interface/src/Webcam.h
index aee87e1bc7..16b9339eb0 100644
--- a/interface/src/Webcam.h
+++ b/interface/src/Webcam.h
@@ -112,6 +112,7 @@ public:
 public slots:
     
     void cycleVideoSendMode();
+    void setDepthOnly(bool depthOnly);
     void reset();
     void shutdown();
     void grabFrame();
@@ -126,6 +127,7 @@ private:
     
     bool _initialized;
     VideoSendMode _videoSendMode;
+    bool _depthOnly;
     CvCapture* _capture;
     cv::CascadeClassifier _faceCascade;
     cv::Mat _hsvFrame;
diff --git a/interface/src/avatar/Face.cpp b/interface/src/avatar/Face.cpp
index 9573cd4e4b..22ce3c0727 100644
--- a/interface/src/avatar/Face.cpp
+++ b/interface/src/avatar/Face.cpp
@@ -112,18 +112,20 @@ int Face::processVideoMessage(unsigned char* packetData, size_t dataBytes) {
         return dataBytes;
     }
 
-    // the switch from full frame to not (or vice versa) requires us to reinit the codecs
+    // the switch between full frame or depth only modes requires us to reinit the codecs
     float aspectRatio = *(const float*)_arrivingFrame.constData();
+    size_t colorSize = *(const size_t*)(_arrivingFrame.constData() + sizeof(float));
     bool fullFrame = (aspectRatio == FULL_FRAME_ASPECT);
-    if (fullFrame != _lastFullFrame) {
+    bool depthOnly = (colorSize == 0);
+    if (fullFrame != _lastFullFrame || depthOnly != _lastDepthOnly) {
         destroyCodecs();
         _lastFullFrame = fullFrame;
+        _lastDepthOnly = depthOnly;
     }
 
     // read the color data, if non-empty
     Mat color;
     const uint8_t* colorData = (const uint8_t*)(_arrivingFrame.constData() + sizeof(float) + sizeof(size_t));
-    size_t colorSize = *(const size_t*)(_arrivingFrame.constData() + sizeof(float));
     if (colorSize > 0) {
         if (_colorCodec.name == 0) {
             // initialize decoder context
@@ -331,7 +333,7 @@ bool Face::render(float alpha) {
     
     ProgramObject* program = _videoProgram;
     Locations* locations = &_videoProgramLocations;
-    if (false && _colorTextureID != 0) {
+    if (_colorTextureID != 0) {
         glBindTexture(GL_TEXTURE_2D, _colorTextureID);
     
     } else {
@@ -401,13 +403,14 @@ void Face::cycleRenderMode() {
 }
 
 void Face::setFrame(const cv::Mat& color, const cv::Mat& depth, float aspectRatio) {
-    Size2f textureSize;
+    Size2f textureSize = _textureSize;
     if (!color.empty()) {
-        if (_colorTextureID == 0) {
+        bool generate = (_colorTextureID == 0);
+        if (generate) {
             glGenTextures(1, &_colorTextureID);
         }
         glBindTexture(GL_TEXTURE_2D, _colorTextureID);
-        if (_textureSize.width != color.cols || _textureSize.height != color.rows) {
+        if (_textureSize.width != color.cols || _textureSize.height != color.rows || generate) {
             glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, color.cols, color.rows, 0, GL_RGB, GL_UNSIGNED_BYTE, color.ptr());
             glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
             textureSize = color.size();
@@ -422,11 +425,12 @@ void Face::setFrame(const cv::Mat& color, const cv::Mat& depth, float aspectRati
     }
     
     if (!depth.empty()) {
-        if (_depthTextureID == 0) {
+        bool generate = (_depthTextureID == 0);
+        if (generate) {
             glGenTextures(1, &_depthTextureID);
         }
         glBindTexture(GL_TEXTURE_2D, _depthTextureID);
-        if (_textureSize.width != depth.cols || _textureSize.height != depth.rows) {
+        if (_textureSize.width != depth.cols || _textureSize.height != depth.rows || generate) {
             glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, depth.cols, depth.rows, 0,
                 GL_LUMINANCE, GL_UNSIGNED_BYTE, depth.ptr());
             glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
diff --git a/interface/src/avatar/Face.h b/interface/src/avatar/Face.h
index 5e82148f8d..f3e681ff72 100644
--- a/interface/src/avatar/Face.h
+++ b/interface/src/avatar/Face.h
@@ -65,6 +65,7 @@ private:
     vpx_codec_ctx_t _colorCodec;
     vpx_codec_ctx_t _depthCodec;
     bool _lastFullFrame;
+    bool _lastDepthOnly;
     
     QByteArray _arrivingFrame;
     int _frameCount;