more LIBVPX workarounds
parent 62cf9d5b8c
commit c90c1e4063
2 changed files with 32 additions and 14 deletions
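
The changes below make the libvpx dependency optional: every use of the VP8 codec contexts (the _colorCodec/_depthCodec members, vpx_codec_dec_init, vpx_codec_encode, vpx_codec_destroy) is wrapped in #ifdef HAVE_LIBVPX, and the decode/render paths fall back to doing nothing when the library is not built in. As a rough illustration of that guard pattern, here is a minimal sketch assuming libvpx's standard VP8 decoder API; DecoderHolder and decode() are illustrative stand-ins, not code from this repository:

#include <cstddef>
#ifdef HAVE_LIBVPX
#include <vpx/vpx_decoder.h>
#include <vpx/vp8dx.h>
#endif

class DecoderHolder {
public:
    DecoderHolder()
#ifdef HAVE_LIBVPX
        : _codec()   // value-initialize so _codec.name == 0 until the first frame arrives
#endif
    {
    }

    ~DecoderHolder() {
#ifdef HAVE_LIBVPX
        if (_codec.name != 0) {
            vpx_codec_destroy(&_codec);
        }
#endif
    }

    bool decode(const unsigned char* data, size_t size) {
#ifndef HAVE_LIBVPX
        return false;   // built without libvpx: ignore incoming video data
#else
        if (_codec.name == 0) {
            // lazy decoder init, mirroring processVideoMessage() in the diff
            vpx_codec_dec_init(&_codec, vpx_codec_vp8_dx(), 0, 0);
        }
        return vpx_codec_decode(&_codec, data, (unsigned int)size, 0, 0) == VPX_CODEC_OK;
#endif
    }

private:
#ifdef HAVE_LIBVPX
    vpx_codec_ctx_t _codec;
#endif
};
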
@@ -32,12 +32,17 @@ GLuint VideoFace::_vboID;
GLuint VideoFace::_iboID;

VideoFace::VideoFace(Head* owningHead) : _owningHead(owningHead), _renderMode(MESH),
    _colorTextureID(0), _depthTextureID(0), _colorCodec(), _depthCodec(), _frameCount(0) {
    _colorTextureID(0), _depthTextureID(0),
#ifdef HAVE_LIBVPX
    _colorCodec(), _depthCodec(),
#endif
    _frameCount(0) {
    // we may have been created in the network thread, but we live in the main thread
    moveToThread(Application::getInstance()->thread());
}

VideoFace::~VideoFace() {
#ifdef HAVE_LIBVPX
    if (_colorCodec.name != 0) {
        vpx_codec_destroy(&_colorCodec);

@@ -55,6 +60,7 @@ VideoFace::~VideoFace() {
            glDeleteTextures(1, &_depthTextureID);
        }
    }
#endif
}

void VideoFace::setFrameFromWebcam() {

@@ -76,7 +82,8 @@ void VideoFace::clearFrame() {
}

int VideoFace::processVideoMessage(unsigned char* packetData, size_t dataBytes) {
    unsigned char* packetPosition = packetData;
#ifdef HAVE_LIBVPX
    unsigned char* packetPosition = packetData;

    int frameCount = *(uint32_t*)packetPosition;
    packetPosition += sizeof(uint32_t);

@@ -199,7 +206,8 @@ int VideoFace::processVideoMessage(unsigned char* packetData, size_t dataBytes)
    Mat depth;
    const uint8_t* depthData = colorData + colorSize;
    int depthSize = _arrivingFrame.size() - ((const char*)depthData - _arrivingFrame.constData());
    if (depthSize > 0) {

    if (depthSize > 0) {
        if (_depthCodec.name == 0) {
            // initialize decoder context
            vpx_codec_dec_init(&_depthCodec, vpx_codec_vp8_dx(), 0, 0);

@@ -241,11 +249,15 @@ int VideoFace::processVideoMessage(unsigned char* packetData, size_t dataBytes)
    }
    QMetaObject::invokeMethod(this, "setFrame", Q_ARG(cv::Mat, color),
        Q_ARG(cv::Mat, depth), Q_ARG(float, aspectRatio));
#endif

    return dataBytes;
}

bool VideoFace::render(float alpha) {
#ifndef HAVE_LIBVPX
    return false;
#else
    if (!isActive()) {
        return false;
    }

@@ -404,6 +416,7 @@ bool VideoFace::render(float alpha) {
    glPopMatrix();

    return true;
#endif
}

void VideoFace::cycleRenderMode() {

@@ -460,6 +473,7 @@ void VideoFace::setFrame(const cv::Mat& color, const cv::Mat& depth, float aspec
}

void VideoFace::destroyCodecs() {
#ifdef HAVE_LIBVPX
    if (_colorCodec.name != 0) {
        vpx_codec_destroy(&_colorCodec);
        _colorCodec.name = 0;

@@ -468,6 +482,7 @@ void VideoFace::destroyCodecs() {
        vpx_codec_destroy(&_depthCodec);
        _depthCodec.name = 0;
    }
#endif
}

void VideoFace::loadProgram(ProgramObject& program, const QString& suffix, const char* secondTextureUniform, Locations& locations) {

@@ -43,7 +43,8 @@ Webcam::Webcam() : _enabled(false), _active(false), _colorTextureID(0), _depthTe
}

void Webcam::setEnabled(bool enabled) {
    if (_enabled == enabled) {
#ifdef HAVE_LIBVPX
    if (_enabled == enabled) {
        return;
    }
    if ((_enabled = enabled)) {

@@ -59,11 +60,13 @@ void Webcam::setEnabled(bool enabled) {
        QMetaObject::invokeMethod(_grabber, "shutdown");
        _active = false;
    }
#endif
}

const float UNINITIALIZED_FACE_DEPTH = 0.0f;

void Webcam::reset() {
#ifdef HAVE_LIBVPX
    _initialFaceRect = RotatedRect();
    _initialFaceDepth = UNINITIALIZED_FACE_DEPTH;
    _initialLEDPosition = glm::vec3();

@@ -72,10 +75,12 @@ void Webcam::reset() {
        // send a message to the grabber
        QMetaObject::invokeMethod(_grabber, "reset");
    }
#endif
}

void Webcam::renderPreview(int screenWidth, int screenHeight) {
    if (_enabled) {
#ifdef HAVE_LIBVPX
    if (_enabled) {
        glEnable(GL_TEXTURE_2D);
        glColor3f(1.0f, 1.0f, 1.0f);

@@ -157,14 +162,17 @@ void Webcam::renderPreview(int screenWidth, int screenHeight) {
        sprintf(fps, "FPS: %d", (int)(roundf(_frameCount * 1000000.0f / (usecTimestampNow() - _startTimestamp))));
        drawtext(left, top + PREVIEW_HEIGHT + 20, 0.10, 0, 1, 0, fps);
    }
#endif
}

Webcam::~Webcam() {
#ifdef HAVE_LIBVPX
    // stop the grabber thread
    _grabberThread.quit();
    _grabberThread.wait();

    delete _grabber;
#endif
}

static glm::vec3 createVec3(const Point2f& pt) {

@@ -241,6 +249,7 @@ const float METERS_PER_MM = 1.0f / 1000.0f;

void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midFaceDepth, float aspectRatio,
        const RotatedRect& faceRect, bool sending, const JointVector& joints, const KeyPointVector& keyPoints) {
#ifdef HAVE_LIBVPX
    if (!_enabled) {
        return; // was queued before we shut down; ignore
    }

@@ -391,6 +400,7 @@ void Webcam::setFrame(const Mat& color, int format, const Mat& depth, float midF

    // let the grabber know we're ready for the next frame
    QTimer::singleShot(qMax((int)remaining / 1000, 0), _grabber, SLOT(grabFrame()));
#endif
}

static SimpleBlobDetector::Params createBlobDetectorParams() {

@@ -551,6 +561,7 @@ static Point clip(const Point& point, const Rect& bounds) {
}

void FrameGrabber::grabFrame() {
#ifdef HAVE_LIBVPX
    if (!(_initialized || init())) {
        return;
    }

@@ -772,9 +783,7 @@ void FrameGrabber::grabFrame() {
    _encodedFace.resize(encodedWidth * encodedHeight * ENCODED_BITS_PER_PIXEL / BITS_PER_BYTE);
    vpx_image_t vpxImage;
    vpx_img_wrap(&vpxImage, VPX_IMG_FMT_YV12, encodedWidth, encodedHeight, 1, (unsigned char*)_encodedFace.data());

    if (!_depthOnly || depth.empty()) {
#ifdef HAVE_LIBVPX
        if (_colorCodec.name == 0) {
            // initialize encoder context
            vpx_codec_enc_cfg_t codecConfig;

@@ -785,7 +794,6 @@ void FrameGrabber::grabFrame() {
            codecConfig.g_h = encodedHeight;
            vpx_codec_enc_init(&_colorCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
        }
#endif
        if (_videoSendMode == FACE_VIDEO) {
            // resize/rotate face into encoding rectangle
            _faceColor.create(encodedHeight, encodedWidth, CV_8UC3);

@@ -844,7 +852,6 @@ void FrameGrabber::grabFrame() {
        }

        // encode the frame
#ifdef HAVE_LIBVPX
        vpx_codec_encode(&_colorCodec, &vpxImage, _frameCount, 1, 0, VPX_DL_REALTIME);
        // extract the encoded frame
        vpx_codec_iter_t iterator = 0;

@@ -856,7 +863,6 @@ void FrameGrabber::grabFrame() {
                payload.append((const char*)packet->data.frame.buf, packet->data.frame.sz);
            }
        }
#endif
    } else {
        // zero length indicates no color info
        const size_t ZERO_SIZE = 0;

@@ -870,7 +876,6 @@ void FrameGrabber::grabFrame() {
    }

    if (!depth.empty()) {
#ifdef HAVE_LIBVPX
        if (_depthCodec.name == 0) {
            // initialize encoder context
            vpx_codec_enc_cfg_t codecConfig;

@@ -881,7 +886,6 @@ void FrameGrabber::grabFrame() {
            codecConfig.g_h = encodedHeight;
            vpx_codec_enc_init(&_depthCodec, vpx_codec_vp8_cx(), &codecConfig, 0);
        }
#endif

        // convert with mask
        uchar* yline = vpxImage.planes[0];

@@ -918,7 +922,6 @@ void FrameGrabber::grabFrame() {
            uline += vpxImage.stride[2];
        }

#ifdef HAVE_LIBVPX
        // encode the frame
        vpx_codec_encode(&_depthCodec, &vpxImage, _frameCount, 1, 0, VPX_DL_REALTIME);

@@ -930,7 +933,6 @@ void FrameGrabber::grabFrame() {
                payload.append((const char*)packet->data.frame.buf, packet->data.frame.sz);
            }
        }
#endif
        }
    }

@@ -941,6 +943,7 @@ void FrameGrabber::grabFrame() {
        Q_ARG(cv::Mat, color), Q_ARG(int, format), Q_ARG(cv::Mat, _grayDepthFrame), Q_ARG(float, _smoothedMidFaceDepth),
        Q_ARG(float, aspectRatio), Q_ARG(cv::RotatedRect, _smoothedFaceRect), Q_ARG(bool, !payload.isEmpty()),
        Q_ARG(JointVector, joints), Q_ARG(KeyPointVector, keyPoints));
#endif
}

bool FrameGrabber::init() {
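
For reference, the encoder side that the second file guards (FrameGrabber::grabFrame) follows the usual libvpx flow: lazily initialize the VP8 encoder, wrap the YV12 buffer, encode, then walk the compressed packets. A hedged sketch of that flow under the same HAVE_LIBVPX guard; the encodeFrame helper and its buffer handling are illustrative, and only the vpx_* calls are the standard libvpx API:

#include <cstdint>
#include <vector>
#ifdef HAVE_LIBVPX
#include <vpx/vpx_encoder.h>
#include <vpx/vp8cx.h>

std::vector<uint8_t> encodeFrame(vpx_codec_ctx_t& codec, std::vector<uint8_t>& yv12,
        unsigned int width, unsigned int height, vpx_codec_pts_t frameCount) {
    if (codec.name == 0) {
        // initialize the encoder context on first use, as the diff does
        vpx_codec_enc_cfg_t config;
        vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &config, 0);
        config.g_w = width;
        config.g_h = height;
        vpx_codec_enc_init(&codec, vpx_codec_vp8_cx(), &config, 0);
    }

    // wrap the planar YV12 buffer (12 bits per pixel, i.e. width * height * 3 / 2 bytes) without copying
    vpx_image_t image;
    vpx_img_wrap(&image, VPX_IMG_FMT_YV12, width, height, 1, yv12.data());

    // encode one frame and collect the compressed packet data
    vpx_codec_encode(&codec, &image, frameCount, 1, 0, VPX_DL_REALTIME);
    std::vector<uint8_t> payload;
    vpx_codec_iter_t iterator = 0;
    const vpx_codec_cx_pkt_t* packet;
    while ((packet = vpx_codec_get_cx_data(&codec, &iterator)) != 0) {
        if (packet->kind == VPX_CODEC_CX_FRAME_PKT) {
            const uint8_t* buf = static_cast<const uint8_t*>(packet->data.frame.buf);
            payload.insert(payload.end(), buf, buf + packet->data.frame.sz);
        }
    }
    return payload;
}
#endif // HAVE_LIBVPX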