Merge pull request #3449 from birarda/clement-recording-changes

Rec file format tweaks (on behalf of @Atlante45)
This commit is contained in:
Stephen Birarda 2014-09-18 10:24:48 -07:00
commit 9b16f8ea6a
10 changed files with 1327 additions and 719 deletions

View file

@ -210,14 +210,14 @@ function mousePressEvent(event) {
}
} else if (saveIcon === toolBar.clicked(clickedOverlay)) {
if (!MyAvatar.isRecording() && !MyAvatar.isPlaying() && MyAvatar.playerLength() != 0) {
recordingFile = Window.save("Save recording to file", ".", "*.rec");
recordingFile = Window.save("Save recording to file", ".", "Recordings (*.hfr)");
if (!(recordingFile === "null" || recordingFile === null || recordingFile === "")) {
MyAvatar.saveRecording(recordingFile);
}
}
} else if (loadIcon === toolBar.clicked(clickedOverlay)) {
if (!MyAvatar.isRecording() && !MyAvatar.isPlaying()) {
recordingFile = Window.browse("Load recorcding from file", ".", "*.rec");
recordingFile = Window.browse("Load recorcding from file", ".", "Recordings (*.hfr *.rec *.HFR *.REC)");
if (!(recordingFile === "null" || recordingFile === null || recordingFile === "")) {
MyAvatar.loadRecording(recordingFile);
}

View file

@ -49,10 +49,11 @@ typedef unsigned long long quint64;
#include <Node.h>
#include "HandData.h"
#include "HeadData.h"
#include "Player.h"
#include "Recorder.h"
#include "Referential.h"
#include "HeadData.h"
#include "HandData.h"
// avatar motion behaviors
const quint32 AVATAR_MOTION_KEYBOARD_MOTOR_ENABLED = 1U << 0;

View file

@ -0,0 +1,236 @@
//
// Player.cpp
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <GLMHelpers.h>
#include <NodeList.h>
#include <StreamUtils.h>
#include "AvatarData.h"
#include "Player.h"
// Constructs a Player driven by the given avatar. All "apply the
// recording's context" options default to true; the invalid timer
// marks the not-playing state (see isPlaying()).
Player::Player(AvatarData* avatar) :
_recording(new Recording()),
_avatar(avatar),
_audioThread(NULL),
_playFromCurrentPosition(true),
_loop(false),
_useAttachments(true),
_useDisplayName(true),
_useHeadURL(true),
_useSkeletonURL(true)
{
// An invalid timer means "not playing"; startPlaying() starts it.
_timer.invalidate();
_options.setLoop(false);
_options.setVolume(1.0f);
}
// Playback is in progress exactly while the elapsed timer is valid.
bool Player::isPlaying() const {
return _timer.isValid();
}
// Milliseconds since playback started, or 0 when not playing.
qint64 Player::elapsed() const {
    return isPlaying() ? _timer.elapsed() : 0;
}
// Begins playback: snapshots the avatar's live state into
// _currentContext (so stopPlaying() can restore it and play() can
// root frames at it), applies the recording's context to the avatar
// subject to the _use* flags, then starts the audio thread and the
// frame clock. No-op when the recording is missing or empty.
void Player::startPlaying() {
if (_recording && _recording->getFrameNumber() > 0) {
// Capture the avatar's current state for later restoration.
_currentContext.globalTimestamp = usecTimestampNow();
_currentContext.domain = NodeList::getInstance()->getDomainHandler().getHostname();
_currentContext.position = _avatar->getPosition();
_currentContext.orientation = _avatar->getOrientation();
_currentContext.scale = _avatar->getTargetScale();
_currentContext.headModel = _avatar->getFaceModelURL().toString();
_currentContext.skeletonModel = _avatar->getSkeletonModelURL().toString();
_currentContext.displayName = _avatar->getDisplayName();
_currentContext.attachments = _avatar->getAttachmentData();
// Cached inverse used to transform world-space data into context space.
_currentContext.orientationInv = glm::inverse(_currentContext.orientation);
// Swap in the recording's own appearance where requested.
RecordingContext& context = _recording->getContext();
if (_useAttachments) {
_avatar->setAttachmentData(context.attachments);
}
if (_useDisplayName) {
_avatar->setDisplayName(context.displayName);
}
if (_useHeadURL) {
_avatar->setFaceModelURL(context.headModel);
}
if (_useSkeletonURL) {
_avatar->setSkeletonModelURL(context.skeletonModel);
}
// Flip to true locally when debugging context capture.
bool wantDebug = false;
if (wantDebug) {
qDebug() << "Player::startPlaying(): Recording Context";
qDebug() << "Domain:" << _currentContext.domain;
qDebug() << "Position:" << _currentContext.position;
qDebug() << "Orientation:" << _currentContext.orientation;
qDebug() << "Scale:" << _currentContext.scale;
qDebug() << "Head URL:" << _currentContext.headModel;
qDebug() << "Skeleton URL:" << _currentContext.skeletonModel;
qDebug() << "Display Name:" << _currentContext.displayName;
qDebug() << "Num Attachments:" << _currentContext.attachments.size();
for (int i = 0; i < _currentContext.attachments.size(); ++i) {
qDebug() << "Model URL:" << _currentContext.attachments[i].modelURL;
qDebug() << "Joint Name:" << _currentContext.attachments[i].jointName;
qDebug() << "Translation:" << _currentContext.attachments[i].translation;
qDebug() << "Rotation:" << _currentContext.attachments[i].rotation;
qDebug() << "Scale:" << _currentContext.attachments[i].scale;
}
}
// Fake faceshift connection
// (lets the recorded blendshapes drive the face during playback)
_avatar->setForceFaceshiftConnected(true);
qDebug() << "Recorder::startPlaying()";
_currentFrame = 0;
setupAudioThread();
_timer.start();
}
}
// Stops playback, tears down the audio thread, and restores the avatar
// state (attachments/name/models) that was captured in startPlaying(),
// again subject to the _use* flags. Safe to call when not playing.
void Player::stopPlaying() {
if (!isPlaying()) {
return;
}
_timer.invalidate();
cleanupAudioThread();
_avatar->clearJointsData();
// Turn off fake faceshift connection
_avatar->setForceFaceshiftConnected(false);
if (_useAttachments) {
_avatar->setAttachmentData(_currentContext.attachments);
}
if (_useDisplayName) {
_avatar->setDisplayName(_currentContext.displayName);
}
if (_useHeadURL) {
_avatar->setFaceModelURL(_currentContext.headModel);
}
if (_useSkeletonURL) {
_avatar->setSkeletonModelURL(_currentContext.skeletonModel);
}
qDebug() << "Recorder::stopPlaying()";
}
// Creates the audio playback thread and starts injecting the
// recording's audio from the avatar's current transform.
void Player::setupAudioThread() {
_audioThread = new QThread();
_options.setPosition(_avatar->getPosition());
_options.setOrientation(_avatar->getOrientation());
// deleteLater as the shared-pointer deleter: the injector lives on
// _audioThread, so it must not be destroyed synchronously from here.
_injector.reset(new AudioInjector(_recording->getAudio(), _options), &QObject::deleteLater);
_injector->moveToThread(_audioThread);
_audioThread->start();
// Queued invocation so injectAudio() runs on the audio thread.
QMetaObject::invokeMethod(_injector.data(), "injectAudio", Qt::QueuedConnection);
}
// Stops audio injection and tears the audio thread down asynchronously:
// injector finished -> injector deleteLater -> destroyed -> thread
// quits -> thread finished -> thread deleteLater.
void Player::cleanupAudioThread() {
_injector->stop();
QObject::connect(_injector.data(), &AudioInjector::finished,
_injector.data(), &AudioInjector::deleteLater);
QObject::connect(_injector.data(), &AudioInjector::destroyed,
_audioThread, &QThread::quit);
QObject::connect(_audioThread, &QThread::finished,
_audioThread, &QThread::deleteLater);
// Drop our references; actual destruction happens via the chain above.
_injector.clear();
_audioThread = NULL;
}
// Restarts playback from frame 0, recreating the audio pipeline.
void Player::loopRecording() {
cleanupAudioThread();
setupAudioThread();
_currentFrame = 0;
_timer.restart();
}
// Loads a recording from the given file (or URL), replacing whatever
// recording the player currently holds.
void Player::loadFromFile(QString file) {
    // Reuse the current Recording object when one exists; otherwise
    // allocate a fresh one to read into.
    if (!_recording) {
        _recording = RecordingPointer(new Recording());
    } else {
        _recording->clear();
    }
    readRecordingFromFile(_recording, file);
}
// Replaces the player's recording with one already in memory.
void Player::loadRecording(RecordingPointer recording) {
_recording = recording;
}
// Advances playback one tick: selects the frame matching the elapsed
// time and applies it to the avatar. Intended to be called repeatedly
// while playing; loops or stops when the recording's end is reached.
void Player::play() {
computeCurrentFrame();
if (_currentFrame < 0 || (_currentFrame >= _recording->getFrameNumber() - 1)) {
if (_loop) {
loopRecording();
} else {
stopPlaying();
}
return;
}
// Frames are stored relative to a reference context: the avatar's
// position at startPlaying() time, or the recording's own context.
const RecordingContext* context = &_recording->getContext();
if (_playFromCurrentPosition) {
context = &_currentContext;
}
const RecordingFrame& currentFrame = _recording->getFrame(_currentFrame);
_avatar->setPosition(context->position + context->orientation * currentFrame.getTranslation());
_avatar->setOrientation(context->orientation * currentFrame.getRotation());
_avatar->setTargetScale(context->scale * currentFrame.getScale());
_avatar->setJointRotations(currentFrame.getJointRotations());
HeadData* head = const_cast<HeadData*>(_avatar->getHeadData());
if (head) {
head->setBlendshapeCoefficients(currentFrame.getBlendshapeCoefficients());
head->setLeanSideways(currentFrame.getLeanSideways());
head->setLeanForward(currentFrame.getLeanForward());
// Head rotation is stored as a quaternion; the head API wants
// pitch/yaw/roll in degrees.
glm::vec3 eulers = glm::degrees(safeEulerAngles(currentFrame.getHeadRotation()));
head->setFinalPitch(eulers.x);
head->setFinalYaw(eulers.y);
head->setFinalRoll(eulers.z);
head->setLookAtPosition(currentFrame.getLookAtPosition());
} else {
qDebug() << "WARNING: Player couldn't find head data.";
}
// Keep the injected audio source attached to the moving avatar.
_options.setPosition(_avatar->getPosition());
_options.setOrientation(_avatar->getOrientation());
_injector->setOptions(_options);
}
// When true, playback is rooted at the avatar's position at
// startPlaying() time instead of the recording's original location.
void Player::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
_playFromCurrentPosition = playFromCurrentLocation;
}
// Updates _currentFrame to match the elapsed playback time.
// Returns false (and sets _currentFrame to -1) when not playing.
bool Player::computeCurrentFrame() {
if (!isPlaying()) {
_currentFrame = -1;
return false;
}
if (_currentFrame < 0) {
_currentFrame = 0;
}
// Walk forward until the frame timestamp catches up with the clock;
// stops one frame short of the end so play() can detect completion.
while (_currentFrame < _recording->getFrameNumber() - 1 &&
_recording->getFrameTimestamp(_currentFrame) < _timer.elapsed()) {
++_currentFrame;
}
return true;
}

View file

@ -0,0 +1,78 @@
//
// Player.h
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_Player_h
#define hifi_Player_h
#include <AudioInjector.h>
#include <QElapsedTimer>
#include "Recording.h"
class AvatarData;
class Player;
typedef QSharedPointer<Player> PlayerPointer;
typedef QWeakPointer<Player> WeakPlayerPointer;
/// Plays back a recording
class Player {
public:
Player(AvatarData* avatar);
// True while a playback timer is running.
bool isPlaying() const;
// Milliseconds since playback started; 0 when not playing.
qint64 elapsed() const;
RecordingPointer getRecording() const { return _recording; }
// NOTE(review): Player does not derive from QObject here, so the
// "slots" label below is declarative only -- confirm whether these
// are exposed to scripting through a QObject wrapper elsewhere.
public slots:
void startPlaying();
void stopPlaying();
void loadFromFile(QString file);
void loadRecording(RecordingPointer recording);
void play();
// Playback options, consulted by startPlaying()/stopPlaying()/play().
void setPlayFromCurrentLocation(bool playFromCurrentPosition);
void setLoop(bool loop) { _loop = loop; }
// NOTE(review): the "useAttachements" misspelling is part of the
// public interface; renaming it would break callers.
void useAttachements(bool useAttachments) { _useAttachments = useAttachments; }
void useDisplayName(bool useDisplayName) { _useDisplayName = useDisplayName; }
void useHeadModel(bool useHeadURL) { _useHeadURL = useHeadURL; }
void useSkeletonModel(bool useSkeletonURL) { _useSkeletonURL = useSkeletonURL; }
private:
void setupAudioThread();
void cleanupAudioThread();
void loopRecording();
bool computeCurrentFrame();
QElapsedTimer _timer;
RecordingPointer _recording;
// Index of the frame currently being played (-1 when stopped).
int _currentFrame;
QSharedPointer<AudioInjector> _injector;
AudioInjectorOptions _options;
AvatarData* _avatar;
QThread* _audioThread;
// Avatar state captured at startPlaying() time, restored on stop.
RecordingContext _currentContext;
bool _playFromCurrentPosition;
bool _loop;
bool _useAttachments;
bool _useDisplayName;
bool _useHeadURL;
bool _useSkeletonURL;
};
#endif // hifi_Player_h

View file

@ -9,94 +9,14 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <GLMHelpers.h>
#include <NetworkAccessManager.h>
#include <QEventLoop>
#include <QFile>
#include <QMetaObject>
#include <QObject>
#include <GLMHelpers.h>
#include <NodeList.h>
#include <StreamUtils.h>
#include "AvatarData.h"
#include "Recorder.h"
// Trivial field setters for RecordingFrame; the matching getters live
// in the header. Kept out-of-line to match the original layout.
void RecordingFrame::setBlendshapeCoefficients(QVector<float> blendshapeCoefficients) {
_blendshapeCoefficients = blendshapeCoefficients;
}
void RecordingFrame::setJointRotations(QVector<glm::quat> jointRotations) {
_jointRotations = jointRotations;
}
void RecordingFrame::setTranslation(glm::vec3 translation) {
_translation = translation;
}
void RecordingFrame::setRotation(glm::quat rotation) {
_rotation = rotation;
}
void RecordingFrame::setScale(float scale) {
_scale = scale;
}
void RecordingFrame::setHeadRotation(glm::quat headRotation) {
_headRotation = headRotation;
}
void RecordingFrame::setLeanSideways(float leanSideways) {
_leanSideways = leanSideways;
}
void RecordingFrame::setLeanForward(float leanForward) {
_leanForward = leanForward;
}
// A Recording owns its audio buffer; _audio stays NULL until the
// first audio packet arrives (see addAudioPacket()).
Recording::Recording() : _audio(NULL) {
}
Recording::~Recording() {
delete _audio;
}
// Total length of the recording in milliseconds: the timestamp of the
// last frame, or 0 for an empty recording.
int Recording::getLength() const {
    return _timestamps.isEmpty() ? 0 : _timestamps.last();
}
// Returns the timestamp (ms) of frame i.
// Indices past the end clamp to the recording's total length, as
// before. BUGFIX: a negative index previously read out of bounds
// (_timestamps[i] with i < 0); it now returns 0, the start of the
// recording.
qint32 Recording::getFrameTimestamp(int i) const {
    if (i < 0) {
        return 0;
    }
    if (i >= _timestamps.size()) {
        return getLength();
    }
    return _timestamps[i];
}
// Returns frame i; the index must be valid (checked only by the debug
// assert -- out-of-range access in release builds is undefined).
const RecordingFrame& Recording::getFrame(int i) const {
assert(i < _timestamps.size());
return _frames[i];
}
// Appends a frame together with its timestamp (ms since recording
// start). _timestamps and _frames are kept in lockstep.
void Recording::addFrame(int timestamp, RecordingFrame &frame) {
_timestamps << timestamp;
_frames << frame;
}
// Appends an audio packet to the recording's audio track, creating the
// Sound object on first use.
// BUGFIX: the first packet used to be both passed to the Sound
// constructor AND appended again afterwards, duplicating the initial
// audio data. The first packet now only seeds the Sound object.
// (Assumes Sound's constructor retains byteArray -- its signature and
// the original call pattern indicate it does; confirm in Sound.h.)
void Recording::addAudioPacket(QByteArray byteArray) {
    if (!_audio) {
        _audio = new Sound(byteArray);
        return;
    }
    _audio->append(byteArray);
}
// Drops all frames, timestamps, and audio, returning the recording to
// its freshly-constructed state.
void Recording::clear() {
_timestamps.clear();
_frames.clear();
delete _audio;
_audio = NULL;
}
Recorder::Recorder(AvatarData* avatar) :
_recording(new Recording()),
_avatar(avatar)
@ -119,24 +39,44 @@ qint64 Recorder::elapsed() const {
// Starts a new recording: clears any previous data, snapshots the
// avatar's context (position, models, attachments, ...), stores an
// initial frame at timestamp 0, and records the first live frame.
void Recorder::startRecording() {
qDebug() << "Recorder::startRecording()";
_recording->clear();
// Capture the context all subsequent frames will be relative to.
RecordingContext& context = _recording->getContext();
context.globalTimestamp = usecTimestampNow();
context.domain = NodeList::getInstance()->getDomainHandler().getHostname();
context.position = _avatar->getPosition();
context.orientation = _avatar->getOrientation();
context.scale = _avatar->getTargetScale();
context.headModel = _avatar->getFaceModelURL().toString();
context.skeletonModel = _avatar->getSkeletonModelURL().toString();
context.displayName = _avatar->getDisplayName();
context.attachments = _avatar->getAttachmentData();
// Cached inverse used by record() to express world-space data in
// context space.
context.orientationInv = glm::inverse(context.orientation);
// Flip to true locally when debugging context capture.
bool wantDebug = false;
if (wantDebug) {
qDebug() << "Recorder::startRecording(): Recording Context";
qDebug() << "Global timestamp:" << context.globalTimestamp;
qDebug() << "Domain:" << context.domain;
qDebug() << "Position:" << context.position;
qDebug() << "Orientation:" << context.orientation;
qDebug() << "Scale:" << context.scale;
qDebug() << "Head URL:" << context.headModel;
qDebug() << "Skeleton URL:" << context.skeletonModel;
qDebug() << "Display Name:" << context.displayName;
qDebug() << "Num Attachments:" << context.attachments.size();
for (int i = 0; i < context.attachments.size(); ++i) {
qDebug() << "Model URL:" << context.attachments[i].modelURL;
qDebug() << "Joint Name:" << context.attachments[i].jointName;
qDebug() << "Translation:" << context.attachments[i].translation;
qDebug() << "Rotation:" << context.attachments[i].rotation;
qDebug() << "Scale:" << context.attachments[i].scale;
}
}
_timer.start();
// Frame 0 stores the avatar's absolute starting state.
RecordingFrame frame;
frame.setBlendshapeCoefficients(_avatar->getHeadData()->getBlendshapeCoefficients());
frame.setJointRotations(_avatar->getJointRotations());
frame.setTranslation(_avatar->getPosition());
frame.setRotation(_avatar->getOrientation());
frame.setScale(_avatar->getTargetScale());
// Head orientation comes from the head API as degrees; store as quat.
const HeadData* head = _avatar->getHeadData();
glm::quat rotation = glm::quat(glm::radians(glm::vec3(head->getFinalPitch(),
head->getFinalYaw(),
head->getFinalRoll())));
frame.setHeadRotation(rotation);
frame.setLeanForward(_avatar->getHeadData()->getLeanForward());
frame.setLeanSideways(_avatar->getHeadData()->getLeanSideways());
_recording->addFrame(0, frame);
record();
}
void Recorder::stopRecording() {
@ -156,22 +96,41 @@ void Recorder::saveToFile(QString file) {
void Recorder::record() {
if (isRecording()) {
const RecordingFrame& referenceFrame = _recording->getFrame(0);
const RecordingContext& context = _recording->getContext();
RecordingFrame frame;
frame.setBlendshapeCoefficients(_avatar->getHeadData()->getBlendshapeCoefficients());
frame.setJointRotations(_avatar->getJointRotations());
frame.setTranslation(_avatar->getPosition() - referenceFrame.getTranslation());
frame.setRotation(glm::inverse(referenceFrame.getRotation()) * _avatar->getOrientation());
frame.setScale(_avatar->getTargetScale() / referenceFrame.getScale());
frame.setTranslation(context.orientationInv * (_avatar->getPosition() - context.position));
frame.setRotation(context.orientationInv * _avatar->getOrientation());
frame.setScale(_avatar->getTargetScale() / context.scale);
const HeadData* head = _avatar->getHeadData();
glm::quat rotation = glm::quat(glm::radians(glm::vec3(head->getFinalPitch(),
head->getFinalYaw(),
head->getFinalRoll())));
frame.setHeadRotation(rotation);
frame.setLeanForward(_avatar->getHeadData()->getLeanForward());
frame.setLeanSideways(_avatar->getHeadData()->getLeanSideways());
if (head) {
glm::vec3 rotationDegrees = glm::vec3(head->getFinalPitch(),
head->getFinalYaw(),
head->getFinalRoll());
frame.setHeadRotation(glm::quat(glm::radians(rotationDegrees)));
frame.setLeanForward(head->getLeanForward());
frame.setLeanSideways(head->getLeanSideways());
glm::vec3 relativeLookAt = context.orientationInv *
(head->getLookAtPosition() - context.position);
frame.setLookAtPosition(relativeLookAt);
}
bool wantDebug = false;
if (wantDebug) {
qDebug() << "Recording frame #" << _recording->getFrameNumber();
qDebug() << "Blendshapes:" << frame.getBlendshapeCoefficients().size();
qDebug() << "JointRotations:" << frame.getJointRotations().size();
qDebug() << "Translation:" << frame.getTranslation();
qDebug() << "Rotation:" << frame.getRotation();
qDebug() << "Scale:" << frame.getScale();
qDebug() << "Head rotation:" << frame.getHeadRotation();
qDebug() << "Lean Forward:" << frame.getLeanForward();
qDebug() << "Lean Sideways:" << frame.getLeanSideways();
qDebug() << "LookAtPosition:" << frame.getLookAtPosition();
}
_recording->addFrame(_timer.elapsed(), frame);
}
@ -181,473 +140,3 @@ void Recorder::record(char* samples, int size) {
QByteArray byteArray(samples, size);
_recording->addAudioPacket(byteArray);
}
// Constructs a Player bound to the given avatar; the invalid timer
// marks the not-playing state (see isPlaying()).
Player::Player(AvatarData* avatar) :
_recording(new Recording()),
_avatar(avatar),
_audioThread(NULL),
_startingScale(1.0f),
_playFromCurrentPosition(true),
_loop(false)
{
_timer.invalidate();
_options.setLoop(false);
_options.setVolume(1.0f);
}
// Playback is in progress exactly while the elapsed timer is valid.
bool Player::isPlaying() const {
return _timer.isValid();
}
// Milliseconds since playback started; 0 when not playing.
qint64 Player::elapsed() const {
if (isPlaying()) {
return _timer.elapsed();
} else {
return 0;
}
}
// Head rotation of the current frame composed with frame 0's absolute
// rotation. Only valid while playing (hence the warning path).
glm::quat Player::getHeadRotation() {
if (!computeCurrentFrame()) {
qWarning() << "Incorrect use of Player::getHeadRotation()";
return glm::quat();
}
if (_currentFrame == 0) {
return _recording->getFrame(_currentFrame).getHeadRotation();
}
// Frame 0 stores absolute values; later frames are relative to it.
return _recording->getFrame(0).getHeadRotation() *
_recording->getFrame(_currentFrame).getHeadRotation();
}
// Lean values of the current frame. Only valid while playing.
float Player::getLeanSideways() {
if (!computeCurrentFrame()) {
qWarning() << "Incorrect use of Player::getLeanSideways()";
return 0.0f;
}
return _recording->getFrame(_currentFrame).getLeanSideways();
}
float Player::getLeanForward() {
if (!computeCurrentFrame()) {
qWarning() << "Incorrect use of Player::getLeanForward()";
return 0.0f;
}
return _recording->getFrame(_currentFrame).getLeanForward();
}
// Begins playback: spins up the audio thread, fakes a faceshift
// connection so recorded blendshapes drive the face, and records the
// starting transform frames will be offset from. No-op when the
// recording is missing or empty.
void Player::startPlaying() {
if (_recording && _recording->getFrameNumber() > 0) {
qDebug() << "Recorder::startPlaying()";
_currentFrame = 0;
// Setup audio thread
_audioThread = new QThread();
_options.setPosition(_avatar->getPosition());
_options.setOrientation(_avatar->getOrientation());
// deleteLater as deleter: the injector lives on _audioThread.
_injector.reset(new AudioInjector(_recording->getAudio(), _options), &QObject::deleteLater);
_injector->moveToThread(_audioThread);
_audioThread->start();
QMetaObject::invokeMethod(_injector.data(), "injectAudio", Qt::QueuedConnection);
// Fake faceshift connection
_avatar->setForceFaceshiftConnected(true);
// Root playback either at the avatar's current transform or at the
// recording's original one (frame 0 stores absolute values).
if (_playFromCurrentPosition) {
_startingPosition = _avatar->getPosition();
_startingRotation = _avatar->getOrientation();
_startingScale = _avatar->getTargetScale();
} else {
_startingPosition = _recording->getFrame(0).getTranslation();
_startingRotation = _recording->getFrame(0).getRotation();
_startingScale = _recording->getFrame(0).getScale();
}
_timer.start();
}
}
// Stops playback and tears the audio thread down asynchronously:
// injector finished -> deleteLater -> destroyed -> thread quit ->
// finished -> thread deleteLater. Safe to call when not playing.
void Player::stopPlaying() {
if (!isPlaying()) {
return;
}
_timer.invalidate();
_avatar->clearJointsData();
// Cleanup audio thread
_injector->stop();
QObject::connect(_injector.data(), &AudioInjector::finished,
_injector.data(), &AudioInjector::deleteLater);
QObject::connect(_injector.data(), &AudioInjector::destroyed,
_audioThread, &QThread::quit);
QObject::connect(_audioThread, &QThread::finished,
_audioThread, &QThread::deleteLater);
_injector.clear();
_audioThread = NULL;
// Turn off fake faceshift connection
_avatar->setForceFaceshiftConnected(false);
qDebug() << "Recorder::stopPlaying()";
}
// Loads a recording from file, reusing the current Recording object
// when there is one.
void Player::loadFromFile(QString file) {
if (_recording) {
_recording->clear();
} else {
_recording = RecordingPointer(new Recording());
}
readRecordingFromFile(_recording, file);
}
// Replaces the player's recording with one already in memory.
void Player::loadRecording(RecordingPointer recording) {
_recording = recording;
}
// Advances playback one tick, applying the frame matching the elapsed
// time to the avatar. Frame 0 is never applied: it stores the
// absolute starting values that later frames are relative to.
void Player::play() {
computeCurrentFrame();
if (_currentFrame < 0 || (_currentFrame >= _recording->getFrameNumber() - 1)) {
// If it's the end of the recording, stop playing
stopPlaying();
if (_loop) {
startPlaying();
}
return;
}
if (_currentFrame == 0) {
// Don't play frame 0
// only meant to store absolute values
return;
}
// Re-root the relative frame data at the starting transform chosen
// in startPlaying().
_avatar->setPosition(_startingPosition +
glm::inverse(_recording->getFrame(0).getRotation()) * _startingRotation *
_recording->getFrame(_currentFrame).getTranslation());
_avatar->setOrientation(_startingRotation *
_recording->getFrame(_currentFrame).getRotation());
_avatar->setTargetScale(_startingScale *
_recording->getFrame(_currentFrame).getScale());
_avatar->setJointRotations(_recording->getFrame(_currentFrame).getJointRotations());
HeadData* head = const_cast<HeadData*>(_avatar->getHeadData());
if (head) {
head->setBlendshapeCoefficients(_recording->getFrame(_currentFrame).getBlendshapeCoefficients());
head->setLeanSideways(_recording->getFrame(_currentFrame).getLeanSideways());
head->setLeanForward(_recording->getFrame(_currentFrame).getLeanForward());
// Quaternion -> degrees for the head pitch/yaw/roll API.
glm::vec3 eulers = glm::degrees(safeEulerAngles(_recording->getFrame(_currentFrame).getHeadRotation()));
head->setFinalPitch(eulers.x);
head->setFinalYaw(eulers.y);
head->setFinalRoll(eulers.z);
}
// Keep the injected audio source attached to the moving avatar.
_options.setPosition(_avatar->getPosition());
_options.setOrientation(_avatar->getOrientation());
_injector->setOptions(_options);
}
// When true, playback is rooted at the avatar's current transform
// rather than the recording's original one.
void Player::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
_playFromCurrentPosition = playFromCurrentLocation;
}
void Player::setLoop(bool loop) {
_loop = loop;
}
// Updates _currentFrame to match the elapsed playback time.
// Returns false (and sets _currentFrame to -1) when not playing.
bool Player::computeCurrentFrame() {
if (!isPlaying()) {
_currentFrame = -1;
return false;
}
if (_currentFrame < 0) {
_currentFrame = 0;
}
while (_currentFrame < _recording->getFrameNumber() - 1 &&
_recording->getFrameTimestamp(_currentFrame) < _timer.elapsed()) {
++_currentFrame;
}
return true;
}
// Serializes a recording to filename.
// On-disk layout (QDataStream): the timestamp vector, a fully
// specified base frame (frame 0), then for each later frame a
// QBitArray change-mask plus a buffer holding only the fields that
// differ from the previous frame, and finally the audio byte array.
// readRecordingFromFile() mirrors this layout exactly.
// BUGFIX: _audio is NULL when no audio packet was ever recorded; the
// original dereferenced it unconditionally and crashed on save. An
// empty byte array is written instead (identical bytes to an empty
// Sound, so the reader is unaffected). Also renamed the inner loop
// counters, which shadowed the outer frame index `i`.
void writeRecordingToFile(RecordingPointer recording, QString filename) {
    if (!recording || recording->getFrameNumber() < 1) {
        qDebug() << "Can't save empty recording";
        return;
    }
    qDebug() << "Writing recording to " << filename << ".";
    QElapsedTimer timer;
    QFile file(filename);
    if (!file.open(QIODevice::WriteOnly)){
        return;
    }
    timer.start();
    QDataStream fileStream(&file);

    fileStream << recording->_timestamps;

    // Base frame: every field written in full; totalLength counts the
    // number of mask bits each delta frame will need.
    RecordingFrame& baseFrame = recording->_frames[0];
    int totalLength = 0;

    // Blendshape coefficients
    fileStream << baseFrame._blendshapeCoefficients;
    totalLength += baseFrame._blendshapeCoefficients.size();

    // Joint Rotations
    int jointRotationSize = baseFrame._jointRotations.size();
    fileStream << jointRotationSize;
    for (int j = 0; j < jointRotationSize; ++j) {
        fileStream << baseFrame._jointRotations[j].x << baseFrame._jointRotations[j].y << baseFrame._jointRotations[j].z << baseFrame._jointRotations[j].w;
    }
    totalLength += jointRotationSize;

    // Translation
    fileStream << baseFrame._translation.x << baseFrame._translation.y << baseFrame._translation.z;
    totalLength += 1;
    // Rotation
    fileStream << baseFrame._rotation.x << baseFrame._rotation.y << baseFrame._rotation.z << baseFrame._rotation.w;
    totalLength += 1;
    // Scale
    fileStream << baseFrame._scale;
    totalLength += 1;
    // Head Rotation
    fileStream << baseFrame._headRotation.x << baseFrame._headRotation.y << baseFrame._headRotation.z << baseFrame._headRotation.w;
    totalLength += 1;
    // Lean Sideways
    fileStream << baseFrame._leanSideways;
    totalLength += 1;
    // Lean Forward
    fileStream << baseFrame._leanForward;
    totalLength += 1;

    // Delta-encode every subsequent frame against its predecessor.
    for (int i = 1; i < recording->_timestamps.size(); ++i) {
        QBitArray mask(totalLength);
        int maskIndex = 0;
        QByteArray buffer;
        QDataStream stream(&buffer, QIODevice::WriteOnly);
        RecordingFrame& previousFrame = recording->_frames[i - 1];
        RecordingFrame& frame = recording->_frames[i];

        // Blendshape coefficients
        for (int j = 0; j < frame._blendshapeCoefficients.size(); ++j) {
            if (frame._blendshapeCoefficients[j] != previousFrame._blendshapeCoefficients[j]) {
                stream << frame._blendshapeCoefficients[j];
                mask.setBit(maskIndex);
            }
            maskIndex++;
        }
        // Joint Rotations
        for (int j = 0; j < frame._jointRotations.size(); ++j) {
            if (frame._jointRotations[j] != previousFrame._jointRotations[j]) {
                stream << frame._jointRotations[j].x << frame._jointRotations[j].y << frame._jointRotations[j].z << frame._jointRotations[j].w;
                mask.setBit(maskIndex);
            }
            maskIndex++;
        }
        // Translation
        if (frame._translation != previousFrame._translation) {
            stream << frame._translation.x << frame._translation.y << frame._translation.z;
            mask.setBit(maskIndex);
        }
        maskIndex++;
        // Rotation
        if (frame._rotation != previousFrame._rotation) {
            stream << frame._rotation.x << frame._rotation.y << frame._rotation.z << frame._rotation.w;
            mask.setBit(maskIndex);
        }
        maskIndex++;
        // Scale
        if (frame._scale != previousFrame._scale) {
            stream << frame._scale;
            mask.setBit(maskIndex);
        }
        maskIndex++;
        // Head Rotation
        if (frame._headRotation != previousFrame._headRotation) {
            stream << frame._headRotation.x << frame._headRotation.y << frame._headRotation.z << frame._headRotation.w;
            mask.setBit(maskIndex);
        }
        maskIndex++;
        // Lean Sideways
        if (frame._leanSideways != previousFrame._leanSideways) {
            stream << frame._leanSideways;
            mask.setBit(maskIndex);
        }
        maskIndex++;
        // Lean Forward
        if (frame._leanForward != previousFrame._leanForward) {
            stream << frame._leanForward;
            mask.setBit(maskIndex);
        }
        maskIndex++;

        fileStream << mask;
        fileStream << buffer;
    }

    // Audio track; empty array when nothing was recorded (see BUGFIX).
    fileStream << (recording->_audio ? recording->_audio->getByteArray() : QByteArray());

    qDebug() << "Wrote " << file.size() << " bytes in " << timer.elapsed() << " ms.";
}
// Deserializes a recording from a local file or an http/https/ftp URL
// (downloaded synchronously via a local event loop). Mirrors the
// layout produced by writeRecordingToFile(): timestamps, a full base
// frame, then per-frame change-mask + delta buffer, then audio.
// Returns the (possibly freshly allocated) recording; on any error the
// recording is returned as-is.
RecordingPointer readRecordingFromFile(RecordingPointer recording, QString filename) {
QElapsedTimer timer;
timer.start();
QByteArray byteArray;
QUrl url(filename);
if (url.scheme() == "http" || url.scheme() == "https" || url.scheme() == "ftp") {
qDebug() << "Downloading recording at" << url;
NetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance();
QNetworkReply* reply = networkAccessManager.get(QNetworkRequest(url));
// Block until the download finishes.
QEventLoop loop;
QObject::connect(reply, SIGNAL(finished()), &loop, SLOT(quit()));
loop.exec();
if (reply->error() != QNetworkReply::NoError) {
qDebug() << "Error while downloading recording: " << reply->error();
reply->deleteLater();
return recording;
}
byteArray = reply->readAll();
reply->deleteLater();
} else {
qDebug() << "Reading recording from " << filename << ".";
QFile file(filename);
if (!file.open(QIODevice::ReadOnly)){
return recording;
}
byteArray = file.readAll();
file.close();
}
if (!recording) {
recording.reset(new Recording());
}
QDataStream fileStream(byteArray);
fileStream >> recording->_timestamps;
// Base frame (frame 0): all fields present in full.
RecordingFrame baseFrame;
// Blendshape coefficients
fileStream >> baseFrame._blendshapeCoefficients;
// Joint Rotations
int jointRotationSize;
fileStream >> jointRotationSize;
baseFrame._jointRotations.resize(jointRotationSize);
for (int i = 0; i < jointRotationSize; ++i) {
fileStream >> baseFrame._jointRotations[i].x >> baseFrame._jointRotations[i].y >> baseFrame._jointRotations[i].z >> baseFrame._jointRotations[i].w;
}
fileStream >> baseFrame._translation.x >> baseFrame._translation.y >> baseFrame._translation.z;
fileStream >> baseFrame._rotation.x >> baseFrame._rotation.y >> baseFrame._rotation.z >> baseFrame._rotation.w;
fileStream >> baseFrame._scale;
fileStream >> baseFrame._headRotation.x >> baseFrame._headRotation.y >> baseFrame._headRotation.z >> baseFrame._headRotation.w;
fileStream >> baseFrame._leanSideways;
fileStream >> baseFrame._leanForward;
recording->_frames << baseFrame;
// Delta frames: each mask bit says whether the corresponding field
// was written to the buffer or should be copied from the previous
// frame. Field order must match the writer exactly.
for (int i = 1; i < recording->_timestamps.size(); ++i) {
QBitArray mask;
QByteArray buffer;
QDataStream stream(&buffer, QIODevice::ReadOnly);
RecordingFrame frame;
RecordingFrame& previousFrame = recording->_frames.last();
fileStream >> mask;
fileStream >> buffer;
int maskIndex = 0;
// Blendshape Coefficients
frame._blendshapeCoefficients.resize(baseFrame._blendshapeCoefficients.size());
for (int i = 0; i < baseFrame._blendshapeCoefficients.size(); ++i) {
if (mask[maskIndex++]) {
stream >> frame._blendshapeCoefficients[i];
} else {
frame._blendshapeCoefficients[i] = previousFrame._blendshapeCoefficients[i];
}
}
// Joint Rotations
frame._jointRotations.resize(baseFrame._jointRotations.size());
for (int i = 0; i < baseFrame._jointRotations.size(); ++i) {
if (mask[maskIndex++]) {
stream >> frame._jointRotations[i].x >> frame._jointRotations[i].y >> frame._jointRotations[i].z >> frame._jointRotations[i].w;
} else {
frame._jointRotations[i] = previousFrame._jointRotations[i];
}
}
if (mask[maskIndex++]) {
stream >> frame._translation.x >> frame._translation.y >> frame._translation.z;
} else {
frame._translation = previousFrame._translation;
}
if (mask[maskIndex++]) {
stream >> frame._rotation.x >> frame._rotation.y >> frame._rotation.z >> frame._rotation.w;
} else {
frame._rotation = previousFrame._rotation;
}
if (mask[maskIndex++]) {
stream >> frame._scale;
} else {
frame._scale = previousFrame._scale;
}
if (mask[maskIndex++]) {
stream >> frame._headRotation.x >> frame._headRotation.y >> frame._headRotation.z >> frame._headRotation.w;
} else {
frame._headRotation = previousFrame._headRotation;
}
if (mask[maskIndex++]) {
stream >> frame._leanSideways;
} else {
frame._leanSideways = previousFrame._leanSideways;
}
if (mask[maskIndex++]) {
stream >> frame._leanForward;
} else {
frame._leanForward = previousFrame._leanForward;
}
recording->_frames << frame;
}
// Audio track.
QByteArray audioArray;
fileStream >> audioArray;
recording->addAudioPacket(audioArray);
qDebug() << "Read " << byteArray.size() << " bytes in " << timer.elapsed() << " ms.";
return recording;
}

View file

@ -12,98 +12,18 @@
#ifndef hifi_Recorder_h
#define hifi_Recorder_h
#include <QBitArray>
#include <QElapsedTimer>
#include <QHash>
#include <QSharedPointer>
#include <QVector>
#include <QWeakPointer>
#include "Recording.h"
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <AudioInjector.h>
#include <SharedUtil.h>
#include <Sound.h>
template<class C>
class QSharedPointer;
class AttachmentData;
class AvatarData;
class Recorder;
class Recording;
class Player;
typedef QSharedPointer<Recording> RecordingPointer;
typedef QSharedPointer<Recorder> RecorderPointer;
typedef QWeakPointer<Recorder> WeakRecorderPointer;
typedef QSharedPointer<Player> PlayerPointer;
typedef QWeakPointer<Player> WeakPlayerPointer;
/// Stores the different values associated to one recording frame
class RecordingFrame {
public:
QVector<float> getBlendshapeCoefficients() const { return _blendshapeCoefficients; }
QVector<glm::quat> getJointRotations() const { return _jointRotations; }
glm::vec3 getTranslation() const { return _translation; }
glm::quat getRotation() const { return _rotation; }
float getScale() const { return _scale; }
glm::quat getHeadRotation() const { return _headRotation; }
float getLeanSideways() const { return _leanSideways; }
float getLeanForward() const { return _leanForward; }
protected:
// Setters are protected: only Recorder and the file (de)serializers
// below are allowed to mutate a frame.
void setBlendshapeCoefficients(QVector<float> blendshapeCoefficients);
void setJointRotations(QVector<glm::quat> jointRotations);
void setTranslation(glm::vec3 translation);
void setRotation(glm::quat rotation);
void setScale(float scale);
void setHeadRotation(glm::quat headRotation);
void setLeanSideways(float leanSideways);
void setLeanForward(float leanForward);
private:
QVector<float> _blendshapeCoefficients;
QVector<glm::quat> _jointRotations;
glm::vec3 _translation;
glm::quat _rotation;
float _scale;
glm::quat _headRotation;
float _leanSideways;
float _leanForward;
friend class Recorder;
friend void writeRecordingToFile(RecordingPointer recording, QString file);
friend RecordingPointer readRecordingFromFile(RecordingPointer recording, QString file);
};
/// Stores a recording
class Recording {
public:
Recording();
~Recording();
bool isEmpty() const { return _timestamps.isEmpty(); }
int getLength() const; // in ms
int getFrameNumber() const { return _frames.size(); }
// Timestamp (ms) of frame i; clamps to getLength() past the end.
qint32 getFrameTimestamp(int i) const;
const RecordingFrame& getFrame(int i) const;
// May be NULL when no audio has been recorded.
Sound* getAudio() const { return _audio; }
protected:
// Mutators are protected: only Recorder/Player and the file
// (de)serializers build or reset recordings.
void addFrame(int timestamp, RecordingFrame& frame);
void addAudioPacket(QByteArray byteArray);
void clear();
private:
// _timestamps and _frames are parallel arrays, kept in lockstep.
QVector<qint32> _timestamps;
QVector<RecordingFrame> _frames;
Sound* _audio;
friend class Recorder;
friend class Player;
friend void writeRecordingToFile(RecordingPointer recording, QString file);
friend RecordingPointer readRecordingFromFile(RecordingPointer recording, QString file);
};
/// Records a recording
class Recorder {
@ -129,54 +49,5 @@ private:
AvatarData* _avatar;
};
/// Plays back a recording
/// Drives an AvatarData from a Recording's frames and replays the captured
/// audio through an AudioInjector running on a dedicated thread.
/// NOTE(review): slots are declared but no Q_OBJECT macro is visible in this
/// view — confirm signal/slot use is intentional without moc support.
class Player {
public:
    Player(AvatarData* avatar);

    bool isPlaying() const;
    qint64 elapsed() const;
    RecordingPointer getRecording() const { return _recording; }

    // Those should only be called if isPlaying() returns true
    glm::quat getHeadRotation();
    float getLeanSideways();
    float getLeanForward();

public slots:
    void startPlaying();
    void stopPlaying();
    void loadFromFile(QString file);
    void loadRecording(RecordingPointer recording);
    void play();

    void setPlayFromCurrentLocation(bool playFromCurrentLocation);
    void setLoop(bool loop);

private:
    // Advances _currentFrame based on _timer; returns whether playback continues.
    bool computeCurrentFrame();

    QElapsedTimer _timer; // tracks elapsed playback time
    RecordingPointer _recording;
    int _currentFrame;

    QSharedPointer<AudioInjector> _injector;
    AudioInjectorOptions _options;

    AvatarData* _avatar; // not owned
    QThread* _audioThread; // thread hosting the audio injector

    // Avatar transform captured when playback starts, used to restore/offset.
    glm::vec3 _startingPosition;
    glm::quat _startingRotation;
    float _startingScale;

    bool _playFromCurrentPosition; // if false, play back at the recorded location
    bool _loop;
};
void writeRecordingToFile(RecordingPointer recording, QString file);
RecordingPointer readRecordingFromFile(RecordingPointer recording, QString file);
#endif // hifi_Recorder_h

View file

@ -0,0 +1,806 @@
//
// Recording.cpp
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <GLMHelpers.h>
#include <NetworkAccessManager.h>
#include <NodeList.h>
#include <Sound.h>
#include <StreamUtils.h>
#include <QBitArray>
#include <QElapsedTimer>
#include <QEventLoop>
#include <QFile>
#include <QFileInfo>
#include <QMessageBox>
#include <QPair>
#include "AvatarData.h"
#include "Recording.h"
// HFR file format magic number (Inspired by PNG)
// (decimal)            17  72  70  82  13  10  26  10
// (hexadecimal)        11  48  46  52  0d  0a  1a  0a
// (ASCII C notation)  \021   H   F   R  \r  \n \032  \n
static const int MAGIC_NUMBER_SIZE = 8;
static const char MAGIC_NUMBER[MAGIC_NUMBER_SIZE] = {17, 72, 70, 82, 13, 10, 26, 10};

// Version (Major, Minor)
static const QPair<quint8, quint8> VERSION(0, 1);

// Fractional-bit counts (radixes) used when packing floats into signed
// two-byte fixed point for the file format.
// NOTE(review): these have external linkage (non-const, non-static) —
// presumably so other translation units can reference them; confirm.
int SCALE_RADIX = 10;
int BLENDSHAPE_RADIX = 15;
int LEAN_RADIX = 7;
// Replaces this frame's stored blendshape coefficient set.
// (Out-of-line, unlike the other setters, since it is declared non-inline.)
void RecordingFrame::setBlendshapeCoefficients(QVector<float> blendshapeCoefficients) {
    // QVector is implicitly shared, so this assignment is cheap.
    _blendshapeCoefficients = blendshapeCoefficients;
}
// Creates an empty recording: no frames, no timestamps, no audio attached yet.
Recording::Recording() :
    _audio(NULL)
{
}
// Releases the audio buffer owned by this recording.
Recording::~Recording() {
    delete _audio; // deleting NULL is a no-op
}
// Returns the recording duration in milliseconds.
// The last timestamp marks the total length; an empty recording has length 0.
int Recording::getLength() const {
    return _timestamps.isEmpty() ? 0 : _timestamps.last();
}
// Returns the timestamp (ms from recording start) of frame i.
// Indices past the end return the total length (unchanged behavior).
// Fix: the original indexed _timestamps with a negative i unchecked,
// which is an out-of-bounds read; negative indices now return 0.
qint32 Recording::getFrameTimestamp(int i) const {
    if (i < 0) {
        return 0;
    }
    if (i >= _timestamps.size()) {
        return getLength();
    }
    return _timestamps[i];
}
// Returns a reference to frame i; the index must be in range.
// Fix: the original assert compared i against _timestamps.size() rather than
// _frames (the container actually indexed) and did not reject negative i.
const RecordingFrame& Recording::getFrame(int i) const {
    assert(i >= 0 && i < _frames.size());
    return _frames[i];
}
// Appends one frame and its timestamp; the two vectors stay in lockstep,
// one entry per recorded frame.
void Recording::addFrame(int timestamp, RecordingFrame& frame) {
    _timestamps.append(timestamp);
    _frames.append(frame);
}
// Folds an incoming audio packet into the recording's audio buffer.
// The Sound object is created lazily from the first packet; later packets
// are appended to it.
void Recording::addAudioPacket(QByteArray byteArray) {
    if (_audio) {
        _audio->append(byteArray);
    } else {
        _audio = new Sound(byteArray);
    }
}
// Resets the recording to its freshly-constructed state: drops all frame
// data and releases the audio buffer.
void Recording::clear() {
    _frames.clear();
    _timestamps.clear();
    delete _audio; // safe when NULL
    _audio = NULL;
}
// Appends the vector to the stream as its raw in-memory bytes
// (three packed floats, no compression).
void writeVec3(QDataStream& stream, glm::vec3 value) {
    char rawBytes[sizeof(value)];
    memcpy(rawBytes, &value, sizeof(value));
    stream.writeRawData(rawBytes, sizeof(value));
}
// Reads a raw glm::vec3 (three packed floats) from the stream into value.
// Fix: the original ignored the readRawData() result and always returned
// true, so a short or failed read silently produced garbage; we now report
// failure when the stream does not yield sizeof(value) bytes.
bool readVec3(QDataStream& stream, glm::vec3& value) {
    unsigned char buffer[sizeof(value)];
    if (stream.readRawData(reinterpret_cast<char*>(buffer), sizeof(value)) != (int)sizeof(value)) {
        return false;
    }
    memcpy(&value, buffer, sizeof(value));
    return true;
}
// Compresses the quaternion via packOrientationQuatToBytes and appends the
// packed representation (4 components x 2 bytes) to the stream.
void writeQuat(QDataStream& stream, glm::quat value) {
    unsigned char packed[256];
    const int packedSize = packOrientationQuatToBytes(packed, value);
    stream.writeRawData(reinterpret_cast<char*>(packed), packedSize);
}
// Reads a packed quaternion (4 components * 2 bytes) from the stream and
// unpacks it into value.
// Fix: the original never checked the readRawData() result, so a short read
// unpacked whatever stale bytes happened to be in the buffer; we now fail
// when the stream runs short, in addition to the existing unpack-size check.
bool readQuat(QDataStream& stream, glm::quat& value) {
    const int quatByteSize = 4 * 2; // 4 components * 2 bytes
    unsigned char buffer[256];
    if (stream.readRawData(reinterpret_cast<char*>(buffer), quatByteSize) != quatByteSize) {
        return false;
    }
    return unpackOrientationQuatFromBytes(buffer, value) == quatByteSize;
}
// Packs value as a signed two-byte fixed-point number with 'radix'
// fractional bits and appends the packed bytes to the stream.
void writeFloat(QDataStream& stream, float value, int radix) {
    unsigned char packed[256];
    const int packedSize = packFloatScalarToSignedTwoByteFixed(packed, value, radix);
    stream.writeRawData(reinterpret_cast<char*>(packed), packedSize);
}
// Reads one signed two-byte fixed-point value with 'radix' fractional bits
// and unpacks it into value.
// Fixes: (1) the original never checked the readRawData() result, so a short
// read unpacked stale buffer contents; (2) it used a 256-entry int16_t scratch
// array (512 bytes) for a single 2-byte value.
bool readFloat(QDataStream& stream, float& value, int radix) {
    const int floatByteSize = 2; // one float packed into 2 bytes
    int16_t packed;
    if (stream.readRawData(reinterpret_cast<char*>(&packed), floatByteSize) != floatByteSize) {
        return false;
    }
    return unpackFloatScalarFromSignedTwoByteFixed(&packed, &value, radix) == floatByteSize;
}
// Serializes a recording to an HFR file.
// Layout: magic number, version, data offset (quint16), data length (quint32),
// CRC-16 over the data section, then the context block, the delta-encoded
// frames, and finally the audio byte array.
// Frames after the first are delta-encoded: a QBitArray mask flags which
// values changed since the previous frame, and only those values are written.
void writeRecordingToFile(RecordingPointer recording, QString filename) {
    if (!recording || recording->getFrameNumber() < 1) {
        qDebug() << "Can't save empty recording";
        return;
    }
    QElapsedTimer timer;
    QFile file(filename);
    if (!file.open(QIODevice::ReadWrite | QIODevice::Truncate)){
        qDebug() << "Couldn't open " << filename;
        return;
    }
    timer.start();
    qDebug() << "Writing recording to " << filename << ".";

    QDataStream fileStream(&file);

    // HEADER
    file.write(MAGIC_NUMBER, MAGIC_NUMBER_SIZE); // Magic number
    fileStream << VERSION; // File format version
    // Reserve header fields now; they are back-patched once sizes are known.
    const qint64 dataOffsetPos = file.pos();
    fileStream << (quint16)0; // Save two empty bytes for the data offset
    const qint64 dataLengthPos = file.pos();
    fileStream << (quint32)0; // Save four empty bytes for the data length
    const quint64 crc16Pos = file.pos();
    fileStream << (quint16)0; // Save two empty bytes for the CRC-16

    // METADATA
    // TODO

    // Write data offset (header + metadata size fits in 16 bits)
    quint16 dataOffset = file.pos();
    file.seek(dataOffsetPos);
    fileStream << dataOffset;
    file.seek(dataOffset);

    // CONTEXT
    RecordingContext& context = recording->getContext();
    // Global Timestamp
    fileStream << context.globalTimestamp;
    // Domain
    fileStream << context.domain;
    // Position
    writeVec3(fileStream, context.position);
    // Orientation
    writeQuat(fileStream, context.orientation);
    // Scale
    writeFloat(fileStream, context.scale, SCALE_RADIX);
    // Head model
    fileStream << context.headModel;
    // Skeleton model
    fileStream << context.skeletonModel;
    // Display name
    fileStream << context.displayName;
    // Attachements
    fileStream << (quint8)context.attachments.size();
    foreach (AttachmentData data, context.attachments) {
        // Model
        fileStream << data.modelURL.toString();
        // Joint name
        fileStream << data.jointName;
        // Position
        writeVec3(fileStream, data.translation);
        // Orientation
        writeQuat(fileStream, data.rotation);
        // Scale
        writeFloat(fileStream, data.scale, SCALE_RADIX);
    }

    // RECORDING
    fileStream << recording->_timestamps;

    // mask, numBlendshapes, and numJoints persist across iterations: the
    // counts and the mask size are established on frame 0 and reused.
    QBitArray mask;
    quint32 numBlendshapes = 0;
    quint32 numJoints = 0;
    for (int i = 0; i < recording->_timestamps.size(); ++i) {
        mask.fill(false); // keep the size, clear all change bits
        int maskIndex = 0;
        QByteArray buffer;
        QDataStream stream(&buffer, QIODevice::WriteOnly);
        // Frame 0 compares against itself so every value is "changed".
        RecordingFrame& previousFrame = recording->_frames[(i != 0) ? i - 1 : i];
        RecordingFrame& frame = recording->_frames[i];

        // Blendshape Coefficients
        if (i == 0) {
            numBlendshapes = frame.getBlendshapeCoefficients().size();
            stream << numBlendshapes;
            mask.resize(mask.size() + numBlendshapes);
        }
        for (int j = 0; j < numBlendshapes; ++j) {
            if (i == 0 ||
                frame._blendshapeCoefficients[j] != previousFrame._blendshapeCoefficients[j]) {
                writeFloat(stream, frame.getBlendshapeCoefficients()[j], BLENDSHAPE_RADIX);
                mask.setBit(maskIndex);
            }
            ++maskIndex;
        }

        // Joint Rotations
        if (i == 0) {
            numJoints = frame.getJointRotations().size();
            stream << numJoints;
            mask.resize(mask.size() + numJoints);
        }
        for (int j = 0; j < numJoints; ++j) {
            if (i == 0 ||
                frame._jointRotations[j] != previousFrame._jointRotations[j]) {
                writeQuat(stream, frame._jointRotations[j]);
                mask.setBit(maskIndex);
            }
            maskIndex++;
        }

        // Translation
        if (i == 0) {
            mask.resize(mask.size() + 1);
        }
        if (i == 0 || frame._translation != previousFrame._translation) {
            writeVec3(stream, frame._translation);
            mask.setBit(maskIndex);
        }
        maskIndex++;

        // Rotation
        if (i == 0) {
            mask.resize(mask.size() + 1);
        }
        if (i == 0 || frame._rotation != previousFrame._rotation) {
            writeQuat(stream, frame._rotation);
            mask.setBit(maskIndex);
        }
        maskIndex++;

        // Scale
        if (i == 0) {
            mask.resize(mask.size() + 1);
        }
        if (i == 0 || frame._scale != previousFrame._scale) {
            writeFloat(stream, frame._scale, SCALE_RADIX);
            mask.setBit(maskIndex);
        }
        maskIndex++;

        // Head Rotation
        if (i == 0) {
            mask.resize(mask.size() + 1);
        }
        if (i == 0 || frame._headRotation != previousFrame._headRotation) {
            writeQuat(stream, frame._headRotation);
            mask.setBit(maskIndex);
        }
        maskIndex++;

        // Lean Sideways
        if (i == 0) {
            mask.resize(mask.size() + 1);
        }
        if (i == 0 || frame._leanSideways != previousFrame._leanSideways) {
            writeFloat(stream, frame._leanSideways, LEAN_RADIX);
            mask.setBit(maskIndex);
        }
        maskIndex++;

        // Lean Forward
        if (i == 0) {
            mask.resize(mask.size() + 1);
        }
        if (i == 0 || frame._leanForward != previousFrame._leanForward) {
            writeFloat(stream, frame._leanForward, LEAN_RADIX);
            mask.setBit(maskIndex);
        }
        maskIndex++;

        // LookAt Position
        if (i == 0) {
            mask.resize(mask.size() + 1);
        }
        if (i == 0 || frame._lookAtPosition != previousFrame._lookAtPosition) {
            writeVec3(stream, frame._lookAtPosition);
            mask.setBit(maskIndex);
        }
        maskIndex++;

        // Each frame is stored as its change mask followed by the packed values.
        fileStream << mask;
        fileStream << buffer;
    }

    // NOTE(review): _audio is dereferenced unconditionally — this crashes for a
    // recording that has frames but no audio packets; confirm every recording
    // is guaranteed to carry audio, or guard with a null check.
    fileStream << recording->_audio->getByteArray();
    qint64 writtingTime = timer.restart();

    // Write data length and CRC-16
    quint32 dataLength = file.pos() - dataOffset;
    file.seek(dataOffset); // Go to beginning of data for checksum
    quint16 crc16 = qChecksum(file.readAll().constData(), dataLength);
    // Back-patch the reserved header fields, then restore the file position.
    file.seek(dataLengthPos);
    fileStream << dataLength;
    file.seek(crc16Pos);
    fileStream << crc16;
    file.seek(dataOffset + dataLength);

    // NOTE(review): wantDebug is hard-coded true, so this verbose dump always
    // runs; consider gating it on a build flag or logging category.
    bool wantDebug = true;
    if (wantDebug) {
        qDebug() << "[DEBUG] WRITE recording";
        qDebug() << "Header:";
        qDebug() << "File Format version:" << VERSION;
        qDebug() << "Data length:" << dataLength;
        qDebug() << "Data offset:" << dataOffset;
        qDebug() << "CRC-16:" << crc16;

        qDebug() << "Context block:";
        qDebug() << "Global timestamp:" << context.globalTimestamp;
        qDebug() << "Domain:" << context.domain;
        qDebug() << "Position:" << context.position;
        qDebug() << "Orientation:" << context.orientation;
        qDebug() << "Scale:" << context.scale;
        qDebug() << "Head Model:" << context.headModel;
        qDebug() << "Skeleton Model:" << context.skeletonModel;
        qDebug() << "Display Name:" << context.displayName;
        qDebug() << "Num Attachments:" << context.attachments.size();
        for (int i = 0; i < context.attachments.size(); ++i) {
            qDebug() << "Model URL:" << context.attachments[i].modelURL;
            qDebug() << "Joint Name:" << context.attachments[i].jointName;
            qDebug() << "Translation:" << context.attachments[i].translation;
            qDebug() << "Rotation:" << context.attachments[i].rotation;
            qDebug() << "Scale:" << context.attachments[i].scale;
        }

        qDebug() << "Recording:";
        qDebug() << "Total frames:" << recording->getFrameNumber();
        // NOTE(review): same unconditional _audio dereference as above.
        qDebug() << "Audio array:" << recording->getAudio()->getByteArray().size();
    }

    qint64 checksumTime = timer.elapsed();
    qDebug() << "Wrote" << file.size() << "bytes in" << writtingTime + checksumTime << "ms. (" << checksumTime << "ms for checksum)";
}
// Loads a recording from an HFR file (local path or http/https/ftp URL) into
// `recording`, allocating a new Recording if the pointer is null.
// Legacy .rec files are detected by extension and routed through
// readRecordingFromRecFile(), which also converts them to .hfr.
// On validation failure (bad magic, version, or checksum) the shared pointer
// is cleared and a null RecordingPointer is returned.
RecordingPointer readRecordingFromFile(RecordingPointer recording, QString filename) {
    QByteArray byteArray;
    QUrl url(filename);
    QElapsedTimer timer;
    timer.start(); // timer used for debug information (download/parsing time)

    // Acquire the data and place it in byteArray
    // Return if data unavailable
    if (url.scheme() == "http" || url.scheme() == "https" || url.scheme() == "ftp") {
        // Download file if necessary
        qDebug() << "Downloading recording at" << url;
        NetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance();
        QNetworkReply* reply = networkAccessManager.get(QNetworkRequest(url));
        // Block on a local event loop until the download finishes.
        QEventLoop loop;
        QObject::connect(reply, SIGNAL(finished()), &loop, SLOT(quit()));
        loop.exec(); // wait for file
        if (reply->error() != QNetworkReply::NoError) {
            qDebug() << "Error while downloading recording: " << reply->error();
            reply->deleteLater();
            return recording;
        }
        byteArray = reply->readAll();
        reply->deleteLater();
        // print debug + restart timer
        qDebug() << "Downloaded " << byteArray.size() << " bytes in " << timer.restart() << " ms.";
    } else {
        // If local file, just read it.
        qDebug() << "Reading recording from " << filename << ".";
        QFile file(filename);
        if (!file.open(QIODevice::ReadOnly)){
            qDebug() << "Could not open local file: " << url;
            return recording;
        }
        byteArray = file.readAll();
        file.close();
    }

    // Legacy format: convert, save as .hfr, and return the converted recording.
    if (filename.endsWith(".rec") || filename.endsWith(".REC")) {
        qDebug() << "Old .rec format";
        QMessageBox::warning(NULL,
                             QString("Old recording format"),
                             QString("Converting your file to the new format."),
                             QMessageBox::Ok);
        readRecordingFromRecFile(recording, filename, byteArray);
        return recording;
    } else if (!filename.endsWith(".hfr") && !filename.endsWith(".HFR")) {
        // Unrecognized extension: warn but still attempt to parse as HFR.
        qDebug() << "File extension not recognized";
    }

    // Reset the recording passed in the arguments
    if (!recording) {
        recording.reset(new Recording());
    }

    QDataStream fileStream(byteArray);

    // HEADER
    QByteArray magicNumber(MAGIC_NUMBER, MAGIC_NUMBER_SIZE);
    if (!byteArray.startsWith(magicNumber)) {
        qDebug() << "ERROR: This is not a .HFR file. (Magic Number incorrect)";
        return recording;
    }
    fileStream.skipRawData(MAGIC_NUMBER_SIZE);

    QPair<quint8, quint8> version;
    fileStream >> version; // File format version
    if (version != VERSION) {
        qDebug() << "ERROR: This file format version is not supported.";
        return recording;
    }

    quint16 dataOffset = 0;
    fileStream >> dataOffset;
    quint32 dataLength = 0;
    fileStream >> dataLength;
    quint16 crc16 = 0;
    fileStream >> crc16;

    // Check checksum over the data section only (everything after the header).
    quint16 computedCRC16 = qChecksum(byteArray.constData() + dataOffset, dataLength);
    if (computedCRC16 != crc16) {
        qDebug() << "Checksum does not match. Bailling!";
        // Clears the shared pointer itself — the caller receives null.
        recording.clear();
        return recording;
    }

    // METADATA
    // TODO

    // CONTEXT
    RecordingContext& context = recording->getContext();
    // Global Timestamp
    fileStream >> context.globalTimestamp;
    // Domain
    fileStream >> context.domain;
    // Position
    if (!readVec3(fileStream, context.position)) {
        qDebug() << "Couldn't read file correctly. (Invalid vec3)";
        recording.clear();
        return recording;
    }
    // Orientation
    if (!readQuat(fileStream, context.orientation)) {
        qDebug() << "Couldn't read file correctly. (Invalid quat)";
        recording.clear();
        return recording;
    }
    // Scale
    if (!readFloat(fileStream, context.scale, SCALE_RADIX)) {
        qDebug() << "Couldn't read file correctly. (Invalid float)";
        recording.clear();
        return recording;
    }
    // Head model
    fileStream >> context.headModel;
    // Skeleton model
    fileStream >> context.skeletonModel;
    // Display Name
    fileStream >> context.displayName;

    // Attachements
    quint8 numAttachments = 0;
    fileStream >> numAttachments;
    for (int i = 0; i < numAttachments; ++i) {
        AttachmentData data;
        // Model
        QString modelURL;
        fileStream >> modelURL;
        data.modelURL = modelURL;
        // Joint name
        fileStream >> data.jointName;
        // Translation
        // NOTE(review): a failed attachment read skips the entry but keeps
        // parsing; the stream may already be desynchronized at that point.
        if (!readVec3(fileStream, data.translation)) {
            qDebug() << "Couldn't read attachment correctly. (Invalid vec3)";
            continue;
        }
        // Rotation
        if (!readQuat(fileStream, data.rotation)) {
            qDebug() << "Couldn't read attachment correctly. (Invalid quat)";
            continue;
        }
        // Scale
        if (!readFloat(fileStream, data.scale, SCALE_RADIX)) {
            qDebug() << "Couldn't read attachment correctly. (Invalid float)";
            continue;
        }
        context.attachments << data;
    }

    quint32 numBlendshapes = 0;
    quint32 numJoints = 0;

    // RECORDING
    // Frames are delta-encoded: each stores a change mask plus only the
    // values that differ from the previous frame (mirrors the writer).
    fileStream >> recording->_timestamps;

    for (int i = 0; i < recording->_timestamps.size(); ++i) {
        QBitArray mask;
        QByteArray buffer;
        QDataStream stream(&buffer, QIODevice::ReadOnly);
        RecordingFrame frame;
        // Frame 0 falls back to itself (default-constructed values).
        RecordingFrame& previousFrame = (i == 0) ? frame : recording->_frames.last();

        fileStream >> mask;
        fileStream >> buffer;
        int maskIndex = 0;

        // Blendshape Coefficients
        if (i == 0) {
            stream >> numBlendshapes;
        }
        frame._blendshapeCoefficients.resize(numBlendshapes);
        // NOTE(review): mask[maskIndex] assumes the mask was sized correctly
        // by the writer; a truncated/garbled mask would index out of range.
        for (int j = 0; j < numBlendshapes; ++j) {
            if (!mask[maskIndex++] || !readFloat(stream, frame._blendshapeCoefficients[j], BLENDSHAPE_RADIX)) {
                frame._blendshapeCoefficients[j] = previousFrame._blendshapeCoefficients[j];
            }
        }
        // Joint Rotations
        if (i == 0) {
            stream >> numJoints;
        }
        frame._jointRotations.resize(numJoints);
        for (int j = 0; j < numJoints; ++j) {
            if (!mask[maskIndex++] || !readQuat(stream, frame._jointRotations[j])) {
                frame._jointRotations[j] = previousFrame._jointRotations[j];
            }
        }
        // Remaining scalar fields: unchanged bits inherit the previous value.
        if (!mask[maskIndex++] || !readVec3(stream, frame._translation)) {
            frame._translation = previousFrame._translation;
        }

        if (!mask[maskIndex++] || !readQuat(stream, frame._rotation)) {
            frame._rotation = previousFrame._rotation;
        }

        if (!mask[maskIndex++] || !readFloat(stream, frame._scale, SCALE_RADIX)) {
            frame._scale = previousFrame._scale;
        }

        if (!mask[maskIndex++] || !readQuat(stream, frame._headRotation)) {
            frame._headRotation = previousFrame._headRotation;
        }

        if (!mask[maskIndex++] || !readFloat(stream, frame._leanSideways, LEAN_RADIX)) {
            frame._leanSideways = previousFrame._leanSideways;
        }

        if (!mask[maskIndex++] || !readFloat(stream, frame._leanForward, LEAN_RADIX)) {
            frame._leanForward = previousFrame._leanForward;
        }

        if (!mask[maskIndex++] || !readVec3(stream, frame._lookAtPosition)) {
            frame._lookAtPosition = previousFrame._lookAtPosition;
        }

        recording->_frames << frame;
    }

    QByteArray audioArray;
    fileStream >> audioArray;
    recording->addAudioPacket(audioArray);

    // NOTE(review): wantDebug is hard-coded true; consider gating this dump.
    bool wantDebug = true;
    if (wantDebug) {
        qDebug() << "[DEBUG] READ recording";
        qDebug() << "Header:";
        qDebug() << "File Format version:" << VERSION;
        qDebug() << "Data length:" << dataLength;
        qDebug() << "Data offset:" << dataOffset;
        qDebug() << "CRC-16:" << crc16;

        qDebug() << "Context block:";
        qDebug() << "Global timestamp:" << context.globalTimestamp;
        qDebug() << "Domain:" << context.domain;
        qDebug() << "Position:" << context.position;
        qDebug() << "Orientation:" << context.orientation;
        qDebug() << "Scale:" << context.scale;
        qDebug() << "Head Model:" << context.headModel;
        qDebug() << "Skeleton Model:" << context.skeletonModel;
        qDebug() << "Display Name:" << context.displayName;
        qDebug() << "Num Attachments:" << numAttachments;
        // NOTE(review): loops over numAttachments but indexes
        // context.attachments, which may be shorter if entries were skipped
        // above — potential out-of-range access.
        for (int i = 0; i < numAttachments; ++i) {
            qDebug() << "Model URL:" << context.attachments[i].modelURL;
            qDebug() << "Joint Name:" << context.attachments[i].jointName;
            qDebug() << "Translation:" << context.attachments[i].translation;
            qDebug() << "Rotation:" << context.attachments[i].rotation;
            qDebug() << "Scale:" << context.attachments[i].scale;
        }

        qDebug() << "Recording:";
        qDebug() << "Total frames:" << recording->getFrameNumber();
        qDebug() << "Audio array:" << recording->getAudio()->getByteArray().size();
    }

    qDebug() << "Read " << byteArray.size() << " bytes in " << timer.elapsed() << " ms.";
    return recording;
}
// Parses a legacy .rec byte stream into `recording`, fabricates a context
// block (the old format carried none), trims the audio to the recording
// length, and re-saves the result as an .hfr file next to the original.
// The first frame is stored fully; later frames are delta-encoded with a
// change mask, like the HFR format but with uncompressed component values.
RecordingPointer readRecordingFromRecFile(RecordingPointer recording, QString filename, QByteArray byteArray) {
    QElapsedTimer timer;
    timer.start();

    if (!recording) {
        recording.reset(new Recording());
    }

    QDataStream fileStream(byteArray);

    fileStream >> recording->_timestamps;
    RecordingFrame baseFrame;

    // Blendshape coefficients
    fileStream >> baseFrame._blendshapeCoefficients;

    // Joint Rotations
    int jointRotationSize;
    fileStream >> jointRotationSize;
    baseFrame._jointRotations.resize(jointRotationSize);
    for (int i = 0; i < jointRotationSize; ++i) {
        fileStream >> baseFrame._jointRotations[i].x >> baseFrame._jointRotations[i].y >> baseFrame._jointRotations[i].z >> baseFrame._jointRotations[i].w;
    }

    fileStream >> baseFrame._translation.x >> baseFrame._translation.y >> baseFrame._translation.z;
    fileStream >> baseFrame._rotation.x >> baseFrame._rotation.y >> baseFrame._rotation.z >> baseFrame._rotation.w;
    fileStream >> baseFrame._scale;
    fileStream >> baseFrame._headRotation.x >> baseFrame._headRotation.y >> baseFrame._headRotation.z >> baseFrame._headRotation.w;
    fileStream >> baseFrame._leanSideways;
    fileStream >> baseFrame._leanForward;

    // Fake context
    // The .rec format had no context block, so one is synthesized with
    // hard-coded values matching the original capture setup.
    RecordingContext& context = recording->getContext();
    context.globalTimestamp = usecTimestampNow();
    context.domain = NodeList::getInstance()->getDomainHandler().getHostname();
    context.position = glm::vec3(144.5f, 3.3f, 181.3f);
    context.orientation = glm::angleAxis(glm::radians(-92.5f), glm::vec3(0, 1, 0));;

    context.scale = baseFrame._scale;
    context.headModel = "http://public.highfidelity.io/models/heads/Emily_v4.fst";
    context.skeletonModel = "http://public.highfidelity.io/models/skeletons/EmilyCutMesh_A.fst";
    context.displayName = "Leslie";
    context.attachments.clear();
    AttachmentData data;
    data.modelURL = "http://public.highfidelity.io/models/attachments/fbx.fst";
    data.jointName = "RightHand" ;
    data.translation = glm::vec3(0.04f, 0.07f, 0.0f);
    data.rotation = glm::angleAxis(glm::radians(102.0f), glm::vec3(0, 1, 0));
    data.scale = 0.20f;
    context.attachments << data;

    context.orientationInv = glm::inverse(context.orientation);

    // The base frame's transform is normalized; positions in later frames are
    // rebased into the context's reference frame via orientationInv below.
    baseFrame._translation = glm::vec3();
    baseFrame._rotation = glm::quat();
    baseFrame._scale = 1.0f;

    recording->_frames << baseFrame;

    for (int i = 1; i < recording->_timestamps.size(); ++i) {
        QBitArray mask;
        QByteArray buffer;
        QDataStream stream(&buffer, QIODevice::ReadOnly);
        RecordingFrame frame;
        RecordingFrame& previousFrame = recording->_frames.last();

        fileStream >> mask;
        fileStream >> buffer;
        int maskIndex = 0;

        // Blendshape Coefficients
        // NOTE(review): the inner loop variables shadow the outer frame
        // index `i` — legal but error-prone; rename if this code is touched.
        frame._blendshapeCoefficients.resize(baseFrame._blendshapeCoefficients.size());
        for (int i = 0; i < baseFrame._blendshapeCoefficients.size(); ++i) {
            if (mask[maskIndex++]) {
                stream >> frame._blendshapeCoefficients[i];
            } else {
                frame._blendshapeCoefficients[i] = previousFrame._blendshapeCoefficients[i];
            }
        }

        // Joint Rotations
        frame._jointRotations.resize(baseFrame._jointRotations.size());
        for (int i = 0; i < baseFrame._jointRotations.size(); ++i) {
            if (mask[maskIndex++]) {
                stream >> frame._jointRotations[i].x >> frame._jointRotations[i].y >> frame._jointRotations[i].z >> frame._jointRotations[i].w;
            } else {
                frame._jointRotations[i] = previousFrame._jointRotations[i];
            }
        }

        if (mask[maskIndex++]) {
            stream >> frame._translation.x >> frame._translation.y >> frame._translation.z;
            // Rebase the recorded world-space translation into the context frame.
            frame._translation = context.orientationInv * frame._translation;
        } else {
            frame._translation = previousFrame._translation;
        }

        if (mask[maskIndex++]) {
            stream >> frame._rotation.x >> frame._rotation.y >> frame._rotation.z >> frame._rotation.w;
        } else {
            frame._rotation = previousFrame._rotation;
        }

        if (mask[maskIndex++]) {
            stream >> frame._scale;
        } else {
            frame._scale = previousFrame._scale;
        }

        if (mask[maskIndex++]) {
            stream >> frame._headRotation.x >> frame._headRotation.y >> frame._headRotation.z >> frame._headRotation.w;
        } else {
            frame._headRotation = previousFrame._headRotation;
        }

        if (mask[maskIndex++]) {
            stream >> frame._leanSideways;
        } else {
            frame._leanSideways = previousFrame._leanSideways;
        }

        if (mask[maskIndex++]) {
            stream >> frame._leanForward;
        } else {
            frame._leanForward = previousFrame._leanForward;
        }

        recording->_frames << frame;
    }

    QByteArray audioArray;
    fileStream >> audioArray;

    // Cut down audio if necessary
    int SAMPLE_RATE = 48000; // 48 kHz
    int SAMPLE_SIZE = 2; // 16 bits
    int MSEC_PER_SEC = 1000;
    int audioLength = recording->getLength() * SAMPLE_SIZE * (SAMPLE_RATE / MSEC_PER_SEC);
    // NOTE(review): if audioArray is shorter than audioLength the chop
    // argument is negative — confirm QByteArray::chop() treats that as a
    // no-op on all supported Qt versions.
    audioArray.chop(audioArray.size() - audioLength);
    recording->addAudioPacket(audioArray);

    qDebug() << "Read " << byteArray.size() << " bytes in " << timer.elapsed() << " ms.";

    // Set new filename: derive an .hfr path from the original .rec name.
    if (filename.startsWith("http") || filename.startsWith("https") || filename.startsWith("ftp")) {
        filename = QUrl(filename).fileName();
    }
    if (filename.endsWith(".rec") || filename.endsWith(".REC")) {
        filename.chop(qstrlen(".rec"));
    }
    filename.append(".hfr");
    filename = QFileInfo(filename).absoluteFilePath();

    // Set recording to new format
    writeRecordingToFile(recording, filename);
    QMessageBox::warning(NULL,
                         QString("New recording location"),
                         QString("The new recording was saved at:\n" + filename),
                         QMessageBox::Ok);
    qDebug() << "Recording has been successfully converted at" << filename;
    return recording;
}

View file

@ -0,0 +1,127 @@
//
// Recording.h
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_Recording_h
#define hifi_Recording_h
#include <QString>
#include <QVector>
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
template<class C>
class QSharedPointer;
class AttachmentData;
class Recording;
class RecordingFrame;
class Sound;
typedef QSharedPointer<Recording> RecordingPointer;
/// Stores recordings static data
class RecordingContext {
public:
quint64 globalTimestamp;
QString domain;
glm::vec3 position;
glm::quat orientation;
float scale;
QString headModel;
QString skeletonModel;
QString displayName;
QVector<AttachmentData> attachments;
// This avoids recomputation every frame while recording.
glm::quat orientationInv;
};
/// Stores a recording
/// Holds the static context plus the per-frame avatar data (parallel
/// timestamp/frame vectors) and the captured audio. Mutation is restricted
/// to Recorder, Player, and the file I/O free functions via friendship.
class Recording {
public:
    Recording();
    ~Recording();

    bool isEmpty() const { return _timestamps.isEmpty(); }
    int getLength() const; // in ms

    RecordingContext& getContext() { return _context; }
    int getFrameNumber() const { return _frames.size(); }
    qint32 getFrameTimestamp(int i) const;
    const RecordingFrame& getFrame(int i) const;
    Sound* getAudio() const { return _audio; } // may be NULL until audio is added

protected:
    void addFrame(int timestamp, RecordingFrame& frame);
    void addAudioPacket(QByteArray byteArray);
    void clear();

private:
    RecordingContext _context; // static, once-per-recording data
    QVector<qint32> _timestamps; // one entry per frame, in ms from recording start
    QVector<RecordingFrame> _frames; // kept in lockstep with _timestamps
    Sound* _audio; // owned; deleted in the destructor

    friend class Recorder;
    friend class Player;
    friend void writeRecordingToFile(RecordingPointer recording, QString file);
    friend RecordingPointer readRecordingFromFile(RecordingPointer recording, QString file);
    friend RecordingPointer readRecordingFromRecFile(RecordingPointer recording, QString filename, QByteArray byteArray);
};
/// Stores the different values associated to one recording frame
/// A snapshot of the avatar state at a single timestamp. Setters are
/// protected; only Recorder and the file I/O functions populate frames.
class RecordingFrame {
public:
    QVector<float> getBlendshapeCoefficients() const { return _blendshapeCoefficients; }
    QVector<glm::quat> getJointRotations() const { return _jointRotations; }
    glm::vec3 getTranslation() const { return _translation; }
    glm::quat getRotation() const { return _rotation; }
    float getScale() const { return _scale; }
    glm::quat getHeadRotation() const { return _headRotation; }
    float getLeanSideways() const { return _leanSideways; }
    float getLeanForward() const { return _leanForward; }
    glm::vec3 getLookAtPosition() const { return _lookAtPosition; }

protected:
    // Defined out of line, unlike the other setters.
    void setBlendshapeCoefficients(QVector<float> blendshapeCoefficients);
    void setJointRotations(QVector<glm::quat> jointRotations) { _jointRotations = jointRotations; }
    void setTranslation(glm::vec3 translation) { _translation = translation; }
    void setRotation(glm::quat rotation) { _rotation = rotation; }
    void setScale(float scale) { _scale = scale; }
    void setHeadRotation(glm::quat headRotation) { _headRotation = headRotation; }
    void setLeanSideways(float leanSideways) { _leanSideways = leanSideways; }
    void setLeanForward(float leanForward) { _leanForward = leanForward; }
    void setLookAtPosition(glm::vec3 lookAtPosition) { _lookAtPosition = lookAtPosition; }

private:
    QVector<float> _blendshapeCoefficients;
    QVector<glm::quat> _jointRotations;
    glm::vec3 _translation;
    glm::quat _rotation;
    float _scale;
    glm::quat _headRotation;
    float _leanSideways;
    float _leanForward;
    glm::vec3 _lookAtPosition;

    friend class Recorder;
    friend void writeRecordingToFile(RecordingPointer recording, QString file);
    friend RecordingPointer readRecordingFromFile(RecordingPointer recording, QString file);
    friend RecordingPointer readRecordingFromRecFile(RecordingPointer recording, QString filename, QByteArray byteArray);
};
void writeRecordingToFile(RecordingPointer recording, QString filename);
RecordingPointer readRecordingFromFile(RecordingPointer recording, QString filename);
RecordingPointer readRecordingFromRecFile(RecordingPointer recording, QString filename, QByteArray byteArray);
#endif // hifi_Recording_h

View file

@ -83,7 +83,7 @@ int Referential::pack(unsigned char* destinationBuffer) const {
int Referential::unpack(const unsigned char* sourceBuffer) {
const unsigned char* startPosition = sourceBuffer;
_type = (Type)*sourceBuffer++;
if (_type < 0 || _type >= NUM_TYPE) {
if (_type < 0 || _type >= NUM_TYPES) {
_type = UNKNOWN;
}
memcpy(&_version, sourceBuffer, sizeof(_version));

View file

@ -26,7 +26,7 @@ public:
JOINT,
AVATAR,
NUM_TYPE
NUM_TYPES
};
Referential(const unsigned char*& sourceBuffer, AvatarData* avatar);