Merge pull request #6462 from jherico/rec5

Blendshapes and attachments in recordings, plus other minor fixes.
This commit is contained in:
Brad Hefta-Gaub 2015-11-24 10:01:17 -08:00
commit d32e500464
14 changed files with 297 additions and 1669 deletions

View file

@@ -40,7 +40,6 @@
#include "Menu.h"
#include "ModelReferential.h"
#include "Physics.h"
#include "Recorder.h"
#include "Util.h"
#include "world.h"
#include "InterfaceLogging.h"

View file

@@ -49,7 +49,6 @@
#include "ModelReferential.h"
#include "MyAvatar.h"
#include "Physics.h"
#include "Recorder.h"
#include "Util.h"
#include "InterfaceLogging.h"
#include "DebugDraw.h"
@@ -181,10 +180,22 @@ MyAvatar::MyAvatar(RigPointer rig) :
setPosition(dummyAvatar.getPosition());
setOrientation(dummyAvatar.getOrientation());
// FIXME attachments
// FIXME joints
// FIXME head lean
// FIXME head orientation
if (!dummyAvatar.getAttachmentData().isEmpty()) {
setAttachmentData(dummyAvatar.getAttachmentData());
}
auto headData = dummyAvatar.getHeadData();
if (headData && _headData) {
// blendshapes
if (!headData->getBlendshapeCoefficients().isEmpty()) {
_headData->setBlendshapeCoefficients(headData->getBlendshapeCoefficients());
}
// head lean
_headData->setLeanForward(headData->getLeanForward());
_headData->setLeanSideways(headData->getLeanSideways());
// head orientation
_headData->setLookAtPosition(headData->getLookAtPosition());
}
});
}
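For context: the hunk above consumes frames produced by AvatarData::toFrame (see AvatarData.cpp below). A minimal sketch of the playback path, assuming one QByteArray per recorded frame; the handler name and wiring are assumptions, not code from this PR:

// Hypothetical frame handler: decode into a scratch avatar, then copy over
// only the state the recording captured, as the constructor above does.
void onPlaybackFrame(MyAvatar* myAvatar, const QByteArray& frameData) {
    AvatarData dummyAvatar;
    AvatarData::fromFrame(frameData, dummyAvatar);
    myAvatar->setPosition(dummyAvatar.getPosition());
    myAvatar->setOrientation(dummyAvatar.getOrientation());
    if (!dummyAvatar.getAttachmentData().isEmpty()) {
        myAvatar->setAttachmentData(dummyAvatar.getAttachmentData());
    }
    // blendshapes, lean, and look-at are copied exactly as in the hunk above
}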

View file

@@ -1297,8 +1297,51 @@ void AvatarData::updateJointMappings() {
}
}
AttachmentData::AttachmentData() :
scale(1.0f) {
static const QString JSON_ATTACHMENT_URL = QStringLiteral("modelUrl");
static const QString JSON_ATTACHMENT_JOINT_NAME = QStringLiteral("jointName");
static const QString JSON_ATTACHMENT_TRANSFORM = QStringLiteral("transform");
QJsonObject AttachmentData::toJson() const {
QJsonObject result;
if (modelURL.isValid() && !modelURL.isEmpty()) {
result[JSON_ATTACHMENT_URL] = modelURL.toString();
}
if (!jointName.isEmpty()) {
result[JSON_ATTACHMENT_JOINT_NAME] = jointName;
}
// FIXME a Transform built with the constructor that takes rot/scale/translation
// doesn't report the correct value from isIdentity()
Transform transform;
transform.setRotation(rotation);
transform.setScale(scale);
transform.setTranslation(translation);
if (!transform.isIdentity()) {
result[JSON_ATTACHMENT_TRANSFORM] = Transform::toJson(transform);
}
return result;
}
void AttachmentData::fromJson(const QJsonObject& json) {
if (json.contains(JSON_ATTACHMENT_URL)) {
const QString modelURLTemp = json[JSON_ATTACHMENT_URL].toString();
if (modelURLTemp != modelURL.toString()) {
modelURL = modelURLTemp;
}
}
if (json.contains(JSON_ATTACHMENT_JOINT_NAME)) {
const QString jointNameTemp = json[JSON_ATTACHMENT_JOINT_NAME].toString();
if (jointNameTemp != jointName) {
jointName = jointNameTemp;
}
}
if (json.contains(JSON_ATTACHMENT_TRANSFORM)) {
Transform transform = Transform::fromJson(json[JSON_ATTACHMENT_TRANSFORM]);
translation = transform.getTranslation();
rotation = transform.getRotation();
scale = transform.getScale().x;
}
}
bool AttachmentData::operator==(const AttachmentData& other) const {
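AttachmentData is now serialized sparsely: each field is written only when it differs from its default, so an attachment at the identity transform yields a minimal JSON object. A round-trip sketch using the toJson/fromJson pair above (the URL is a placeholder):

AttachmentData original;
original.modelURL = QUrl("https://example.com/hat.fbx"); // placeholder
original.jointName = "Head";
original.scale = 2.0f; // non-identity transform, so it gets serialized

AttachmentData restored;
restored.fromJson(original.toJson());
// restored now matches original field for field (cf. operator== above)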
@@ -1399,15 +1442,11 @@ static const QString JSON_AVATAR_BASIS = QStringLiteral("basisTransform");
static const QString JSON_AVATAR_RELATIVE = QStringLiteral("relativeTransform");
static const QString JSON_AVATAR_JOINT_ARRAY = QStringLiteral("jointArray");
static const QString JSON_AVATAR_HEAD = QStringLiteral("head");
static const QString JSON_AVATAR_HEAD_ROTATION = QStringLiteral("rotation");
static const QString JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS = QStringLiteral("blendShapes");
static const QString JSON_AVATAR_HEAD_LEAN_FORWARD = QStringLiteral("leanForward");
static const QString JSON_AVATAR_HEAD_LEAN_SIDEWAYS = QStringLiteral("leanSideways");
static const QString JSON_AVATAR_HEAD_LOOKAT = QStringLiteral("lookAt");
static const QString JSON_AVATAR_HEAD_MODEL = QStringLiteral("headModel");
static const QString JSON_AVATAR_BODY_MODEL = QStringLiteral("bodyModel");
static const QString JSON_AVATAR_DISPLAY_NAME = QStringLiteral("displayName");
static const QString JSON_AVATAR_ATTACHEMENTS = QStringLiteral("attachments");
static const QString JSON_AVATAR_SCALE = QStringLiteral("scale");
QJsonValue toJsonValue(const JointData& joint) {
QJsonArray result;
@@ -1428,93 +1467,84 @@ JointData jointDataFromJsonValue(const QJsonValue& json) {
return result;
}
// Every frame will store both a basis for the recording and a relative transform
// This allows the application to decide whether playback should be relative to an avatar's
// transform at the start of playback, or relative to the transform of the recorded
// avatar
QByteArray AvatarData::toFrame(const AvatarData& avatar) {
QJsonObject AvatarData::toJson() const {
QJsonObject root;
if (!avatar.getFaceModelURL().isEmpty()) {
root[JSON_AVATAR_HEAD_MODEL] = avatar.getFaceModelURL().toString();
if (!getFaceModelURL().isEmpty()) {
root[JSON_AVATAR_HEAD_MODEL] = getFaceModelURL().toString();
}
if (!avatar.getSkeletonModelURL().isEmpty()) {
root[JSON_AVATAR_BODY_MODEL] = avatar.getSkeletonModelURL().toString();
if (!getSkeletonModelURL().isEmpty()) {
root[JSON_AVATAR_BODY_MODEL] = getSkeletonModelURL().toString();
}
if (!avatar.getDisplayName().isEmpty()) {
root[JSON_AVATAR_DISPLAY_NAME] = avatar.getDisplayName();
if (!getDisplayName().isEmpty()) {
root[JSON_AVATAR_DISPLAY_NAME] = getDisplayName();
}
if (!avatar.getAttachmentData().isEmpty()) {
// FIXME serialize attachment data
if (!getAttachmentData().isEmpty()) {
QJsonArray attachmentsJson;
for (auto attachment : getAttachmentData()) {
attachmentsJson.push_back(attachment.toJson());
}
root[JSON_AVATAR_ATTACHEMENTS] = attachmentsJson;
}
auto recordingBasis = avatar.getRecordingBasis();
auto recordingBasis = getRecordingBasis();
if (recordingBasis) {
root[JSON_AVATAR_BASIS] = Transform::toJson(*recordingBasis);
// Find the relative transform
auto relativeTransform = recordingBasis->relativeTransform(avatar.getTransform());
root[JSON_AVATAR_RELATIVE] = Transform::toJson(relativeTransform);
auto relativeTransform = recordingBasis->relativeTransform(getTransform());
if (!relativeTransform.isIdentity()) {
root[JSON_AVATAR_RELATIVE] = Transform::toJson(relativeTransform);
}
} else {
root[JSON_AVATAR_RELATIVE] = Transform::toJson(avatar.getTransform());
root[JSON_AVATAR_RELATIVE] = Transform::toJson(getTransform());
}
auto scale = getTargetScale();
if (scale != 1.0f) {
root[JSON_AVATAR_SCALE] = scale;
}
// Skeleton pose
QJsonArray jointArray;
for (const auto& joint : avatar.getRawJointData()) {
for (const auto& joint : getRawJointData()) {
jointArray.push_back(toJsonValue(joint));
}
root[JSON_AVATAR_JOINT_ARRAY] = jointArray;
const HeadData* head = avatar.getHeadData();
const HeadData* head = getHeadData();
if (head) {
QJsonObject headJson;
QJsonArray blendshapeCoefficients;
for (const auto& blendshapeCoefficient : head->getBlendshapeCoefficients()) {
blendshapeCoefficients.push_back(blendshapeCoefficient);
auto headJson = head->toJson();
if (!headJson.isEmpty()) {
root[JSON_AVATAR_HEAD] = headJson;
}
headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS] = blendshapeCoefficients;
headJson[JSON_AVATAR_HEAD_ROTATION] = toJsonValue(head->getRawOrientation());
headJson[JSON_AVATAR_HEAD_LEAN_FORWARD] = QJsonValue(head->getLeanForward());
headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS] = QJsonValue(head->getLeanSideways());
vec3 relativeLookAt = glm::inverse(avatar.getOrientation()) *
(head->getLookAtPosition() - avatar.getPosition());
headJson[JSON_AVATAR_HEAD_LOOKAT] = toJsonValue(relativeLookAt);
root[JSON_AVATAR_HEAD] = headJson;
}
return QJsonDocument(root).toBinaryData();
return root;
}
void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
#ifdef WANT_JSON_DEBUG
qDebug() << doc.toJson(QJsonDocument::JsonFormat::Indented);
#endif
QJsonObject root = doc.object();
if (root.contains(JSON_AVATAR_HEAD_MODEL)) {
auto faceModelURL = root[JSON_AVATAR_HEAD_MODEL].toString();
if (faceModelURL != result.getFaceModelURL().toString()) {
void AvatarData::fromJson(const QJsonObject& json) {
if (json.contains(JSON_AVATAR_HEAD_MODEL)) {
auto faceModelURL = json[JSON_AVATAR_HEAD_MODEL].toString();
if (faceModelURL != getFaceModelURL().toString()) {
QUrl faceModel(faceModelURL);
if (faceModel.isValid()) {
result.setFaceModelURL(faceModel);
setFaceModelURL(faceModel);
}
}
}
if (root.contains(JSON_AVATAR_BODY_MODEL)) {
auto bodyModelURL = root[JSON_AVATAR_BODY_MODEL].toString();
if (bodyModelURL != result.getSkeletonModelURL().toString()) {
result.setSkeletonModelURL(bodyModelURL);
if (json.contains(JSON_AVATAR_BODY_MODEL)) {
auto bodyModelURL = json[JSON_AVATAR_BODY_MODEL].toString();
if (bodyModelURL != getSkeletonModelURL().toString()) {
setSkeletonModelURL(bodyModelURL);
}
}
if (root.contains(JSON_AVATAR_DISPLAY_NAME)) {
auto newDisplayName = root[JSON_AVATAR_DISPLAY_NAME].toString();
if (newDisplayName != result.getDisplayName()) {
result.setDisplayName(newDisplayName);
if (json.contains(JSON_AVATAR_DISPLAY_NAME)) {
auto newDisplayName = json[JSON_AVATAR_DISPLAY_NAME].toString();
if (newDisplayName != getDisplayName()) {
setDisplayName(newDisplayName);
}
}
}
if (root.contains(JSON_AVATAR_RELATIVE)) {
if (json.contains(JSON_AVATAR_RELATIVE)) {
// During playback you can either have the recording basis set to the avatar's current state,
// meaning that all playback is relative to this avatar's starting position, or
// the basis can be loaded from the recording, meaning the playback is relative to the
@@ -1522,70 +1552,83 @@ void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
// The former is more useful for playing back recordings on your own avatar, while
// the latter is more useful for playing back other avatars within your scene.
auto currentBasis = result.getRecordingBasis();
auto currentBasis = getRecordingBasis();
if (!currentBasis) {
currentBasis = std::make_shared<Transform>(Transform::fromJson(root[JSON_AVATAR_BASIS]));
currentBasis = std::make_shared<Transform>(Transform::fromJson(json[JSON_AVATAR_BASIS]));
}
auto relativeTransform = Transform::fromJson(root[JSON_AVATAR_RELATIVE]);
auto relativeTransform = Transform::fromJson(json[JSON_AVATAR_RELATIVE]);
auto worldTransform = currentBasis->worldTransform(relativeTransform);
result.setPosition(worldTransform.getTranslation());
result.setOrientation(worldTransform.getRotation());
// TODO: find a way to record/playback the Scale of the avatar
//result.setTargetScale(worldTransform.getScale().x);
setPosition(worldTransform.getTranslation());
setOrientation(worldTransform.getRotation());
}
if (json.contains(JSON_AVATAR_SCALE)) {
setTargetScale((float)json[JSON_AVATAR_SCALE].toDouble());
}
if (root.contains(JSON_AVATAR_ATTACHEMENTS)) {
// FIXME de-serialize attachment data
if (json.contains(JSON_AVATAR_ATTACHEMENTS) && json[JSON_AVATAR_ATTACHEMENTS].isArray()) {
QJsonArray attachmentsJson = json[JSON_AVATAR_ATTACHEMENTS].toArray();
QVector<AttachmentData> attachments;
for (auto attachmentJson : attachmentsJson) {
AttachmentData attachment;
attachment.fromJson(attachmentJson.toObject());
attachments.push_back(attachment);
}
setAttachmentData(attachments);
}
// Joint rotations are relative to the avatar, so they require no basis correction
if (root.contains(JSON_AVATAR_JOINT_ARRAY)) {
if (json.contains(JSON_AVATAR_JOINT_ARRAY)) {
QVector<JointData> jointArray;
QJsonArray jointArrayJson = root[JSON_AVATAR_JOINT_ARRAY].toArray();
QJsonArray jointArrayJson = json[JSON_AVATAR_JOINT_ARRAY].toArray();
jointArray.reserve(jointArrayJson.size());
int i = 0;
for (const auto& jointJson : jointArrayJson) {
auto joint = jointDataFromJsonValue(jointJson);
jointArray.push_back(joint);
result.setJointData(i, joint.rotation, joint.translation);
result._jointData[i].rotationSet = true; // Have to do that to broadcast the avatar's new pose
setJointData(i, joint.rotation, joint.translation);
_jointData[i].rotationSet = true; // Have to do that to broadcast the avatar's new pose
i++;
}
result.setRawJointData(jointArray);
setRawJointData(jointArray);
}
#if 0
// Most head data is relative to the avatar, and needs no basis correction,
// but the lookat vector does need correction
HeadData* head = result._headData;
if (head && root.contains(JSON_AVATAR_HEAD)) {
QJsonObject headJson = root[JSON_AVATAR_HEAD].toObject();
if (headJson.contains(JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS)) {
QVector<float> blendshapeCoefficients;
QJsonArray blendshapeCoefficientsJson = headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS].toArray();
for (const auto& blendshapeCoefficient : blendshapeCoefficientsJson) {
blendshapeCoefficients.push_back((float)blendshapeCoefficient.toDouble());
}
head->setBlendshapeCoefficients(blendshapeCoefficients);
}
if (headJson.contains(JSON_AVATAR_HEAD_ROTATION)) {
head->setOrientation(quatFromJsonValue(headJson[JSON_AVATAR_HEAD_ROTATION]));
}
if (headJson.contains(JSON_AVATAR_HEAD_LEAN_FORWARD)) {
head->setLeanForward((float)headJson[JSON_AVATAR_HEAD_LEAN_FORWARD].toDouble());
}
if (headJson.contains(JSON_AVATAR_HEAD_LEAN_SIDEWAYS)) {
head->setLeanSideways((float)headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS].toDouble());
}
if (headJson.contains(JSON_AVATAR_HEAD_LOOKAT)) {
auto relativeLookAt = vec3FromJsonValue(headJson[JSON_AVATAR_HEAD_LOOKAT]);
if (glm::length2(relativeLookAt) > 0.01) {
head->setLookAtPosition((result.getOrientation() * relativeLookAt) + result.getPosition());
}
if (json.contains(JSON_AVATAR_HEAD)) {
if (!_headData) {
_headData = new HeadData(this);
}
_headData->fromJson(json[JSON_AVATAR_HEAD].toObject());
}
}
// Every frame will store both a basis for the recording and a relative transform
// This allows the application to decide whether playback should be relative to an avatar's
// transform at the start of playback, or relative to the transform of the recorded
// avatar
QByteArray AvatarData::toFrame(const AvatarData& avatar) {
QJsonObject root = avatar.toJson();
#ifdef WANT_JSON_DEBUG
{
QJsonObject obj = root;
obj.remove(JSON_AVATAR_JOINT_ARRAY);
qDebug().noquote() << QJsonDocument(obj).toJson(QJsonDocument::JsonFormat::Indented);
}
#endif
return QJsonDocument(root).toBinaryData();
}
void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
#ifdef WANT_JSON_DEBUG
{
QJsonObject obj = doc.object();
obj.remove(JSON_AVATAR_JOINT_ARRAY);
qDebug().noquote() << QJsonDocument(obj).toJson(QJsonDocument::JsonFormat::Indented);
}
#endif
result.fromJson(doc.object());
}
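The basis arithmetic underpinning toFrame/fromFrame: with recording basis B and avatar world transform W, each frame stores the relative transform R = inverse(B) * W, and playback reconstructs W' = B' * R against whichever basis B' is active when the frame is applied. A sketch, assuming the Transform helpers used above:

// Record: express the avatar's world transform relative to the basis.
Transform basis = *avatar.getRecordingBasis();
Transform relative = basis.relativeTransform(avatar.getTransform()); // R = inverse(B) * W

// Playback: rebase the stored transform onto the active basis.
Transform playbackBasis = basis; // or the live avatar's transform at playback start
Transform world = playbackBasis.worldTransform(relative);            // W' = B' * R
avatar.setPosition(world.getTranslation());
avatar.setOrientation(world.getRotation());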

View file

@@ -342,6 +342,8 @@ public:
void clearRecordingBasis();
TransformPointer getRecordingBasis() const;
void setRecordingBasis(TransformPointer recordingBasis = TransformPointer());
QJsonObject toJson() const;
void fromJson(const QJsonObject& json);
public slots:
void sendAvatarDataPacket();
@@ -449,13 +451,14 @@ public:
QString jointName;
glm::vec3 translation;
glm::quat rotation;
float scale;
AttachmentData();
float scale { 1.0f };
bool isValid() const { return modelURL.isValid(); }
bool operator==(const AttachmentData& other) const;
QJsonObject toJson() const;
void fromJson(const QJsonObject& json);
};
QDataStream& operator<<(QDataStream& out, const AttachmentData& attachment);

View file

@@ -9,13 +9,18 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <glm/gtx/quaternion.hpp>
#include "HeadData.h"
#include <mutex>
#include <QtCore/QJsonObject>
#include <QtCore/QJsonArray>
#include <FaceshiftConstants.h>
#include <GLMHelpers.h>
#include <shared/JSONHelpers.h>
#include "AvatarData.h"
#include "HeadData.h"
/// The names of the blendshapes expected by Faceshift, terminated with an empty string.
extern const char* FACESHIFT_BLENDSHAPES[];
@@ -58,6 +63,7 @@ glm::quat HeadData::getOrientation() const {
return _owningAvatar->getOrientation() * getRawOrientation();
}
void HeadData::setOrientation(const glm::quat& orientation) {
// rotate body about vertical axis
glm::quat bodyOrientation = _owningAvatar->getOrientation();
@@ -72,19 +78,24 @@ void HeadData::setOrientation(const glm::quat& orientation) {
_baseRoll = eulers.z;
}
void HeadData::setBlendshape(QString name, float val) {
static bool hasInitializedLookupMap = false;
//Lazily construct a lookup map from the blendshapes
static const QMap<QString, int>& getBlendshapesLookupMap() {
static std::once_flag once;
static QMap<QString, int> blendshapeLookupMap;
//Lazily construct a lookup map from the blendshapes
if (!hasInitializedLookupMap) {
std::call_once(once, [&] {
for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
blendshapeLookupMap[FACESHIFT_BLENDSHAPES[i]] = i;
blendshapeLookupMap[FACESHIFT_BLENDSHAPES[i]] = i;
}
hasInitializedLookupMap = true;
}
});
return blendshapeLookupMap;
}
void HeadData::setBlendshape(QString name, float val) {
const auto& blendshapeLookupMap = getBlendshapesLookupMap();
//Check to see if the named blendshape exists, and then set its value if it does
QMap<QString, int>::iterator it = blendshapeLookupMap.find(name);
auto it = blendshapeLookupMap.find(name);
if (it != blendshapeLookupMap.end()) {
if (_blendshapeCoefficients.size() <= it.value()) {
_blendshapeCoefficients.resize(it.value() + 1);
@@ -92,3 +103,85 @@ void HeadData::setBlendshape(QString name, float val) {
_blendshapeCoefficients[it.value()] = val;
}
}
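The lookup-map change above swaps a plain static bool for std::call_once, so the lazy initialization stays correct if setBlendshape is first hit from two threads at once. The idiom in isolation, as a standalone sketch:

#include <map>
#include <mutex>
#include <string>

static const std::map<std::string, int>& getLookupMap() {
    static std::once_flag once;
    static std::map<std::string, int> lookup;
    std::call_once(once, [&] {
        // Runs exactly once, even under concurrent first calls.
        lookup["example"] = 0;
    });
    return lookup;
}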
static const QString JSON_AVATAR_HEAD_ROTATION = QStringLiteral("rotation");
static const QString JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS = QStringLiteral("blendShapes");
static const QString JSON_AVATAR_HEAD_LEAN_FORWARD = QStringLiteral("leanForward");
static const QString JSON_AVATAR_HEAD_LEAN_SIDEWAYS = QStringLiteral("leanSideways");
static const QString JSON_AVATAR_HEAD_LOOKAT = QStringLiteral("lookAt");
QJsonObject HeadData::toJson() const {
QJsonObject headJson;
const auto& blendshapeLookupMap = getBlendshapesLookupMap();
QJsonObject blendshapesJson;
for (auto name : blendshapeLookupMap.keys()) {
auto index = blendshapeLookupMap[name];
if (index >= _blendshapeCoefficients.size()) {
continue;
}
auto value = _blendshapeCoefficients[index];
if (value == 0.0f) {
continue;
}
blendshapesJson[name] = value;
}
if (!blendshapesJson.isEmpty()) {
headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS] = blendshapesJson;
}
if (getRawOrientation() != quat()) {
headJson[JSON_AVATAR_HEAD_ROTATION] = toJsonValue(getRawOrientation());
}
if (getLeanForward() != 0.0f) {
headJson[JSON_AVATAR_HEAD_LEAN_FORWARD] = getLeanForward();
}
if (getLeanSideways() != 0.0f) {
headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS] = getLeanSideways();
}
auto lookat = getLookAtPosition();
if (lookat != vec3()) {
vec3 relativeLookAt = glm::inverse(_owningAvatar->getOrientation()) *
(getLookAtPosition() - _owningAvatar->getPosition());
headJson[JSON_AVATAR_HEAD_LOOKAT] = toJsonValue(relativeLookAt);
}
return headJson;
}
void HeadData::fromJson(const QJsonObject& json) {
if (json.contains(JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS)) {
auto jsonValue = json[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS];
if (jsonValue.isArray()) {
QVector<float> blendshapeCoefficients;
QJsonArray blendshapeCoefficientsJson = jsonValue.toArray();
for (const auto& blendshapeCoefficient : blendshapeCoefficientsJson) {
blendshapeCoefficients.push_back((float)blendshapeCoefficient.toDouble());
}
setBlendshapeCoefficients(blendshapeCoefficients);
} else if (jsonValue.isObject()) {
QJsonObject blendshapeCoefficientsJson = jsonValue.toObject();
for (const QString& name : blendshapeCoefficientsJson.keys()) {
float value = (float)blendshapeCoefficientsJson[name].toDouble();
setBlendshape(name, value);
}
} else {
qWarning() << "Unable to deserialize head json: " << jsonValue;
}
}
if (json.contains(JSON_AVATAR_HEAD_ROTATION)) {
setOrientation(quatFromJsonValue(json[JSON_AVATAR_HEAD_ROTATION]));
}
if (json.contains(JSON_AVATAR_HEAD_LEAN_FORWARD)) {
setLeanForward((float)json[JSON_AVATAR_HEAD_LEAN_FORWARD].toDouble());
}
if (json.contains(JSON_AVATAR_HEAD_LEAN_SIDEWAYS)) {
setLeanSideways((float)json[JSON_AVATAR_HEAD_LEAN_SIDEWAYS].toDouble());
}
if (json.contains(JSON_AVATAR_HEAD_LOOKAT)) {
auto relativeLookAt = vec3FromJsonValue(json[JSON_AVATAR_HEAD_LOOKAT]);
if (glm::length2(relativeLookAt) > 0.01f) {
setLookAtPosition((_owningAvatar->getOrientation() * relativeLookAt) + _owningAvatar->getPosition());
}
}
}
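Note that fromJson accepts two encodings for "blendShapes": the legacy dense array of coefficients, and the sparse object keyed by Faceshift blendshape name that toJson now emits (zero coefficients are skipped). A sketch of feeding in both forms, assuming a valid HeadData* head:

// Sparse form (what toJson writes): only non-zero coefficients, keyed by name.
QJsonObject sparse;
sparse["blendShapes"] = QJsonObject{ { "EyeBlink_L", 0.8 }, { "JawOpen", 0.25 } };
head->fromJson(sparse);

// Legacy dense form: a plain array of coefficients in blendshape order.
QJsonObject dense;
dense["blendShapes"] = QJsonArray{ 0.0, 0.8, 0.0, 0.25 };
head->fromJson(dense);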

View file

@@ -28,6 +28,7 @@ const float MIN_HEAD_ROLL = -50.0f;
const float MAX_HEAD_ROLL = 50.0f;
class AvatarData;
class QJsonObject;
class HeadData {
public:
@@ -83,6 +84,9 @@ public:
friend class AvatarData;
QJsonObject toJson() const;
void fromJson(const QJsonObject& json);
protected:
// degrees
float _baseYaw;

View file

@@ -1,443 +0,0 @@
//
// Player.cpp
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include <AudioConstants.h>
#include <GLMHelpers.h>
#include <NodeList.h>
#include <StreamUtils.h>
#include "AvatarData.h"
#include "AvatarLogging.h"
#include "Player.h"
static const int INVALID_FRAME = -1;
Player::Player(AvatarData* avatar) :
_avatar(avatar),
_recording(new Recording()),
_currentFrame(INVALID_FRAME),
_frameInterpolationFactor(0.0f),
_pausedFrame(INVALID_FRAME),
_timerOffset(0),
_audioOffset(0),
_audioThread(NULL),
_playFromCurrentPosition(true),
_loop(false),
_useAttachments(true),
_useDisplayName(true),
_useHeadURL(true),
_useSkeletonURL(true)
{
_timer.invalidate();
}
bool Player::isPlaying() const {
return _timer.isValid();
}
bool Player::isPaused() const {
return (_pausedFrame != INVALID_FRAME);
}
qint64 Player::elapsed() const {
if (isPlaying()) {
return _timerOffset + _timer.elapsed();
} else if (isPaused()) {
return _timerOffset;
} else {
return 0;
}
}
void Player::startPlaying() {
if (!_recording || _recording->getFrameNumber() <= 1) {
return;
}
if (!isPaused()) {
_currentContext.globalTimestamp = usecTimestampNow();
_currentContext.domain = DependencyManager::get<NodeList>()->getDomainHandler().getHostname();
_currentContext.position = _avatar->getPosition();
_currentContext.orientation = _avatar->getOrientation();
_currentContext.scale = _avatar->getTargetScale();
_currentContext.headModel = _avatar->getFaceModelURL().toString();
_currentContext.skeletonModel = _avatar->getSkeletonModelURL().toString();
_currentContext.displayName = _avatar->getDisplayName();
_currentContext.attachments = _avatar->getAttachmentData();
_currentContext.orientationInv = glm::inverse(_currentContext.orientation);
RecordingContext& context = _recording->getContext();
if (_useAttachments) {
_avatar->setAttachmentData(context.attachments);
}
if (_useDisplayName) {
_avatar->setDisplayName(context.displayName);
}
if (_useHeadURL) {
_avatar->setFaceModelURL(context.headModel);
}
if (_useSkeletonURL) {
_avatar->setSkeletonModelURL(context.skeletonModel);
}
bool wantDebug = false;
if (wantDebug) {
qCDebug(avatars) << "Player::startPlaying(): Recording Context";
qCDebug(avatars) << "Domain:" << _currentContext.domain;
qCDebug(avatars) << "Position:" << _currentContext.position;
qCDebug(avatars) << "Orientation:" << _currentContext.orientation;
qCDebug(avatars) << "Scale:" << _currentContext.scale;
qCDebug(avatars) << "Head URL:" << _currentContext.headModel;
qCDebug(avatars) << "Skeleton URL:" << _currentContext.skeletonModel;
qCDebug(avatars) << "Display Name:" << _currentContext.displayName;
qCDebug(avatars) << "Num Attachments:" << _currentContext.attachments.size();
for (int i = 0; i < _currentContext.attachments.size(); ++i) {
qCDebug(avatars) << "Model URL:" << _currentContext.attachments[i].modelURL;
qCDebug(avatars) << "Joint Name:" << _currentContext.attachments[i].jointName;
qCDebug(avatars) << "Translation:" << _currentContext.attachments[i].translation;
qCDebug(avatars) << "Rotation:" << _currentContext.attachments[i].rotation;
qCDebug(avatars) << "Scale:" << _currentContext.attachments[i].scale;
}
}
// Fake faceshift connection
_avatar->setForceFaceTrackerConnected(true);
qCDebug(avatars) << "Recorder::startPlaying()";
setupAudioThread();
_currentFrame = 0;
_timerOffset = 0;
_timer.start();
} else {
qCDebug(avatars) << "Recorder::startPlaying(): Unpause";
setupAudioThread();
_timer.start();
setCurrentFrame(_pausedFrame);
_pausedFrame = INVALID_FRAME;
}
}
void Player::stopPlaying() {
if (!isPlaying()) {
return;
}
_pausedFrame = INVALID_FRAME;
_timer.invalidate();
cleanupAudioThread();
_avatar->clearJointsData();
// Turn off fake face tracker connection
_avatar->setForceFaceTrackerConnected(false);
if (_useAttachments) {
_avatar->setAttachmentData(_currentContext.attachments);
}
if (_useDisplayName) {
_avatar->setDisplayName(_currentContext.displayName);
}
if (_useHeadURL) {
_avatar->setFaceModelURL(_currentContext.headModel);
}
if (_useSkeletonURL) {
_avatar->setSkeletonModelURL(_currentContext.skeletonModel);
}
qCDebug(avatars) << "Recorder::stopPlaying()";
}
void Player::pausePlayer() {
_timerOffset = elapsed();
_timer.invalidate();
cleanupAudioThread();
_pausedFrame = _currentFrame;
qCDebug(avatars) << "Recorder::pausePlayer()";
}
void Player::setupAudioThread() {
_audioThread = new QThread();
_audioThread->setObjectName("Player Audio Thread");
_options.position = _avatar->getPosition();
_options.orientation = _avatar->getOrientation();
_options.stereo = _recording->numberAudioChannel() == 2;
_injector.reset(new AudioInjector(_recording->getAudioData(), _options), &QObject::deleteLater);
_injector->moveToThread(_audioThread);
_audioThread->start();
QMetaObject::invokeMethod(_injector.data(), "injectAudio", Qt::QueuedConnection);
}
void Player::cleanupAudioThread() {
_injector->stop();
QObject::connect(_injector.data(), &AudioInjector::finished,
_injector.data(), &AudioInjector::deleteLater);
QObject::connect(_injector.data(), &AudioInjector::destroyed,
_audioThread, &QThread::quit);
QObject::connect(_audioThread, &QThread::finished,
_audioThread, &QThread::deleteLater);
_injector.clear();
_audioThread = NULL;
}
void Player::loopRecording() {
cleanupAudioThread();
setupAudioThread();
_currentFrame = 0;
_timerOffset = 0;
_timer.restart();
}
void Player::loadFromFile(const QString& file) {
if (_recording) {
_recording->clear();
} else {
_recording = QSharedPointer<Recording>::create();
}
readRecordingFromFile(_recording, file);
_pausedFrame = INVALID_FRAME;
}
void Player::loadRecording(RecordingPointer recording) {
_recording = recording;
_pausedFrame = INVALID_FRAME;
}
void Player::play() {
computeCurrentFrame();
if (_currentFrame < 0 || (_currentFrame >= _recording->getFrameNumber() - 2)) { // -2 because of interpolation
if (_loop) {
loopRecording();
} else {
stopPlaying();
}
return;
}
const RecordingContext* context = &_recording->getContext();
if (_playFromCurrentPosition) {
context = &_currentContext;
}
const RecordingFrame& currentFrame = _recording->getFrame(_currentFrame);
const RecordingFrame& nextFrame = _recording->getFrame(_currentFrame + 1);
glm::vec3 translation = glm::mix(currentFrame.getTranslation(),
nextFrame.getTranslation(),
_frameInterpolationFactor);
_avatar->setPosition(context->position + context->orientation * translation);
glm::quat rotation = safeMix(currentFrame.getRotation(),
nextFrame.getRotation(),
_frameInterpolationFactor);
_avatar->setOrientation(context->orientation * rotation);
float scale = glm::mix(currentFrame.getScale(),
nextFrame.getScale(),
_frameInterpolationFactor);
_avatar->setTargetScale(context->scale * scale);
// Joint array playback
// FIXME: This is still using a deprecated path to assign the joint orientation, since setting the full RawJointData array doesn't
// work for Avatar. We need to fix this with the animation team.
const auto& prevJointArray = currentFrame.getJointArray();
const auto& nextJointArray = nextFrame.getJointArray();
QVector<JointData> jointArray(prevJointArray.size());
QVector<glm::quat> jointRotations(prevJointArray.size()); // FIXME: remove once the setRawJointData is fixed
QVector<glm::vec3> jointTranslations(prevJointArray.size()); // FIXME: remove once the setRawJointData is fixed
for (int i = 0; i < jointArray.size(); i++) {
const auto& prevJoint = prevJointArray[i];
const auto& nextJoint = nextJointArray[i];
auto& joint = jointArray[i];
// Rotation
joint.rotationSet = prevJoint.rotationSet || nextJoint.rotationSet;
if (joint.rotationSet) {
joint.rotation = safeMix(prevJoint.rotation, nextJoint.rotation, _frameInterpolationFactor);
jointRotations[i] = joint.rotation; // FIXME: remove once the setRawJointData is fixed
}
joint.translationSet = prevJoint.translationSet || nextJoint.translationSet;
if (joint.translationSet) {
joint.translation = glm::mix(prevJoint.translation, nextJoint.translation, _frameInterpolationFactor);
jointTranslations[i] = joint.translation; // FIXME: remove once the setRawJointData is fixed
}
}
// _avatar->setRawJointData(jointArray); // FIXME: Enable once the setRawJointData is fixed
_avatar->setJointRotations(jointRotations); // FIXME: remove once the setRawJointData is fixed
// _avatar->setJointTranslations(jointTranslations); // FIXME: remove once the setRawJointData is fixed
HeadData* head = const_cast<HeadData*>(_avatar->getHeadData());
if (head) {
// Make sure fake face tracker connection doesn't get turned off
_avatar->setForceFaceTrackerConnected(true);
QVector<float> blendCoef(currentFrame.getBlendshapeCoefficients().size());
for (int i = 0; i < currentFrame.getBlendshapeCoefficients().size(); ++i) {
blendCoef[i] = glm::mix(currentFrame.getBlendshapeCoefficients()[i],
nextFrame.getBlendshapeCoefficients()[i],
_frameInterpolationFactor);
}
head->setBlendshapeCoefficients(blendCoef);
float leanSideways = glm::mix(currentFrame.getLeanSideways(),
nextFrame.getLeanSideways(),
_frameInterpolationFactor);
head->setLeanSideways(leanSideways);
float leanForward = glm::mix(currentFrame.getLeanForward(),
nextFrame.getLeanForward(),
_frameInterpolationFactor);
head->setLeanForward(leanForward);
glm::quat headRotation = safeMix(currentFrame.getHeadRotation(),
nextFrame.getHeadRotation(),
_frameInterpolationFactor);
glm::vec3 eulers = glm::degrees(safeEulerAngles(headRotation));
head->setFinalPitch(eulers.x);
head->setFinalYaw(eulers.y);
head->setFinalRoll(eulers.z);
glm::vec3 lookAt = glm::mix(currentFrame.getLookAtPosition(),
nextFrame.getLookAtPosition(),
_frameInterpolationFactor);
head->setLookAtPosition(context->position + context->orientation * lookAt);
} else {
qCDebug(avatars) << "WARNING: Player couldn't find head data.";
}
_options.position = _avatar->getPosition();
_options.orientation = _avatar->getOrientation();
_injector->setOptions(_options);
}
void Player::setCurrentFrame(int currentFrame) {
if (_recording && currentFrame >= _recording->getFrameNumber()) {
stopPlaying();
return;
}
_currentFrame = currentFrame;
_timerOffset = _recording->getFrameTimestamp(_currentFrame);
if (isPlaying()) {
_timer.start();
setAudioInjectorPosition();
} else {
_pausedFrame = _currentFrame;
}
}
void Player::setCurrentTime(int currentTime) {
if (currentTime >= _recording->getLength()) {
stopPlaying();
return;
}
// Find correct frame
int lowestBound = 0;
int highestBound = _recording->getFrameNumber() - 1;
while (lowestBound + 1 != highestBound) {
assert(lowestBound < highestBound);
int bestGuess = lowestBound +
(highestBound - lowestBound) *
(float)(currentTime - _recording->getFrameTimestamp(lowestBound)) /
(float)(_recording->getFrameTimestamp(highestBound) - _recording->getFrameTimestamp(lowestBound));
if (_recording->getFrameTimestamp(bestGuess) <= currentTime) {
if (currentTime < _recording->getFrameTimestamp(bestGuess + 1)) {
lowestBound = bestGuess;
highestBound = bestGuess + 1;
} else {
lowestBound = bestGuess + 1;
}
} else {
if (_recording->getFrameTimestamp(bestGuess - 1) <= currentTime) {
lowestBound = bestGuess - 1;
highestBound = bestGuess;
} else {
highestBound = bestGuess - 1;
}
}
}
setCurrentFrame(lowestBound);
}
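The loop above is an interpolation search: bestGuess is placed proportionally to where currentTime falls between the bounding timestamps, so seeks on roughly evenly spaced frames converge much faster than plain bisection (seeking to 750 ms in a 0-1000 ms recording probes near the 75% mark immediately). The same algorithm in isolation, with the invariant spelled out:

// Returns i such that ts[i] <= t < ts[i + 1]; requires ts[0] <= t < ts.last().
int findFrameIndex(const QVector<qint32>& ts, qint32 t) {
    int lo = 0, hi = ts.size() - 1;
    while (lo + 1 != hi) {
        // Probe proportionally to t's position between ts[lo] and ts[hi].
        int guess = lo + (int)((hi - lo) *
            (float)(t - ts[lo]) / (float)(ts[hi] - ts[lo]));
        if (ts[guess] <= t) {
            if (t < ts[guess + 1]) {
                lo = guess;       // t sits in [ts[guess], ts[guess + 1])
                hi = guess + 1;
            } else {
                lo = guess + 1;   // undershot: advance the lower bound
            }
        } else if (ts[guess - 1] <= t) {
            lo = guess - 1;       // t sits in [ts[guess - 1], ts[guess])
            hi = guess;
        } else {
            hi = guess - 1;       // overshot: pull the upper bound down
        }
    }
    return lo;
}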
void Player::setVolume(float volume) {
_options.volume = volume;
if (_injector) {
_injector->setOptions(_options);
}
qCDebug(avatars) << "New volume: " << volume;
}
void Player::setAudioOffset(int audioOffset) {
_audioOffset = audioOffset;
}
void Player::setAudioInjectorPosition() {
int MSEC_PER_SEC = 1000;
int FRAME_SIZE = sizeof(AudioConstants::AudioSample) * _recording->numberAudioChannel();
int currentAudioFrame = elapsed() * FRAME_SIZE * (AudioConstants::SAMPLE_RATE / MSEC_PER_SEC);
_injector->setCurrentSendOffset(currentAudioFrame);
}
void Player::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
_playFromCurrentPosition = playFromCurrentLocation;
}
bool Player::computeCurrentFrame() {
if (!isPlaying()) {
_currentFrame = INVALID_FRAME;
return false;
}
if (_currentFrame < 0) {
_currentFrame = 0;
}
qint64 elapsed = glm::clamp(Player::elapsed() - _audioOffset, (qint64)0, (qint64)_recording->getLength());
while (_currentFrame < _recording->getFrameNumber() &&
_recording->getFrameTimestamp(_currentFrame) < elapsed) {
++_currentFrame;
}
while (_currentFrame > 0 &&
_recording->getFrameTimestamp(_currentFrame) > elapsed) {
--_currentFrame;
}
if (_currentFrame == _recording->getFrameNumber() - 1) {
--_currentFrame;
_frameInterpolationFactor = 1.0f;
} else {
qint64 currentTimestamps = _recording->getFrameTimestamp(_currentFrame);
qint64 nextTimestamps = _recording->getFrameTimestamp(_currentFrame + 1);
_frameInterpolationFactor = (float)(elapsed - currentTimestamps) /
(float)(nextTimestamps - currentTimestamps);
}
if (_frameInterpolationFactor < 0.0f || _frameInterpolationFactor > 1.0f) {
_frameInterpolationFactor = 0.0f;
qCDebug(avatars) << "Invalid frame interpolation value: overriding";
}
return true;
}
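The interpolation factor computed above is the playhead's normalized position between the two bounding frame timestamps, which play() feeds to glm::mix/safeMix. A worked example with millisecond timestamps:

#include <glm/glm.hpp>

// Playhead at 130 ms, between frames stamped 100 ms and 140 ms:
float factor = (float)(130 - 100) / (float)(140 - 100);   // 0.75
glm::vec3 currentPos(0.0f), nextPos(1.0f, 0.0f, 0.0f);    // sample frame data
glm::vec3 pos = glm::mix(currentPos, nextPos, factor);    // (0.75, 0, 0)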
#endif

View file

@@ -1,94 +0,0 @@
//
// Player.h
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_Player_h
#define hifi_Player_h
#include <recording/Forward.h>
#if 0
#include <AudioInjector.h>
#include <QElapsedTimer>
#include "Recording.h"
class AvatarData;
class Player;
typedef QSharedPointer<Player> PlayerPointer;
typedef QWeakPointer<Player> WeakPlayerPointer;
/// Plays back a recording
class Player {
public:
Player(AvatarData* avatar);
bool isPlaying() const;
bool isPaused() const;
qint64 elapsed() const;
RecordingPointer getRecording() const { return _recording; }
int getCurrentFrame() const { return _currentFrame; }
public slots:
void startPlaying();
void stopPlaying();
void pausePlayer();
void loadFromFile(const QString& file);
void loadRecording(RecordingPointer recording);
void play();
void setCurrentFrame(int currentFrame);
void setCurrentTime(int currentTime);
void setVolume(float volume);
void setAudioOffset(int audioOffset);
void setPlayFromCurrentLocation(bool playFromCurrentPosition);
void setLoop(bool loop) { _loop = loop; }
void useAttachements(bool useAttachments) { _useAttachments = useAttachments; }
void useDisplayName(bool useDisplayName) { _useDisplayName = useDisplayName; }
void useHeadModel(bool useHeadURL) { _useHeadURL = useHeadURL; }
void useSkeletonModel(bool useSkeletonURL) { _useSkeletonURL = useSkeletonURL; }
private:
void setupAudioThread();
void cleanupAudioThread();
void loopRecording();
void setAudioInjectorPosition();
bool computeCurrentFrame();
AvatarData* _avatar;
RecordingPointer _recording;
int _currentFrame;
float _frameInterpolationFactor;
int _pausedFrame;
QElapsedTimer _timer;
int _timerOffset;
int _audioOffset;
QThread* _audioThread;
QSharedPointer<AudioInjector> _injector;
AudioInjectorOptions _options;
RecordingContext _currentContext;
bool _playFromCurrentPosition;
bool _loop;
bool _useAttachments;
bool _useDisplayName;
bool _useHeadURL;
bool _useSkeletonURL;
};
#endif
#endif // hifi_Player_h

View file

@@ -1,147 +0,0 @@
//
// Recorder.cpp
//
//
// Created by Clement on 8/7/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include <GLMHelpers.h>
#include <NodeList.h>
#include <StreamUtils.h>
#include "AvatarData.h"
#include "AvatarLogging.h"
#include "Recorder.h"
Recorder::Recorder(AvatarData* avatar) :
_recording(new Recording()),
_avatar(avatar)
{
_timer.invalidate();
}
bool Recorder::isRecording() const {
return _timer.isValid();
}
qint64 Recorder::elapsed() const {
if (isRecording()) {
return _timer.elapsed();
} else {
return 0;
}
}
void Recorder::startRecording() {
qCDebug(avatars) << "Recorder::startRecording()";
_recording->clear();
RecordingContext& context = _recording->getContext();
context.globalTimestamp = usecTimestampNow();
context.domain = DependencyManager::get<NodeList>()->getDomainHandler().getHostname();
context.position = _avatar->getPosition();
context.orientation = _avatar->getOrientation();
context.scale = _avatar->getTargetScale();
context.headModel = _avatar->getFaceModelURL().toString();
context.skeletonModel = _avatar->getSkeletonModelURL().toString();
context.displayName = _avatar->getDisplayName();
context.attachments = _avatar->getAttachmentData();
context.orientationInv = glm::inverse(context.orientation);
bool wantDebug = false;
if (wantDebug) {
qCDebug(avatars) << "Recorder::startRecording(): Recording Context";
qCDebug(avatars) << "Global timestamp:" << context.globalTimestamp;
qCDebug(avatars) << "Domain:" << context.domain;
qCDebug(avatars) << "Position:" << context.position;
qCDebug(avatars) << "Orientation:" << context.orientation;
qCDebug(avatars) << "Scale:" << context.scale;
qCDebug(avatars) << "Head URL:" << context.headModel;
qCDebug(avatars) << "Skeleton URL:" << context.skeletonModel;
qCDebug(avatars) << "Display Name:" << context.displayName;
qCDebug(avatars) << "Num Attachments:" << context.attachments.size();
for (int i = 0; i < context.attachments.size(); ++i) {
qCDebug(avatars) << "Model URL:" << context.attachments[i].modelURL;
qCDebug(avatars) << "Joint Name:" << context.attachments[i].jointName;
qCDebug(avatars) << "Translation:" << context.attachments[i].translation;
qCDebug(avatars) << "Rotation:" << context.attachments[i].rotation;
qCDebug(avatars) << "Scale:" << context.attachments[i].scale;
}
}
_timer.start();
record();
}
void Recorder::stopRecording() {
qCDebug(avatars) << "Recorder::stopRecording()";
_timer.invalidate();
qCDebug(avatars).nospace() << "Recorded " << _recording->getFrameNumber() << " frames in " << _recording->getLength() << " msec (" << _recording->getFrameNumber() / (_recording->getLength() / 1000.0f) << " fps)";
}
void Recorder::saveToFile(const QString& file) {
if (_recording->isEmpty()) {
qCDebug(avatars) << "Cannot save recording to file, recording is empty.";
}
writeRecordingToFile(_recording, file);
}
void Recorder::record() {
if (isRecording()) {
const RecordingContext& context = _recording->getContext();
RecordingFrame frame;
frame.setBlendshapeCoefficients(_avatar->getHeadData()->getBlendshapeCoefficients());
// Capture the full skeleton joint data
auto& jointData = _avatar->getRawJointData();
frame.setJointArray(jointData);
frame.setTranslation(context.orientationInv * (_avatar->getPosition() - context.position));
frame.setRotation(context.orientationInv * _avatar->getOrientation());
frame.setScale(_avatar->getTargetScale() / context.scale);
const HeadData* head = _avatar->getHeadData();
if (head) {
glm::vec3 rotationDegrees = glm::vec3(head->getFinalPitch(),
head->getFinalYaw(),
head->getFinalRoll());
frame.setHeadRotation(glm::quat(glm::radians(rotationDegrees)));
frame.setLeanForward(head->getLeanForward());
frame.setLeanSideways(head->getLeanSideways());
glm::vec3 relativeLookAt = context.orientationInv *
(head->getLookAtPosition() - context.position);
frame.setLookAtPosition(relativeLookAt);
}
bool wantDebug = false;
if (wantDebug) {
qCDebug(avatars) << "Recording frame #" << _recording->getFrameNumber();
qCDebug(avatars) << "Blendshapes:" << frame.getBlendshapeCoefficients().size();
qCDebug(avatars) << "JointArray:" << frame.getJointArray().size();
qCDebug(avatars) << "Translation:" << frame.getTranslation();
qCDebug(avatars) << "Rotation:" << frame.getRotation();
qCDebug(avatars) << "Scale:" << frame.getScale();
qCDebug(avatars) << "Head rotation:" << frame.getHeadRotation();
qCDebug(avatars) << "Lean Forward:" << frame.getLeanForward();
qCDebug(avatars) << "Lean Sideways:" << frame.getLeanSideways();
qCDebug(avatars) << "LookAtPosition:" << frame.getLookAtPosition();
}
_recording->addFrame(_timer.elapsed(), frame);
}
}
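record() stores every frame in the coordinate frame of the recording context: positions are rotated into that frame by context.orientationInv, and play() reverses the mapping with context->position + context->orientation * translation. The two directions side by side:

// Recording: world -> context-relative (as in Recorder::record above)
glm::vec3 rel = context.orientationInv * (worldPos - context.position);

// Playback: context-relative -> world (as in Player::play above)
glm::vec3 world = context.position + context.orientation * rel;
// world == worldPos whenever the playback context equals the recording context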
void Recorder::recordAudio(const QByteArray& audioByteArray) {
_recording->addAudioPacket(audioByteArray);
}
#endif

View file

@@ -1,57 +0,0 @@
//
// Recorder.h
// libraries/avatars/src
//
// Created by Clement on 8/7/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_Recorder_h
#define hifi_Recorder_h
#include <recording/Forward.h>
#if 0
#include "Recording.h"
template<class C>
class QSharedPointer;
class AttachmentData;
class AvatarData;
class Recorder;
class Recording;
typedef QSharedPointer<Recorder> RecorderPointer;
typedef QWeakPointer<Recorder> WeakRecorderPointer;
/// Records avatar data into a Recording
class Recorder : public QObject {
Q_OBJECT
public:
Recorder(AvatarData* avatar);
bool isRecording() const;
qint64 elapsed() const;
RecordingPointer getRecording() const { return _recording; }
public slots:
void startRecording();
void stopRecording();
void saveToFile(const QString& file);
void record();
void recordAudio(const QByteArray& audioArray);
private:
QElapsedTimer _timer;
RecordingPointer _recording;
AvatarData* _avatar;
};
#endif
#endif // hifi_Recorder_h

View file

@@ -1,663 +0,0 @@
//
// Recording.cpp
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include <AudioConstants.h>
#include <GLMHelpers.h>
#include <NetworkAccessManager.h>
#include <NodeList.h>
#include <Sound.h>
#include <StreamUtils.h>
#include <QBitArray>
#include <QElapsedTimer>
#include <QEventLoop>
#include <QFile>
#include <QFileInfo>
#include <QPair>
#include "AvatarData.h"
#include "AvatarLogging.h"
#include "Recording.h"
// HFR file format magic number (Inspired by PNG)
// (decimal) 17 72 70 82 13 10 26 10
// (hexadecimal) 11 48 46 52 0d 0a 1a 0a
// (ASCII C notation) \021 H F R \r \n \032 \n
static const int MAGIC_NUMBER_SIZE = 8;
static const char MAGIC_NUMBER[MAGIC_NUMBER_SIZE] = {17, 72, 70, 82, 13, 10, 26, 10};
// Version (Major, Minor)
static const QPair<quint8, quint8> VERSION(0, 2);
int SCALE_RADIX = 10;
int BLENDSHAPE_RADIX = 15;
int LEAN_RADIX = 7;
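For reference, the HFR header written and parsed below consists of, in order: the 8-byte magic number, the (major, minor) version pair, a 2-byte offset to the data section, a 4-byte data length, and a CRC-16 over the data section. A minimal validation sketch, assuming the whole file is already in memory:

bool validateHfrHeader(const QByteArray& bytes) {
    if (!bytes.startsWith(QByteArray(MAGIC_NUMBER, MAGIC_NUMBER_SIZE))) {
        return false; // not an HFR file
    }
    QDataStream stream(bytes);
    stream.skipRawData(MAGIC_NUMBER_SIZE);
    QPair<quint8, quint8> version;
    quint16 dataOffset = 0;
    quint32 dataLength = 0;
    quint16 crc16 = 0;
    stream >> version >> dataOffset >> dataLength >> crc16;
    // The CRC-16 covers exactly [dataOffset, dataOffset + dataLength).
    return qChecksum(bytes.constData() + dataOffset, dataLength) == crc16;
}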
void RecordingFrame::setBlendshapeCoefficients(QVector<float> blendshapeCoefficients) {
_blendshapeCoefficients = blendshapeCoefficients;
}
int Recording::getLength() const {
if (_timestamps.isEmpty()) {
return 0;
}
return _timestamps.last();
}
qint32 Recording::getFrameTimestamp(int i) const {
if (i >= _timestamps.size()) {
return getLength();
}
if (i < 0) {
return 0;
}
return _timestamps[i];
}
const RecordingFrame& Recording::getFrame(int i) const {
assert(i < _timestamps.size());
return _frames[i];
}
int Recording::numberAudioChannel() const {
// Check for stereo audio
float MSEC_PER_SEC = 1000.0f;
float channelLength = ((float)getLength() / MSEC_PER_SEC) * AudioConstants::SAMPLE_RATE *
sizeof(AudioConstants::AudioSample);
return glm::round((float)getAudioData().size() / channelLength);
}
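numberAudioChannel() infers the channel count by dividing the audio byte count by the bytes one mono channel would occupy over the recording's length. A worked example, assuming 16-bit samples and a 24 kHz sample rate (the sample rate here is an assumption, not taken from this diff):

// A 10-second recording: one mono channel occupies
//   10 s * 24000 samples/s * 2 bytes/sample = 480,000 bytes.
// With 960,000 bytes of audio data: round(960000 / 480000) = 2 -> stereo.
float channelLength = 10.0f * 24000.0f * 2.0f;              // bytes per channel
int channels = (int)glm::round(960000.0f / channelLength);  // == 2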
void Recording::addFrame(int timestamp, RecordingFrame &frame) {
_timestamps << timestamp;
_frames << frame;
}
void Recording::clear() {
_timestamps.clear();
_frames.clear();
_audioData.clear();
}
void writeVec3(QDataStream& stream, const glm::vec3& value) {
unsigned char buffer[sizeof(value)];
memcpy(buffer, &value, sizeof(value));
stream.writeRawData(reinterpret_cast<char*>(buffer), sizeof(value));
}
bool readVec3(QDataStream& stream, glm::vec3& value) {
unsigned char buffer[sizeof(value)];
stream.readRawData(reinterpret_cast<char*>(buffer), sizeof(value));
memcpy(&value, buffer, sizeof(value));
return true;
}
void writeQuat(QDataStream& stream, const glm::quat& value) {
unsigned char buffer[256];
int writtenToBuffer = packOrientationQuatToBytes(buffer, value);
stream.writeRawData(reinterpret_cast<char*>(buffer), writtenToBuffer);
}
bool readQuat(QDataStream& stream, glm::quat& value) {
int quatByteSize = 4 * 2; // 4 components * 2 bytes each
unsigned char buffer[256];
stream.readRawData(reinterpret_cast<char*>(buffer), quatByteSize);
int readFromBuffer = unpackOrientationQuatFromBytes(buffer, value);
if (readFromBuffer != quatByteSize) {
return false;
}
return true;
}
bool readFloat(QDataStream& stream, float& value, int radix) {
int floatByteSize = 2; // 1 float * 2 bytes
int16_t buffer[256];
stream.readRawData(reinterpret_cast<char*>(buffer), floatByteSize);
int readFromBuffer = unpackFloatScalarFromSignedTwoByteFixed(buffer, &value, radix);
if (readFromBuffer != floatByteSize) {
return false;
}
return true;
}
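These helpers trade precision for size: writeQuat/readQuat pack a rotation into four 2-byte fixed-point components (8 bytes total) via packOrientationQuatToBytes, and readFloat unpacks a 2-byte fixed-point scalar whose radix picks the precision/range trade-off (see the *_RADIX constants above). A round-trip sketch using the helpers defined here:

QByteArray buffer;
{
    QDataStream out(&buffer, QIODevice::WriteOnly);
    writeQuat(out, glm::angleAxis(glm::radians(90.0f), glm::vec3(0, 1, 0)));
}
QDataStream in(buffer);
glm::quat restored;
if (readQuat(in, restored)) {
    // restored matches the original rotation to within fixed-point precision
}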
void writeRecordingToFile(RecordingPointer recording, const QString& filename) {
if (!recording || recording->getFrameNumber() < 1) {
qCDebug(avatars) << "Can't save empty recording";
return;
}
QElapsedTimer timer;
QFile file(filename);
if (!file.open(QIODevice::ReadWrite | QIODevice::Truncate)){
qCDebug(avatars) << "Couldn't open " << filename;
return;
}
timer.start();
qCDebug(avatars) << "Writing recording to " << filename << ".";
QDataStream fileStream(&file);
// HEADER
file.write(MAGIC_NUMBER, MAGIC_NUMBER_SIZE); // Magic number
fileStream << VERSION; // File format version
const qint64 dataOffsetPos = file.pos();
fileStream << (quint16)0; // Save two empty bytes for the data offset
const qint64 dataLengthPos = file.pos();
fileStream << (quint32)0; // Save four empty bytes for the data length
const quint64 crc16Pos = file.pos();
fileStream << (quint16)0; // Save two empty bytes for the CRC-16
// METADATA
// TODO
// Write data offset
quint16 dataOffset = file.pos();
file.seek(dataOffsetPos);
fileStream << dataOffset;
file.seek(dataOffset);
// CONTEXT
RecordingContext& context = recording->getContext();
// Global Timestamp
fileStream << context.globalTimestamp;
// Domain
fileStream << context.domain;
// Position
writeVec3(fileStream, context.position);
// Orientation
writeQuat(fileStream, context.orientation);
// Scale
fileStream << context.scale;
// Head model
fileStream << context.headModel;
// Skeleton model
fileStream << context.skeletonModel;
// Display name
fileStream << context.displayName;
// Attachments
fileStream << (quint8)context.attachments.size();
foreach (AttachmentData data, context.attachments) {
// Model
fileStream << data.modelURL.toString();
// Joint name
fileStream << data.jointName;
// Position
writeVec3(fileStream, data.translation);
// Orientation
writeQuat(fileStream, data.rotation);
// Scale
fileStream << data.scale;
}
// RECORDING
fileStream << recording->_timestamps;
QBitArray mask;
quint32 numBlendshapes = 0;
quint32 numJoints = 0;
for (int i = 0; i < recording->_timestamps.size(); ++i) {
mask.fill(false);
int maskIndex = 0;
QByteArray buffer;
QDataStream stream(&buffer, QIODevice::WriteOnly);
RecordingFrame& previousFrame = recording->_frames[(i != 0) ? i - 1 : i];
RecordingFrame& frame = recording->_frames[i];
// Blendshape Coefficients
if (i == 0) {
numBlendshapes = frame.getBlendshapeCoefficients().size();
stream << numBlendshapes;
mask.resize(mask.size() + numBlendshapes);
}
for (quint32 j = 0; j < numBlendshapes; ++j) {
if (i == 0 ||
frame._blendshapeCoefficients[j] != previousFrame._blendshapeCoefficients[j]) {
stream << frame.getBlendshapeCoefficients()[j];
mask.setBit(maskIndex);
}
++maskIndex;
}
const auto& jointArray = frame.getJointArray();
if (i == 0) {
numJoints = jointArray.size();
stream << numJoints;
// 2 fields per joint
mask.resize(mask.size() + numJoints * 2);
}
for (quint32 j = 0; j < numJoints; j++) {
const auto& joint = jointArray[j];
if (true) { // FIXME: should check joint.rotationSet; rotations are always written for now
writeQuat(stream, joint.rotation);
mask.setBit(maskIndex);
}
maskIndex++;
if (joint.translationSet) {
writeVec3(stream, joint.translation);
mask.setBit(maskIndex);
}
maskIndex++;
}
// Translation
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._translation != previousFrame._translation) {
writeVec3(stream, frame._translation);
mask.setBit(maskIndex);
}
maskIndex++;
// Rotation
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._rotation != previousFrame._rotation) {
writeQuat(stream, frame._rotation);
mask.setBit(maskIndex);
}
maskIndex++;
// Scale
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._scale != previousFrame._scale) {
stream << frame._scale;
mask.setBit(maskIndex);
}
maskIndex++;
// Head Rotation
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._headRotation != previousFrame._headRotation) {
writeQuat(stream, frame._headRotation);
mask.setBit(maskIndex);
}
maskIndex++;
// Lean Sideways
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._leanSideways != previousFrame._leanSideways) {
stream << frame._leanSideways;
mask.setBit(maskIndex);
}
maskIndex++;
// Lean Forward
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._leanForward != previousFrame._leanForward) {
stream << frame._leanForward;
mask.setBit(maskIndex);
}
maskIndex++;
// LookAt Position
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._lookAtPosition != previousFrame._lookAtPosition) {
writeVec3(stream, frame._lookAtPosition);
mask.setBit(maskIndex);
}
maskIndex++;
fileStream << mask;
fileStream << buffer;
}
fileStream << recording->getAudioData();
qint64 writingTime = timer.restart();
// Write data length and CRC-16
quint32 dataLength = file.pos() - dataOffset;
file.seek(dataOffset); // Go to beginning of data for checksum
quint16 crc16 = qChecksum(file.readAll().constData(), dataLength);
file.seek(dataLengthPos);
fileStream << dataLength;
file.seek(crc16Pos);
fileStream << crc16;
file.seek(dataOffset + dataLength);
bool wantDebug = true;
if (wantDebug) {
qCDebug(avatars) << "[DEBUG] WRITE recording";
qCDebug(avatars) << "Header:";
qCDebug(avatars) << "File Format version:" << VERSION;
qCDebug(avatars) << "Data length:" << dataLength;
qCDebug(avatars) << "Data offset:" << dataOffset;
qCDebug(avatars) << "CRC-16:" << crc16;
qCDebug(avatars) << "Context block:";
qCDebug(avatars) << "Global timestamp:" << context.globalTimestamp;
qCDebug(avatars) << "Domain:" << context.domain;
qCDebug(avatars) << "Position:" << context.position;
qCDebug(avatars) << "Orientation:" << context.orientation;
qCDebug(avatars) << "Scale:" << context.scale;
qCDebug(avatars) << "Head Model:" << context.headModel;
qCDebug(avatars) << "Skeleton Model:" << context.skeletonModel;
qCDebug(avatars) << "Display Name:" << context.displayName;
qCDebug(avatars) << "Num Attachments:" << context.attachments.size();
for (int i = 0; i < context.attachments.size(); ++i) {
qCDebug(avatars) << "Model URL:" << context.attachments[i].modelURL;
qCDebug(avatars) << "Joint Name:" << context.attachments[i].jointName;
qCDebug(avatars) << "Translation:" << context.attachments[i].translation;
qCDebug(avatars) << "Rotation:" << context.attachments[i].rotation;
qCDebug(avatars) << "Scale:" << context.attachments[i].scale;
}
qCDebug(avatars) << "Recording:";
qCDebug(avatars) << "Total frames:" << recording->getFrameNumber();
qCDebug(avatars) << "Audio array:" << recording->getAudioData().size();
}
qint64 checksumTime = timer.elapsed();
qCDebug(avatars) << "Wrote" << file.size() << "bytes in" << writingTime + checksumTime << "ms. (" << checksumTime << "ms for checksum)";
}
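The frame loop above is a delta encoding: frame 0 writes every field plus the counts (numBlendshapes, numJoints); every later frame writes a QBitArray mask with one bit per field, followed only by the fields whose bits are set, i.e. those that changed since the previous frame. readRecordingFromFile below mirrors this, carrying the previous frame's value forward wherever a bit is clear. The scheme reduced to a single field:

// Writer: emit the field only when it changed, and record that in the mask.
if (i == 0 || frame._scale != previousFrame._scale) {
    stream << frame._scale;
    mask.setBit(maskIndex);
}
maskIndex++;

// Reader: a clear bit means "unchanged", so reuse the previous frame's value.
if (!mask[maskIndex++]) {
    frame._scale = previousFrame._scale;
} else {
    stream >> frame._scale;
}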
RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& filename) {
QByteArray byteArray;
QUrl url(filename);
QElapsedTimer timer;
timer.start(); // timer used for debug information (download/parsing time)
// Acquire the data and place it in byteArray
// Return if data unavailable
if (url.scheme() == "http" || url.scheme() == "https" || url.scheme() == "ftp") {
// Download file if necessary
qCDebug(avatars) << "Downloading recording at" << url;
QNetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance();
QNetworkRequest networkRequest = QNetworkRequest(url);
networkRequest.setHeader(QNetworkRequest::UserAgentHeader, HIGH_FIDELITY_USER_AGENT);
QNetworkReply* reply = networkAccessManager.get(networkRequest);
QEventLoop loop;
QObject::connect(reply, SIGNAL(finished()), &loop, SLOT(quit()));
loop.exec(); // wait for file
if (reply->error() != QNetworkReply::NoError) {
qCDebug(avatars) << "Error while downloading recording: " << reply->error();
reply->deleteLater();
return recording;
}
byteArray = reply->readAll();
reply->deleteLater();
// print debug + restart timer
qCDebug(avatars) << "Downloaded " << byteArray.size() << " bytes in " << timer.restart() << " ms.";
} else {
// If local file, just read it.
qCDebug(avatars) << "Reading recording from " << filename << ".";
QFile file(filename);
if (!file.open(QIODevice::ReadOnly)){
qCDebug(avatars) << "Could not open local file: " << url;
return recording;
}
byteArray = file.readAll();
file.close();
}
if (!filename.endsWith(".hfr") && !filename.endsWith(".HFR")) {
qCDebug(avatars) << "File extension not recognized";
}
// Reset the recording passed in the arguments
if (!recording) {
recording = QSharedPointer<Recording>::create();
}
QDataStream fileStream(byteArray);
// HEADER
QByteArray magicNumber(MAGIC_NUMBER, MAGIC_NUMBER_SIZE);
if (!byteArray.startsWith(magicNumber)) {
qCDebug(avatars) << "ERROR: This is not a .HFR file. (Magic Number incorrect)";
return recording;
}
fileStream.skipRawData(MAGIC_NUMBER_SIZE);
QPair<quint8, quint8> version;
fileStream >> version; // File format version
if (version != VERSION && version != QPair<quint8, quint8>(0,1)) {
qCDebug(avatars) << "ERROR: This file format version is not supported.";
return recording;
}
quint16 dataOffset = 0;
fileStream >> dataOffset;
quint32 dataLength = 0;
fileStream >> dataLength;
quint16 crc16 = 0;
fileStream >> crc16;
// Check checksum
quint16 computedCRC16 = qChecksum(byteArray.constData() + dataOffset, dataLength);
if (computedCRC16 != crc16) {
qCDebug(avatars) << "Checksum does not match. Bailling!";
recording.clear();
return recording;
}
// METADATA
// TODO
// CONTEXT
RecordingContext& context = recording->getContext();
// Global Timestamp
fileStream >> context.globalTimestamp;
// Domain
fileStream >> context.domain;
// Position
if (!readVec3(fileStream, context.position)) {
qCDebug(avatars) << "Couldn't read file correctly. (Invalid vec3)";
recording.clear();
return recording;
}
// Orientation
if (!readQuat(fileStream, context.orientation)) {
qCDebug(avatars) << "Couldn't read file correctly. (Invalid quat)";
recording.clear();
return recording;
}
// Scale
if (version == QPair<quint8, quint8>(0,1)) {
readFloat(fileStream, context.scale, SCALE_RADIX);
} else {
fileStream >> context.scale;
}
// Head model
fileStream >> context.headModel;
// Skeleton model
fileStream >> context.skeletonModel;
// Display Name
fileStream >> context.displayName;
// Attachments
quint8 numAttachments = 0;
fileStream >> numAttachments;
for (int i = 0; i < numAttachments; ++i) {
AttachmentData data;
// Model
QString modelURL;
fileStream >> modelURL;
data.modelURL = modelURL;
// Joint name
fileStream >> data.jointName;
// Translation
if (!readVec3(fileStream, data.translation)) {
qCDebug(avatars) << "Couldn't read attachment correctly. (Invalid vec3)";
continue;
}
// Rotation
if (!readQuat(fileStream, data.rotation)) {
qCDebug(avatars) << "Couldn't read attachment correctly. (Invalid quat)";
continue;
}
// Scale
if (version == QPair<quint8, quint8>(0,1)) {
readFloat(fileStream, data.scale, SCALE_RADIX);
} else {
fileStream >> data.scale;
}
context.attachments << data;
}
quint32 numBlendshapes = 0;
quint32 numJoints = 0;
// RECORDING
fileStream >> recording->_timestamps;
for (int i = 0; i < recording->_timestamps.size(); ++i) {
QBitArray mask;
QByteArray buffer;
QDataStream stream(&buffer, QIODevice::ReadOnly);
RecordingFrame frame;
RecordingFrame& previousFrame = (i == 0) ? frame : recording->_frames.last();
fileStream >> mask;
fileStream >> buffer;
int maskIndex = 0;
// Blendshape Coefficients
if (i == 0) {
stream >> numBlendshapes;
}
frame._blendshapeCoefficients.resize(numBlendshapes);
for (quint32 j = 0; j < numBlendshapes; ++j) {
if (!mask[maskIndex++]) {
frame._blendshapeCoefficients[j] = previousFrame._blendshapeCoefficients[j];
} else if (version == QPair<quint8, quint8>(0,1)) {
readFloat(stream, frame._blendshapeCoefficients[j], BLENDSHAPE_RADIX);
} else {
stream >> frame._blendshapeCoefficients[j];
}
}
// Joint Array
if (i == 0) {
stream >> numJoints;
}
frame._jointArray.resize(numJoints);
for (quint32 j = 0; j < numJoints; ++j) {
auto& joint = frame._jointArray[2];
if (mask[maskIndex++] && readQuat(stream, joint.rotation)) {
joint.rotationSet = true;
} else {
joint.rotationSet = false;
}
if (mask[maskIndex++] && readVec3(stream, joint.translation)) {
joint.translationSet = true;
} else {
joint.translationSet = false;
}
}
if (!mask[maskIndex++] || !readVec3(stream, frame._translation)) {
frame._translation = previousFrame._translation;
}
if (!mask[maskIndex++] || !readQuat(stream, frame._rotation)) {
frame._rotation = previousFrame._rotation;
}
if (!mask[maskIndex++]) {
frame._scale = previousFrame._scale;
} else if (version == QPair<quint8, quint8>(0,1)) {
readFloat(stream, frame._scale, SCALE_RADIX);
} else {
stream >> frame._scale;
}
if (!mask[maskIndex++] || !readQuat(stream, frame._headRotation)) {
frame._headRotation = previousFrame._headRotation;
}
if (!mask[maskIndex++]) {
frame._leanSideways = previousFrame._leanSideways;
} else if (version == QPair<quint8, quint8>(0,1)) {
readFloat(stream, frame._leanSideways, LEAN_RADIX);
} else {
stream >> frame._leanSideways;
}
if (!mask[maskIndex++]) {
frame._leanForward = previousFrame._leanForward;
} else if (version == QPair<quint8, quint8>(0,1)) {
readFloat(stream, frame._leanForward, LEAN_RADIX);
} else {
stream >> frame._leanForward;
}
if (!mask[maskIndex++] || !readVec3(stream, frame._lookAtPosition)) {
frame._lookAtPosition = previousFrame._lookAtPosition;
}
recording->_frames << frame;
}
QByteArray audioArray;
fileStream >> audioArray;
recording->addAudioPacket(audioArray);
bool wantDebug = true;
if (wantDebug) {
qCDebug(avatars) << "[DEBUG] READ recording";
qCDebug(avatars) << "Header:";
qCDebug(avatars) << "File Format version:" << VERSION;
qCDebug(avatars) << "Data length:" << dataLength;
qCDebug(avatars) << "Data offset:" << dataOffset;
qCDebug(avatars) << "CRC-16:" << crc16;
qCDebug(avatars) << "Context block:";
qCDebug(avatars) << "Global timestamp:" << context.globalTimestamp;
qCDebug(avatars) << "Domain:" << context.domain;
qCDebug(avatars) << "Position:" << context.position;
qCDebug(avatars) << "Orientation:" << context.orientation;
qCDebug(avatars) << "Scale:" << context.scale;
qCDebug(avatars) << "Head Model:" << context.headModel;
qCDebug(avatars) << "Skeleton Model:" << context.skeletonModel;
qCDebug(avatars) << "Display Name:" << context.displayName;
qCDebug(avatars) << "Num Attachments:" << numAttachments;
for (int i = 0; i < numAttachments; ++i) {
qCDebug(avatars) << "Model URL:" << context.attachments[i].modelURL;
qCDebug(avatars) << "Joint Name:" << context.attachments[i].jointName;
qCDebug(avatars) << "Translation:" << context.attachments[i].translation;
qCDebug(avatars) << "Rotation:" << context.attachments[i].rotation;
qCDebug(avatars) << "Scale:" << context.attachments[i].scale;
}
qCDebug(avatars) << "Recording:";
qCDebug(avatars) << "Total frames:" << recording->getFrameNumber();
qCDebug(avatars) << "Audio array:" << recording->getAudioData().size();
}
qCDebug(avatars) << "Read " << byteArray.size() << " bytes in " << timer.elapsed() << " ms.";
return recording;
}
#endif
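// For reference, a minimal sketch of the fixed-point helper assumed by the
// version (0,1) branches above. Legacy files appear to store floats as
// integers scaled by 1 << radix (SCALE_RADIX, BLENDSHAPE_RADIX, LEAN_RADIX);
// the name and exact signature here are assumptions, not the shipped helper.
bool readFloatSketch(QDataStream& stream, float& value, int radix) {
    qint32 floatAsInt = 0;
    stream >> floatAsInt;                            // fixed-point integer as written
    value = (float)floatAsInt / (float)(1 << radix); // scale back to a float
    return stream.status() == QDataStream::Ok;       // false on a short or corrupt read
}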

View file

@ -1,131 +0,0 @@
//
// Recording.h
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_Recording_h
#define hifi_Recording_h
#if 0
#include <QString>
#include <QVector>
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
template<class C>
class QSharedPointer;
class AttachmentData;
class Recording;
class RecordingFrame;
class Sound;
class JointData;
typedef QSharedPointer<Recording> RecordingPointer;
/// Stores a recording's static data
class RecordingContext {
public:
quint64 globalTimestamp;
QString domain;
glm::vec3 position;
glm::quat orientation;
float scale;
QString headModel;
QString skeletonModel;
QString displayName;
QVector<AttachmentData> attachments;
// This avoids recomputation every frame while recording.
glm::quat orientationInv;
};
/// Stores a recording
class Recording {
public:
bool isEmpty() const { return _timestamps.isEmpty(); }
int getLength() const; // in ms
RecordingContext& getContext() { return _context; }
int getFrameNumber() const { return _frames.size(); }
qint32 getFrameTimestamp(int i) const;
const RecordingFrame& getFrame(int i) const;
const QByteArray& getAudioData() const { return _audioData; }
int numberAudioChannel() const;
protected:
void addFrame(int timestamp, RecordingFrame& frame);
void addAudioPacket(const QByteArray& byteArray) { _audioData.append(byteArray); }
void clear();
private:
RecordingContext _context;
QVector<qint32> _timestamps;
QVector<RecordingFrame> _frames;
QByteArray _audioData;
friend class Recorder;
friend class Player;
friend void writeRecordingToFile(RecordingPointer recording, const QString& file);
friend RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& file);
friend RecordingPointer readRecordingFromRecFile(RecordingPointer recording, const QString& filename,
const QByteArray& byteArray);
};
/// Stores the different values associated with one recording frame
class RecordingFrame {
public:
QVector<float> getBlendshapeCoefficients() const { return _blendshapeCoefficients; }
QVector<JointData> getJointArray() const { return _jointArray; }
glm::vec3 getTranslation() const { return _translation; }
glm::quat getRotation() const { return _rotation; }
float getScale() const { return _scale; }
glm::quat getHeadRotation() const { return _headRotation; }
float getLeanSideways() const { return _leanSideways; }
float getLeanForward() const { return _leanForward; }
glm::vec3 getLookAtPosition() const { return _lookAtPosition; }
protected:
void setBlendshapeCoefficients(QVector<float> blendshapeCoefficients);
void setJointArray(const QVector<JointData>& jointArray) { _jointArray = jointArray; }
void setTranslation(const glm::vec3& translation) { _translation = translation; }
void setRotation(const glm::quat& rotation) { _rotation = rotation; }
void setScale(float scale) { _scale = scale; }
void setHeadRotation(glm::quat headRotation) { _headRotation = headRotation; }
void setLeanSideways(float leanSideways) { _leanSideways = leanSideways; }
void setLeanForward(float leanForward) { _leanForward = leanForward; }
void setLookAtPosition(const glm::vec3& lookAtPosition) { _lookAtPosition = lookAtPosition; }
private:
QVector<float> _blendshapeCoefficients;
QVector<JointData> _jointArray;
glm::vec3 _translation;
glm::quat _rotation;
float _scale;
glm::quat _headRotation;
float _leanSideways;
float _leanForward;
glm::vec3 _lookAtPosition;
friend class Recorder;
friend void writeRecordingToFile(RecordingPointer recording, const QString& file);
friend RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& file);
friend RecordingPointer readRecordingFromRecFile(RecordingPointer recording, const QString& filename,
const QByteArray& byteArray);
};
void writeRecordingToFile(RecordingPointer recording, const QString& filename);
RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& filename);
RecordingPointer readRecordingFromRecFile(RecordingPointer recording, const QString& filename, const QByteArray& byteArray);
#endif
#endif // hifi_Recording_h

View file

@ -56,10 +56,12 @@ bool RecordingScriptingInterface::loadRecording(const QString& url) {
using namespace recording;
auto loader = ClipCache::instance().getClipLoader(url);
QEventLoop loop;
QObject::connect(loader.data(), &Resource::loaded, &loop, &QEventLoop::quit);
QObject::connect(loader.data(), &Resource::failed, &loop, &QEventLoop::quit);
loop.exec();
if (!loader->isLoaded()) {
QEventLoop loop;
QObject::connect(loader.data(), &Resource::loaded, &loop, &QEventLoop::quit);
QObject::connect(loader.data(), &Resource::failed, &loop, &QEventLoop::quit);
loop.exec();
}
if (!loader->isLoaded()) {
qWarning() << "Clip failed to load from " << url;

View file

@ -128,17 +128,25 @@ QJsonObject Transform::toJson(const Transform& transform) {
}
QJsonObject result;
auto json = toJsonValue(transform.getTranslation());
if (!json.isNull()) {
result[JSON_TRANSLATION] = json;
if (transform.getTranslation() != vec3()) {
auto json = toJsonValue(transform.getTranslation());
if (!json.isNull()) {
result[JSON_TRANSLATION] = json;
}
}
json = toJsonValue(transform.getRotation());
if (!json.isNull()) {
result[JSON_ROTATION] = json;
if (transform.getRotation() != quat()) {
auto json = toJsonValue(transform.getRotation());
if (!json.isNull()) {
result[JSON_ROTATION] = json;
}
}
json = toJsonValue(transform.getScale());
if (!json.isNull()) {
result[JSON_SCALE] = json;
if (transform.getScale() != vec3(1.0f)) {
auto json = toJsonValue(transform.getScale());
if (!json.isNull()) {
result[JSON_SCALE] = json;
}
}
return result;
}
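// A minimal sketch of the matching decoder, assuming the same JSON keys and
// hypothetical vec3FromJsonValue/quatFromJsonValue helpers (the shipped
// helper names and the exact setScale overload may differ). Absent keys keep
// the identity defaults, which pairs with the omit-identity encoding above.
Transform Transform::fromJson(const QJsonValue& json) {
    Transform result; // identity by default
    QJsonObject object = json.toObject();
    if (object.contains(JSON_TRANSLATION)) {
        result.setTranslation(vec3FromJsonValue(object[JSON_TRANSLATION])); // assumed helper
    }
    if (object.contains(JSON_ROTATION)) {
        result.setRotation(quatFromJsonValue(object[JSON_ROTATION]));       // assumed helper
    }
    if (object.contains(JSON_SCALE)) {
        result.setScale(vec3FromJsonValue(object[JSON_SCALE]));             // assumed helper
    }
    return result;
}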