Avatar recording work in progress

Brad Davis 2015-11-10 10:19:53 -08:00
parent 315b5e9da2
commit f521be10fe
32 changed files with 777 additions and 156 deletions

View file

@ -5,7 +5,7 @@ setup_hifi_project(Core Gui Network Script Quick Widgets WebSockets)
# link in the shared libraries
link_hifi_libraries(
audio avatars octree environment gpu model fbx entities
networking animation shared script-engine embedded-webserver
networking animation recording shared script-engine embedded-webserver
controllers physics
)

View file

@ -35,7 +35,10 @@
#include <TextRenderer3D.h>
#include <UserActivityLogger.h>
#include <AnimDebugDraw.h>
#include <recording/Deck.h>
#include <recording/Recorder.h>
#include <recording/Clip.h>
#include <recording/Frame.h>
#include "devices/Faceshift.h"
@ -77,6 +80,10 @@ const QString& DEFAULT_AVATAR_COLLISION_SOUND_URL = "https://hifi-public.s3.amaz
const float MyAvatar::ZOOM_MIN = 0.5f;
const float MyAvatar::ZOOM_MAX = 25.0f;
const float MyAvatar::ZOOM_DEFAULT = 1.5f;
static const QString HEADER_NAME = "com.highfidelity.recording.AvatarData";
static recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::TYPE_INVALID;
static std::once_flag frameTypeRegistration;
MyAvatar::MyAvatar(RigPointer rig) :
Avatar(rig),
@ -112,6 +119,19 @@ MyAvatar::MyAvatar(RigPointer rig) :
_audioListenerMode(FROM_HEAD),
_hmdAtRestDetector(glm::vec3(0), glm::quat())
{
using namespace recording;
std::call_once(frameTypeRegistration, [] {
AVATAR_FRAME_TYPE = recording::Frame::registerFrameType(HEADER_NAME);
});
// FIXME how to deal with driving multiple avatars locally?
Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this](Frame::Pointer frame) {
qDebug() << "Playback of avatar frame length: " << frame->data.size();
avatarStateFromFrame(frame->data, this);
});
for (int i = 0; i < MAX_DRIVE_KEYS; i++) {
_driveKeys[i] = 0.0f;
}
@ -235,14 +255,12 @@ void MyAvatar::update(float deltaTime) {
simulate(deltaTime);
}
extern QByteArray avatarStateToFrame(const AvatarData* _avatar);
extern void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar);
void MyAvatar::simulate(float deltaTime) {
PerformanceTimer perfTimer("simulate");
// Play back recording
if (_player && _player->isPlaying()) {
_player->play();
}
if (_scale != _targetScale) {
float scale = (1.0f - SMOOTHING_RATIO) * _scale + SMOOTHING_RATIO * _targetScale;
setScale(scale);
@ -310,7 +328,7 @@ void MyAvatar::simulate(float deltaTime) {
// Record avatars movements.
if (_recorder && _recorder->isRecording()) {
_recorder->record();
_recorder->recordFrame(AVATAR_FRAME_TYPE, avatarStateToFrame(this));
}
// consider updating our billboard
@ -580,33 +598,35 @@ bool MyAvatar::isRecording() {
return _recorder && _recorder->isRecording();
}
qint64 MyAvatar::recorderElapsed() {
float MyAvatar::recorderElapsed() {
if (QThread::currentThread() != thread()) {
float result;
QMetaObject::invokeMethod(this, "recorderElapsed", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(float, result));
return result;
}
if (!_recorder) {
return 0;
}
if (QThread::currentThread() != thread()) {
qint64 result;
QMetaObject::invokeMethod(this, "recorderElapsed", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(qint64, result));
return result;
}
return _recorder->elapsed();
return (float)_recorder->position() / MSECS_PER_SECOND;
}
QMetaObject::Connection _audioClientRecorderConnection;
void MyAvatar::startRecording() {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startRecording", Qt::BlockingQueuedConnection);
return;
}
if (!_recorder) {
_recorder = QSharedPointer<Recorder>::create(this);
}
_recorder = std::make_shared<recording::Recorder>();
// connect to AudioClient's signal so we get input audio
auto audioClient = DependencyManager::get<AudioClient>();
connect(audioClient.data(), &AudioClient::inputReceived, _recorder.data(),
&Recorder::recordAudio, Qt::QueuedConnection);
_recorder->startRecording();
_audioClientRecorderConnection = connect(audioClient.data(), &AudioClient::inputReceived, [] {
// FIXME, missing audio data handling
});
setRecordingBasis();
_recorder->start();
}
void MyAvatar::stopRecording() {
@ -618,15 +638,14 @@ void MyAvatar::stopRecording() {
return;
}
if (_recorder) {
// stop grabbing audio from the AudioClient
auto audioClient = DependencyManager::get<AudioClient>();
disconnect(audioClient.data(), 0, _recorder.data(), 0);
_recorder->stopRecording();
QObject::disconnect(_audioClientRecorderConnection);
_audioClientRecorderConnection = QMetaObject::Connection();
_recorder->stop();
clearRecordingBasis();
}
}
void MyAvatar::saveRecording(QString filename) {
void MyAvatar::saveRecording(const QString& filename) {
if (!_recorder) {
qCDebug(interfaceapp) << "There is no recording to save";
return;
@ -636,8 +655,10 @@ void MyAvatar::saveRecording(QString filename) {
Q_ARG(QString, filename));
return;
}
if (_recorder) {
_recorder->saveToFile(filename);
auto clip = _recorder->getClip();
recording::Clip::toFile(filename, clip);
}
}
@ -646,15 +667,18 @@ void MyAvatar::loadLastRecording() {
QMetaObject::invokeMethod(this, "loadLastRecording", Qt::BlockingQueuedConnection);
return;
}
if (!_recorder) {
if (!_recorder || !_recorder->getClip()) {
qCDebug(interfaceapp) << "There is no recording to load";
return;
}
if (!_player) {
_player = QSharedPointer<Player>::create(this);
_player = std::make_shared<recording::Deck>();
}
_player->loadRecording(_recorder->getRecording());
_player->queueClip(_recorder->getClip());
_player->play();
}
void MyAvatar::startAnimation(const QString& url, float fps, float priority,

View file

@ -257,10 +257,10 @@ public slots:
bool setJointReferential(const QUuid& id, int jointIndex);
bool isRecording();
qint64 recorderElapsed();
float recorderElapsed();
void startRecording();
void stopRecording();
void saveRecording(QString filename);
void saveRecording(const QString& filename);
void loadLastRecording();
virtual void rebuildSkeletonBody() override;
@ -311,8 +311,8 @@ private:
const glm::vec3& translation = glm::vec3(), const glm::quat& rotation = glm::quat(), float scale = 1.0f,
bool allowDuplicates = false, bool useSaved = true) override;
const RecorderPointer getRecorder() const { return _recorder; }
const PlayerPointer getPlayer() const { return _player; }
const recording::RecorderPointer getRecorder() const { return _recorder; }
const recording::DeckPointer getPlayer() const { return _player; }
//void beginFollowingHMD();
//bool shouldFollowHMD() const;
@ -360,7 +360,7 @@ private:
eyeContactTarget _eyeContactTarget;
RecorderPointer _recorder;
recording::RecorderPointer _recorder;
glm::vec3 _trackedHeadPosition;

View file

@ -1,3 +1,3 @@
set(TARGET_NAME avatars)
setup_hifi_library(Network Script)
link_hifi_libraries(audio shared networking)
link_hifi_libraries(audio shared networking recording)

View file

@ -16,6 +16,9 @@
#include <QtCore/QDataStream>
#include <QtCore/QThread>
#include <QtCore/QUuid>
#include <QtCore/QJsonDocument>
#include <QtCore/QJsonArray>
#include <QtCore/QJsonObject>
#include <QtNetwork/QNetworkReply>
#include <QtNetwork/QNetworkRequest>
@ -25,6 +28,10 @@
#include <GLMHelpers.h>
#include <StreamUtils.h>
#include <UUID.h>
#include <shared/JSONHelpers.h>
#include <shared/UniformTransform.h>
#include <recording/Deck.h>
#include <recording/Clip.h>
#include "AvatarLogging.h"
#include "AvatarData.h"
@ -62,7 +69,6 @@ AvatarData::AvatarData() :
_targetVelocity(0.0f),
_localAABox(DEFAULT_LOCAL_AABOX_CORNER, DEFAULT_LOCAL_AABOX_SCALE)
{
}
AvatarData::~AvatarData() {
@ -791,7 +797,7 @@ bool AvatarData::isPaused() {
return _player && _player->isPaused();
}
qint64 AvatarData::playerElapsed() {
float AvatarData::playerElapsed() {
if (!_player) {
return 0;
}
@ -801,10 +807,10 @@ qint64 AvatarData::playerElapsed() {
Q_RETURN_ARG(qint64, result));
return result;
}
return _player->elapsed();
return (float)_player->position() / MSECS_PER_SECOND;
}
qint64 AvatarData::playerLength() {
float AvatarData::playerLength() {
if (!_player) {
return 0;
}
@ -814,28 +820,24 @@ qint64 AvatarData::playerLength() {
Q_RETURN_ARG(qint64, result));
return result;
}
return _player->getRecording()->getLength();
return _player->length() / MSECS_PER_SECOND;
}
int AvatarData::playerCurrentFrame() {
return (_player) ? _player->getCurrentFrame() : 0;
}
int AvatarData::playerFrameNumber() {
return (_player && _player->getRecording()) ? _player->getRecording()->getFrameNumber() : 0;
}
void AvatarData::loadRecording(QString filename) {
void AvatarData::loadRecording(const QString& filename) {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "loadRecording", Qt::BlockingQueuedConnection,
Q_ARG(QString, filename));
return;
}
if (!_player) {
_player = QSharedPointer<Player>::create(this);
using namespace recording;
ClipPointer clip = Clip::fromFile(filename);
if (!clip) {
qWarning() << "Unable to load clip data from " << filename;
}
_player->loadFromFile(filename);
_player = std::make_shared<Deck>();
_player->queueClip(clip);
}
void AvatarData::startPlaying() {
@ -843,70 +845,56 @@ void AvatarData::startPlaying() {
QMetaObject::invokeMethod(this, "startPlaying", Qt::BlockingQueuedConnection);
return;
}
if (!_player) {
_player = QSharedPointer<Player>::create(this);
qWarning() << "No clip loaded for playback";
return;
}
_player->startPlaying();
setRecordingBasis();
_player->play();
}
void AvatarData::setPlayerVolume(float volume) {
if (_player) {
_player->setVolume(volume);
}
// FIXME
}
void AvatarData::setPlayerAudioOffset(int audioOffset) {
if (_player) {
_player->setAudioOffset(audioOffset);
}
void AvatarData::setPlayerAudioOffset(float audioOffset) {
// FIXME
}
void AvatarData::setPlayerFrame(unsigned int frame) {
if (_player) {
_player->setCurrentFrame(frame);
}
}
void AvatarData::setPlayerTime(float time) {
if (!_player) {
qWarning() << "No player active";
return;
}
void AvatarData::setPlayerTime(unsigned int time) {
if (_player) {
_player->setCurrentTime(time);
}
_player->seek(time * MSECS_PER_SECOND);
}
void AvatarData::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
if (_player) {
_player->setPlayFromCurrentLocation(playFromCurrentLocation);
}
// FIXME
}
void AvatarData::setPlayerLoop(bool loop) {
if (_player) {
_player->setLoop(loop);
_player->loop(loop);
}
}
void AvatarData::setPlayerUseDisplayName(bool useDisplayName) {
if(_player) {
_player->useDisplayName(useDisplayName);
}
// FIXME
}
void AvatarData::setPlayerUseAttachments(bool useAttachments) {
if(_player) {
_player->useAttachements(useAttachments);
}
// FIXME
}
void AvatarData::setPlayerUseHeadModel(bool useHeadModel) {
if(_player) {
_player->useHeadModel(useHeadModel);
}
// FIXME
}
void AvatarData::setPlayerUseSkeletonModel(bool useSkeletonModel) {
if(_player) {
_player->useSkeletonModel(useSkeletonModel);
}
// FIXME
}
void AvatarData::play() {
@ -920,6 +908,10 @@ void AvatarData::play() {
}
}
std::shared_ptr<UniformTransform> AvatarData::getRecordingBasis() const {
return _recordingBasis;
}
void AvatarData::pausePlayer() {
if (!_player) {
return;
@ -929,7 +921,7 @@ void AvatarData::pausePlayer() {
return;
}
if (_player) {
_player->pausePlayer();
_player->pause();
}
}
@ -942,7 +934,7 @@ void AvatarData::stopPlaying() {
return;
}
if (_player) {
_player->stopPlaying();
_player->stop();
}
}
@ -1514,3 +1506,177 @@ void registerAvatarTypes(QScriptEngine* engine) {
new AttachmentDataObject(), QScriptEngine::ScriptOwnership));
}
void AvatarData::setRecordingBasis(std::shared_ptr<UniformTransform> recordingBasis) {
if (!recordingBasis) {
recordingBasis = std::make_shared<UniformTransform>();
recordingBasis->rotation = getOrientation();
recordingBasis->translation = getPosition();
recordingBasis->scale = getTargetScale();
}
_recordingBasis = recordingBasis;
}
void AvatarData::clearRecordingBasis() {
_recordingBasis.reset();
}
static const QString JSON_AVATAR_BASIS = QStringLiteral("basisTransform");
static const QString JSON_AVATAR_RELATIVE = QStringLiteral("relativeTransform");
static const QString JSON_AVATAR_JOINT_ROTATIONS = QStringLiteral("jointRotations");
static const QString JSON_AVATAR_HEAD = QStringLiteral("head");
static const QString JSON_AVATAR_HEAD_ROTATION = QStringLiteral("rotation");
static const QString JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS = QStringLiteral("blendShapes");
static const QString JSON_AVATAR_HEAD_LEAN_FORWARD = QStringLiteral("leanForward");
static const QString JSON_AVATAR_HEAD_LEAN_SIDEWAYS = QStringLiteral("leanSideways");
static const QString JSON_AVATAR_HEAD_LOOKAT = QStringLiteral("lookAt");
static const QString JSON_AVATAR_HEAD_MODEL = QStringLiteral("headModel");
static const QString JSON_AVATAR_BODY_MODEL = QStringLiteral("bodyModel");
static const QString JSON_AVATAR_DISPLAY_NAME = QStringLiteral("displayName");
static const QString JSON_AVATAR_ATTACHEMENTS = QStringLiteral("attachments");
// Every frame will store both a basis for the recording and a relative transform
// This allows the application to decide whether playback should be relative to an avatar's
// transform at the start of playback, or relative to the transform of the recorded
// avatar.
QByteArray avatarStateToFrame(const AvatarData* _avatar) {
QJsonObject root;
if (!_avatar->getFaceModelURL().isEmpty()) {
root[JSON_AVATAR_HEAD_MODEL] = _avatar->getFaceModelURL().toString();
}
if (!_avatar->getSkeletonModelURL().isEmpty()) {
root[JSON_AVATAR_BODY_MODEL] = _avatar->getSkeletonModelURL().toString();
}
if (!_avatar->getDisplayName().isEmpty()) {
root[JSON_AVATAR_DISPLAY_NAME] = _avatar->getDisplayName();
}
if (!_avatar->getAttachmentData().isEmpty()) {
// FIXME serialize attachment data
}
auto recordingBasis = _avatar->getRecordingBasis();
if (recordingBasis) {
// FIXME if the resulting relative basis is identity, we shouldn't record anything
// Record the transformation basis
root[JSON_AVATAR_BASIS] = recordingBasis->toJson();
// Record the relative transform
auto relativeTransform = recordingBasis->relativeTransform(
UniformTransform(_avatar->getPosition(), _avatar->getOrientation(), _avatar->getTargetScale()));
root[JSON_AVATAR_RELATIVE] = relativeTransform.toJson();
}
QJsonArray jointRotations;
for (const auto& jointRotation : _avatar->getJointRotations()) {
jointRotations.push_back(toJsonValue(jointRotation));
}
root[JSON_AVATAR_JOINT_ROTATIONS] = jointRotations;
const HeadData* head = _avatar->getHeadData();
if (head) {
QJsonObject headJson;
QJsonArray blendshapeCoefficients;
for (const auto& blendshapeCoefficient : head->getBlendshapeCoefficients()) {
blendshapeCoefficients.push_back(blendshapeCoefficient);
}
headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS] = blendshapeCoefficients;
headJson[JSON_AVATAR_HEAD_ROTATION] = toJsonValue(head->getRawOrientation());
headJson[JSON_AVATAR_HEAD_LEAN_FORWARD] = QJsonValue(head->getLeanForward());
headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS] = QJsonValue(head->getLeanSideways());
vec3 relativeLookAt = glm::inverse(_avatar->getOrientation()) *
(head->getLookAtPosition() - _avatar->getPosition());
headJson[JSON_AVATAR_HEAD_LOOKAT] = toJsonValue(relativeLookAt);
root[JSON_AVATAR_HEAD] = headJson;
}
return QJsonDocument(root).toBinaryData();
}
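
For reference, the binary QJsonDocument built above has roughly the following object shape. The keys come from the constants defined earlier; the values here are purely illustrative, and optional fields are omitted when empty (or, for the two transforms, when no recording basis is set):

{
    "headModel": "https://example.com/head.fst",
    "bodyModel": "https://example.com/body.fst",
    "displayName": "SomeAvatar",
    "basisTransform": { "translation": [x, y, z], "rotation": [x, y, z, w], "scale": s },
    "relativeTransform": { "translation": [x, y, z], "rotation": [x, y, z, w], "scale": s },
    "jointRotations": [ [x, y, z, w], ... ],
    "head": {
        "blendShapes": [ ... ],
        "rotation": [x, y, z, w],
        "leanForward": f,
        "leanSideways": f,
        "lookAt": [x, y, z]
    }
}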
void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) {
QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
QJsonObject root = doc.object();
if (root.contains(JSON_AVATAR_HEAD_MODEL)) {
auto faceModelURL = root[JSON_AVATAR_HEAD_MODEL].toString();
if (faceModelURL != _avatar->getFaceModelURL().toString()) {
_avatar->setFaceModelURL(faceModelURL);
}
}
if (root.contains(JSON_AVATAR_BODY_MODEL)) {
auto bodyModelURL = root[JSON_AVATAR_BODY_MODEL].toString();
if (bodyModelURL != _avatar->getSkeletonModelURL().toString()) {
_avatar->setSkeletonModelURL(bodyModelURL);
}
}
if (root.contains(JSON_AVATAR_DISPLAY_NAME)) {
auto newDisplayName = root[JSON_AVATAR_DISPLAY_NAME].toString();
if (newDisplayName != _avatar->getDisplayName()) {
_avatar->setDisplayName(newDisplayName);
}
}
// During playback you can either have the recording basis set to the avatar's current state,
// meaning that all playback is relative to this avatar's starting position, or
// the basis can be loaded from the recording, meaning the playback is relative to the
// original avatar location.
// The former is more useful for playing back recordings on your own avatar, while
// the latter is more useful for playing back other avatars within your scene.
auto currentBasis = _avatar->getRecordingBasis();
if (!currentBasis) {
currentBasis = UniformTransform::parseJson(root[JSON_AVATAR_BASIS]);
}
auto relativeTransform = UniformTransform::parseJson(root[JSON_AVATAR_RELATIVE]);
auto worldTransform = currentBasis->worldTransform(*relativeTransform);
_avatar->setPosition(worldTransform.translation);
_avatar->setOrientation(worldTransform.rotation);
_avatar->setTargetScale(worldTransform.scale);
#if 0
if (root.contains(JSON_AVATAR_ATTACHEMENTS)) {
// FIXME de-serialize attachment data
}
// Joint rotations are relative to the avatar, so they require no basis correction
if (root.contains(JSON_AVATAR_JOINT_ROTATIONS)) {
QVector<quat> jointRotations;
QJsonArray jointRotationsJson = root[JSON_AVATAR_JOINT_ROTATIONS].toArray();
jointRotations.reserve(jointRotationsJson.size());
for (const auto& jointRotationJson : jointRotationsJson) {
jointRotations.push_back(quatFromJsonValue(jointRotationJson));
}
}
// Most head data is relative to the avatar, and needs no basis correction,
// but the lookat vector does need correction
HeadData* head = _avatar->_headData;
if (head && root.contains(JSON_AVATAR_HEAD)) {
QJsonObject headJson = root[JSON_AVATAR_HEAD].toObject();
if (headJson.contains(JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS)) {
QVector<float> blendshapeCoefficients;
QJsonArray blendshapeCoefficientsJson = headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS].toArray();
for (const auto& blendshapeCoefficient : blendshapeCoefficientsJson) {
blendshapeCoefficients.push_back((float)blendshapeCoefficient.toDouble());
}
head->setBlendshapeCoefficients(blendshapeCoefficients);
}
if (headJson.contains(JSON_AVATAR_HEAD_ROTATION)) {
head->setOrientation(quatFromJsonValue(headJson[JSON_AVATAR_HEAD_ROTATION]));
}
if (headJson.contains(JSON_AVATAR_HEAD_LEAN_FORWARD)) {
head->setLeanForward((float)headJson[JSON_AVATAR_HEAD_LEAN_FORWARD].toDouble());
}
if (headJson.contains(JSON_AVATAR_HEAD_LEAN_SIDEWAYS)) {
head->setLeanSideways((float)headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS].toDouble());
}
if (headJson.contains(JSON_AVATAR_HEAD_LOOKAT)) {
auto relativeLookAt = vec3FromJsonValue(headJson[JSON_AVATAR_HEAD_LOOKAT]);
if (glm::length2(relativeLookAt) > 0.01) {
head->setLookAtPosition((_avatar->getOrientation() * relativeLookAt) + _avatar->getPosition());
}
}
}
#endif
}

View file

@ -134,6 +134,7 @@ class QDataStream;
class AttachmentData;
class JointData;
struct UniformTransform;
class AvatarData : public QObject {
Q_OBJECT
@ -332,6 +333,11 @@ public:
bool shouldDie() const { return _owningAvatarMixer.isNull() || getUsecsSinceLastUpdate() > AVATAR_SILENCE_THRESHOLD_USECS; }
void clearRecordingBasis();
std::shared_ptr<UniformTransform> getRecordingBasis() const;
void setRecordingBasis(std::shared_ptr<UniformTransform> recordingBasis = std::shared_ptr<UniformTransform>());
public slots:
void sendAvatarDataPacket();
void sendIdentityPacket();
@ -344,17 +350,13 @@ public slots:
bool isPlaying();
bool isPaused();
qint64 playerElapsed();
qint64 playerLength();
int playerCurrentFrame();
int playerFrameNumber();
void loadRecording(QString filename);
float playerElapsed();
float playerLength();
void loadRecording(const QString& filename);
void startPlaying();
void setPlayerVolume(float volume);
void setPlayerAudioOffset(int audioOffset);
void setPlayerFrame(unsigned int frame);
void setPlayerTime(unsigned int time);
void setPlayerAudioOffset(float audioOffset);
void setPlayerTime(float time);
void setPlayFromCurrentLocation(bool playFromCurrentLocation);
void setPlayerLoop(bool loop);
void setPlayerUseDisplayName(bool useDisplayName);
@ -364,7 +366,7 @@ public slots:
void play();
void pausePlayer();
void stopPlaying();
protected:
QUuid _sessionUUID;
glm::vec3 _position = START_LOCATION;
@ -418,7 +420,7 @@ protected:
QWeakPointer<Node> _owningAvatarMixer;
PlayerPointer _player;
recording::DeckPointer _player;
/// Loads the joint indices, names from the FST file (if any)
virtual void updateJointMappings();
@ -432,8 +434,13 @@ protected:
SimpleMovingAverage _averageBytesReceived;
QMutex avatarLock; // Name is redundant, but it aids searches.
// During recording, this holds the starting position, orientation & scale of the recorded avatar
// During playback, it holds the basis that recorded frames are replayed relative to
// (if unset, the basis stored in each frame is used instead)
std::shared_ptr<UniformTransform> _recordingBasis;
private:
friend void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar);
static QUrl _defaultFullAvatarModelUrl;
// privatize the copy constructor and assignment operator so they cannot be called
AvatarData(const AvatarData&);

View file

@ -42,8 +42,20 @@ HeadData::HeadData(AvatarData* owningAvatar) :
}
glm::quat HeadData::getRawOrientation() const {
return glm::quat(glm::radians(glm::vec3(_basePitch, _baseYaw, _baseRoll)));
}
void HeadData::setRawOrientation(const glm::quat& q) {
auto euler = glm::eulerAngles(q);
_basePitch = euler.x;
_baseYaw = euler.y;
_baseRoll = euler.z;
}
glm::quat HeadData::getOrientation() const {
return _owningAvatar->getOrientation() * glm::quat(glm::radians(glm::vec3(_basePitch, _baseYaw, _baseRoll)));
return _owningAvatar->getOrientation() * getRawOrientation();
}
void HeadData::setOrientation(const glm::quat& orientation) {

View file

@ -48,6 +48,8 @@ public:
virtual float getFinalYaw() const { return _baseYaw; }
virtual float getFinalPitch() const { return _basePitch; }
virtual float getFinalRoll() const { return _baseRoll; }
virtual glm::quat getRawOrientation() const;
virtual void setRawOrientation(const glm::quat& orientation);
glm::quat getOrientation() const;
void setOrientation(const glm::quat& orientation);

View file

@ -9,6 +9,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include <AudioConstants.h>
#include <GLMHelpers.h>
#include <NodeList.h>
@ -438,3 +440,4 @@ bool Player::computeCurrentFrame() {
return true;
}
#endif

View file

@ -12,6 +12,9 @@
#ifndef hifi_Player_h
#define hifi_Player_h
#include <recording/Forward.h>
#if 0
#include <AudioInjector.h>
#include <QElapsedTimer>
@ -86,5 +89,6 @@ private:
bool _useHeadURL;
bool _useSkeletonURL;
};
#endif
#endif // hifi_Player_h

View file

@ -10,6 +10,7 @@
//
#if 0
#include <GLMHelpers.h>
#include <NodeList.h>
#include <StreamUtils.h>
@ -143,3 +144,4 @@ void Recorder::record() {
void Recorder::recordAudio(const QByteArray& audioByteArray) {
_recording->addAudioPacket(audioByteArray);
}
#endif

View file

@ -12,6 +12,9 @@
#ifndef hifi_Recorder_h
#define hifi_Recorder_h
#include <recording/Forward.h>
#if 0
#include "Recording.h"
template<class C>
@ -49,6 +52,6 @@ private:
AvatarData* _avatar;
};
#endif
#endif // hifi_Recorder_h

View file

@ -9,6 +9,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include <AudioConstants.h>
#include <GLMHelpers.h>
#include <NetworkAccessManager.h>
@ -659,3 +660,4 @@ RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString
return recording;
}
#endif

View file

@ -12,6 +12,8 @@
#ifndef hifi_Recording_h
#define hifi_Recording_h
#if 0
#include <QString>
#include <QVector>
@ -124,5 +126,6 @@ private:
void writeRecordingToFile(RecordingPointer recording, const QString& filename);
RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& filename);
RecordingPointer readRecordingFromRecFile(RecordingPointer recording, const QString& filename, const QByteArray& byteArray);
#endif
#endif // hifi_Recording_h

View file

@ -35,7 +35,7 @@ Clip::Pointer Clip::duplicate() {
Clip::Pointer result = std::make_shared<BufferClip>();
Locker lock(_mutex);
float currentPosition = position();
Time currentPosition = position();
seek(0);
Frame::Pointer frame = nextFrame();

View file

@ -28,11 +28,11 @@ public:
Pointer duplicate();
virtual float duration() const = 0;
virtual Time duration() const = 0;
virtual size_t frameCount() const = 0;
virtual void seek(float offset) = 0;
virtual float position() const = 0;
virtual void seek(Time offset) = 0;
virtual Time position() const = 0;
virtual FramePointer peekFrame() const = 0;
virtual FramePointer nextFrame() = 0;

View file

@ -6,7 +6,116 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Deck.h"
#include <NumericalConstants.h>
#include <SharedUtil.h>
// FIXME -- DO NOT include headers in empty CPP files, it produces warnings. Once we define new symbols
// and some actual code here, we can uncomment this include.
//#include "Deck.h"
#include "Clip.h"
#include "Frame.h"
#include "Logging.h"
using namespace recording;
void Deck::queueClip(ClipPointer clip, Time timeOffset) {
if (!clip) {
qCWarning(recordingLog) << "Clip invalid, ignoring";
return;
}
// FIXME if the time offset is not zero, wrap the clip in a OffsetClip wrapper
_clips.push_back(clip);
}
void Deck::play() {
if (_pause) {
_pause = false;
_startEpoch = usecTimestampNow() - (_position * USECS_PER_MSEC);
emit playbackStateChanged();
processFrames();
}
}
void Deck::pause() {
if (!_pause) {
_pause = true;
emit playbackStateChanged();
}
}
Clip::Pointer Deck::getNextClip() {
Clip::Pointer result;
Time soonestFramePosition = INVALID_TIME;
for (const auto& clip : _clips) {
Time nextFramePosition = clip->position();
if (nextFramePosition < soonestFramePosition) {
result = clip;
soonestFramePosition = nextFramePosition;
}
}
return result;
}
void Deck::seek(Time position) {
_position = position;
// FIXME reset the frames to the appropriate spot
for (auto& clip : _clips) {
clip->seek(position);
}
if (!_pause) {
// FIXME what if the timer is already running?
processFrames();
}
}
Time Deck::position() const {
if (_pause) {
return _position;
}
return (usecTimestampNow() - _startEpoch) / USECS_PER_MSEC;
}
static const Time MIN_FRAME_WAIT_INTERVAL_MS = 1;
void Deck::processFrames() {
if (_pause) {
return;
}
_position = position();
auto triggerPosition = _position + MIN_FRAME_WAIT_INTERVAL_MS;
Clip::Pointer nextClip;
for (nextClip = getNextClip(); nextClip; nextClip = getNextClip()) {
// If the clip is too far in the future, just break out of the handling loop
Time framePosition = nextClip->position();
if (framePosition > triggerPosition) {
break;
}
// Handle the frame and advance the clip
Frame::handleFrame(nextClip->nextFrame());
}
if (!nextClip) {
qCDebug(recordingLog) << "No more frames available";
// No more frames available, so handle the end of playback
if (_loop) {
qCDebug(recordingLog) << "Looping enabled, seeking back to beginning";
// If we have looping enabled, start the playback over
seek(0);
} else {
// otherwise pause playback
pause();
}
return;
}
// If we have more clip frames available, set the timer for the next one
Time nextClipPosition = nextClip->position();
Time interval = nextClipPosition - _position;
_timer.singleShot(interval, [this] {
processFrames();
});
}

View file

@ -10,26 +10,62 @@
#ifndef hifi_Recording_Deck_h
#define hifi_Recording_Deck_h
#include "Forward.h"
#include <utility>
#include <list>
#include <QtCore/QObject>
#include <QtCore/QTimer>
#include "Forward.h"
class QIODevice;
namespace recording {
class Deck : public QObject {
Q_OBJECT
public:
using Pointer = std::shared_ptr<Deck>;
Deck(QObject* parent = nullptr) : QObject(parent) {}
virtual ~Deck();
// Place a clip on the deck for recording or playback
void queueClip(ClipPointer clip, float timeOffset = 0.0f);
void play(float timeOffset = 0.0f);
void reposition(float timeOffsetDelta);
void setPlaybackSpeed(float rate);
void queueClip(ClipPointer clip, Time timeOffset = 0.0f);
void play();
bool isPlaying() { return !_pause; }
void pause();
bool isPaused() const { return _pause; }
void stop() { pause(); seek(0.0f); }
Time length() const { return _length; }
void loop(bool enable = true) { _loop = enable; }
bool isLooping() const { return _loop; }
Time position() const;
void seek(Time position);
void setPlaybackSpeed(float factor) { _playbackSpeed = factor; }
float getPlaybackSpeed() { return _playbackSpeed; }
signals:
void playbackStateChanged();
private:
using Clips = std::list<ClipPointer>;
ClipPointer getNextClip();
void processFrames();
QTimer _timer;
Clips _clips;
quint64 _startEpoch { 0 };
Time _position { 0 };
float _playbackSpeed { 1.0f };
bool _pause { true };
bool _loop { false };
Time _length { 0 };
};
}
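
Taken together, a minimal sketch of driving the new Deck API, assuming "clip" is a valid ClipPointer obtained elsewhere (e.g. from Clip::fromFile() or Recorder::getClip(), as in the code above):

auto player = std::make_shared<recording::Deck>();
player->queueClip(clip);  // non-zero time offsets are still a FIXME in queueClip()
player->loop(true);       // seek back to 0 once the last frame has been handled
player->play();           // dispatches frames through Frame::handleFrame()
// ... later ...
player->seek(0);          // repositions every queued clip
player->pause();          // position() then freezes at the paused time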

View file

@ -12,11 +12,18 @@
#include <memory>
#include <list>
#include <limits>
namespace recording {
using Time = uint32_t;
static const Time INVALID_TIME = std::numeric_limits<uint32_t>::max();
using FrameType = uint16_t;
using FrameSize = uint16_t;
struct Frame;
using FramePointer = std::shared_ptr<Frame>;

View file

@ -82,7 +82,8 @@ FrameType Frame::registerFrameType(const QString& frameTypeName) {
Q_ASSERT(headerType == Frame::TYPE_HEADER);
Q_UNUSED(headerType); // FIXME - build system on unix still not upgraded to Qt 5.5.1 so Q_ASSERT still produces warnings
});
return frameTypes.registerValue(frameTypeName);
auto result = frameTypes.registerValue(frameTypeName);
return result;
}
QMap<QString, FrameType> Frame::getFrameTypes() {
@ -102,3 +103,16 @@ Frame::Handler Frame::registerFrameHandler(FrameType type, Handler handler) {
handlerMap[type] = handler;
return result;
}
void Frame::handleFrame(const Frame::Pointer& frame) {
Handler handler;
{
Locker lock(mutex);
auto iterator = handlerMap.find(frame->type);
if (iterator == handlerMap.end()) {
return;
}
handler = *iterator;
}
handler(frame);
}
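
The registry above pairs registerFrameType() with registerFrameHandler(); a sketch of the round trip, using an illustrative frame-type name (the avatar code registers the real "com.highfidelity.recording.AvatarData"):

using namespace recording;
static const QString EXAMPLE_FRAME_NAME = "com.example.recording.Example"; // hypothetical
FrameType exampleType = Frame::registerFrameType(EXAMPLE_FRAME_NAME);
Frame::registerFrameHandler(exampleType, [](Frame::Pointer frame) {
    qDebug() << "Example frame at" << frame->timeOffset << "ms,"
             << frame->data.size() << "byte payload";
});
// During playback, Deck::processFrames() calls Frame::handleFrame(frame),
// which looks up and invokes the handler registered for frame->type.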

View file

@ -26,7 +26,7 @@ public:
static const FrameType TYPE_INVALID = 0xFFFF;
static const FrameType TYPE_HEADER = 0x0;
FrameType type { TYPE_INVALID };
float timeOffset { 0 };
Time timeOffset { 0 };
QByteArray data;
Frame() {}
@ -37,6 +37,7 @@ public:
static QMap<QString, FrameType> getFrameTypes();
static QMap<FrameType, QString> getFrameTypeNames();
static Handler registerFrameHandler(FrameType type, Handler handler);
static void handleFrame(const Pointer& frame);
};
}

View file

@ -9,25 +9,35 @@
#include "Recorder.h"
#include <NumericalConstants.h>
#include <SharedUtil.h>
#include "impl/BufferClip.h"
#include "Frame.h"
using namespace recording;
Recorder::~Recorder() {
}
Time Recorder::position() {
return 0.0f;
}
void Recorder::start() {
if (!_recording) {
_recording = true;
if (!_clip) {
_clip = std::make_shared<BufferClip>();
}
_startEpoch = usecTimestampNow();
_timer.start();
emit recordingStateChanged();
}
}
void Recorder::stop() {
if (!_recording) {
if (_recording) {
_recording = false;
_elapsed = _timer.elapsed();
emit recordingStateChanged();
@ -50,13 +60,11 @@ void Recorder::recordFrame(FrameType type, QByteArray frameData) {
Frame::Pointer frame = std::make_shared<Frame>();
frame->type = type;
frame->data = frameData;
frame->timeOffset = (float)(_elapsed + _timer.elapsed()) / MSECS_PER_SECOND;
frame->timeOffset = (usecTimestampNow() - _startEpoch) / USECS_PER_MSEC;
_clip->addFrame(frame);
}
ClipPointer Recorder::getClip() {
auto result = _clip;
_clip.reset();
return result;
return _clip;
}

View file

@ -20,18 +20,23 @@ namespace recording {
// An interface for interacting with clips, creating them by recording or
// playing them back. Also serialization to and from files / network sources
class Recorder : public QObject {
Q_OBJECT
public:
using Pointer = std::shared_ptr<Recorder>;
Recorder(QObject* parent = nullptr) : QObject(parent) {}
virtual ~Recorder();
Time position();
// Start recording frames
void start();
// Stop recording
void stop();
// Test if recording is active
bool isRecording();
// Erase the currently recorded content
void clear();
@ -46,7 +51,8 @@ signals:
private:
QElapsedTimer _timer;
ClipPointer _clip;
quint64 _elapsed;
quint64 _elapsed { 0 };
quint64 _startEpoch { 0 };
bool _recording { false };
};
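
A sketch of the intended record-then-play flow across Recorder, Clip and Deck, mirroring what MyAvatar::startRecording(), saveRecording() and loadLastRecording() do in this commit ("avatar" and "filename" are assumed to exist in the caller):

auto recorder = std::make_shared<recording::Recorder>();
recorder->start();
// once per simulation step while recording:
recorder->recordFrame(AVATAR_FRAME_TYPE, avatarStateToFrame(avatar));
recorder->stop();
// getClip() now returns the accumulated clip without clearing it,
// so the same clip can be both saved and queued for playback:
recording::Clip::toFile(filename, recorder->getClip());
auto player = std::make_shared<recording::Deck>();
player->queueClip(recorder->getClip());
player->play();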

View file

@ -8,24 +8,26 @@
#include "BufferClip.h"
#include <NumericalConstants.h>
#include "../Frame.h"
using namespace recording;
void BufferClip::seek(float offset) {
void BufferClip::seek(Time offset) {
Locker lock(_mutex);
auto itr = std::lower_bound(_frames.begin(), _frames.end(), offset,
[](Frame::Pointer a, float b)->bool{
auto itr = std::lower_bound(_frames.begin(), _frames.end(), offset,
[](Frame::Pointer a, Time b)->bool {
return a->timeOffset < b;
}
);
_frameIndex = itr - _frames.begin();
}
float BufferClip::position() const {
Time BufferClip::position() const {
Locker lock(_mutex);
float result = std::numeric_limits<float>::max();
Time result = INVALID_TIME;
if (_frameIndex < _frames.size()) {
result = _frames[_frameIndex]->timeOffset;
}
@ -77,7 +79,7 @@ void BufferClip::reset() {
_frameIndex = 0;
}
float BufferClip::duration() const {
Time BufferClip::duration() const {
if (_frames.empty()) {
return 0;
}

View file

@ -22,11 +22,11 @@ public:
virtual ~BufferClip() {}
virtual float duration() const override;
virtual Time duration() const override;
virtual size_t frameCount() const override;
virtual void seek(float offset) override;
virtual float position() const override;
virtual void seek(Time offset) override;
virtual Time position() const override;
virtual FramePointer peekFrame() const override;
virtual FramePointer nextFrame() override;

View file

@ -22,7 +22,7 @@
using namespace recording;
static const qint64 MINIMUM_FRAME_SIZE = sizeof(FrameType) + sizeof(float) + sizeof(uint16_t);
static const qint64 MINIMUM_FRAME_SIZE = sizeof(FrameType) + sizeof(Time) + sizeof(FrameSize);
static const QString FRAME_TYPE_MAP = QStringLiteral("frameTypes");
@ -60,10 +60,10 @@ FrameHeaderList parseFrameHeaders(uchar* const start, const qint64& size) {
FrameHeader header;
memcpy(&(header.type), current, sizeof(FrameType));
current += sizeof(FrameType);
memcpy(&(header.timeOffset), current, sizeof(float));
current += sizeof(float);
memcpy(&(header.size), current, sizeof(uint16_t));
current += sizeof(uint16_t);
memcpy(&(header.timeOffset), current, sizeof(Time));
current += sizeof(Time);
memcpy(&(header.size), current, sizeof(FrameSize));
current += sizeof(FrameSize);
header.fileOffset = current - start;
if (end - current < header.size) {
current = end;
@ -117,6 +117,7 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) {
qWarning() << "Header missing frame type map, invalid file";
return;
}
qDebug() << translationMap;
// Update the loaded headers with the frame data
_frameHeaders.reserve(parsedFrameHeaders.size());
@ -132,16 +133,21 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) {
// FIXME move to frame?
bool writeFrame(QIODevice& output, const Frame& frame) {
if (frame.type == Frame::TYPE_INVALID) {
qWarning() << "Attempting to write invalid frame";
return true;
}
auto written = output.write((char*)&(frame.type), sizeof(FrameType));
if (written != sizeof(FrameType)) {
return false;
}
written = output.write((char*)&(frame.timeOffset), sizeof(float));
if (written != sizeof(float)) {
written = output.write((char*)&(frame.timeOffset), sizeof(Time));
if (written != sizeof(Time)) {
return false;
}
uint16_t dataSize = frame.data.size();
written = output.write((char*)&dataSize, sizeof(uint16_t));
written = output.write((char*)&dataSize, sizeof(FrameSize));
if (written != sizeof(uint16_t)) {
return false;
}
@ -201,19 +207,19 @@ FileClip::~FileClip() {
}
}
void FileClip::seek(float offset) {
void FileClip::seek(Time offset) {
Locker lock(_mutex);
auto itr = std::lower_bound(_frameHeaders.begin(), _frameHeaders.end(), offset,
[](const FrameHeader& a, float b)->bool {
[](const FrameHeader& a, Time b)->bool {
return a.timeOffset < b;
}
);
_frameIndex = itr - _frameHeaders.begin();
}
float FileClip::position() const {
Time FileClip::position() const {
Locker lock(_mutex);
float result = std::numeric_limits<float>::max();
Time result = INVALID_TIME;
if (_frameIndex < _frameHeaders.size()) {
result = _frameHeaders[_frameIndex].timeOffset;
}
@ -260,7 +266,7 @@ void FileClip::addFrame(FramePointer) {
throw std::runtime_error("File clips are read only");
}
float FileClip::duration() const {
Time FileClip::duration() const {
if (_frameHeaders.empty()) {
return 0;
}
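
Pieced together from parseFrameHeaders() and writeFrame(), each frame in a clip file is a packed record; a sketch of the implied layout (fields are memcpy'd directly, so this assumes the host's native byte order):

// Implied on-disk frame record, fields back-to-back:
//   FrameType type;        // uint16_t; TYPE_HEADER (0x0) for the file header frame
//   Time      timeOffset;  // uint32_t, milliseconds from the start of the clip
//   FrameSize size;        // uint16_t, payload length in bytes
//   uint8_t   payload[size];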

View file

@ -26,11 +26,11 @@ public:
FileClip(const QString& file);
virtual ~FileClip();
virtual float duration() const override;
virtual Time duration() const override;
virtual size_t frameCount() const override;
virtual void seek(float offset) override;
virtual float position() const override;
virtual void seek(Time offset) override;
virtual Time position() const override;
virtual FramePointer peekFrame() const override;
virtual FramePointer nextFrame() override;
@ -45,7 +45,7 @@ public:
struct FrameHeader {
FrameType type;
float timeOffset;
Time timeOffset;
uint16_t size;
quint64 fileOffset;
};

View file

@ -1,3 +1,3 @@
set(TARGET_NAME script-engine)
setup_hifi_library(Gui Network Script WebSockets Widgets)
link_hifi_libraries(shared networking octree gpu procedural model model-networking fbx entities controllers animation audio physics)
link_hifi_libraries(shared networking octree gpu procedural model model-networking recording avatars fbx entities controllers animation audio physics)

View file

@ -0,0 +1,57 @@
//
// Created by Bradley Austin Davis on 2015/11/09
// Copyright 2013-2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "JSONHelpers.h"
#include <QtCore/QJsonValue>
#include <QtCore/QJsonObject>
#include <QtCore/QJsonArray>
template <typename T>
QJsonValue glmToJson(const T& t) {
static const T DEFAULT_VALUE = T();
if (t == DEFAULT_VALUE) {
return QJsonValue();
}
QJsonArray result;
for (auto i = 0; i < t.length(); ++i) {
result.push_back(t[i]);
}
return result;
}
template <typename T>
T glmFromJson(const QJsonValue& json) {
static const T DEFAULT_VALUE = T();
T result;
if (json.isArray()) {
QJsonArray array = json.toArray();
size_t length = std::min(array.size(), result.length());
for (size_t i = 0; i < length; ++i) {
result[i] = (float)array[i].toDouble();
}
}
return result;
}
QJsonValue toJsonValue(const quat& q) {
return glmToJson(q);
}
QJsonValue toJsonValue(const vec3& v) {
return glmToJson(v);
}
quat quatFromJsonValue(const QJsonValue& q) {
return glmFromJson<quat>(q);
}
vec3 vec3FromJsonValue(const QJsonValue& v) {
return glmFromJson<vec3>(v);
}
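
A round-trip sketch of these helpers; note that a default-constructed value deliberately serializes to a null QJsonValue, which callers such as UniformTransform::toJson() use to skip identity fields:

glm::quat rotation = glm::angleAxis(1.57f, glm::vec3(0.0f, 1.0f, 0.0f)); // ~90 degree yaw
QJsonValue json = toJsonValue(rotation);       // four-element array [x, y, z, w]
glm::quat restored = quatFromJsonValue(json);  // recovers the original rotation
QJsonValue empty = toJsonValue(glm::quat());   // identity quat -> null QJsonValue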

View file

@ -0,0 +1,23 @@
//
// Created by Bradley Austin Davis on 2015/11/09
// Copyright 2013-2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#ifndef hifi_Shared_JSONHelpers_h
#define hifi_Shared_JSONHelpers_h
#include "../GLMHelpers.h"
QJsonValue toJsonValue(const quat& q);
QJsonValue toJsonValue(const vec3& q);
quat quatFromJsonValue(const QJsonValue& q);
vec3 vec3FromJsonValue(const QJsonValue& q);
#endif

View file

@ -0,0 +1,84 @@
//
// Created by Bradley Austin Davis on 2015/11/09
// Copyright 2013-2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "UniformTransform.h"
#include "JSONHelpers.h"
#include <QtCore/QJsonValue>
#include <QtCore/QJsonObject>
#include <QtCore/QJsonArray>
#include <glm/gtc/matrix_transform.hpp>
const float UniformTransform::DEFAULT_SCALE = 1.0f;
std::shared_ptr<UniformTransform> UniformTransform::parseJson(const QJsonValue& basis) {
std::shared_ptr<UniformTransform> result = std::make_shared<UniformTransform>();
result->fromJson(basis);
return result;
}
static const QString JSON_TRANSLATION = QStringLiteral("translation");
static const QString JSON_ROTATION = QStringLiteral("rotation");
static const QString JSON_SCALE = QStringLiteral("scale");
void UniformTransform::fromJson(const QJsonValue& basisValue) {
if (!basisValue.isObject()) {
return;
}
QJsonObject basis = basisValue.toObject();
if (basis.contains(JSON_ROTATION)) {
rotation = quatFromJsonValue(basis[JSON_ROTATION]);
}
if (basis.contains(JSON_TRANSLATION)) {
translation = vec3FromJsonValue(basis[JSON_TRANSLATION]);
}
if (basis.contains(JSON_SCALE)) {
scale = (float)basis[JSON_SCALE].toDouble();
}
}
glm::mat4 toMat4(const UniformTransform& transform) {
return glm::translate(glm::mat4(), transform.translation) * glm::mat4_cast(transform.rotation);
}
UniformTransform fromMat4(const glm::mat4& m) {
UniformTransform result;
result.translation = vec3(m[3]);
result.rotation = glm::quat_cast(m);
return result;
}
UniformTransform UniformTransform::relativeTransform(const UniformTransform& worldTransform) const {
UniformTransform result = fromMat4(glm::inverse(toMat4(*this)) * toMat4(worldTransform));
result.scale = scale / worldTransform.scale;
return result;
}
UniformTransform UniformTransform::worldTransform(const UniformTransform& relativeTransform) const {
UniformTransform result = fromMat4(toMat4(*this) * toMat4(relativeTransform));
result.scale = relativeTransform.scale * scale;
return result;
}
QJsonObject UniformTransform::toJson() const {
QJsonObject result;
auto json = toJsonValue(translation);
if (!json.isNull()) {
result[JSON_TRANSLATION] = json;
}
json = toJsonValue(rotation);
if (!json.isNull()) {
result[JSON_ROTATION] = json;
}
if (scale != DEFAULT_SCALE) {
result[JSON_SCALE] = scale;
}
return result;
}
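
relativeTransform() and worldTransform() are intended as inverses: recording stores the avatar's transform expressed in the basis frame, and playback composes it back out. A sketch with unit scales, for which the round trip is exact (note that the scale convention above divides the basis scale by the world scale, so non-unit scales do not round-trip symmetrically):

UniformTransform basis(glm::vec3(1.0f, 0.0f, 0.0f), glm::quat(), 1.0f);
UniformTransform world(glm::vec3(3.0f, 2.0f, 0.0f), glm::quat(), 1.0f);
// What a frame records: "world" expressed relative to "basis".
UniformTransform relative = basis.relativeTransform(world);  // translation (2, 2, 0)
// What playback reconstructs, possibly against a different basis.
UniformTransform restored = basis.worldTransform(relative);  // translation (3, 2, 0)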

View file

@ -0,0 +1,40 @@
//
// Created by Bradley Austin Davis on 2015/11/09
// Copyright 2013-2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#ifndef hifi_Shared_UniformTransform_h
#define hifi_Shared_UniformTransform_h
#include "../GLMHelpers.h"
class QJsonValue;
struct UniformTransform {
static const float DEFAULT_SCALE;
glm::vec3 translation;
glm::quat rotation;
float scale { DEFAULT_SCALE };
UniformTransform() {}
UniformTransform(const glm::vec3& translation, const glm::quat& rotation, const float& scale)
: translation(translation), rotation(rotation), scale(scale) {}
UniformTransform relativeTransform(const UniformTransform& worldTransform) const;
glm::vec3 relativeVector(const UniformTransform& worldTransform) const;
UniformTransform worldTransform(const UniformTransform& relativeTransform) const;
glm::vec3 worldVector(const UniformTransform& relativeTransform) const;
QJsonObject toJson() const;
void fromJson(const QJsonValue& json);
static std::shared_ptr<UniformTransform> parseJson(const QJsonValue& json);
};
#endif