Move recording interface back to float/seconds

This commit is contained in:
Brad Davis 2015-11-13 17:56:14 -08:00
parent 35f7a1cf10
commit cb26fc67fc
34 changed files with 1270 additions and 680 deletions

View file

@ -15,11 +15,11 @@ Script.include("../../libraries/toolBars.js");
var recordingFile = "recording.rec";
function setPlayerOptions() {
MyAvatar.setPlayFromCurrentLocation(true);
MyAvatar.setPlayerUseDisplayName(false);
MyAvatar.setPlayerUseAttachments(false);
MyAvatar.setPlayerUseHeadModel(false);
MyAvatar.setPlayerUseSkeletonModel(false);
Recording.setPlayFromCurrentLocation(true);
Recording.setPlayerUseDisplayName(false);
Recording.setPlayerUseAttachments(false);
Recording.setPlayerUseHeadModel(false);
Recording.setPlayerUseSkeletonModel(false);
}
var windowDimensions = Controller.getViewportDimensions();
@ -64,16 +64,16 @@ function setupToolBar() {
x: 0, y: 0,
width: Tool.IMAGE_WIDTH,
height: Tool.IMAGE_HEIGHT,
alpha: MyAvatar.isPlaying() ? ALPHA_OFF : ALPHA_ON,
alpha: Recording.isPlaying() ? ALPHA_OFF : ALPHA_ON,
visible: true
}, true, !MyAvatar.isRecording());
}, true, !Recording.isRecording());
var playLoopWidthFactor = 1.65;
playIcon = toolBar.addTool({
imageURL: TOOL_ICON_URL + "play-pause.svg",
width: playLoopWidthFactor * Tool.IMAGE_WIDTH,
height: Tool.IMAGE_HEIGHT,
alpha: (MyAvatar.isRecording() || MyAvatar.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON,
alpha: (Recording.isRecording() || Recording.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON,
visible: true
}, false);
@ -82,7 +82,7 @@ function setupToolBar() {
subImage: { x: 0, y: 0, width: playLoopWidthFactor * Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT },
width: playLoopWidthFactor * Tool.IMAGE_WIDTH,
height: Tool.IMAGE_HEIGHT,
alpha: (MyAvatar.isRecording() || MyAvatar.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON,
alpha: (Recording.isRecording() || Recording.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON,
visible: true
}, false);
@ -93,7 +93,7 @@ function setupToolBar() {
imageURL: TOOL_ICON_URL + "recording-save.svg",
width: Tool.IMAGE_WIDTH,
height: Tool.IMAGE_HEIGHT,
alpha: (MyAvatar.isRecording() || MyAvatar.isPlaying() || MyAvatar.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON,
alpha: (Recording.isRecording() || Recording.isPlaying() || Recording.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON,
visible: true
}, false);
@ -101,7 +101,7 @@ function setupToolBar() {
imageURL: TOOL_ICON_URL + "recording-upload.svg",
width: Tool.IMAGE_WIDTH,
height: Tool.IMAGE_HEIGHT,
alpha: (MyAvatar.isRecording() || MyAvatar.isPlaying()) ? ALPHA_OFF : ALPHA_ON,
alpha: (Recording.isRecording() || Recording.isPlaying()) ? ALPHA_OFF : ALPHA_ON,
visible: true
}, false);
}
@ -147,23 +147,23 @@ function setupTimer() {
function updateTimer() {
var text = "";
if (MyAvatar.isRecording()) {
text = formatTime(MyAvatar.recorderElapsed());
if (Recording.isRecording()) {
text = formatTime(Recording.recorderElapsed());
} else {
text = formatTime(MyAvatar.playerElapsed()) + " / " +
formatTime(MyAvatar.playerLength());
text = formatTime(Recording.playerElapsed()) + " / " +
formatTime(Recording.playerLength());
}
Overlays.editOverlay(timer, {
text: text
})
toolBar.changeSpacing(text.length * 8 + ((MyAvatar.isRecording()) ? 15 : 0), spacing);
toolBar.changeSpacing(text.length * 8 + ((Recording.isRecording()) ? 15 : 0), spacing);
if (MyAvatar.isRecording()) {
if (Recording.isRecording()) {
slider.pos = 1.0;
} else if (MyAvatar.playerLength() > 0) {
slider.pos = MyAvatar.playerElapsed() / MyAvatar.playerLength();
} else if (Recording.playerLength() > 0) {
slider.pos = Recording.playerElapsed() / Recording.playerLength();
}
Overlays.editOverlay(slider.foreground, {
@ -217,77 +217,77 @@ function moveUI() {
function mousePressEvent(event) {
clickedOverlay = Overlays.getOverlayAtPoint({ x: event.x, y: event.y });
if (recordIcon === toolBar.clicked(clickedOverlay, false) && !MyAvatar.isPlaying()) {
if (!MyAvatar.isRecording()) {
MyAvatar.startRecording();
if (recordIcon === toolBar.clicked(clickedOverlay, false) && !Recording.isPlaying()) {
if (!Recording.isRecording()) {
Recording.startRecording();
toolBar.selectTool(recordIcon, false);
toolBar.setAlpha(ALPHA_OFF, playIcon);
toolBar.setAlpha(ALPHA_OFF, playLoopIcon);
toolBar.setAlpha(ALPHA_OFF, saveIcon);
toolBar.setAlpha(ALPHA_OFF, loadIcon);
} else {
MyAvatar.stopRecording();
Recording.stopRecording();
toolBar.selectTool(recordIcon, true );
MyAvatar.loadLastRecording();
Recording.loadLastRecording();
toolBar.setAlpha(ALPHA_ON, playIcon);
toolBar.setAlpha(ALPHA_ON, playLoopIcon);
toolBar.setAlpha(ALPHA_ON, saveIcon);
toolBar.setAlpha(ALPHA_ON, loadIcon);
}
} else if (playIcon === toolBar.clicked(clickedOverlay) && !MyAvatar.isRecording()) {
if (MyAvatar.isPlaying()) {
MyAvatar.pausePlayer();
} else if (playIcon === toolBar.clicked(clickedOverlay) && !Recording.isRecording()) {
if (Recording.isPlaying()) {
Recording.pausePlayer();
toolBar.setAlpha(ALPHA_ON, recordIcon);
toolBar.setAlpha(ALPHA_ON, saveIcon);
toolBar.setAlpha(ALPHA_ON, loadIcon);
} else if (MyAvatar.playerLength() > 0) {
} else if (Recording.playerLength() > 0) {
setPlayerOptions();
MyAvatar.setPlayerLoop(false);
MyAvatar.startPlaying();
Recording.setPlayerLoop(false);
Recording.startPlaying();
toolBar.setAlpha(ALPHA_OFF, recordIcon);
toolBar.setAlpha(ALPHA_OFF, saveIcon);
toolBar.setAlpha(ALPHA_OFF, loadIcon);
watchStop = true;
}
} else if (playLoopIcon === toolBar.clicked(clickedOverlay) && !MyAvatar.isRecording()) {
if (MyAvatar.isPlaying()) {
MyAvatar.pausePlayer();
} else if (playLoopIcon === toolBar.clicked(clickedOverlay) && !Recording.isRecording()) {
if (Recording.isPlaying()) {
Recording.pausePlayer();
toolBar.setAlpha(ALPHA_ON, recordIcon);
toolBar.setAlpha(ALPHA_ON, saveIcon);
toolBar.setAlpha(ALPHA_ON, loadIcon);
} else if (MyAvatar.playerLength() > 0) {
} else if (Recording.playerLength() > 0) {
setPlayerOptions();
MyAvatar.setPlayerLoop(true);
MyAvatar.startPlaying();
Recording.setPlayerLoop(true);
Recording.startPlaying();
toolBar.setAlpha(ALPHA_OFF, recordIcon);
toolBar.setAlpha(ALPHA_OFF, saveIcon);
toolBar.setAlpha(ALPHA_OFF, loadIcon);
}
} else if (saveIcon === toolBar.clicked(clickedOverlay)) {
if (!MyAvatar.isRecording() && !MyAvatar.isPlaying() && MyAvatar.playerLength() != 0) {
if (!Recording.isRecording() && !Recording.isPlaying() && Recording.playerLength() != 0) {
recordingFile = Window.save("Save recording to file", ".", "Recordings (*.hfr)");
if (!(recordingFile === "null" || recordingFile === null || recordingFile === "")) {
MyAvatar.saveRecording(recordingFile);
Recording.saveRecording(recordingFile);
}
}
} else if (loadIcon === toolBar.clicked(clickedOverlay)) {
if (!MyAvatar.isRecording() && !MyAvatar.isPlaying()) {
if (!Recording.isRecording() && !Recording.isPlaying()) {
recordingFile = Window.browse("Load recorcding from file", ".", "Recordings (*.hfr *.rec *.HFR *.REC)");
if (!(recordingFile === "null" || recordingFile === null || recordingFile === "")) {
MyAvatar.loadRecording(recordingFile);
Recording.loadRecording(recordingFile);
}
if (MyAvatar.playerLength() > 0) {
if (Recording.playerLength() > 0) {
toolBar.setAlpha(ALPHA_ON, playIcon);
toolBar.setAlpha(ALPHA_ON, playLoopIcon);
toolBar.setAlpha(ALPHA_ON, saveIcon);
}
}
} else if (MyAvatar.playerLength() > 0 &&
} else if (Recording.playerLength() > 0 &&
slider.x < event.x && event.x < slider.x + slider.w &&
slider.y < event.y && event.y < slider.y + slider.h) {
isSliding = true;
slider.pos = (event.x - slider.x) / slider.w;
MyAvatar.setPlayerTime(slider.pos * MyAvatar.playerLength());
Recording.setPlayerTime(slider.pos * Recording.playerLength());
}
}
var isSliding = false;
@ -296,10 +296,10 @@ function mouseMoveEvent(event) {
if (isSliding) {
slider.pos = (event.x - slider.x) / slider.w;
if (slider.pos < 0.0 || slider.pos > 1.0) {
MyAvatar.stopPlaying();
Recording.stopPlaying();
slider.pos = 0.0;
}
MyAvatar.setPlayerTime(slider.pos * MyAvatar.playerLength());
Recording.setPlayerTime(slider.pos * Recording.playerLength());
}
}
@ -316,7 +316,7 @@ function update() {
updateTimer();
if (watchStop && !MyAvatar.isPlaying()) {
if (watchStop && !Recording.isPlaying()) {
watchStop = false;
toolBar.setAlpha(ALPHA_ON, recordIcon);
toolBar.setAlpha(ALPHA_ON, saveIcon);
@ -325,11 +325,11 @@ function update() {
}
function scriptEnding() {
if (MyAvatar.isRecording()) {
MyAvatar.stopRecording();
if (Recording.isRecording()) {
Recording.stopRecording();
}
if (MyAvatar.isPlaying()) {
MyAvatar.stopPlaying();
if (Recording.isPlaying()) {
Recording.stopPlaying();
}
toolBar.cleanup();
Overlays.deleteOverlay(timer);

View file

@ -0,0 +1,105 @@
//
// Created by Bradley Austin Davis on 2015/11/14
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
import Hifi 1.0
import QtQuick 2.4
import "controls"
import "styles"
// Top-level dialog wrapper exposing the recorder UI. Registered from C++
// under objectName "RecorderDialog" (see RecorderDialog::registerType()).
VrDialog {
id: root
HifiConstants { id: hifi }
property real spacing: hifi.layout.spacing
property real outerSpacing: hifi.layout.spacing * 2
objectName: "RecorderDialog"
// Keep the dialog alive when hidden so toggle() can re-show it cheaply.
destroyOnInvisible: false
destroyOnCloseButton: false
contentImplicitWidth: recorderDialog.width
contentImplicitHeight: recorderDialog.height
RecorderDialog {
id: recorderDialog
x: root.clientX; y: root.clientY
width: 800
height: 128
// Forwarded from the Player component in Component.onCompleted below.
signal play()
signal rewind()
onPlay: {
console.log("Pressed play")
player.isPlaying = !player.isPlaying
}
onRewind: {
console.log("Pressed rewind")
player.position = 0
}
Row {
height: 32
ButtonAwesome {
id: cmdRecord
// NOTE(review): root.showRecordButton is not declared in this file —
// confirm VrDialog (or a context property) provides it.
visible: root.showRecordButton
width: 32; height: 32
// FontAwesome "circle" glyph, tinted red for the record action.
text: "\uf111"
iconColor: "red"
onClicked: {
console.log("Pressed record")
status.text = "Recording";
}
}
}
// Status label in the top-right corner ("Idle" / "Recording").
Text {
id: status
anchors.top: parent.top
anchors.right: parent.right
width: 128
text: "Idle"
}
// Transport bar (play/rewind + position slider) docked at the bottom.
Player {
id: player
y: root.clientY + 64
height: 64
anchors.left: parent.left
anchors.right: parent.right
anchors.bottom: parent.bottom
// onClicked: {
// if (recordTimer.running) {
// recordTimer.stop();
// }
// recordTimer.start();
// }
// One-shot timer that kicks off avatar recording via the MyAvatar
// context property (registered in Application::initializeUi).
Timer {
id: recordTimer;
interval: 1000; running: false; repeat: false
onTriggered: {
console.log("Recording: " + MyAvatar.isRecording())
MyAvatar.startRecording();
console.log("Recording: " + MyAvatar.isRecording())
}
}
}
Component.onCompleted: {
// Relay the Player's transport signals out through this component.
player.play.connect(play)
player.rewind.connect(rewind)
}
}
}

View file

@ -0,0 +1,21 @@
import QtQuick 2.3
import QtQuick.Controls 1.3 as Original
import QtQuick.Controls.Styles 1.3 as OriginalStyles
import "."
import "../styles"
// Button whose label is a FontAwesome icon glyph (pass the codepoint as
// `text`, e.g. "\uf04b" for play). Tint via `iconColor`; renders gray
// when disabled.
Original.Button {
property color iconColor: "black"
FontLoader { id: iconFont; source: "../../fonts/fontawesome-webfont.ttf"; }
style: OriginalStyles.ButtonStyle {
label: Text {
renderType: Text.NativeRendering
verticalAlignment: Text.AlignVCenter
horizontalAlignment: Text.AlignHCenter
font.family: iconFont.name
font.pointSize: 20
color: control.enabled ? control.iconColor : "gray"
text: control.text
}
}
}

View file

@ -0,0 +1,89 @@
//
//  Player.qml (recording playback transport; header previously mis-copied from AddressBarDialog.qml)
//
// Created by Austin Davis on 2015/04/14
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
//import Hifi 1.0
import QtQuick 2.4
import QtQuick.Controls 1.2
import QtQuick.Dialogs 1.2
import QtQuick.Controls.Styles 1.2
import "../styles"
// Playback transport: a position slider flanked by time labels on top,
// play/pause and rewind buttons below. Emits play()/rewind(); state is
// held in `duration`, `position`, and `isPlaying`.
Item {
id: root
signal play()
signal rewind()
// NOTE(review): duration/position default to 100/50 placeholders and the
// time labels are hard-coded "00:00:00" — presumably wired up by the
// embedding component; confirm before relying on them.
property real duration: 100
property real position: 50
property bool isPlaying: false
implicitHeight: 64
implicitWidth: 640
// Top half: current-time label, seek slider, duration label.
Item {
anchors.top: parent.top
anchors.left: parent.left
anchors.right: parent.right
height: root.height / 2
Text {
id: labelCurrent
anchors.top: parent.top
anchors.bottom: parent.bottom
anchors.left: parent.left
horizontalAlignment: Text.AlignHCenter
verticalAlignment: Text.AlignVCenter
width: 56
text: "00:00:00"
}
Slider {
// One-way binding: reflects playback progress as a 0..1 fraction.
value: root.position / root.duration
anchors.top: parent.top
anchors.topMargin: 2
anchors.bottomMargin: 2
anchors.bottom: parent.bottom
anchors.left: labelCurrent.right
anchors.leftMargin: 4
anchors.right: labelDuration.left
anchors.rightMargin: 4
}
Text {
id: labelDuration
anchors.top: parent.top
anchors.bottom: parent.bottom
anchors.right: parent.right
horizontalAlignment: Text.AlignHCenter
verticalAlignment: Text.AlignVCenter
width: 56
text: "00:00:00"
}
}
// Bottom half: centered transport buttons (FontAwesome glyphs).
Row {
anchors.bottom: parent.bottom
anchors.horizontalCenter: parent.horizontalCenter
height: root.height / 2;
ButtonAwesome {
id: cmdPlay
anchors.top: parent.top
anchors.bottom: parent.bottom
// Pause glyph while playing, play glyph otherwise.
text: isPlaying ? "\uf04c" : "\uf04b"
width: root.height / 2;
onClicked: root.play();
}
ButtonAwesome {
id: cmdRewind
anchors.top: parent.top
anchors.bottom: parent.bottom
width: root.height / 2
// FontAwesome backward glyph.
text: "\uf04a"
onClicked: root.rewind();
}
}
}

View file

@ -94,6 +94,8 @@
#include <UserActivityLogger.h>
#include <UUID.h>
#include <VrMenu.h>
#include <recording/Deck.h>
#include <recording/Recorder.h>
#include "AnimDebugDraw.h"
#include "AudioClient.h"
@ -124,6 +126,7 @@
#include "scripting/LocationScriptingInterface.h"
#include "scripting/MenuScriptingInterface.h"
#include "scripting/SettingsScriptingInterface.h"
#include "scripting/RecordingScriptingInterface.h"
#include "scripting/WebWindowClass.h"
#include "scripting/WindowScriptingInterface.h"
#include "scripting/ControllerScriptingInterface.h"
@ -132,6 +135,7 @@
#endif
#include "Stars.h"
#include "ui/AddressBarDialog.h"
#include "ui/RecorderDialog.h"
#include "ui/AvatarInputs.h"
#include "ui/AssetUploadDialogFactory.h"
#include "ui/DataWebDialog.h"
@ -295,6 +299,8 @@ bool setupEssentials(int& argc, char** argv) {
Setting::init();
// Set dependencies
DependencyManager::set<recording::Deck>();
DependencyManager::set<recording::Recorder>();
DependencyManager::set<AddressManager>();
DependencyManager::set<NodeList>(NodeType::Agent, listenPort);
DependencyManager::set<GeometryCache>();
@ -319,6 +325,7 @@ bool setupEssentials(int& argc, char** argv) {
DependencyManager::set<ResourceCacheSharedItems>();
DependencyManager::set<DesktopScriptingInterface>();
DependencyManager::set<EntityScriptingInterface>();
DependencyManager::set<RecordingScriptingInterface>();
DependencyManager::set<WindowScriptingInterface>();
DependencyManager::set<HMDScriptingInterface>();
@ -996,6 +1003,7 @@ void Application::initializeGL() {
void Application::initializeUi() {
AddressBarDialog::registerType();
RecorderDialog::registerType();
ErrorDialog::registerType();
LoginDialog::registerType();
MessageDialog::registerType();
@ -1011,6 +1019,7 @@ void Application::initializeUi() {
offscreenUi->load("RootMenu.qml");
auto scriptingInterface = DependencyManager::get<controller::ScriptingInterface>();
offscreenUi->getRootContext()->setContextProperty("Controller", scriptingInterface.data());
offscreenUi->getRootContext()->setContextProperty("MyAvatar", getMyAvatar());
_glWidget->installEventFilter(offscreenUi.data());
VrMenu::load();
VrMenu::executeQueuedLambdas();
@ -1580,8 +1589,9 @@ void Application::keyPressEvent(QKeyEvent* event) {
case Qt::Key_X:
if (isMeta && isShifted) {
auto offscreenUi = DependencyManager::get<OffscreenUi>();
offscreenUi->load("TestControllers.qml");
// auto offscreenUi = DependencyManager::get<OffscreenUi>();
// offscreenUi->load("TestControllers.qml");
RecorderDialog::toggle();
}
break;
@ -3969,6 +3979,7 @@ void Application::registerScriptEngineWithApplicationServices(ScriptEngine* scri
RayToOverlayIntersectionResultFromScriptValue);
scriptEngine->registerGlobalObject("Desktop", DependencyManager::get<DesktopScriptingInterface>().data());
scriptEngine->registerGlobalObject("Recording", DependencyManager::get<RecordingScriptingInterface>().data());
scriptEngine->registerGlobalObject("Window", DependencyManager::get<WindowScriptingInterface>().data());
scriptEngine->registerGetterSetter("location", LocationScriptingInterface::locationGetter,

View file

@ -14,6 +14,7 @@
#include <DependencyManager.h>
#include <DeferredLightingEffect.h>
#include <NodeList.h>
#include <recording/Deck.h>
#include "Application.h"
#include "Avatar.h"
@ -91,9 +92,9 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
if (isMine) {
MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
auto player = DependencyManager::get<recording::Deck>();
// Only use face trackers when not playing back a recording.
if (!myAvatar->isPlaying()) {
if (!player->isPlaying()) {
FaceTracker* faceTracker = qApp->getActiveFaceTracker();
_isFaceTrackerConnected = faceTracker != NULL && !faceTracker->isMuted();
if (_isFaceTrackerConnected) {

View file

@ -80,10 +80,6 @@ const QString& DEFAULT_AVATAR_COLLISION_SOUND_URL = "https://hifi-public.s3.amaz
const float MyAvatar::ZOOM_MIN = 0.5f;
const float MyAvatar::ZOOM_MAX = 25.0f;
const float MyAvatar::ZOOM_DEFAULT = 1.5f;
static const QString HEADER_NAME = "com.highfidelity.recording.AvatarData";
static recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::TYPE_INVALID;
static std::once_flag frameTypeRegistration;
MyAvatar::MyAvatar(RigPointer rig) :
Avatar(rig),
@ -121,17 +117,6 @@ MyAvatar::MyAvatar(RigPointer rig) :
{
using namespace recording;
std::call_once(frameTypeRegistration, [] {
AVATAR_FRAME_TYPE = recording::Frame::registerFrameType(HEADER_NAME);
});
// FIXME how to deal with driving multiple avatars locally?
Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this](Frame::ConstPointer frame) {
qDebug() << "Playback of avatar frame length: " << frame->data.size();
avatarStateFromFrame(frame->data, this);
});
for (int i = 0; i < MAX_DRIVE_KEYS; i++) {
_driveKeys[i] = 0.0f;
}
@ -326,8 +311,10 @@ void MyAvatar::simulate(float deltaTime) {
}
// Record avatars movements.
if (_recorder && _recorder->isRecording()) {
_recorder->recordFrame(AVATAR_FRAME_TYPE, avatarStateToFrame(this));
auto recorder = DependencyManager::get<recording::Recorder>();
if (recorder->isRecording()) {
static const recording::FrameType FRAME_TYPE = recording::Frame::registerFrameType(AvatarData::FRAME_NAME);
recorder->recordFrame(FRAME_TYPE, toFrame(*this));
}
// consider updating our billboard
@ -403,8 +390,8 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
glm::vec3 estimatedPosition, estimatedRotation;
bool inHmd = qApp->getAvatarUpdater()->isHMDMode();
if (isPlaying() && inHmd) {
bool playing = DependencyManager::get<recording::Deck>()->isPlaying();
if (inHmd && playing) {
return;
}
@ -455,7 +442,7 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
Head* head = getHead();
if (inHmd || isPlaying()) {
if (inHmd || playing) {
head->setDeltaPitch(estimatedRotation.x);
head->setDeltaYaw(estimatedRotation.y);
head->setDeltaRoll(estimatedRotation.z);
@ -572,102 +559,6 @@ bool MyAvatar::setJointReferential(const QUuid& id, int jointIndex) {
}
}
bool MyAvatar::isRecording() {
if (!_recorder) {
return false;
}
if (QThread::currentThread() != thread()) {
bool result;
QMetaObject::invokeMethod(this, "isRecording", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(bool, result));
return result;
}
return _recorder && _recorder->isRecording();
}
float MyAvatar::recorderElapsed() {
if (QThread::currentThread() != thread()) {
float result;
QMetaObject::invokeMethod(this, "recorderElapsed", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(float, result));
return result;
}
if (!_recorder) {
return 0;
}
return (float)_recorder->position() / (float) MSECS_PER_SECOND;
}
QMetaObject::Connection _audioClientRecorderConnection;
void MyAvatar::startRecording() {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startRecording", Qt::BlockingQueuedConnection);
return;
}
_recorder = std::make_shared<recording::Recorder>();
// connect to AudioClient's signal so we get input audio
auto audioClient = DependencyManager::get<AudioClient>();
_audioClientRecorderConnection = connect(audioClient.data(), &AudioClient::inputReceived, [] {
// FIXME, missing audio data handling
});
setRecordingBasis();
_recorder->start();
}
void MyAvatar::stopRecording() {
if (!_recorder) {
return;
}
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "stopRecording", Qt::BlockingQueuedConnection);
return;
}
if (_recorder) {
QObject::disconnect(_audioClientRecorderConnection);
_audioClientRecorderConnection = QMetaObject::Connection();
_recorder->stop();
clearRecordingBasis();
}
}
void MyAvatar::saveRecording(const QString& filename) {
if (!_recorder) {
qCDebug(interfaceapp) << "There is no recording to save";
return;
}
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "saveRecording", Qt::BlockingQueuedConnection,
Q_ARG(QString, filename));
return;
}
if (_recorder) {
auto clip = _recorder->getClip();
recording::Clip::toFile(filename, clip);
}
}
void MyAvatar::loadLastRecording() {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "loadLastRecording", Qt::BlockingQueuedConnection);
return;
}
if (!_recorder || !_recorder->getClip()) {
qCDebug(interfaceapp) << "There is no recording to load";
return;
}
if (!_player) {
_player = std::make_shared<recording::Deck>();
}
_player->queueClip(_recorder->getClip());
_player->play();
}
void MyAvatar::startAnimation(const QString& url, float fps, float priority,
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) {
if (QThread::currentThread() != thread()) {

View file

@ -254,13 +254,6 @@ public slots:
bool setModelReferential(const QUuid& id);
bool setJointReferential(const QUuid& id, int jointIndex);
bool isRecording();
float recorderElapsed();
void startRecording();
void stopRecording();
void saveRecording(const QString& filename);
void loadLastRecording();
virtual void rebuildSkeletonBody() override;
bool getEnableRigAnimations() const { return _rig->getEnableRig(); }
@ -309,9 +302,6 @@ private:
const glm::vec3& translation = glm::vec3(), const glm::quat& rotation = glm::quat(), float scale = 1.0f,
bool allowDuplicates = false, bool useSaved = true) override;
const recording::RecorderPointer getRecorder() const { return _recorder; }
const recording::DeckPointer getPlayer() const { return _player; }
//void beginFollowingHMD();
//bool shouldFollowHMD() const;
//void followHMD(float deltaTime);
@ -358,8 +348,6 @@ private:
eyeContactTarget _eyeContactTarget;
recording::RecorderPointer _recorder;
glm::vec3 _trackedHeadPosition;
Setting::Handle<float> _realWorldFieldOfView;

View file

@ -13,6 +13,7 @@
#include <QMultiMap>
#include <DeferredLightingEffect.h>
#include <recording/Deck.h>
#include "Application.h"
#include "Avatar.h"
@ -247,8 +248,8 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
}
MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
if (myAvatar->isPlaying()) {
// Don't take inputs if playing back a recording.
auto player = DependencyManager::get<recording::Deck>();
if (player->isPlaying()) {
return;
}

View file

@ -0,0 +1,328 @@
//
// Created by Bradley Austin Davis on 2015/11/13
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "RecordingScriptingInterface.h"
#include <recording/Deck.h>
#include <recording/Recorder.h>
#include <recording/Clip.h>
#include <recording/Frame.h>
#include <NumericalConstants.h>
#include <AudioClient.h>
#include <AudioConstants.h>
#include "avatar/AvatarManager.h"
#include "Application.h"
#include "InterfaceLogging.h"
typedef int16_t AudioSample;
using namespace recording;
// FIXME move to somewhere audio related?
static const QString AUDIO_FRAME_NAME = "com.highfidelity.recording.Audio";
// Script-facing facade over the shared recording::Deck (playback) and
// recording::Recorder (capture) singletons; exposed to JS as "Recording".
// Registers frame handlers so avatar and audio frames decoded during
// playback are routed back into this object.
RecordingScriptingInterface::RecordingScriptingInterface() {
static const recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::registerFrameType(AvatarData::FRAME_NAME);
// FIXME how to deal with driving multiple avatars locally?
// Avatar frames are applied to MyAvatar via processAvatarFrame.
Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this](Frame::ConstPointer frame) {
processAvatarFrame(frame);
});
static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AUDIO_FRAME_NAME);
// Audio frames are re-injected as positional audio via processAudioFrame.
Frame::registerFrameHandler(AUDIO_FRAME_TYPE, [this](Frame::ConstPointer frame) {
processAudioFrame(frame);
});
_player = DependencyManager::get<Deck>();
_recorder = DependencyManager::get<Recorder>();
// Capture microphone input while recording (buffered in processAudioInput,
// folded into the clip when recording stops).
auto audioClient = DependencyManager::get<AudioClient>();
connect(audioClient.data(), &AudioClient::inputReceived, this, &RecordingScriptingInterface::processAudioInput);
}
// True while the Deck is actively playing a clip.
bool RecordingScriptingInterface::isPlaying() {
    return _player->isPlaying();
}

// True while playback is paused (clip loaded but not advancing).
bool RecordingScriptingInterface::isPaused() {
    return _player->isPaused();
}

// Current playback position in seconds (the Deck reports milliseconds).
float RecordingScriptingInterface::playerElapsed() {
    return (float)_player->position() / MSECS_PER_SECOND;
}

// Total length of the loaded clip in seconds.
// Cast to float before dividing — matching playerElapsed() — so an
// integral Deck::length() does not truncate to whole seconds.
float RecordingScriptingInterface::playerLength() {
    return (float)_player->length() / MSECS_PER_SECOND;
}
// Load a clip from |filename| and queue it on the Deck for playback.
// Marshals onto this object's home thread first, since the Deck is driven
// from there.
void RecordingScriptingInterface::loadRecording(const QString& filename) {
    using namespace recording;
    if (QThread::currentThread() != thread()) {
        QMetaObject::invokeMethod(this, "loadRecording", Qt::BlockingQueuedConnection,
            Q_ARG(QString, filename));
        return;
    }

    ClipPointer clip = Clip::fromFile(filename);
    if (!clip) {
        qWarning() << "Unable to load clip data from " << filename;
        // Bail out instead of queueing a null clip on the Deck (the
        // original fell through and queued it anyway).
        return;
    }
    _player->queueClip(clip);
}
// Begin playback of the queued clip, optionally re-basing the recorded
// motion onto the avatar's current transform.
void RecordingScriptingInterface::startPlaying() {
// The Deck must be driven from this object's home thread.
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startPlaying", Qt::BlockingQueuedConnection);
return;
}
auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
// Playback from the current position
if (_playFromCurrentLocation) {
// Use the avatar's current transform as the playback basis so recorded
// motion is applied relative to where the avatar stands now.
_dummyAvatar.setRecordingBasis(std::make_shared<Transform>(myAvatar->getTransform()));
} else {
_dummyAvatar.clearRecordingBasis();
}
_player->play();
}
// Playback volume control — not yet implemented.
void RecordingScriptingInterface::setPlayerVolume(float volume) {
// FIXME
}
// Audio/motion sync offset — not yet implemented.
void RecordingScriptingInterface::setPlayerAudioOffset(float audioOffset) {
// FIXME
}
// Seek to |time| seconds; the Deck's seek() takes milliseconds.
void RecordingScriptingInterface::setPlayerTime(float time) {
_player->seek(time * MSECS_PER_SECOND);
}
// When true, playback is re-based onto the avatar's current transform
// (consumed in startPlaying()).
void RecordingScriptingInterface::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
_playFromCurrentLocation = playFromCurrentLocation;
}
// Loop the clip when it reaches the end.
void RecordingScriptingInterface::setPlayerLoop(bool loop) {
_player->loop(loop);
}
// The _use* flags below gate which recorded avatar attributes are applied
// during playback (consumed in processAvatarFrame()).
void RecordingScriptingInterface::setPlayerUseDisplayName(bool useDisplayName) {
_useDisplayName = useDisplayName;
}
void RecordingScriptingInterface::setPlayerUseAttachments(bool useAttachments) {
_useAttachments = useAttachments;
}
void RecordingScriptingInterface::setPlayerUseHeadModel(bool useHeadModel) {
_useHeadModel = useHeadModel;
}
void RecordingScriptingInterface::setPlayerUseSkeletonModel(bool useSkeletonModel) {
_useSkeletonModel = useSkeletonModel;
}
// Thin pass-throughs to the Deck's transport controls.
void RecordingScriptingInterface::play() {
_player->play();
}
void RecordingScriptingInterface::pausePlayer() {
_player->pause();
}
void RecordingScriptingInterface::stopPlaying() {
_player->stop();
}
// True while the Recorder is capturing.
bool RecordingScriptingInterface::isRecording() {
return _recorder->isRecording();
}
float RecordingScriptingInterface::recorderElapsed() {
return _recorder->position();
}
// Begin capturing a new recording: reset the audio buffer and epoch,
// anchor the avatar's recording basis, and start the Recorder.
void RecordingScriptingInterface::startRecording() {
// NOTE(review): this check runs before the thread marshal below, so it may
// execute off the home thread — confirm Recorder::isRecording() is safe
// to call cross-thread.
if (_recorder->isRecording()) {
qCWarning(interfaceapp) << "Recorder is already running";
return;
}
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startRecording", Qt::BlockingQueuedConnection);
return;
}
// Epoch for timestamping incoming audio packets (see processAudioInput).
_recordingEpoch = Frame::epochForFrameTime(0);
_audioRecordingBuffer.clear();
auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
myAvatar->setRecordingBasis();
_recorder->start();
}
// Duration, in seconds, of a buffer of raw audio samples
// (AudioConstants::SAMPLE_RATE, sizeof(AudioSample) bytes per sample).
float calculateAudioTime(const QByteArray& audio) {
    const float bytesPerSecond = AudioConstants::SAMPLE_RATE * sizeof(AudioConstants::AudioSample);
    const float byteCount = (float)audio.size();
    return byteCount / bytesPerSecond;
}
// Append one audio frame to |clip| at clip-relative |time|, under the
// shared AUDIO_FRAME_NAME frame type.
void injectAudioFrame(Clip::Pointer& clip, Frame::Time time, const QByteArray& audio) {
static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AUDIO_FRAME_NAME);
clip->addFrame(std::make_shared<Frame>(AUDIO_FRAME_TYPE, time, audio));
}
// Detect too much audio in a single frame, or too much deviation between
// the expected audio length and the computed audio length
bool shouldStartNewAudioFrame(const QByteArray& currentAudioFrame, float expectedAudioLength) {
    // An empty accumulator always starts a new frame.
    if (currentAudioFrame.isEmpty()) {
        return true;
    }

    // Never let a single audio frame exceed one second of samples.
    float actualAudioLength = calculateAudioTime(currentAudioFrame);
    static const float MAX_AUDIO_PACKET_DURATION = 1.0f;
    if (actualAudioLength >= MAX_AUDIO_PACKET_DURATION) {
        return true;
    }

    float deviation = std::abs(actualAudioLength - expectedAudioLength);

    qDebug() << "Checking buffer deviation current length ";
    qDebug() << "Actual: " << actualAudioLength;
    qDebug() << "Expected: " << expectedAudioLength;
    qDebug() << "Deviation: " << deviation;

    // 100 milliseconds of allowed drift between the wall-clock gap and the
    // buffered audio before cutting a new frame.
    static const float MAX_AUDIO_DEVIATION = 0.1f;
    // BUG FIX: the original compared |deviation| against
    // MAX_AUDIO_PACKET_DURATION (1.0s), making this threshold ten times
    // looser than the declared 0.1s intent.
    if (deviation >= MAX_AUDIO_DEVIATION) {
        return true;
    }

    return false;
}
// Partition the raw (timestamp, audio) packets captured during recording
// into clip frames: a new frame starts whenever the accumulated buffer is
// empty, grows past the max packet duration, or drifts too far from the
// expected timeline (see shouldStartNewAudioFrame).
void injectAudioFrames(Clip::Pointer& clip, const QList<QPair<recording::Frame::Time, QByteArray>>& audioBuffer) {
    Frame::Time lastAudioStartTime = 0;
    QByteArray audioFrameBuffer;
    for (const auto& audioPacket : audioBuffer) {
        float expectedAudioLength = Frame::frameTimeToSeconds(audioPacket.first - lastAudioStartTime);
        if (shouldStartNewAudioFrame(audioFrameBuffer, expectedAudioLength)) {
            // Time to start a new frame, inject the old one if it exists
            if (audioFrameBuffer.size()) {
                injectAudioFrame(clip, lastAudioStartTime, audioFrameBuffer);
                audioFrameBuffer.clear();
            }
            lastAudioStartTime = audioPacket.first;
        }
        audioFrameBuffer.append(audioPacket.second);
    }
    // BUG FIX: flush the trailing buffer — the original dropped whatever
    // audio accumulated after the last frame boundary.
    if (audioFrameBuffer.size()) {
        injectAudioFrame(clip, lastAudioStartTime, audioFrameBuffer);
    }
}
void RecordingScriptingInterface::stopRecording() {
_recorder->stop();
_lastClip = _recorder->getClip();
// post-process the audio into discreet chunks based on times of received samples
injectAudioFrames(_lastClip, _audioRecordingBuffer);
_audioRecordingBuffer.clear();
_lastClip->seek(0);
Frame::ConstPointer frame;
while (frame = _lastClip->nextFrame()) {
qDebug() << "Frame time " << frame->timeOffset << " size " << frame->data.size();
}
_lastClip->seek(0);
auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
myAvatar->clearRecordingBasis();
}
// Serialize the most recently recorded clip (_lastClip) to |filename|.
// No-op with a warning if nothing has been recorded yet.
void RecordingScriptingInterface::saveRecording(const QString& filename) {
// Marshal onto the home thread before touching _lastClip.
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "saveRecording", Qt::BlockingQueuedConnection,
Q_ARG(QString, filename));
return;
}
if (!_lastClip) {
qWarning() << "There is no recording to save";
return;
}
recording::Clip::toFile(filename, _lastClip);
}
// Queue the most recently recorded clip on the Deck and start playback
// immediately. No-op with a debug message if nothing has been recorded.
void RecordingScriptingInterface::loadLastRecording() {
// Marshal onto the home thread before touching _lastClip or the Deck.
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "loadLastRecording", Qt::BlockingQueuedConnection);
return;
}
if (!_lastClip) {
qCDebug(interfaceapp) << "There is no recording to load";
return;
}
_player->queueClip(_lastClip);
_player->play();
}
// Apply one decoded avatar frame from a playing clip to MyAvatar.
// The frame is deserialized into _dummyAvatar, then selected attributes
// are copied across according to the _use* option flags.
void RecordingScriptingInterface::processAvatarFrame(const Frame::ConstPointer& frame) {
// Frame handlers are invoked on the home thread only.
Q_ASSERT(QThread::currentThread() == thread());
AvatarData::fromFrame(frame->data, _dummyAvatar);
auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
// Head and skeleton model swaps are recognized but not yet applied.
if (_useHeadModel && _dummyAvatar.getFaceModelURL().isValid() &&
(_dummyAvatar.getFaceModelURL() != myAvatar->getFaceModelURL())) {
// FIXME
//myAvatar->setFaceModelURL(_dummyAvatar.getFaceModelURL());
}
if (_useSkeletonModel && _dummyAvatar.getSkeletonModelURL().isValid() &&
(_dummyAvatar.getSkeletonModelURL() != myAvatar->getSkeletonModelURL())) {
// FIXME
//myAvatar->useFullAvatarURL()
}
if (_useDisplayName && _dummyAvatar.getDisplayName() != myAvatar->getDisplayName()) {
myAvatar->setDisplayName(_dummyAvatar.getDisplayName());
}
// Position/orientation are always driven by the recording.
myAvatar->setPosition(_dummyAvatar.getPosition());
myAvatar->setOrientation(_dummyAvatar.getOrientation());
// FIXME attachments
// FIXME joints
// FIXME head lean
// FIXME head orientation
}
// AudioClient::inputReceived handler: while recording, timestamp each
// incoming microphone packet against _recordingEpoch and buffer it.
// The buffer is folded into the clip in stopRecording().
void RecordingScriptingInterface::processAudioInput(const QByteArray& audio) {
if (_recorder->isRecording()) {
auto audioFrameTime = Frame::frameTimeFromEpoch(_recordingEpoch);
_audioRecordingBuffer.push_back({ audioFrameTime, audio });
qDebug() << "Got sound packet of size " << audio.size() << " At time " << audioFrameTime;
}
}
// Play a recorded audio frame back into the world from the avatar's current pose.
void RecordingScriptingInterface::processAudioFrame(const recording::FrameConstPointer& frame) {
    auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
    // Inject the sound at the avatar's present position/orientation rather
    // than wherever it was when recorded.
    AudioInjectorOptions injectorOptions;
    injectorOptions.position = myAvatar->getPosition();
    injectorOptions.orientation = myAvatar->getOrientation();
    // FIXME store the audio format (sample rate, bits, stereo) in the frame
    injectorOptions.stereo = false;
    // FIXME move audio injector to a thread pool model?
    AudioInjector::playSoundAndDelete(frame->data, injectorOptions, nullptr);
}

View file

@ -0,0 +1,80 @@
//
// Created by Bradley Austin Davis on 2015/11/13
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_RecordingScriptingInterface_h
#define hifi_RecordingScriptingInterface_h
#include <atomic>
#include <QObject>
#include <DependencyManager.h>
#include <recording/Forward.h>
#include <recording/Frame.h>
#include <AvatarData.h>
/// Scripting facade over the recording subsystem: capture (recording::Recorder)
/// and playback (recording::Deck).  Playback positions and lengths exposed to
/// scripts are float seconds.
class RecordingScriptingInterface : public QObject, public Dependency {
    Q_OBJECT
public:
    RecordingScriptingInterface();

public slots:
    // --- Playback (Deck) ---
    bool isPlaying();
    bool isPaused();
    // Current playback position, in seconds.
    float playerElapsed();
    // Total length of the queued clip(s), in seconds.
    float playerLength();
    void loadRecording(const QString& filename);
    void startPlaying();
    void setPlayerVolume(float volume);
    void setPlayerAudioOffset(float audioOffset);
    // Seek to an absolute playback time, in seconds.
    void setPlayerTime(float time);
    void setPlayFromCurrentLocation(bool playFromCurrentLocation);
    void setPlayerLoop(bool loop);
    // Flags selecting which recorded avatar attributes get applied to
    // MyAvatar during playback (see processAvatarFrame).
    void setPlayerUseDisplayName(bool useDisplayName);
    void setPlayerUseAttachments(bool useAttachments);
    void setPlayerUseHeadModel(bool useHeadModel);
    void setPlayerUseSkeletonModel(bool useSkeletonModel);
    void play();
    void pausePlayer();
    void stopPlaying();

    // --- Capture (Recorder) ---
    bool isRecording();
    // Elapsed recording time, in seconds.
    float recorderElapsed();
    void startRecording();
    void stopRecording();
    void saveRecording(const QString& filename);
    void loadLastRecording();

signals:
    void playbackStateChanged();
    // Should this occur for any frame or just for seek calls?
    void playbackPositionChanged();
    void looped();

private:
    using Mutex = std::recursive_mutex;
    using Locker = std::unique_lock<Mutex>;
    using Flag = std::atomic<bool>;

    // Frame handlers invoked by the playback/recording machinery.
    void processAvatarFrame(const recording::FrameConstPointer& frame);
    void processAudioFrame(const recording::FrameConstPointer& frame);
    void processAudioInput(const QByteArray& audioData);

    QSharedPointer<recording::Deck> _player;
    QSharedPointer<recording::Recorder> _recorder;
    // Captured microphone packets tagged with their frame times, buffered
    // while recording.
    QList<QPair<recording::Frame::Time, QByteArray>> _audioRecordingBuffer;
    // Epoch from which audio frame times are measured (see processAudioInput).
    // NOTE(review): presumably set when recording starts -- confirm at the call site.
    quint64 _recordingEpoch { 0 };

    // Playback option flags (see the setPlayerUse* slots above).
    Flag _playFromCurrentLocation { true };
    Flag _useDisplayName { false };
    Flag _useHeadModel { false };
    Flag _useAttachments { false };
    Flag _useSkeletonModel { false };

    // The most recently recorded clip, kept for saveRecording()/loadLastRecording().
    recording::ClipPointer _lastClip;
    // Scratch avatar used to decode frames before applying them to MyAvatar.
    AvatarData _dummyAvatar;
};
#endif // hifi_RecordingScriptingInterface_h

View file

@ -0,0 +1,22 @@
//
// Created by Bradley Austin Davis on 2015/11/14
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "RecorderDialog.h"
#include <QMessageBox>
#include "DependencyManager.h"
HIFI_QML_DEF(RecorderDialog)
// Standard construction; all dialog setup is handled by the OffscreenQmlDialog base.
RecorderDialog::RecorderDialog(QQuickItem* parent) : OffscreenQmlDialog(parent) {
}
void RecorderDialog::hide() {
((QQuickItem*)parent())->setEnabled(false);
}

View file

@ -0,0 +1,28 @@
//
// Created by Bradley Austin Davis on 2015/11/14
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#ifndef hifi_RecorderDialog_h
#define hifi_RecorderDialog_h
#include <OffscreenQmlDialog.h>
/// Offscreen QML dialog hosting the recorder UI (registered via HIFI_QML_DECL).
class RecorderDialog : public OffscreenQmlDialog {
    Q_OBJECT
    HIFI_QML_DECL

public:
    RecorderDialog(QQuickItem* parent = nullptr);

signals:

protected:
    // Hides the dialog by disabling the wrapping QML item (see the .cpp).
    void hide();
};
#endif

View file

@ -33,8 +33,7 @@
#include <StreamUtils.h>
#include <UUID.h>
#include <shared/JSONHelpers.h>
#include <recording/Deck.h>
#include <recording/Clip.h>
#include <recording/Frame.h>
#include "AvatarLogging.h"
@ -45,6 +44,9 @@ using namespace std;
const glm::vec3 DEFAULT_LOCAL_AABOX_CORNER(-0.5f);
const glm::vec3 DEFAULT_LOCAL_AABOX_SCALE(1.0f);
const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData";
static std::once_flag frameTypeRegistration;
AvatarData::AvatarData() :
_sessionUUID(),
_position(0.0f),
@ -791,155 +793,10 @@ bool AvatarData::hasReferential() {
return _referential != NULL;
}
bool AvatarData::isPlaying() {
return _player && _player->isPlaying();
}
bool AvatarData::isPaused() {
return _player && _player->isPaused();
}
float AvatarData::playerElapsed() {
if (!_player) {
return 0;
}
if (QThread::currentThread() != thread()) {
float result;
QMetaObject::invokeMethod(this, "playerElapsed", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(float, result));
return result;
}
return (float)_player->position() / (float) MSECS_PER_SECOND;
}
float AvatarData::playerLength() {
if (!_player) {
return 0;
}
if (QThread::currentThread() != thread()) {
float result;
QMetaObject::invokeMethod(this, "playerLength", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(float, result));
return result;
}
return (float)_player->length() / (float) MSECS_PER_SECOND;
}
void AvatarData::loadRecording(const QString& filename) {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "loadRecording", Qt::BlockingQueuedConnection,
Q_ARG(QString, filename));
return;
}
using namespace recording;
ClipPointer clip = Clip::fromFile(filename);
if (!clip) {
qWarning() << "Unable to load clip data from " << filename;
}
_player = std::make_shared<Deck>();
_player->queueClip(clip);
}
void AvatarData::startPlaying() {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startPlaying", Qt::BlockingQueuedConnection);
return;
}
if (!_player) {
qWarning() << "No clip loaded for playback";
return;
}
setRecordingBasis();
_player->play();
}
void AvatarData::setPlayerVolume(float volume) {
// FIXME
}
void AvatarData::setPlayerAudioOffset(float audioOffset) {
// FIXME
}
void AvatarData::setPlayerTime(float time) {
if (!_player) {
qWarning() << "No player active";
return;
}
_player->seek(time * MSECS_PER_SECOND);
}
void AvatarData::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
// FIXME
}
void AvatarData::setPlayerLoop(bool loop) {
if (_player) {
_player->loop(loop);
}
}
void AvatarData::setPlayerUseDisplayName(bool useDisplayName) {
// FIXME
}
void AvatarData::setPlayerUseAttachments(bool useAttachments) {
// FIXME
}
void AvatarData::setPlayerUseHeadModel(bool useHeadModel) {
// FIXME
}
void AvatarData::setPlayerUseSkeletonModel(bool useSkeletonModel) {
// FIXME
}
void AvatarData::play() {
if (isPlaying()) {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "play", Qt::BlockingQueuedConnection);
return;
}
_player->play();
}
}
std::shared_ptr<Transform> AvatarData::getRecordingBasis() const {
return _recordingBasis;
}
void AvatarData::pausePlayer() {
if (!_player) {
return;
}
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "pausePlayer", Qt::BlockingQueuedConnection);
return;
}
if (_player) {
_player->pause();
}
}
void AvatarData::stopPlaying() {
if (!_player) {
return;
}
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "stopPlaying", Qt::BlockingQueuedConnection);
return;
}
if (_player) {
_player->stop();
}
}
void AvatarData::changeReferential(Referential* ref) {
delete _referential;
_referential = ref;
@ -1568,26 +1425,26 @@ JointData jointDataFromJsonValue(const QJsonValue& json) {
// This allows the application to decide whether playback should be relative to an avatar's
// transform at the start of playback, or relative to the transform of the recorded
// avatar
QByteArray avatarStateToFrame(const AvatarData* _avatar) {
QByteArray AvatarData::toFrame(const AvatarData& avatar) {
QJsonObject root;
if (!_avatar->getFaceModelURL().isEmpty()) {
root[JSON_AVATAR_HEAD_MODEL] = _avatar->getFaceModelURL().toString();
if (!avatar.getFaceModelURL().isEmpty()) {
root[JSON_AVATAR_HEAD_MODEL] = avatar.getFaceModelURL().toString();
}
if (!_avatar->getSkeletonModelURL().isEmpty()) {
root[JSON_AVATAR_BODY_MODEL] = _avatar->getSkeletonModelURL().toString();
if (!avatar.getSkeletonModelURL().isEmpty()) {
root[JSON_AVATAR_BODY_MODEL] = avatar.getSkeletonModelURL().toString();
}
if (!_avatar->getDisplayName().isEmpty()) {
root[JSON_AVATAR_DISPLAY_NAME] = _avatar->getDisplayName();
if (!avatar.getDisplayName().isEmpty()) {
root[JSON_AVATAR_DISPLAY_NAME] = avatar.getDisplayName();
}
if (!_avatar->getAttachmentData().isEmpty()) {
if (!avatar.getAttachmentData().isEmpty()) {
// FIXME serialize attachment data
}
auto recordingBasis = _avatar->getRecordingBasis();
auto recordingBasis = avatar.getRecordingBasis();
if (recordingBasis) {
// Find the relative transform
auto relativeTransform = recordingBasis->relativeTransform(_avatar->getTransform());
auto relativeTransform = recordingBasis->relativeTransform(avatar.getTransform());
// if the resulting relative basis is identity, we shouldn't record anything
if (!relativeTransform.isIdentity()) {
@ -1595,17 +1452,17 @@ QByteArray avatarStateToFrame(const AvatarData* _avatar) {
root[JSON_AVATAR_BASIS] = Transform::toJson(*recordingBasis);
}
} else {
root[JSON_AVATAR_RELATIVE] = Transform::toJson(_avatar->getTransform());
root[JSON_AVATAR_RELATIVE] = Transform::toJson(avatar.getTransform());
}
// Skeleton pose
QJsonArray jointArray;
for (const auto& joint : _avatar->getRawJointData()) {
for (const auto& joint : avatar.getRawJointData()) {
jointArray.push_back(toJsonValue(joint));
}
root[JSON_AVATAR_JOINT_ARRAY] = jointArray;
const HeadData* head = _avatar->getHeadData();
const HeadData* head = avatar.getHeadData();
if (head) {
QJsonObject headJson;
QJsonArray blendshapeCoefficients;
@ -1616,8 +1473,8 @@ QByteArray avatarStateToFrame(const AvatarData* _avatar) {
headJson[JSON_AVATAR_HEAD_ROTATION] = toJsonValue(head->getRawOrientation());
headJson[JSON_AVATAR_HEAD_LEAN_FORWARD] = QJsonValue(head->getLeanForward());
headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS] = QJsonValue(head->getLeanSideways());
vec3 relativeLookAt = glm::inverse(_avatar->getOrientation()) *
(head->getLookAtPosition() - _avatar->getPosition());
vec3 relativeLookAt = glm::inverse(avatar.getOrientation()) *
(head->getLookAtPosition() - avatar.getPosition());
headJson[JSON_AVATAR_HEAD_LOOKAT] = toJsonValue(relativeLookAt);
root[JSON_AVATAR_HEAD] = headJson;
}
@ -1625,26 +1482,29 @@ QByteArray avatarStateToFrame(const AvatarData* _avatar) {
return QJsonDocument(root).toBinaryData();
}
void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) {
void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
QJsonObject root = doc.object();
if (root.contains(JSON_AVATAR_HEAD_MODEL)) {
auto faceModelURL = root[JSON_AVATAR_HEAD_MODEL].toString();
if (faceModelURL != _avatar->getFaceModelURL().toString()) {
_avatar->setFaceModelURL(faceModelURL);
if (faceModelURL != result.getFaceModelURL().toString()) {
QUrl faceModel(faceModelURL);
if (faceModel.isValid()) {
result.setFaceModelURL(faceModel);
}
}
}
if (root.contains(JSON_AVATAR_BODY_MODEL)) {
auto bodyModelURL = root[JSON_AVATAR_BODY_MODEL].toString();
if (bodyModelURL != _avatar->getSkeletonModelURL().toString()) {
_avatar->setSkeletonModelURL(bodyModelURL);
if (bodyModelURL != result.getSkeletonModelURL().toString()) {
result.setSkeletonModelURL(bodyModelURL);
}
}
if (root.contains(JSON_AVATAR_DISPLAY_NAME)) {
auto newDisplayName = root[JSON_AVATAR_DISPLAY_NAME].toString();
if (newDisplayName != _avatar->getDisplayName()) {
_avatar->setDisplayName(newDisplayName);
if (newDisplayName != result.getDisplayName()) {
result.setDisplayName(newDisplayName);
}
}
@ -1656,18 +1516,18 @@ void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) {
// The first is more useful for playing back recordings on your own avatar, while
// the latter is more useful for playing back other avatars within your scene.
auto currentBasis = _avatar->getRecordingBasis();
auto currentBasis = result.getRecordingBasis();
if (!currentBasis) {
currentBasis = std::make_shared<Transform>(Transform::fromJson(root[JSON_AVATAR_BASIS]));
}
auto relativeTransform = Transform::fromJson(root[JSON_AVATAR_RELATIVE]);
auto worldTransform = currentBasis->worldTransform(relativeTransform);
_avatar->setPosition(worldTransform.getTranslation());
_avatar->setOrientation(worldTransform.getRotation());
result.setPosition(worldTransform.getTranslation());
result.setOrientation(worldTransform.getRotation());
// TODO: find a way to record/playback the Scale of the avatar
//_avatar->setTargetScale(worldTransform.getScale().x);
//result.setTargetScale(worldTransform.getScale().x);
}
@ -1689,13 +1549,13 @@ void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) {
for (const auto& joint : jointArray) {
jointRotations.push_back(joint.rotation);
}
_avatar->setJointRotations(jointRotations);
result.setJointRotations(jointRotations);
}
#if 0
// Most head data is relative to the avatar, and needs no basis correction,
// but the lookat vector does need correction
HeadData* head = _avatar->_headData;
HeadData* head = result._headData;
if (head && root.contains(JSON_AVATAR_HEAD)) {
QJsonObject headJson = root[JSON_AVATAR_HEAD].toObject();
if (headJson.contains(JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS)) {
@ -1718,7 +1578,7 @@ void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) {
if (headJson.contains(JSON_AVATAR_HEAD_LOOKAT)) {
auto relativeLookAt = vec3FromJsonValue(headJson[JSON_AVATAR_HEAD_LOOKAT]);
if (glm::length2(relativeLookAt) > 0.01) {
head->setLookAtPosition((_avatar->getOrientation() * relativeLookAt) + _avatar->getPosition());
head->setLookAtPosition((result.getOrientation() * relativeLookAt) + result.getPosition());
}
}
}

View file

@ -50,13 +50,12 @@ typedef unsigned long long quint64;
#include <Node.h>
#include <RegisteredMetaTypes.h>
#include <SimpleMovingAverage.h>
#include <recording/Forward.h>
#include "AABox.h"
#include "HandData.h"
#include "HeadData.h"
#include "PathUtils.h"
#include "Player.h"
#include "Recorder.h"
#include "Referential.h"
using AvatarSharedPointer = std::shared_ptr<AvatarData>;
@ -165,7 +164,13 @@ class AvatarData : public QObject {
Q_PROPERTY(QStringList jointNames READ getJointNames)
Q_PROPERTY(QUuid sessionUUID READ getSessionUUID)
public:
static const QString FRAME_NAME;
static void fromFrame(const QByteArray& frameData, AvatarData& avatar);
static QByteArray toFrame(const AvatarData& avatar);
AvatarData();
virtual ~AvatarData();
@ -348,25 +353,6 @@ public slots:
void setJointMappingsFromNetworkReply();
void setSessionUUID(const QUuid& sessionUUID) { _sessionUUID = sessionUUID; }
bool hasReferential();
bool isPlaying();
bool isPaused();
float playerElapsed();
float playerLength();
void loadRecording(const QString& filename);
void startPlaying();
void setPlayerVolume(float volume);
void setPlayerAudioOffset(float audioOffset);
void setPlayerTime(float time);
void setPlayFromCurrentLocation(bool playFromCurrentLocation);
void setPlayerLoop(bool loop);
void setPlayerUseDisplayName(bool useDisplayName);
void setPlayerUseAttachments(bool useAttachments);
void setPlayerUseHeadModel(bool useHeadModel);
void setPlayerUseSkeletonModel(bool useSkeletonModel);
void play();
void pausePlayer();
void stopPlaying();
protected:
QUuid _sessionUUID;
@ -421,8 +407,6 @@ protected:
QWeakPointer<Node> _owningAvatarMixer;
recording::DeckPointer _player;
/// Loads the joint indices, names from the FST file (if any)
virtual void updateJointMappings();
void changeReferential(Referential* ref);
@ -437,7 +421,7 @@ protected:
QMutex avatarLock; // Name is redundant, but it aids searches.
// During recording, this holds the starting position, orientation & scale of the recorded avatar
// During playback, it holds the
// During playback, it holds the origin from which to play the relative positions in the clip
TransformPointer _recordingBasis;
private:

View file

@ -26,7 +26,7 @@
#include "OffscreenGlCanvas.h"
// FIXME move to threaded rendering with Qt 5.5
// #define QML_THREADED
//#define QML_THREADED
// Time between receiving a request to render the offscreen UI actually triggering
// the render. Could possibly be increased depending on the framerate we expect to
@ -72,7 +72,7 @@ public:
OffscreenGlCanvas::create(shareContext);
#ifdef QML_THREADED
// Qt 5.5
// _renderControl->prepareThread(_renderThread);
_renderControl->prepareThread(_renderThread);
_context->moveToThread(&_thread);
moveToThread(&_thread);
_thread.setObjectName("QML Thread");

View file

@ -23,7 +23,7 @@ Clip::Pointer Clip::fromFile(const QString& filePath) {
return result;
}
void Clip::toFile(const QString& filePath, Clip::Pointer clip) {
void Clip::toFile(const QString& filePath, const Clip::ConstPointer& clip) {
FileClip::write(filePath, clip->duplicate());
}
@ -31,19 +31,10 @@ Clip::Pointer Clip::newClip() {
return std::make_shared<BufferClip>();
}
Clip::Pointer Clip::duplicate() {
Clip::Pointer result = std::make_shared<BufferClip>();
Locker lock(_mutex);
Time currentPosition = position();
seek(0);
auto frame = nextFrame();
while (frame) {
result->addFrame(frame);
frame = nextFrame();
}
seek(currentPosition);
return result;
void Clip::seek(float offset) {
seekFrameTime(Frame::secondsToFrameTime(offset));
}
float Clip::position() const {
return Frame::frameTimeToSeconds(positionFrameTime());
};

View file

@ -16,6 +16,8 @@
#include <QtCore/QObject>
#include "Frame.h"
class QIODevice;
namespace recording {
@ -23,16 +25,22 @@ namespace recording {
class Clip {
public:
using Pointer = std::shared_ptr<Clip>;
using ConstPointer = std::shared_ptr<const Clip>;
virtual ~Clip() {}
Pointer duplicate();
virtual Pointer duplicate() const = 0;
virtual Time duration() const = 0;
virtual QString getName() const = 0;
virtual float duration() const = 0;
virtual size_t frameCount() const = 0;
virtual void seek(Time offset) = 0;
virtual Time position() const = 0;
virtual void seek(float offset) final;
virtual float position() const final;
virtual void seekFrameTime(Frame::Time offset) = 0;
virtual Frame::Time positionFrameTime() const = 0;
virtual FrameConstPointer peekFrame() const = 0;
virtual FrameConstPointer nextFrame() = 0;
@ -40,7 +48,7 @@ public:
virtual void addFrame(FrameConstPointer) = 0;
static Pointer fromFile(const QString& filePath);
static void toFile(const QString& filePath, Pointer clip);
static void toFile(const QString& filePath, const ConstPointer& clip);
static Pointer newClip();
protected:

View file

@ -14,31 +14,46 @@
#include "Clip.h"
#include "Frame.h"
#include "Logging.h"
#include "impl/OffsetClip.h"
using namespace recording;
void Deck::queueClip(ClipPointer clip, Time timeOffset) {
Deck::Deck(QObject* parent)
: QObject(parent) {}
void Deck::queueClip(ClipPointer clip, float timeOffset) {
Locker lock(_mutex);
if (!clip) {
qCWarning(recordingLog) << "Clip invalid, ignoring";
return;
}
// FIXME if the time offset is not zero, wrap the clip in a OffsetClip wrapper
// FIXME disabling multiple clips for now
_clips.clear();
// if the time offset is not zero, wrap in an OffsetClip
if (timeOffset != 0.0f) {
clip = std::make_shared<OffsetClip>(clip, timeOffset);
}
_clips.push_back(clip);
_length = std::max(_length, clip->duration());
}
void Deck::play() {
Locker lock(_mutex);
if (_pause) {
_pause = false;
_startEpoch = usecTimestampNow() - (_position * USECS_PER_MSEC);
_startEpoch = Frame::epochForFrameTime(_position);
emit playbackStateChanged();
processFrames();
}
}
void Deck::pause() {
Locker lock(_mutex);
if (!_pause) {
_pause = true;
emit playbackStateChanged();
@ -47,9 +62,9 @@ void Deck::pause() {
Clip::Pointer Deck::getNextClip() {
Clip::Pointer result;
Time soonestFramePosition = INVALID_TIME;
auto soonestFramePosition = Frame::INVALID_TIME;
for (const auto& clip : _clips) {
Time nextFramePosition = clip->position();
auto nextFramePosition = clip->positionFrameTime();
if (nextFramePosition < soonestFramePosition) {
result = clip;
soonestFramePosition = nextFramePosition;
@ -58,11 +73,16 @@ Clip::Pointer Deck::getNextClip() {
return result;
}
void Deck::seek(Time position) {
_position = position;
// FIXME reset the frames to the appropriate spot
void Deck::seek(float position) {
Locker lock(_mutex);
_position = Frame::secondsToFrameTime(position);
// Recompute the start epoch
_startEpoch = Frame::epochForFrameTime(_position);
// reset the clips to the appropriate spot
for (auto& clip : _clips) {
clip->seek(position);
clip->seekFrameTime(_position);
}
if (!_pause) {
@ -71,35 +91,46 @@ void Deck::seek(Time position) {
}
}
Time Deck::position() const {
if (_pause) {
return _position;
float Deck::position() const {
Locker lock(_mutex);
auto currentPosition = _position;
if (!_pause) {
currentPosition = Frame::frameTimeFromEpoch(_startEpoch);
}
return (usecTimestampNow() - _startEpoch) / USECS_PER_MSEC;
return Frame::frameTimeToSeconds(currentPosition);
}
static const Time MIN_FRAME_WAIT_INTERVAL_MS = 1;
static const Frame::Time MIN_FRAME_WAIT_INTERVAL = Frame::secondsToFrameTime(0.001f);
static const Frame::Time MAX_FRAME_PROCESSING_TIME = Frame::secondsToFrameTime(0.002f);
void Deck::processFrames() {
Locker lock(_mutex);
if (_pause) {
return;
}
_position = position();
auto triggerPosition = _position + MIN_FRAME_WAIT_INTERVAL_MS;
auto startingPosition = Frame::frameTimeFromEpoch(_startEpoch);
auto triggerPosition = startingPosition + MIN_FRAME_WAIT_INTERVAL;
Clip::Pointer nextClip;
// FIXME add code to start dropping frames if we fall behind.
// Alternatively, add code to cache frames here and then process only the last frame of a given type
// ... the latter will work for Avatar, but not well for audio I suspect.
for (nextClip = getNextClip(); nextClip; nextClip = getNextClip()) {
// If the clip is too far in the future, just break out of the handling loop
Time framePosition = nextClip->position();
if (framePosition > triggerPosition) {
auto currentPosition = Frame::frameTimeFromEpoch(_startEpoch);
if ((currentPosition - startingPosition) >= MAX_FRAME_PROCESSING_TIME) {
qCWarning(recordingLog) << "Exceeded maximum frame processing time, breaking early";
break;
}
// If the clip is too far in the future, just break out of the handling loop
Frame::Time framePosition = nextClip->positionFrameTime();
if (framePosition > triggerPosition) {
break;
}
// Handle the frame and advance the clip
Frame::handleFrame(nextClip->nextFrame());
}
if (!nextClip) {
qCDebug(recordingLog) << "No more frames available";
// No more frames available, so handle the end of playback
@ -107,6 +138,9 @@ void Deck::processFrames() {
qCDebug(recordingLog) << "Looping enabled, seeking back to beginning";
// If we have looping enabled, start the playback over
seek(0);
// FIXME configure the recording scripting interface to reset the avatar basis on a loop
// if doing relative movement
emit looped();
} else {
// otherwise pause playback
pause();
@ -115,9 +149,67 @@ void Deck::processFrames() {
}
// If we have more clip frames available, set the timer for the next one
Time nextClipPosition = nextClip->position();
Time interval = nextClipPosition - _position;
_position = Frame::frameTimeFromEpoch(_startEpoch);
auto nextFrameTime = nextClip->positionFrameTime();
auto interval = Frame::frameTimeToMilliseconds(nextFrameTime - _position);
_timer.singleShot(interval, [this] {
processFrames();
});
}
// Remove a specific clip from the deck, if present.
void Deck::removeClip(const ClipConstPointer& clip) {
    Locker lock(_mutex);
    // BUG FIX: std::remove_if only shuffles elements and returns an iterator;
    // without a matching erase() nothing was ever removed.  Use the list's
    // own remove_if, which actually erases matching elements.
    _clips.remove_if([&](const Clip::ConstPointer& testClip)->bool {
        return (clip == testClip);
    });
}
// Remove every queued clip whose name matches clipName.
void Deck::removeClip(const QString& clipName) {
    Locker lock(_mutex);
    // BUG FIX: the std::remove_if return value was discarded, so the clips
    // stayed in the container.  std::list::remove_if erases them for real.
    _clips.remove_if([&](const Clip::ConstPointer& clip)->bool {
        return (clip->getName() == clipName);
    });
}
// Drop every queued clip from the deck.
void Deck::removeAllClips() {
    Locker lock(_mutex);
    _clips.clear();
}
// Return a snapshot copy of the queued clips.
// NOTE(review): clipName is currently ignored and ALL clips are returned --
// confirm whether filtering by name was intended here.
Deck::ClipList Deck::getClips(const QString& clipName) const {
    Locker lock(_mutex);
    ClipList result = _clips;
    return result;
}
// True while the deck is actively advancing frames (i.e. not paused).
bool Deck::isPlaying() {
    Locker lock(_mutex);
    return !_pause;
}
// True when playback is halted (the deck starts out paused).
bool Deck::isPaused() const {
    Locker lock(_mutex);
    return _pause;
}
// Halt playback and rewind to the beginning of the timeline.
void Deck::stop() {
    Locker lock(_mutex);
    pause();   // _mutex is recursive, so re-locking inside pause()/seek() is safe
    seek(0.0f);
}
// Duration of the longest queued clip, in seconds (0 when empty).
float Deck::length() const {
    Locker lock(_mutex);
    return _length;
}
// Enable/disable looping: when on, playback seeks back to 0 at the end
// instead of pausing.
void Deck::loop(bool enable) {
    Locker lock(_mutex);
    _loop = enable;
}
// Whether looping playback is currently enabled.
bool Deck::isLooping() const {
    Locker lock(_mutex);
    return _loop;
}

View file

@ -12,56 +12,70 @@
#include <utility>
#include <list>
#include <mutex>
#include <QtCore/QObject>
#include <QtCore/QTimer>
#include <QtCore/QList>
#include <DependencyManager.h>
#include "Forward.h"
#include "Frame.h"
namespace recording {
class Deck : public QObject {
class Deck : public QObject, public ::Dependency {
Q_OBJECT
public:
using ClipList = std::list<ClipPointer>;
using Pointer = std::shared_ptr<Deck>;
Deck(QObject* parent = nullptr) : QObject(parent) {}
Deck(QObject* parent = nullptr);
// Place a clip on the deck for recording or playback
void queueClip(ClipPointer clip, Time timeOffset = 0.0f);
void queueClip(ClipPointer clip, float timeOffset = 0.0f);
void removeClip(const ClipConstPointer& clip);
void removeClip(const QString& clipName);
void removeAllClips();
ClipList getClips(const QString& clipName) const;
void play();
bool isPlaying() { return !_pause; }
bool isPlaying();
void pause();
bool isPaused() const { return _pause; }
bool isPaused() const;
void stop() { pause(); seek(0.0f); }
void stop();
Time length() const { return _length; }
float length() const;
void loop(bool enable = true) { _loop = enable; }
bool isLooping() const { return _loop; }
void loop(bool enable = true);
bool isLooping() const;
Time position() const;
void seek(Time position);
float position() const;
void seek(float position);
signals:
void playbackStateChanged();
void looped();
private:
using Clips = std::list<ClipPointer>;
using Mutex = std::recursive_mutex;
using Locker = std::unique_lock<Mutex>;
ClipPointer getNextClip();
void processFrames();
mutable Mutex _mutex;
QTimer _timer;
Clips _clips;
ClipList _clips;
quint64 _startEpoch { 0 };
Time _position { 0 };
Frame::Time _position { 0 };
bool _pause { true };
bool _loop { false };
Time _length { 0 };
float _length { 0 };
};
}

View file

@ -16,10 +16,6 @@
namespace recording {
using Time = uint32_t;
static const Time INVALID_TIME = std::numeric_limits<uint32_t>::max();
using FrameType = uint16_t;
using FrameSize = uint16_t;
@ -36,16 +32,14 @@ class Clip;
using ClipPointer = std::shared_ptr<Clip>;
using ClipConstPointer = std::shared_ptr<const Clip>;
// An interface for playing back clips
class Deck;
using DeckPointer = std::shared_ptr<Deck>;
// An interface for recording a single clip
class Recorder;
using RecorderPointer = std::shared_ptr<Recorder>;
}
#endif

View file

@ -12,6 +12,9 @@
#include <QtCore/QMap>
#include <NumericalConstants.h>
#include <SharedUtil.h>
using namespace recording;
// FIXME move to shared
@ -73,7 +76,31 @@ using Locker = std::unique_lock<Mutex>;
static Mutex mutex;
static std::once_flag once;
// Convert a frame time (stored in milliseconds) to float seconds.
float FrameHeader::frameTimeToSeconds(Frame::Time frameTime) {
    return static_cast<float>(frameTime) / MSECS_PER_SECOND;
}
// Frame times are already expressed in milliseconds, so this is an identity
// conversion kept for interface symmetry with the other unit helpers.
uint32_t FrameHeader::frameTimeToMilliseconds(Frame::Time frameTime) {
    return frameTime;
}
// Frame time (milliseconds) elapsed since the given wall-clock epoch (microseconds).
Frame::Time FrameHeader::frameTimeFromEpoch(quint64 epoch) {
    const quint64 elapsedUsecs = usecTimestampNow() - epoch;
    return static_cast<Frame::Time>(elapsedUsecs / USECS_PER_MSEC);
}
// Inverse of frameTimeFromEpoch: the wall-clock epoch (microseconds) that
// would place the given frame time at "now".
quint64 FrameHeader::epochForFrameTime(Time frameTime) {
    return usecTimestampNow() - (frameTime * USECS_PER_MSEC);
}
// Convert float seconds to a frame time in integer milliseconds (truncating).
Frame::Time FrameHeader::secondsToFrameTime(float seconds) {
    return static_cast<Time>(seconds * MSECS_PER_SECOND);
}
FrameType Frame::registerFrameType(const QString& frameTypeName) {
Locker lock(mutex);

View file

@ -13,26 +13,46 @@
#include "Forward.h"
#include <functional>
#include <stdint.h>
#include <QtCore/QObject>
namespace recording {
struct Frame {
struct FrameHeader {
using Time = uint32_t;
static const Time INVALID_TIME = UINT32_MAX;
static const FrameType TYPE_INVALID = 0xFFFF;
static const FrameType TYPE_HEADER = 0x0;
static Time secondsToFrameTime(float seconds);
static float frameTimeToSeconds(Time frameTime);
static uint32_t frameTimeToMilliseconds(Time frameTime);
static Time frameTimeFromEpoch(quint64 epoch);
static quint64 epochForFrameTime(Time frameTime);
FrameType type { TYPE_INVALID };
Time timeOffset { 0 }; // milliseconds
FrameHeader() {}
FrameHeader(FrameType type, Time timeOffset)
: type(type), timeOffset(timeOffset) { }
};
struct Frame : public FrameHeader {
public:
using Pointer = std::shared_ptr<Frame>;
using ConstPointer = std::shared_ptr<const Frame>;
using Handler = std::function<void(Frame::ConstPointer frame)>;
static const FrameType TYPE_INVALID = 0xFFFF;
static const FrameType TYPE_HEADER = 0x0;
FrameType type { TYPE_INVALID };
Time timeOffset { 0 }; // milliseconds
QByteArray data;
Frame() {}
Frame(FrameType type, float timeOffset, const QByteArray& data)
: type(type), timeOffset(timeOffset), data(data) {}
: FrameHeader(type, timeOffset), data(data) { }
static FrameType registerFrameType(const QString& frameTypeName);
static QMap<QString, FrameType> getFrameTypes();

View file

@ -16,20 +16,23 @@
using namespace recording;
Recorder::~Recorder() {
Recorder::Recorder(QObject* parent)
: QObject(parent) {}
}
Time Recorder::position() {
float Recorder::position() {
Locker lock(_mutex);
if (_clip) {
return _clip->duration();
}
return 0.0f;
}
void Recorder::start() {
Locker lock(_mutex);
if (!_recording) {
_recording = true;
if (!_clip) {
_clip = std::make_shared<BufferClip>();
}
// FIXME for now just record a new clip every time
_clip = std::make_shared<BufferClip>();
_startEpoch = usecTimestampNow();
_timer.start();
emit recordingStateChanged();
@ -37,6 +40,7 @@ void Recorder::start() {
}
void Recorder::stop() {
Locker lock(_mutex);
if (_recording) {
_recording = false;
_elapsed = _timer.elapsed();
@ -45,14 +49,17 @@ void Recorder::stop() {
}
bool Recorder::isRecording() {
Locker lock(_mutex);
return _recording;
}
void Recorder::clear() {
Locker lock(_mutex);
_clip.reset();
}
void Recorder::recordFrame(FrameType type, QByteArray frameData) {
Locker lock(_mutex);
if (!_recording || !_clip) {
return;
}
@ -65,6 +72,7 @@ void Recorder::recordFrame(FrameType type, QByteArray frameData) {
}
ClipPointer Recorder::getClip() {
Locker lock(_mutex);
return _clip;
}

View file

@ -10,24 +10,25 @@
#ifndef hifi_Recording_Recorder_h
#define hifi_Recording_Recorder_h
#include "Forward.h"
#include <mutex>
#include <QtCore/QObject>
#include <QtCore/QElapsedTimer>
#include <DependencyManager.h>
#include "Forward.h"
namespace recording {
// An interface for interacting with clips, creating them by recording or
// playing them back. Also serialization to and from files / network sources
class Recorder : public QObject {
class Recorder : public QObject, public Dependency {
Q_OBJECT
public:
using Pointer = std::shared_ptr<Recorder>;
Recorder(QObject* parent = nullptr);
Recorder(QObject* parent = nullptr) : QObject(parent) {}
virtual ~Recorder();
Time position();
float position();
// Start recording frames
void start();
@ -49,6 +50,10 @@ signals:
void recordingStateChanged();
private:
using Mutex = std::recursive_mutex;
using Locker = std::unique_lock<Mutex>;
Mutex _mutex;
QElapsedTimer _timer;
ClipPointer _clip;
quint64 _elapsed { 0 };

View file

@ -0,0 +1,100 @@
//
// Created by Bradley Austin Davis 2015/11/05
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#ifndef hifi_Recording_Impl_ArrayClip_h
#define hifi_Recording_Impl_ArrayClip_h
#include "../Clip.h"

#include <algorithm>
#include <mutex>
#include <vector>
namespace recording {
// A Clip implementation backed by a random-access vector of per-frame
// records.  Derived classes pick the record type T (e.g. a full Frame, or a
// header pointing into a mapped file) and define how a record is
// materialized into a Frame via readFrame().  Public operations are guarded
// by the (recursive) mutex inherited from Clip.
template <typename T>
class ArrayClip : public Clip {
public:
    // Duration in seconds: the time offset of the latest frame, or 0 for an
    // empty clip (records are kept sorted by timeOffset).
    virtual float duration() const override {
        Locker lock(_mutex);
        if (_frames.empty()) {
            return 0.0f;
        }
        return Frame::frameTimeToSeconds((*_frames.rbegin()).timeOffset);
    }

    virtual size_t frameCount() const override {
        Locker lock(_mutex);
        return _frames.size();
    }

    // Deep-copy this clip by materializing every stored record into a Frame
    // and appending it to a fresh clip instance.
    // NOTE(review): declared override for consistency with OffsetClip's
    // duplicate() override — Clip declares it virtual.
    virtual Clip::Pointer duplicate() const override {
        auto result = newClip();
        Locker lock(_mutex);
        for (size_t i = 0; i < _frames.size(); ++i) {
            result->addFrame(readFrame(i));
        }
        return result;
    }

    // Move the read cursor to the first record whose time offset is >= offset.
    virtual void seekFrameTime(Frame::Time offset) override {
        Locker lock(_mutex);
        auto itr = std::lower_bound(_frames.begin(), _frames.end(), offset,
            [](const T& a, Frame::Time b)->bool {
                return a.timeOffset < b;
            }
        );
        _frameIndex = itr - _frames.begin();
    }

    // Time offset of the record under the read cursor, or INVALID_TIME when
    // the cursor is past the end.
    virtual Frame::Time positionFrameTime() const override {
        Locker lock(_mutex);
        Frame::Time result = Frame::INVALID_TIME;
        if (_frameIndex < _frames.size()) {
            result = _frames[_frameIndex].timeOffset;
        }
        return result;
    }

    // Materialize the frame under the cursor without advancing; null at end.
    virtual FrameConstPointer peekFrame() const override {
        Locker lock(_mutex);
        FrameConstPointer result;
        if (_frameIndex < _frames.size()) {
            result = readFrame(_frameIndex);
        }
        return result;
    }

    // Materialize the frame under the cursor and advance; null at end.
    virtual FrameConstPointer nextFrame() override {
        Locker lock(_mutex);
        FrameConstPointer result;
        if (_frameIndex < _frames.size()) {
            result = readFrame(_frameIndex++);
        }
        return result;
    }

    // Advance the cursor without materializing a frame; no-op at end.
    virtual void skipFrame() override {
        Locker lock(_mutex);
        if (_frameIndex < _frames.size()) {
            ++_frameIndex;
        }
    }

protected:
    // Rewind the read cursor.  Locked for consistency with the other
    // cursor-mutating operations; the mutex is recursive, so this remains
    // safe when invoked from an already-locked context.
    virtual void reset() override {
        Locker lock(_mutex);
        _frameIndex = 0;
    }

    // Materialize the record at index into a Frame.  Internal only: callers
    // already hold the lock.
    virtual FrameConstPointer readFrame(size_t index) const = 0;

    // Records sorted ascending by timeOffset.
    std::vector<T> _frames;
    // Read cursor (index of the next record to return).
    mutable size_t _frameIndex { 0 };
};
}
#endif

View file

@ -8,85 +8,40 @@
#include "BufferClip.h"
#include <NumericalConstants.h>
#include <QtCore/QDebug>
#include <NumericalConstants.h>
#include "../Frame.h"
using namespace recording;
void BufferClip::seek(Time offset) {
Locker lock(_mutex);
auto itr = std::lower_bound(_frames.begin(), _frames.end(), offset,
[](Frame::ConstPointer a, Time b)->bool {
return a->timeOffset < b;
}
);
_frameIndex = itr - _frames.begin();
// Buffer clips are identified by a name assigned at construction
// (a generated UUID string by default — see the _name initializer).
QString BufferClip::getName() const {
    return _name;
}
Time BufferClip::position() const {
Locker lock(_mutex);
Time result = INVALID_TIME;
if (_frameIndex < _frames.size()) {
result = _frames[_frameIndex]->timeOffset;
}
return result;
}
FrameConstPointer BufferClip::peekFrame() const {
Locker lock(_mutex);
FrameConstPointer result;
if (_frameIndex < _frames.size()) {
result = _frames[_frameIndex];
}
return result;
}
FrameConstPointer BufferClip::nextFrame() {
Locker lock(_mutex);
FrameConstPointer result;
if (_frameIndex < _frames.size()) {
result = _frames[_frameIndex];
++_frameIndex;
}
return result;
}
void BufferClip::addFrame(FrameConstPointer newFrame) {
if (newFrame->timeOffset < 0.0f) {
throw std::runtime_error("Frames may not have negative time offsets");
}
auto currentPosition = position();
seek(newFrame->timeOffset);
{
Locker lock(_mutex);
_frames.insert(_frames.begin() + _frameIndex, newFrame);
}
seek(currentPosition);
}
void BufferClip::skipFrame() {
Locker lock(_mutex);
if (_frameIndex < _frames.size()) {
++_frameIndex;
auto itr = std::lower_bound(_frames.begin(), _frames.end(), newFrame->timeOffset,
[](const Frame& a, Frame::Time b)->bool {
return a.timeOffset < b;
}
);
auto newFrameIndex = itr - _frames.begin();
//qDebug() << "Adding frame with time offset " << newFrame->timeOffset << " @ index " << newFrameIndex;
_frames.insert(_frames.begin() + newFrameIndex, Frame(*newFrame));
}
// Internal helper: callers already hold the clip lock, so none is taken here.
FrameConstPointer BufferClip::readFrame(size_t frameIndex) const {
    if (frameIndex >= _frames.size()) {
        // Out-of-range reads yield a null pointer rather than throwing.
        return FrameConstPointer();
    }
    // Copy the stored frame into a fresh shared instance.
    return std::make_shared<Frame>(_frames[frameIndex]);
}
void BufferClip::reset() {
Locker lock(_mutex);
_frameIndex = 0;
}
Time BufferClip::duration() const {
if (_frames.empty()) {
return 0;
}
return (*_frames.rbegin())->timeOffset;
}
size_t BufferClip::frameCount() const {
return _frames.size();
}

View file

@ -10,33 +10,22 @@
#ifndef hifi_Recording_Impl_BufferClip_h
#define hifi_Recording_Impl_BufferClip_h
#include "../Clip.h"
#include "ArrayClip.h"
#include <mutex>
#include <QtCore/QUuid>
namespace recording {
class BufferClip : public Clip {
class BufferClip : public ArrayClip<Frame> {
public:
using Pointer = std::shared_ptr<BufferClip>;
virtual ~BufferClip() {}
virtual Time duration() const override;
virtual size_t frameCount() const override;
virtual void seek(Time offset) override;
virtual Time position() const override;
virtual FrameConstPointer peekFrame() const override;
virtual FrameConstPointer nextFrame() override;
virtual void skipFrame() override;
virtual QString getName() const override;
virtual void addFrame(FrameConstPointer) override;
private:
virtual void reset() override;
std::vector<FrameConstPointer> _frames;
virtual FrameConstPointer readFrame(size_t index) const override;
QString _name { QUuid().toString() };
mutable size_t _frameIndex { 0 };
};

View file

@ -18,15 +18,15 @@
#include "../Frame.h"
#include "../Logging.h"
#include "BufferClip.h"
using namespace recording;
static const qint64 MINIMUM_FRAME_SIZE = sizeof(FrameType) + sizeof(Time) + sizeof(FrameSize);
static const qint64 MINIMUM_FRAME_SIZE = sizeof(FrameType) + sizeof(Frame::Time) + sizeof(FrameSize);
static const QString FRAME_TYPE_MAP = QStringLiteral("frameTypes");
static const QString FRAME_COMREPSSION_FLAG = QStringLiteral("compressed");
using FrameHeaderList = std::list<FileClip::FrameHeader>;
using FrameTranslationMap = QMap<FrameType, FrameType>;
FrameTranslationMap parseTranslationMap(const QJsonDocument& doc) {
@ -49,19 +49,18 @@ FrameTranslationMap parseTranslationMap(const QJsonDocument& doc) {
}
FrameHeaderList parseFrameHeaders(uchar* const start, const qint64& size) {
using FrameHeader = FileClip::FrameHeader;
FrameHeaderList results;
FileFrameHeaderList parseFrameHeaders(uchar* const start, const qint64& size) {
FileFrameHeaderList results;
auto current = start;
auto end = current + size;
// Read all the frame headers
// FIXME move to Frame::readHeader?
while (end - current >= MINIMUM_FRAME_SIZE) {
FrameHeader header;
FileFrameHeader header;
memcpy(&(header.type), current, sizeof(FrameType));
current += sizeof(FrameType);
memcpy(&(header.timeOffset), current, sizeof(Time));
current += sizeof(Time);
memcpy(&(header.timeOffset), current, sizeof(Frame::Time));
current += sizeof(Frame::Time);
memcpy(&(header.size), current, sizeof(FrameSize));
current += sizeof(FrameSize);
header.fileOffset = current - start;
@ -72,6 +71,11 @@ FrameHeaderList parseFrameHeaders(uchar* const start, const qint64& size) {
current += header.size;
results.push_back(header);
}
qDebug() << "Parsed source data into " << results.size() << " frames";
int i = 0;
for (const auto& frameHeader : results) {
qDebug() << "Frame " << i++ << " time " << frameHeader.timeOffset;
}
return results;
}
@ -89,7 +93,7 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) {
return;
}
FrameHeaderList parsedFrameHeaders = parseFrameHeaders(_map, size);
auto parsedFrameHeaders = parseFrameHeaders(_map, size);
// Verify that at least one frame exists and that the first frame is a header
if (0 == parsedFrameHeaders.size()) {
@ -110,6 +114,11 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) {
_fileHeader = QJsonDocument::fromBinaryData(fileHeaderData);
}
// Check for compression
{
_compressed = _fileHeader.object()[FRAME_COMREPSSION_FLAG].toBool();
}
// Find the type enum translation map and fix up the frame headers
{
FrameTranslationMap translationMap = parseTranslationMap(_fileHeader);
@ -120,19 +129,25 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) {
qDebug() << translationMap;
// Update the loaded headers with the frame data
_frameHeaders.reserve(parsedFrameHeaders.size());
_frames.reserve(parsedFrameHeaders.size());
for (auto& frameHeader : parsedFrameHeaders) {
if (!translationMap.contains(frameHeader.type)) {
continue;
}
frameHeader.type = translationMap[frameHeader.type];
_frameHeaders.push_back(frameHeader);
_frames.push_back(frameHeader);
}
}
}
// File clips are identified by the path of their backing file.
QString FileClip::getName() const {
    return _file.fileName();
}
// FIXME move to frame?
bool writeFrame(QIODevice& output, const Frame& frame) {
bool writeFrame(QIODevice& output, const Frame& frame, bool compressed = true) {
if (frame.type == Frame::TYPE_INVALID) {
qWarning() << "Attempting to write invalid frame";
return true;
@ -142,17 +157,24 @@ bool writeFrame(QIODevice& output, const Frame& frame) {
if (written != sizeof(FrameType)) {
return false;
}
written = output.write((char*)&(frame.timeOffset), sizeof(Time));
if (written != sizeof(Time)) {
//qDebug() << "Writing frame with time offset " << frame.timeOffset;
written = output.write((char*)&(frame.timeOffset), sizeof(Frame::Time));
if (written != sizeof(Frame::Time)) {
return false;
}
uint16_t dataSize = frame.data.size();
QByteArray frameData = frame.data;
if (compressed) {
frameData = qCompress(frameData);
}
uint16_t dataSize = frameData.size();
written = output.write((char*)&dataSize, sizeof(FrameSize));
if (written != sizeof(uint16_t)) {
return false;
}
if (dataSize != 0) {
written = output.write(frame.data);
written = output.write(frameData);
if (written != dataSize) {
return false;
}
@ -161,7 +183,8 @@ bool writeFrame(QIODevice& output, const Frame& frame) {
}
bool FileClip::write(const QString& fileName, Clip::Pointer clip) {
qCDebug(recordingLog) << "Writing clip to file " << fileName;
// FIXME need to move this to a different thread
//qCDebug(recordingLog) << "Writing clip to file " << fileName << " with " << clip->frameCount() << " frames";
if (0 == clip->frameCount()) {
return false;
@ -182,10 +205,14 @@ bool FileClip::write(const QString& fileName, Clip::Pointer clip) {
QJsonObject rootObject;
rootObject.insert(FRAME_TYPE_MAP, frameTypeObj);
// Always mark new files as compressed
rootObject.insert(FRAME_COMREPSSION_FLAG, true);
QByteArray headerFrameData = QJsonDocument(rootObject).toBinaryData();
if (!writeFrame(outputFile, Frame({ Frame::TYPE_HEADER, 0, headerFrameData }))) {
// Never compress the header frame
if (!writeFrame(outputFile, Frame({ Frame::TYPE_HEADER, 0, headerFrameData }), false)) {
return false;
}
}
clip->seek(0);
@ -207,73 +234,24 @@ FileClip::~FileClip() {
}
}
void FileClip::seek(Time offset) {
Locker lock(_mutex);
auto itr = std::lower_bound(_frameHeaders.begin(), _frameHeaders.end(), offset,
[](const FrameHeader& a, Time b)->bool {
return a.timeOffset < b;
}
);
_frameIndex = itr - _frameHeaders.begin();
}
Time FileClip::position() const {
Locker lock(_mutex);
Time result = INVALID_TIME;
if (_frameIndex < _frameHeaders.size()) {
result = _frameHeaders[_frameIndex].timeOffset;
}
return result;
}
FramePointer FileClip::readFrame(uint32_t frameIndex) const {
// Internal only function, needs no locking
FrameConstPointer FileClip::readFrame(size_t frameIndex) const {
FramePointer result;
if (frameIndex < _frameHeaders.size()) {
if (frameIndex < _frames.size()) {
result = std::make_shared<Frame>();
const FrameHeader& header = _frameHeaders[frameIndex];
const auto& header = _frames[frameIndex];
result->type = header.type;
result->timeOffset = header.timeOffset;
if (header.size) {
result->data.insert(0, reinterpret_cast<char*>(_map)+header.fileOffset, header.size);
if (_compressed) {
result->data = qUncompress(result->data);
}
}
}
return result;
}
FrameConstPointer FileClip::peekFrame() const {
Locker lock(_mutex);
return readFrame(_frameIndex);
}
FrameConstPointer FileClip::nextFrame() {
Locker lock(_mutex);
auto result = readFrame(_frameIndex);
if (_frameIndex < _frameHeaders.size()) {
++_frameIndex;
}
return result;
}
void FileClip::skipFrame() {
++_frameIndex;
}
void FileClip::reset() {
_frameIndex = 0;
}
void FileClip::addFrame(FrameConstPointer) {
throw std::runtime_error("File clips are read only");
}
Time FileClip::duration() const {
if (_frameHeaders.empty()) {
return 0;
}
return _frameHeaders.rbegin()->timeOffset;
}
size_t FileClip::frameCount() const {
return _frameHeaders.size();
}

View file

@ -10,31 +10,34 @@
#ifndef hifi_Recording_Impl_FileClip_h
#define hifi_Recording_Impl_FileClip_h
#include "../Clip.h"
#include "ArrayClip.h"
#include <mutex>
#include <QtCore/QFile>
#include <QtCore/QJsonDocument>
#include <mutex>
#include "../Frame.h"
namespace recording {
class FileClip : public Clip {
struct FileFrameHeader : public FrameHeader {
FrameType type;
Frame::Time timeOffset;
uint16_t size;
quint64 fileOffset;
};
using FileFrameHeaderList = std::list<FileFrameHeader>;
class FileClip : public ArrayClip<FileFrameHeader> {
public:
using Pointer = std::shared_ptr<FileClip>;
FileClip(const QString& file);
virtual ~FileClip();
virtual Time duration() const override;
virtual size_t frameCount() const override;
virtual void seek(Time offset) override;
virtual Time position() const override;
virtual FrameConstPointer peekFrame() const override;
virtual FrameConstPointer nextFrame() override;
virtual void skipFrame() override;
virtual QString getName() const override;
virtual void addFrame(FrameConstPointer) override;
const QJsonDocument& getHeader() {
@ -43,27 +46,12 @@ public:
static bool write(const QString& filePath, Clip::Pointer clip);
struct FrameHeader {
FrameType type;
Time timeOffset;
uint16_t size;
quint64 fileOffset;
};
private:
virtual void reset() override;
using FrameHeaderVector = std::vector<FrameHeader>;
FramePointer readFrame(uint32_t frameIndex) const;
virtual FrameConstPointer readFrame(size_t index) const override;
QJsonDocument _fileHeader;
QFile _file;
uint32_t _frameIndex { 0 };
uchar* _map { nullptr };
FrameHeaderVector _frameHeaders;
bool _compressed { true };
};
}

View file

@ -22,15 +22,15 @@
using namespace recording;
OffsetClip::OffsetClip(const Clip::Pointer& wrappedClip, Time offset)
: WrapperClip(wrappedClip), _offset(offset) { }
OffsetClip::OffsetClip(const Clip::Pointer& wrappedClip, float offset)
: WrapperClip(wrappedClip), _offset(Frame::secondsToFrameTime(offset)) { }
void OffsetClip::seek(Time offset) {
_wrappedClip->seek(offset - _offset);
void OffsetClip::seekFrameTime(Frame::Time offset) {
_wrappedClip->seekFrameTime(offset - _offset);
}
Time OffsetClip::position() const {
return _wrappedClip->position() + _offset;
Frame::Time OffsetClip::positionFrameTime() const {
return _wrappedClip->positionFrameTime() + _offset;
}
FrameConstPointer OffsetClip::peekFrame() const {
@ -45,7 +45,18 @@ FrameConstPointer OffsetClip::nextFrame() {
return result;
}
Time OffsetClip::duration() const {
float OffsetClip::duration() const {
return _wrappedClip->duration() + _offset;
}
// Offset clips take their name from the clip they wrap.
QString OffsetClip::getName() const {
    return _wrappedClip->getName();
}
Clip::Pointer OffsetClip::duplicate() const {
    // Deep-copy the wrapped clip, then re-wrap it with the same offset.
    // The constructor takes seconds, so convert the stored frame time back.
    auto wrappedCopy = _wrappedClip->duplicate();
    return std::make_shared<OffsetClip>(wrappedCopy, Frame::frameTimeToSeconds(_offset));
}

View file

@ -18,18 +18,20 @@ class OffsetClip : public WrapperClip {
public:
using Pointer = std::shared_ptr<OffsetClip>;
OffsetClip(const Clip::Pointer& wrappedClip, Time offset);
virtual ~OffsetClip();
OffsetClip(const Clip::Pointer& wrappedClip, float offset);
virtual Time duration() const override;
virtual void seek(Time offset) override;
virtual Time position() const override;
virtual QString getName() const override;
virtual Clip::Pointer duplicate() const override;
virtual float duration() const override;
virtual void seekFrameTime(Frame::Time offset) override;
virtual Frame::Time positionFrameTime() const override;
virtual FrameConstPointer peekFrame() const override;
virtual FrameConstPointer nextFrame() override;
protected:
const Time _offset;
const Frame::Time _offset;
};
}

View file

@ -22,11 +22,11 @@ using namespace recording;
WrapperClip::WrapperClip(const Clip::Pointer& wrappedClip)
: _wrappedClip(wrappedClip) { }
void WrapperClip::seek(Time offset) {
_wrappedClip->seek(offset);
// Forward seeks directly to the wrapped clip.
void WrapperClip::seekFrameTime(Frame::Time offset) {
    _wrappedClip->seekFrameTime(offset);
}
Time WrapperClip::position() const {
// Report the wrapped clip's current frame-time position.
Frame::Time WrapperClip::positionFrameTime() const {
    // Forward to the renamed accessor: Clip::position() was replaced by
    // positionFrameTime() in this API (compare OffsetClip::positionFrameTime
    // and this class's own seekFrameTime forwarding).
    return _wrappedClip->positionFrameTime();
}
@ -50,7 +50,7 @@ void WrapperClip::addFrame(FrameConstPointer) {
throw std::runtime_error("Wrapper clips are read only");
}
Time WrapperClip::duration() const {
// Duration (in seconds) is delegated unchanged to the wrapped clip.
float WrapperClip::duration() const {
    return _wrappedClip->duration();
}

View file

@ -24,13 +24,12 @@ public:
using Pointer = std::shared_ptr<WrapperClip>;
WrapperClip(const Clip::Pointer& wrappedClip);
virtual ~WrapperClip();
virtual Time duration() const override;
virtual float duration() const override;
virtual size_t frameCount() const override;
virtual void seek(Time offset) override;
virtual Time position() const override;
virtual void seekFrameTime(Frame::Time offset) override;
virtual Frame::Time positionFrameTime() const override;
virtual FrameConstPointer peekFrame() const override;
virtual FrameConstPointer nextFrame() override;