diff --git a/domain-server/src/DomainServer.cpp b/domain-server/src/DomainServer.cpp index 1db277f47f..d7bcec2431 100644 --- a/domain-server/src/DomainServer.cpp +++ b/domain-server/src/DomainServer.cpp @@ -272,6 +272,7 @@ void DomainServer::setupNodeListAndAssignments(const QUuid& sessionUUID) { packetReceiver.registerListener(PacketType::DomainListRequest, this, "processListRequestPacket"); packetReceiver.registerListener(PacketType::DomainServerPathQuery, this, "processPathQueryPacket"); packetReceiver.registerMessageListener(PacketType::NodeJsonStats, this, "processNodeJSONStatsPacket"); + packetReceiver.registerListener(PacketType::DomainDisconnectRequest, this, "processNodeDisconnectRequestPacket"); // NodeList won't be available to the settings manager when it is created, so call registerListener here packetReceiver.registerListener(PacketType::DomainSettingsRequest, &_settingsManager, "processSettingsRequestPacket"); @@ -1825,3 +1826,24 @@ void DomainServer::processPathQueryPacket(QSharedPointer packet) { } } } + +void DomainServer::processNodeDisconnectRequestPacket(QSharedPointer packet) { + // This packet has been matched to a source node and they're asking not to be in the domain anymore + auto limitedNodeList = DependencyManager::get(); + + const QUuid& nodeUUID = packet->getSourceID(); + + qDebug() << "Received a disconnect request from node with UUID" << nodeUUID; + + if (limitedNodeList->killNodeWithUUID(nodeUUID)) { + static auto removedNodePacket = NLPacket::create(PacketType::DomainServerRemovedNode, NUM_BYTES_RFC4122_UUID); + + removedNodePacket->reset(); + removedNodePacket->write(nodeUUID.toRfc4122()); + + // broadcast out the DomainServerRemovedNode message + limitedNodeList->eachNode([&limitedNodeList](const SharedNodePointer& otherNode){ + limitedNodeList->sendUnreliablePacket(*removedNodePacket, *otherNode); + }); + } +} diff --git a/domain-server/src/DomainServer.h b/domain-server/src/DomainServer.h index df42bf3ad9..e5b3d3b3fd 100644 --- 
a/domain-server/src/DomainServer.h +++ b/domain-server/src/DomainServer.h @@ -60,7 +60,8 @@ public slots: void processListRequestPacket(QSharedPointer packet, SharedNodePointer sendingNode); void processNodeJSONStatsPacket(QSharedPointer packetList, SharedNodePointer sendingNode); void processPathQueryPacket(QSharedPointer packet); - + void processNodeDisconnectRequestPacket(QSharedPointer packet); + private slots: void aboutToQuit(); diff --git a/examples/utilities/record/recorder.js b/examples/utilities/record/recorder.js index 40476626e8..d08cdd68f3 100644 --- a/examples/utilities/record/recorder.js +++ b/examples/utilities/record/recorder.js @@ -15,11 +15,11 @@ Script.include("../../libraries/toolBars.js"); var recordingFile = "recording.rec"; function setPlayerOptions() { - MyAvatar.setPlayFromCurrentLocation(true); - MyAvatar.setPlayerUseDisplayName(false); - MyAvatar.setPlayerUseAttachments(false); - MyAvatar.setPlayerUseHeadModel(false); - MyAvatar.setPlayerUseSkeletonModel(false); + Recording.setPlayFromCurrentLocation(true); + Recording.setPlayerUseDisplayName(false); + Recording.setPlayerUseAttachments(false); + Recording.setPlayerUseHeadModel(false); + Recording.setPlayerUseSkeletonModel(false); } var windowDimensions = Controller.getViewportDimensions(); @@ -64,16 +64,16 @@ function setupToolBar() { x: 0, y: 0, width: Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT, - alpha: MyAvatar.isPlaying() ? ALPHA_OFF : ALPHA_ON, + alpha: Recording.isPlaying() ? ALPHA_OFF : ALPHA_ON, visible: true - }, true, !MyAvatar.isRecording()); + }, true, !Recording.isRecording()); var playLoopWidthFactor = 1.65; playIcon = toolBar.addTool({ imageURL: TOOL_ICON_URL + "play-pause.svg", width: playLoopWidthFactor * Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT, - alpha: (MyAvatar.isRecording() || MyAvatar.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON, + alpha: (Recording.isRecording() || Recording.playerLength() === 0) ? 
ALPHA_OFF : ALPHA_ON, visible: true }, false); @@ -82,7 +82,7 @@ function setupToolBar() { subImage: { x: 0, y: 0, width: playLoopWidthFactor * Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT }, width: playLoopWidthFactor * Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT, - alpha: (MyAvatar.isRecording() || MyAvatar.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON, + alpha: (Recording.isRecording() || Recording.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON, visible: true }, false); @@ -93,7 +93,7 @@ function setupToolBar() { imageURL: TOOL_ICON_URL + "recording-save.svg", width: Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT, - alpha: (MyAvatar.isRecording() || MyAvatar.isPlaying() || MyAvatar.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON, + alpha: (Recording.isRecording() || Recording.isPlaying() || Recording.playerLength() === 0) ? ALPHA_OFF : ALPHA_ON, visible: true }, false); @@ -101,7 +101,7 @@ function setupToolBar() { imageURL: TOOL_ICON_URL + "recording-upload.svg", width: Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT, - alpha: (MyAvatar.isRecording() || MyAvatar.isPlaying()) ? ALPHA_OFF : ALPHA_ON, + alpha: (Recording.isRecording() || Recording.isPlaying()) ? ALPHA_OFF : ALPHA_ON, visible: true }, false); } @@ -147,23 +147,23 @@ function setupTimer() { function updateTimer() { var text = ""; - if (MyAvatar.isRecording()) { - text = formatTime(MyAvatar.recorderElapsed()); + if (Recording.isRecording()) { + text = formatTime(Recording.recorderElapsed()); } else { - text = formatTime(MyAvatar.playerElapsed()) + " / " + - formatTime(MyAvatar.playerLength()); + text = formatTime(Recording.playerElapsed()) + " / " + + formatTime(Recording.playerLength()); } Overlays.editOverlay(timer, { text: text }) - toolBar.changeSpacing(text.length * 8 + ((MyAvatar.isRecording()) ? 15 : 0), spacing); + toolBar.changeSpacing(text.length * 8 + ((Recording.isRecording()) ? 
15 : 0), spacing); - if (MyAvatar.isRecording()) { + if (Recording.isRecording()) { slider.pos = 1.0; - } else if (MyAvatar.playerLength() > 0) { - slider.pos = MyAvatar.playerElapsed() / MyAvatar.playerLength(); + } else if (Recording.playerLength() > 0) { + slider.pos = Recording.playerElapsed() / Recording.playerLength(); } Overlays.editOverlay(slider.foreground, { @@ -217,77 +217,77 @@ function moveUI() { function mousePressEvent(event) { clickedOverlay = Overlays.getOverlayAtPoint({ x: event.x, y: event.y }); - if (recordIcon === toolBar.clicked(clickedOverlay, false) && !MyAvatar.isPlaying()) { - if (!MyAvatar.isRecording()) { - MyAvatar.startRecording(); + if (recordIcon === toolBar.clicked(clickedOverlay, false) && !Recording.isPlaying()) { + if (!Recording.isRecording()) { + Recording.startRecording(); toolBar.selectTool(recordIcon, false); toolBar.setAlpha(ALPHA_OFF, playIcon); toolBar.setAlpha(ALPHA_OFF, playLoopIcon); toolBar.setAlpha(ALPHA_OFF, saveIcon); toolBar.setAlpha(ALPHA_OFF, loadIcon); } else { - MyAvatar.stopRecording(); + Recording.stopRecording(); toolBar.selectTool(recordIcon, true ); - MyAvatar.loadLastRecording(); + Recording.loadLastRecording(); toolBar.setAlpha(ALPHA_ON, playIcon); toolBar.setAlpha(ALPHA_ON, playLoopIcon); toolBar.setAlpha(ALPHA_ON, saveIcon); toolBar.setAlpha(ALPHA_ON, loadIcon); } - } else if (playIcon === toolBar.clicked(clickedOverlay) && !MyAvatar.isRecording()) { - if (MyAvatar.isPlaying()) { - MyAvatar.pausePlayer(); + } else if (playIcon === toolBar.clicked(clickedOverlay) && !Recording.isRecording()) { + if (Recording.isPlaying()) { + Recording.pausePlayer(); toolBar.setAlpha(ALPHA_ON, recordIcon); toolBar.setAlpha(ALPHA_ON, saveIcon); toolBar.setAlpha(ALPHA_ON, loadIcon); - } else if (MyAvatar.playerLength() > 0) { + } else if (Recording.playerLength() > 0) { setPlayerOptions(); - MyAvatar.setPlayerLoop(false); - MyAvatar.startPlaying(); + Recording.setPlayerLoop(false); + Recording.startPlaying(); 
toolBar.setAlpha(ALPHA_OFF, recordIcon); toolBar.setAlpha(ALPHA_OFF, saveIcon); toolBar.setAlpha(ALPHA_OFF, loadIcon); watchStop = true; } - } else if (playLoopIcon === toolBar.clicked(clickedOverlay) && !MyAvatar.isRecording()) { - if (MyAvatar.isPlaying()) { - MyAvatar.pausePlayer(); + } else if (playLoopIcon === toolBar.clicked(clickedOverlay) && !Recording.isRecording()) { + if (Recording.isPlaying()) { + Recording.pausePlayer(); toolBar.setAlpha(ALPHA_ON, recordIcon); toolBar.setAlpha(ALPHA_ON, saveIcon); toolBar.setAlpha(ALPHA_ON, loadIcon); - } else if (MyAvatar.playerLength() > 0) { + } else if (Recording.playerLength() > 0) { setPlayerOptions(); - MyAvatar.setPlayerLoop(true); - MyAvatar.startPlaying(); + Recording.setPlayerLoop(true); + Recording.startPlaying(); toolBar.setAlpha(ALPHA_OFF, recordIcon); toolBar.setAlpha(ALPHA_OFF, saveIcon); toolBar.setAlpha(ALPHA_OFF, loadIcon); } } else if (saveIcon === toolBar.clicked(clickedOverlay)) { - if (!MyAvatar.isRecording() && !MyAvatar.isPlaying() && MyAvatar.playerLength() != 0) { + if (!Recording.isRecording() && !Recording.isPlaying() && Recording.playerLength() != 0) { recordingFile = Window.save("Save recording to file", ".", "Recordings (*.hfr)"); if (!(recordingFile === "null" || recordingFile === null || recordingFile === "")) { - MyAvatar.saveRecording(recordingFile); + Recording.saveRecording(recordingFile); } } } else if (loadIcon === toolBar.clicked(clickedOverlay)) { - if (!MyAvatar.isRecording() && !MyAvatar.isPlaying()) { + if (!Recording.isRecording() && !Recording.isPlaying()) { recordingFile = Window.browse("Load recorcding from file", ".", "Recordings (*.hfr *.rec *.HFR *.REC)"); if (!(recordingFile === "null" || recordingFile === null || recordingFile === "")) { - MyAvatar.loadRecording(recordingFile); + Recording.loadRecording(recordingFile); } - if (MyAvatar.playerLength() > 0) { + if (Recording.playerLength() > 0) { toolBar.setAlpha(ALPHA_ON, playIcon); toolBar.setAlpha(ALPHA_ON, 
playLoopIcon); toolBar.setAlpha(ALPHA_ON, saveIcon); } } - } else if (MyAvatar.playerLength() > 0 && + } else if (Recording.playerLength() > 0 && slider.x < event.x && event.x < slider.x + slider.w && slider.y < event.y && event.y < slider.y + slider.h) { isSliding = true; slider.pos = (event.x - slider.x) / slider.w; - MyAvatar.setPlayerTime(slider.pos * MyAvatar.playerLength()); + Recording.setPlayerTime(slider.pos * Recording.playerLength()); } } var isSliding = false; @@ -296,10 +296,10 @@ function mouseMoveEvent(event) { if (isSliding) { slider.pos = (event.x - slider.x) / slider.w; if (slider.pos < 0.0 || slider.pos > 1.0) { - MyAvatar.stopPlaying(); + Recording.stopPlaying(); slider.pos = 0.0; } - MyAvatar.setPlayerTime(slider.pos * MyAvatar.playerLength()); + Recording.setPlayerTime(slider.pos * Recording.playerLength()); } } @@ -316,7 +316,7 @@ function update() { updateTimer(); - if (watchStop && !MyAvatar.isPlaying()) { + if (watchStop && !Recording.isPlaying()) { watchStop = false; toolBar.setAlpha(ALPHA_ON, recordIcon); toolBar.setAlpha(ALPHA_ON, saveIcon); @@ -325,11 +325,11 @@ function update() { } function scriptEnding() { - if (MyAvatar.isRecording()) { - MyAvatar.stopRecording(); + if (Recording.isRecording()) { + Recording.stopRecording(); } - if (MyAvatar.isPlaying()) { - MyAvatar.stopPlaying(); + if (Recording.isPlaying()) { + Recording.stopPlaying(); } toolBar.cleanup(); Overlays.deleteOverlay(timer); diff --git a/ice-server/src/IceServer.cpp b/ice-server/src/IceServer.cpp index a6a28caa23..f65ff4a8cf 100644 --- a/ice-server/src/IceServer.cpp +++ b/ice-server/src/IceServer.cpp @@ -55,8 +55,7 @@ bool IceServer::packetVersionMatch(const udt::Packet& packet) { if (headerVersion == versionForPacketType(headerType)) { return true; } else { - qDebug() << "Packet version mismatch for packet" << headerType - << "(" << nameForPacketType(headerType) << ") from" << packet.getSenderSockAddr(); + qDebug() << "Packet version mismatch for packet" << 
headerType << " from" << packet.getSenderSockAddr(); return false; } diff --git a/interface/resources/qml/RecorderDialog.qml b/interface/resources/qml/RecorderDialog.qml new file mode 100644 index 0000000000..4f197846aa --- /dev/null +++ b/interface/resources/qml/RecorderDialog.qml @@ -0,0 +1,105 @@ +// +// Created by Bradley Austin Davis on 2015/11/14 +// Copyright 2015 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + + +import Hifi 1.0 +import QtQuick 2.4 +import "controls" +import "styles" + +VrDialog { + id: root + HifiConstants { id: hifi } + + property real spacing: hifi.layout.spacing + property real outerSpacing: hifi.layout.spacing * 2 + + objectName: "RecorderDialog" + + destroyOnInvisible: false + destroyOnCloseButton: false + + contentImplicitWidth: recorderDialog.width + contentImplicitHeight: recorderDialog.height + + RecorderDialog { + id: recorderDialog + x: root.clientX; y: root.clientY + width: 800 + height: 128 + signal play() + signal rewind() + + onPlay: { + console.log("Pressed play") + player.isPlaying = !player.isPlaying + } + + onRewind: { + console.log("Pressed rewind") + player.position = 0 + } + + Row { + height: 32 + ButtonAwesome { + id: cmdRecord + visible: root.showRecordButton + width: 32; height: 32 + text: "\uf111" + iconColor: "red" + onClicked: { + console.log("Pressed record") + status.text = "Recording"; + } + } + } + Text { + id: status + anchors.top: parent.top + anchors.right: parent.right + width: 128 + text: "Idle" + } + + Player { + id: player + y: root.clientY + 64 + height: 64 + anchors.left: parent.left + anchors.right: parent.right + anchors.bottom: parent.bottom + + + +// onClicked: { +// if (recordTimer.running) { +// recordTimer.stop(); +// } +// recordTimer.start(); +// } + Timer { + id: recordTimer; + interval: 1000; running: false; repeat: false + onTriggered: { + console.log("Recording: 
" + MyAvatar.isRecording()) + MyAvatar.startRecording(); + console.log("Recording: " + MyAvatar.isRecording()) + } + } + + } + + Component.onCompleted: { + player.play.connect(play) + player.rewind.connect(rewind) + + } + } +} + diff --git a/interface/resources/qml/controls/ButtonAwesome.qml b/interface/resources/qml/controls/ButtonAwesome.qml new file mode 100644 index 0000000000..47c9fdc742 --- /dev/null +++ b/interface/resources/qml/controls/ButtonAwesome.qml @@ -0,0 +1,21 @@ +import QtQuick 2.3 +import QtQuick.Controls 1.3 as Original +import QtQuick.Controls.Styles 1.3 as OriginalStyles +import "." +import "../styles" + +Original.Button { + property color iconColor: "black" + FontLoader { id: iconFont; source: "../../fonts/fontawesome-webfont.ttf"; } + style: OriginalStyles.ButtonStyle { + label: Text { + renderType: Text.NativeRendering + verticalAlignment: Text.AlignVCenter + horizontalAlignment: Text.AlignHCenter + font.family: iconFont.name + font.pointSize: 20 + color: control.enabled ? control.iconColor : "gray" + text: control.text + } + } +} diff --git a/interface/resources/qml/controls/Player.qml b/interface/resources/qml/controls/Player.qml new file mode 100644 index 0000000000..8af0b1527d --- /dev/null +++ b/interface/resources/qml/controls/Player.qml @@ -0,0 +1,89 @@ +// +// AddressBarDialog.qml +// +// Created by Austin Davis on 2015/04/14 +// Copyright 2015 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +//import Hifi 1.0 +import QtQuick 2.4 +import QtQuick.Controls 1.2 +import QtQuick.Dialogs 1.2 +import QtQuick.Controls.Styles 1.2 +import "../styles" + +Item { + id: root + + signal play() + signal rewind() + + property real duration: 100 + property real position: 50 + property bool isPlaying: false + implicitHeight: 64 + implicitWidth: 640 + + Item { + anchors.top: parent.top + anchors.left: parent.left + anchors.right: parent.right + height: root.height / 2 + Text { + id: labelCurrent + anchors.top: parent.top + anchors.bottom: parent.bottom + anchors.left: parent.left + horizontalAlignment: Text.AlignHCenter + verticalAlignment: Text.AlignVCenter + width: 56 + text: "00:00:00" + } + Slider { + value: root.position / root.duration + anchors.top: parent.top + anchors.topMargin: 2 + anchors.bottomMargin: 2 + anchors.bottom: parent.bottom + anchors.left: labelCurrent.right + anchors.leftMargin: 4 + anchors.right: labelDuration.left + anchors.rightMargin: 4 + } + Text { + id: labelDuration + anchors.top: parent.top + anchors.bottom: parent.bottom + anchors.right: parent.right + horizontalAlignment: Text.AlignHCenter + verticalAlignment: Text.AlignVCenter + width: 56 + text: "00:00:00" + } + } + + Row { + anchors.bottom: parent.bottom + anchors.horizontalCenter: parent.horizontalCenter + height: root.height / 2; + ButtonAwesome { + id: cmdPlay + anchors.top: parent.top + anchors.bottom: parent.bottom + text: isPlaying ? 
"\uf04c" : "\uf04b" + width: root.height / 2; + onClicked: root.play(); + } + ButtonAwesome { + id: cmdRewind + anchors.top: parent.top + anchors.bottom: parent.bottom + width: root.height / 2 + text: "\uf04a" + onClicked: root.rewind(); + } + } +} diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index dd99599d96..96b8ab74a8 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -94,6 +94,8 @@ #include #include #include +#include +#include #include "AnimDebugDraw.h" #include "AudioClient.h" @@ -124,6 +126,7 @@ #include "scripting/LocationScriptingInterface.h" #include "scripting/MenuScriptingInterface.h" #include "scripting/SettingsScriptingInterface.h" +#include "scripting/RecordingScriptingInterface.h" #include "scripting/WebWindowClass.h" #include "scripting/WindowScriptingInterface.h" #include "scripting/ControllerScriptingInterface.h" @@ -132,6 +135,7 @@ #endif #include "Stars.h" #include "ui/AddressBarDialog.h" +#include "ui/RecorderDialog.h" #include "ui/AvatarInputs.h" #include "ui/AssetUploadDialogFactory.h" #include "ui/DataWebDialog.h" @@ -295,6 +299,8 @@ bool setupEssentials(int& argc, char** argv) { Setting::init(); // Set dependencies + DependencyManager::set(); + DependencyManager::set(); DependencyManager::set(); DependencyManager::set(NodeType::Agent, listenPort); DependencyManager::set(); @@ -319,6 +325,7 @@ bool setupEssentials(int& argc, char** argv) { DependencyManager::set(); DependencyManager::set(); DependencyManager::set(); + DependencyManager::set(); DependencyManager::set(); DependencyManager::set(); @@ -804,8 +811,11 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) : void Application::aboutToQuit() { emit beforeAboutToQuit(); + getActiveDisplayPlugin()->deactivate(); + _aboutToQuit = true; + cleanupBeforeQuit(); } @@ -831,8 +841,14 @@ void Application::cleanupBeforeQuit() { _entities.clear(); // this will allow entity scripts to properly shutdown + auto 
nodeList = DependencyManager::get(); + + // send the domain a disconnect packet, force stoppage of domain-server check-ins + nodeList->getDomainHandler().disconnect(); + nodeList->setIsShuttingDown(true); + // tell the packet receiver we're shutting down, so it can drop packets - DependencyManager::get()->getPacketReceiver().setShouldDropPackets(true); + nodeList->getPacketReceiver().setShouldDropPackets(true); _entities.shutdown(); // tell the entities system we're shutting down, so it will stop running scripts ScriptEngine::stopAllScripts(this); // stop all currently running global scripts @@ -852,9 +868,6 @@ void Application::cleanupBeforeQuit() { saveSettings(); _window->saveGeometry(); - // let the avatar mixer know we're out - MyAvatar::sendKillAvatar(); - // stop the AudioClient QMetaObject::invokeMethod(DependencyManager::get().data(), "stop", Qt::BlockingQueuedConnection); @@ -990,6 +1003,7 @@ void Application::initializeGL() { void Application::initializeUi() { AddressBarDialog::registerType(); + RecorderDialog::registerType(); ErrorDialog::registerType(); LoginDialog::registerType(); MessageDialog::registerType(); @@ -1005,6 +1019,7 @@ void Application::initializeUi() { offscreenUi->load("RootMenu.qml"); auto scriptingInterface = DependencyManager::get(); offscreenUi->getRootContext()->setContextProperty("Controller", scriptingInterface.data()); + offscreenUi->getRootContext()->setContextProperty("MyAvatar", getMyAvatar()); _glWidget->installEventFilter(offscreenUi.data()); VrMenu::load(); VrMenu::executeQueuedLambdas(); @@ -1574,8 +1589,9 @@ void Application::keyPressEvent(QKeyEvent* event) { case Qt::Key_X: if (isMeta && isShifted) { - auto offscreenUi = DependencyManager::get(); - offscreenUi->load("TestControllers.qml"); +// auto offscreenUi = DependencyManager::get(); +// offscreenUi->load("TestControllers.qml"); + RecorderDialog::toggle(); } break; @@ -3963,6 +3979,7 @@ void Application::registerScriptEngineWithApplicationServices(ScriptEngine* 
scri RayToOverlayIntersectionResultFromScriptValue); scriptEngine->registerGlobalObject("Desktop", DependencyManager::get().data()); + scriptEngine->registerGlobalObject("Recording", DependencyManager::get().data()); scriptEngine->registerGlobalObject("Window", DependencyManager::get().data()); scriptEngine->registerGetterSetter("location", LocationScriptingInterface::locationGetter, diff --git a/interface/src/avatar/Head.cpp b/interface/src/avatar/Head.cpp index 1a9b8a49e2..b8cf8ab4f1 100644 --- a/interface/src/avatar/Head.cpp +++ b/interface/src/avatar/Head.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include "Application.h" #include "Avatar.h" @@ -91,9 +92,9 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) { if (isMine) { MyAvatar* myAvatar = static_cast(_owningAvatar); - + auto player = DependencyManager::get(); // Only use face trackers when not playing back a recording. - if (!myAvatar->isPlaying()) { + if (!player->isPlaying()) { FaceTracker* faceTracker = qApp->getActiveFaceTracker(); _isFaceTrackerConnected = faceTracker != NULL && !faceTracker->isMuted(); if (_isFaceTrackerConnected) { diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index 278cc5ff81..38eb5042f7 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -80,10 +80,6 @@ const QString& DEFAULT_AVATAR_COLLISION_SOUND_URL = "https://hifi-public.s3.amaz const float MyAvatar::ZOOM_MIN = 0.5f; const float MyAvatar::ZOOM_MAX = 25.0f; const float MyAvatar::ZOOM_DEFAULT = 1.5f; -static const QString HEADER_NAME = "com.highfidelity.recording.AvatarData"; -static recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::TYPE_INVALID; -static std::once_flag frameTypeRegistration; - MyAvatar::MyAvatar(RigPointer rig) : Avatar(rig), @@ -121,17 +117,6 @@ MyAvatar::MyAvatar(RigPointer rig) : { using namespace recording; - std::call_once(frameTypeRegistration, [] { - AVATAR_FRAME_TYPE = 
recording::Frame::registerFrameType(HEADER_NAME); - }); - - // FIXME how to deal with driving multiple avatars locally? - Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this](Frame::ConstPointer frame) { - qDebug() << "Playback of avatar frame length: " << frame->data.size(); - avatarStateFromFrame(frame->data, this); - }); - - for (int i = 0; i < MAX_DRIVE_KEYS; i++) { _driveKeys[i] = 0.0f; } @@ -177,9 +162,8 @@ void MyAvatar::reset(bool andReload) { // Reset dynamic state. _wasPushing = _isPushing = _isBraking = _billboardValid = false; - _isFollowingHMD = false; - _hmdFollowVelocity = Vectors::ZERO; - _hmdFollowSpeed = 0.0f; + _followVelocity = Vectors::ZERO; + _followSpeed = 0.0f; _skeletonModel.reset(); getHead()->reset(); _targetVelocity = glm::vec3(0.0f); @@ -327,8 +311,10 @@ void MyAvatar::simulate(float deltaTime) { } // Record avatars movements. - if (_recorder && _recorder->isRecording()) { - _recorder->recordFrame(AVATAR_FRAME_TYPE, avatarStateToFrame(this)); + auto recorder = DependencyManager::get(); + if (recorder->isRecording()) { + static const recording::FrameType FRAME_TYPE = recording::Frame::registerFrameType(AvatarData::FRAME_NAME); + recorder->recordFrame(FRAME_TYPE, toFrame(*this)); } // consider updating our billboard @@ -352,52 +338,40 @@ void MyAvatar::updateFromHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) { void MyAvatar::updateHMDFollowVelocity() { // compute offset to body's target position (in sensor-frame) auto sensorBodyMatrix = deriveBodyFromHMDSensor(); - _hmdFollowOffset = extractTranslation(sensorBodyMatrix) - extractTranslation(_bodySensorMatrix); - glm::vec3 truncatedOffset = _hmdFollowOffset; - if (truncatedOffset.y < 0.0f) { - // don't pull the body DOWN to match the target (allow animation system to squat) - truncatedOffset.y = 0.0f; - } - float truncatedOffsetDistance = glm::length(truncatedOffset); + glm::vec3 offset = extractTranslation(sensorBodyMatrix) - extractTranslation(_bodySensorMatrix); + 
_followOffsetDistance = glm::length(offset); + + const float FOLLOW_TIMESCALE = 0.5f; + const float FOLLOW_THRESHOLD_SPEED = 0.2f; + const float FOLLOW_MIN_DISTANCE = 0.01f; + const float FOLLOW_THRESHOLD_DISTANCE = 0.2f; + const float FOLLOW_MAX_IDLE_DISTANCE = 0.1f; - bool isMoving; - if (_lastIsMoving) { - const float MOVE_EXIT_SPEED_THRESHOLD = 0.07f; // m/sec - isMoving = glm::length(_velocity) >= MOVE_EXIT_SPEED_THRESHOLD; - } else { - const float MOVE_ENTER_SPEED_THRESHOLD = 0.2f; // m/sec - isMoving = glm::length(_velocity) > MOVE_ENTER_SPEED_THRESHOLD; - } - bool justStartedMoving = (_lastIsMoving != isMoving) && isMoving; - _lastIsMoving = isMoving; bool hmdIsAtRest = _hmdAtRestDetector.update(_hmdSensorPosition, _hmdSensorOrientation); - const float MIN_HMD_HIP_SHIFT = 0.05f; - if (justStartedMoving || (hmdIsAtRest && truncatedOffsetDistance > MIN_HMD_HIP_SHIFT)) { - _isFollowingHMD = true; - } - bool needNewFollowSpeed = (_isFollowingHMD && _hmdFollowSpeed == 0.0f); - if (!needNewFollowSpeed) { - // check to see if offset has exceeded its threshold - const float MAX_HMD_HIP_SHIFT = 0.2f; - if (truncatedOffsetDistance > MAX_HMD_HIP_SHIFT) { - _isFollowingHMD = true; - needNewFollowSpeed = true; + _followOffsetDistance = glm::length(offset); + if (_followOffsetDistance < FOLLOW_MIN_DISTANCE) { + // close enough + _followOffsetDistance = 0.0f; + } else { + bool avatarIsMoving = glm::length(_velocity - _followVelocity) > FOLLOW_THRESHOLD_SPEED; + bool shouldFollow = (hmdIsAtRest || avatarIsMoving) && _followOffsetDistance > FOLLOW_MAX_IDLE_DISTANCE; + + glm::vec3 truncatedOffset = offset; + if (truncatedOffset.y < 0.0f) { + truncatedOffset.y = 0.0f; + } + float truncatedDistance = glm::length(truncatedOffset); + bool needsNewSpeed = truncatedDistance > FOLLOW_THRESHOLD_DISTANCE; + if (needsNewSpeed || (shouldFollow && _followSpeed == 0.0f)) { + // compute new speed + _followSpeed = _followOffsetDistance / FOLLOW_TIMESCALE; + } + if (_followSpeed > 0.0f) { + 
// to compute new velocity we must rotate offset into the world-frame + glm::quat sensorToWorldRotation = extractRotation(_sensorToWorldMatrix); + _followVelocity = _followSpeed * glm::normalize(sensorToWorldRotation * offset); } - } - if (_isFollowingHMD) { - // only bother to rotate into world frame if we're following - glm::quat sensorToWorldRotation = extractRotation(_sensorToWorldMatrix); - _hmdFollowOffset = sensorToWorldRotation * _hmdFollowOffset; - } - if (needNewFollowSpeed) { - // compute new velocity that will be used to resolve offset of hips from body - const float FOLLOW_HMD_DURATION = 0.5f; // seconds - _hmdFollowVelocity = (_hmdFollowOffset / FOLLOW_HMD_DURATION); - _hmdFollowSpeed = glm::length(_hmdFollowVelocity); - } else if (_isFollowingHMD) { - // compute new velocity (but not new speed) - _hmdFollowVelocity = _hmdFollowSpeed * glm::normalize(_hmdFollowOffset); } } @@ -416,8 +390,8 @@ void MyAvatar::updateFromTrackers(float deltaTime) { glm::vec3 estimatedPosition, estimatedRotation; bool inHmd = qApp->getAvatarUpdater()->isHMDMode(); - - if (isPlaying() && inHmd) { + bool playing = DependencyManager::get()->isPlaying(); + if (inHmd && playing) { return; } @@ -468,7 +442,7 @@ void MyAvatar::updateFromTrackers(float deltaTime) { Head* head = getHead(); - if (inHmd || isPlaying()) { + if (inHmd || playing) { head->setDeltaPitch(estimatedRotation.x); head->setDeltaYaw(estimatedRotation.y); head->setDeltaRoll(estimatedRotation.z); @@ -585,102 +559,6 @@ bool MyAvatar::setJointReferential(const QUuid& id, int jointIndex) { } } -bool MyAvatar::isRecording() { - if (!_recorder) { - return false; - } - if (QThread::currentThread() != thread()) { - bool result; - QMetaObject::invokeMethod(this, "isRecording", Qt::BlockingQueuedConnection, - Q_RETURN_ARG(bool, result)); - return result; - } - return _recorder && _recorder->isRecording(); -} - -float MyAvatar::recorderElapsed() { - if (QThread::currentThread() != thread()) { - float result; - 
QMetaObject::invokeMethod(this, "recorderElapsed", Qt::BlockingQueuedConnection, - Q_RETURN_ARG(float, result)); - return result; - } - if (!_recorder) { - return 0; - } - return (float)_recorder->position() / (float) MSECS_PER_SECOND; -} - -QMetaObject::Connection _audioClientRecorderConnection; - -void MyAvatar::startRecording() { - if (QThread::currentThread() != thread()) { - QMetaObject::invokeMethod(this, "startRecording", Qt::BlockingQueuedConnection); - return; - } - - _recorder = std::make_shared(); - // connect to AudioClient's signal so we get input audio - auto audioClient = DependencyManager::get(); - _audioClientRecorderConnection = connect(audioClient.data(), &AudioClient::inputReceived, [] { - // FIXME, missing audio data handling - }); - setRecordingBasis(); - _recorder->start(); -} - -void MyAvatar::stopRecording() { - if (!_recorder) { - return; - } - if (QThread::currentThread() != thread()) { - QMetaObject::invokeMethod(this, "stopRecording", Qt::BlockingQueuedConnection); - return; - } - if (_recorder) { - QObject::disconnect(_audioClientRecorderConnection); - _audioClientRecorderConnection = QMetaObject::Connection(); - _recorder->stop(); - clearRecordingBasis(); - } -} - -void MyAvatar::saveRecording(const QString& filename) { - if (!_recorder) { - qCDebug(interfaceapp) << "There is no recording to save"; - return; - } - if (QThread::currentThread() != thread()) { - QMetaObject::invokeMethod(this, "saveRecording", Qt::BlockingQueuedConnection, - Q_ARG(QString, filename)); - return; - } - - if (_recorder) { - auto clip = _recorder->getClip(); - recording::Clip::toFile(filename, clip); - } -} - -void MyAvatar::loadLastRecording() { - if (QThread::currentThread() != thread()) { - QMetaObject::invokeMethod(this, "loadLastRecording", Qt::BlockingQueuedConnection); - return; - } - - if (!_recorder || !_recorder->getClip()) { - qCDebug(interfaceapp) << "There is no recording to load"; - return; - } - - if (!_player) { - _player = 
std::make_shared(); - } - - _player->queueClip(_recorder->getClip()); - _player->play(); -} - void MyAvatar::startAnimation(const QString& url, float fps, float priority, bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) { if (QThread::currentThread() != thread()) { @@ -1024,11 +902,6 @@ int MyAvatar::parseDataFromBuffer(const QByteArray& buffer) { return buffer.size(); } -void MyAvatar::sendKillAvatar() { - auto killPacket = NLPacket::create(PacketType::KillAvatar, 0); - DependencyManager::get()->broadcastToNodes(std::move(killPacket), NodeSet() << NodeType::AvatarMixer); -} - void MyAvatar::updateLookAtTargetAvatar() { // // Look at the avatar whose eyes are closest to the ray in direction of my avatar's head @@ -1300,11 +1173,11 @@ void MyAvatar::prepareForPhysicsSimulation() { _characterController.setAvatarPositionAndOrientation(getPosition(), getOrientation()); if (qApp->isHMDMode()) { updateHMDFollowVelocity(); - } else if (_isFollowingHMD) { - _isFollowingHMD = false; - _hmdFollowVelocity = Vectors::ZERO; + } else if (_followSpeed > 0.0f) { + _followVelocity = Vectors::ZERO; + _followSpeed = 0.0f; } - _characterController.setHMDVelocity(_hmdFollowVelocity); + _characterController.setFollowVelocity(_followVelocity); } void MyAvatar::harvestResultsFromPhysicsSimulation() { @@ -1312,35 +1185,27 @@ void MyAvatar::harvestResultsFromPhysicsSimulation() { glm::quat orientation = getOrientation(); _characterController.getAvatarPositionAndOrientation(position, orientation); nextAttitude(position, orientation); - if (_isFollowingHMD) { - setVelocity(_characterController.getLinearVelocity() + _hmdFollowVelocity); - glm::vec3 hmdShift = _characterController.getHMDShift(); - adjustSensorTransform(hmdShift); + if (_followSpeed > 0.0f) { + adjustSensorTransform(); + setVelocity(_characterController.getLinearVelocity() + _followVelocity); } else { setVelocity(_characterController.getLinearVelocity()); } } -void 
MyAvatar::adjustSensorTransform(glm::vec3 hmdShift) { +void MyAvatar::adjustSensorTransform() { // compute blendFactor of latest hmdShift // which we'll use to blend the rotation part - float blendFactor = 1.0f; - float shiftLength = glm::length(hmdShift); - if (shiftLength > 1.0e-5f) { - float offsetLength = glm::length(_hmdFollowOffset); - if (offsetLength > shiftLength) { - blendFactor = shiftLength / offsetLength; - } - } + float linearDistance = _characterController.getFollowTime() * _followSpeed; + float blendFactor = linearDistance < _followOffsetDistance ? linearDistance / _followOffsetDistance : 1.0f; auto newBodySensorMatrix = deriveBodyFromHMDSensor(); auto worldBodyMatrix = _sensorToWorldMatrix * newBodySensorMatrix; glm::quat finalBodyRotation = glm::normalize(glm::quat_cast(worldBodyMatrix)); if (blendFactor >= 0.99f) { // the "adjustment" is more or less complete so stop following - _isFollowingHMD = false; - _hmdFollowSpeed = 0.0f; - _hmdFollowVelocity = Vectors::ZERO; + _followVelocity = Vectors::ZERO; + _followSpeed = 0.0f; // and slam the body's transform anyway to eliminate any slight errors glm::vec3 finalBodyPosition = extractTranslation(worldBodyMatrix); nextAttitude(finalBodyPosition, finalBodyRotation); @@ -1520,6 +1385,9 @@ void MyAvatar::initAnimGraph() { QUrl::fromLocalFile(PathUtils::resourcesPath() + "meshes/defaultAvatar_full/avatar-animation.json") : _animGraphUrl); _rig->initAnimGraph(graphUrl, _skeletonModel.getGeometry()->getFBXGeometry()); + + _bodySensorMatrix = deriveBodyFromHMDSensor(); // Based on current cached HMD position/rotation.. + updateSensorToWorldMatrix(); // Uses updated position/orientation and _bodySensorMatrix changes } void MyAvatar::destroyAnimGraph() { @@ -1994,53 +1862,19 @@ glm::quat MyAvatar::getWorldBodyOrientation() const { // derive avatar body position and orientation from the current HMD Sensor location. 
// results are in sensor space glm::mat4 MyAvatar::deriveBodyFromHMDSensor() const { - - // HMD is in sensor space. - const glm::vec3 hmdPosition = getHMDSensorPosition(); - const glm::quat hmdOrientation = getHMDSensorOrientation(); - const glm::quat hmdOrientationYawOnly = cancelOutRollAndPitch(hmdOrientation); - - const glm::vec3 DEFAULT_RIGHT_EYE_POS(-0.3f, 1.6f, 0.0f); - const glm::vec3 DEFAULT_LEFT_EYE_POS(0.3f, 1.6f, 0.0f); - const glm::vec3 DEFAULT_NECK_POS(0.0f, 1.5f, 0.0f); - const glm::vec3 DEFAULT_HIPS_POS(0.0f, 1.0f, 0.0f); - - vec3 localEyes, localNeck; - if (!_debugDrawSkeleton) { - const glm::quat rotY180 = glm::angleAxis((float)PI, glm::vec3(0.0f, 1.0f, 0.0f)); - localEyes = rotY180 * (((DEFAULT_RIGHT_EYE_POS + DEFAULT_LEFT_EYE_POS) / 2.0f) - DEFAULT_HIPS_POS); - localNeck = rotY180 * (DEFAULT_NECK_POS - DEFAULT_HIPS_POS); - } else { - // TODO: At the moment MyAvatar does not have access to the rig, which has the skeleton, which has the bind poses. - // for now use the _debugDrawSkeleton, which is initialized with the same FBX model as the rig. - - // TODO: cache these indices. - int rightEyeIndex = _debugDrawSkeleton->nameToJointIndex("RightEye"); - int leftEyeIndex = _debugDrawSkeleton->nameToJointIndex("LeftEye"); - int neckIndex = _debugDrawSkeleton->nameToJointIndex("Neck"); - int hipsIndex = _debugDrawSkeleton->nameToJointIndex("Hips"); - - glm::vec3 absRightEyePos = rightEyeIndex != -1 ? _debugDrawSkeleton->getAbsoluteBindPose(rightEyeIndex).trans : DEFAULT_RIGHT_EYE_POS; - glm::vec3 absLeftEyePos = leftEyeIndex != -1 ? _debugDrawSkeleton->getAbsoluteBindPose(leftEyeIndex).trans : DEFAULT_LEFT_EYE_POS; - glm::vec3 absNeckPos = neckIndex != -1 ? _debugDrawSkeleton->getAbsoluteBindPose(neckIndex).trans : DEFAULT_NECK_POS; - glm::vec3 absHipsPos = neckIndex != -1 ? 
_debugDrawSkeleton->getAbsoluteBindPose(hipsIndex).trans : DEFAULT_HIPS_POS; - - const glm::quat rotY180 = glm::angleAxis((float)PI, glm::vec3(0.0f, 1.0f, 0.0f)); - localEyes = rotY180 * (((absRightEyePos + absLeftEyePos) / 2.0f) - absHipsPos); - localNeck = rotY180 * (absNeckPos - absHipsPos); + if (_rig) { + // orientation + const glm::quat hmdOrientation = getHMDSensorOrientation(); + const glm::quat yaw = cancelOutRollAndPitch(hmdOrientation); + // position + // we flip about yAxis when going from "root" to "avatar" frame + // and we must also apply "yaw" to get into HMD frame + glm::quat rotY180 = glm::angleAxis((float)M_PI, glm::vec3(0.0f, 1.0f, 0.0f)); + glm::vec3 eyesInAvatarFrame = rotY180 * yaw * _rig->getEyesInRootFrame(); + glm::vec3 bodyPos = getHMDSensorPosition() - eyesInAvatarFrame; + return createMatFromQuatAndPos(yaw, bodyPos); } - - // apply simplistic head/neck model - // figure out where the avatar body should be by applying offsets from the avatar's neck & head joints. - - // eyeToNeck offset is relative full HMD orientation. - // while neckToRoot offset is only relative to HMDs yaw. - glm::vec3 eyeToNeck = hmdOrientation * (localNeck - localEyes); - glm::vec3 neckToRoot = hmdOrientationYawOnly * -localNeck; - glm::vec3 bodyPos = hmdPosition + eyeToNeck + neckToRoot; - - // avatar facing is determined solely by hmd orientation. 
- return createMatFromQuatAndPos(hmdOrientationYawOnly, bodyPos); + return glm::mat4(); } glm::vec3 MyAvatar::getPositionForAudio() { diff --git a/interface/src/avatar/MyAvatar.h b/interface/src/avatar/MyAvatar.h index 52f1ffce3f..16c3abec3e 100644 --- a/interface/src/avatar/MyAvatar.h +++ b/interface/src/avatar/MyAvatar.h @@ -162,8 +162,6 @@ public: eyeContactTarget getEyeContactTarget(); - static void sendKillAvatar(); - Q_INVOKABLE glm::vec3 getTrackedHeadPosition() const { return _trackedHeadPosition; } Q_INVOKABLE glm::vec3 getHeadPosition() const { return getHead()->getPosition(); } Q_INVOKABLE float getHeadFinalYaw() const { return getHead()->getFinalYaw(); } @@ -208,7 +206,7 @@ public: void prepareForPhysicsSimulation(); void harvestResultsFromPhysicsSimulation(); - void adjustSensorTransform(glm::vec3 hmdShift); + void adjustSensorTransform(); const QString& getCollisionSoundURL() { return _collisionSoundURL; } void setCollisionSoundURL(const QString& url); @@ -256,13 +254,6 @@ public slots: bool setModelReferential(const QUuid& id); bool setJointReferential(const QUuid& id, int jointIndex); - bool isRecording(); - float recorderElapsed(); - void startRecording(); - void stopRecording(); - void saveRecording(const QString& filename); - void loadLastRecording(); - virtual void rebuildSkeletonBody() override; bool getEnableRigAnimations() const { return _rig->getEnableRig(); } @@ -311,9 +302,6 @@ private: const glm::vec3& translation = glm::vec3(), const glm::quat& rotation = glm::quat(), float scale = 1.0f, bool allowDuplicates = false, bool useSaved = true) override; - const recording::RecorderPointer getRecorder() const { return _recorder; } - const recording::DeckPointer getPlayer() const { return _player; } - //void beginFollowingHMD(); //bool shouldFollowHMD() const; //void followHMD(float deltaTime); @@ -330,7 +318,7 @@ private: PalmData getActivePalmData(int palmIndex) const; // derive avatar body position and orientation from the current HMD Sensor 
location. - // results are in sensor space + // results are in HMD frame glm::mat4 deriveBodyFromHMDSensor() const; float _driveKeys[MAX_DRIVE_KEYS]; @@ -360,8 +348,6 @@ private: eyeContactTarget _eyeContactTarget; - recording::RecorderPointer _recorder; - glm::vec3 _trackedHeadPosition; Setting::Handle _realWorldFieldOfView; @@ -394,9 +380,10 @@ private: // used to transform any sensor into world space, including the _hmdSensorMat, or hand controllers. glm::mat4 _sensorToWorldMatrix; - glm::vec3 _hmdFollowOffset { Vectors::ZERO }; - glm::vec3 _hmdFollowVelocity { Vectors::ZERO }; - float _hmdFollowSpeed { 0.0f }; + + glm::vec3 _followVelocity { Vectors::ZERO }; + float _followSpeed { 0.0f }; + float _followOffsetDistance { 0.0f }; bool _goToPending; glm::vec3 _goToPosition; @@ -414,9 +401,6 @@ private: glm::vec3 _customListenPosition; glm::quat _customListenOrientation; - bool _isFollowingHMD { false }; - float _followHMDAlpha { 0.0f }; - AtRestDetector _hmdAtRestDetector; bool _lastIsMoving { false }; }; diff --git a/interface/src/avatar/MyCharacterController.cpp b/interface/src/avatar/MyCharacterController.cpp index ad2ca32b05..e8f686da6f 100644 --- a/interface/src/avatar/MyCharacterController.cpp +++ b/interface/src/avatar/MyCharacterController.cpp @@ -60,7 +60,7 @@ MyCharacterController::MyCharacterController(MyAvatar* avatar) { _floorDistance = MAX_FALL_HEIGHT; _walkVelocity.setValue(0.0f, 0.0f, 0.0f); - _hmdVelocity.setValue(0.0f, 0.0f, 0.0f); + _followVelocity.setValue(0.0f, 0.0f, 0.0f); _jumpSpeed = JUMP_SPEED; _isOnGround = false; _isJumping = false; @@ -68,7 +68,7 @@ MyCharacterController::MyCharacterController(MyAvatar* avatar) { _isHovering = true; _isPushingUp = false; _jumpToHoverStart = 0; - _lastStepDuration = 0.0f; + _followTime = 0.0f; _pendingFlags = PENDING_FLAG_UPDATE_SHAPE; updateShapeIfNecessary(); @@ -161,16 +161,14 @@ void MyCharacterController::playerStep(btCollisionWorld* dynaWorld, btScalar dt) } } - // Rather than add _hmdVelocity to 
the velocity of the RigidBody, we explicitly teleport + // Rather than add _followVelocity to the velocity of the RigidBody, we explicitly teleport // the RigidBody forward according to the formula: distance = rate * time - if (_hmdVelocity.length2() > 0.0f) { + if (_followVelocity.length2() > 0.0f) { btTransform bodyTransform = _rigidBody->getWorldTransform(); - bodyTransform.setOrigin(bodyTransform.getOrigin() + dt * _hmdVelocity); + bodyTransform.setOrigin(bodyTransform.getOrigin() + dt * _followVelocity); _rigidBody->setWorldTransform(bodyTransform); } - // MyAvatar will ask us how far we stepped for HMD motion, which will depend on how - // much time has accumulated in _lastStepDuration. - _lastStepDuration += dt; + _followTime += dt; } void MyCharacterController::jump() { @@ -346,8 +344,8 @@ void MyCharacterController::setTargetVelocity(const glm::vec3& velocity) { _walkVelocity = glmToBullet(velocity); } -void MyCharacterController::setHMDVelocity(const glm::vec3& velocity) { - _hmdVelocity = glmToBullet(velocity); +void MyCharacterController::setFollowVelocity(const glm::vec3& velocity) { + _followVelocity = glmToBullet(velocity); } glm::vec3 MyCharacterController::getLinearVelocity() const { @@ -400,7 +398,7 @@ void MyCharacterController::preSimulation() { } } } - _lastStepDuration = 0.0f; + _followTime = 0.0f; } void MyCharacterController::postSimulation() { diff --git a/interface/src/avatar/MyCharacterController.h b/interface/src/avatar/MyCharacterController.h index de711c84f4..82aa958309 100644 --- a/interface/src/avatar/MyCharacterController.h +++ b/interface/src/avatar/MyCharacterController.h @@ -64,8 +64,8 @@ public: void getAvatarPositionAndOrientation(glm::vec3& position, glm::quat& rotation) const; void setTargetVelocity(const glm::vec3& velocity); - void setHMDVelocity(const glm::vec3& velocity); - glm::vec3 getHMDShift() const { return _lastStepDuration * bulletToGLM(_hmdVelocity); } + void setFollowVelocity(const glm::vec3& velocity); + float 
getFollowTime() const { return _followTime; } glm::vec3 getLinearVelocity() const; @@ -75,7 +75,7 @@ protected: protected: btVector3 _currentUp; btVector3 _walkVelocity; - btVector3 _hmdVelocity; + btVector3 _followVelocity; btTransform _avatarBodyTransform; glm::vec3 _shapeLocalOffset; @@ -93,7 +93,7 @@ protected: btScalar _gravity; btScalar _jumpSpeed; - btScalar _lastStepDuration; + btScalar _followTime; bool _enabled; bool _isOnGround; diff --git a/interface/src/avatar/SkeletonModel.cpp b/interface/src/avatar/SkeletonModel.cpp index 1347c69d61..83c8cdfcf5 100644 --- a/interface/src/avatar/SkeletonModel.cpp +++ b/interface/src/avatar/SkeletonModel.cpp @@ -13,6 +13,7 @@ #include #include +#include #include "Application.h" #include "Avatar.h" @@ -247,8 +248,8 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) { } MyAvatar* myAvatar = static_cast(_owningAvatar); - if (myAvatar->isPlaying()) { - // Don't take inputs if playing back a recording. + auto player = DependencyManager::get(); + if (player->isPlaying()) { return; } diff --git a/interface/src/scripting/RecordingScriptingInterface.cpp b/interface/src/scripting/RecordingScriptingInterface.cpp new file mode 100644 index 0000000000..bf585f5481 --- /dev/null +++ b/interface/src/scripting/RecordingScriptingInterface.cpp @@ -0,0 +1,258 @@ +// +// Created by Bradley Austin Davis on 2015/11/13 +// Copyright 2015 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include "RecordingScriptingInterface.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "avatar/AvatarManager.h" +#include "Application.h" +#include "InterfaceLogging.h" + +typedef int16_t AudioSample; + + +using namespace recording; + +// FIXME move to somewhere audio related? 
+static const QString AUDIO_FRAME_NAME = "com.highfidelity.recording.Audio"; + +RecordingScriptingInterface::RecordingScriptingInterface() { + static const recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::registerFrameType(AvatarData::FRAME_NAME); + // FIXME how to deal with driving multiple avatars locally? + Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this](Frame::ConstPointer frame) { + processAvatarFrame(frame); + }); + + static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AUDIO_FRAME_NAME); + Frame::registerFrameHandler(AUDIO_FRAME_TYPE, [this](Frame::ConstPointer frame) { + processAudioFrame(frame); + }); + + _player = DependencyManager::get(); + _recorder = DependencyManager::get(); + + auto audioClient = DependencyManager::get(); + connect(audioClient.data(), &AudioClient::inputReceived, this, &RecordingScriptingInterface::processAudioInput); +} + +bool RecordingScriptingInterface::isPlaying() { + return _player->isPlaying(); +} + +bool RecordingScriptingInterface::isPaused() { + return _player->isPaused(); +} + +float RecordingScriptingInterface::playerElapsed() { + return (float)_player->position() / MSECS_PER_SECOND; +} + +float RecordingScriptingInterface::playerLength() { + return _player->length() / MSECS_PER_SECOND; +} + +void RecordingScriptingInterface::loadRecording(const QString& filename) { + using namespace recording; + + if (QThread::currentThread() != thread()) { + QMetaObject::invokeMethod(this, "loadRecording", Qt::BlockingQueuedConnection, + Q_ARG(QString, filename)); + return; + } + + ClipPointer clip = Clip::fromFile(filename); + if (!clip) { + qWarning() << "Unable to load clip data from " << filename; + } + _player->queueClip(clip); +} + +void RecordingScriptingInterface::startPlaying() { + if (QThread::currentThread() != thread()) { + QMetaObject::invokeMethod(this, "startPlaying", Qt::BlockingQueuedConnection); + return; + } + auto myAvatar = DependencyManager::get()->getMyAvatar(); 
+ // Playback from the current position + if (_playFromCurrentLocation) { + _dummyAvatar.setRecordingBasis(std::make_shared(myAvatar->getTransform())); + } else { + _dummyAvatar.clearRecordingBasis(); + } + _player->play(); +} + +void RecordingScriptingInterface::setPlayerVolume(float volume) { + // FIXME +} + +void RecordingScriptingInterface::setPlayerAudioOffset(float audioOffset) { + // FIXME +} + +void RecordingScriptingInterface::setPlayerTime(float time) { + _player->seek(time * MSECS_PER_SECOND); +} + +void RecordingScriptingInterface::setPlayFromCurrentLocation(bool playFromCurrentLocation) { + _playFromCurrentLocation = playFromCurrentLocation; +} + +void RecordingScriptingInterface::setPlayerLoop(bool loop) { + _player->loop(loop); +} + +void RecordingScriptingInterface::setPlayerUseDisplayName(bool useDisplayName) { + _useDisplayName = useDisplayName; +} + +void RecordingScriptingInterface::setPlayerUseAttachments(bool useAttachments) { + _useAttachments = useAttachments; +} + +void RecordingScriptingInterface::setPlayerUseHeadModel(bool useHeadModel) { + _useHeadModel = useHeadModel; +} + +void RecordingScriptingInterface::setPlayerUseSkeletonModel(bool useSkeletonModel) { + _useSkeletonModel = useSkeletonModel; +} + +void RecordingScriptingInterface::play() { + _player->play(); +} + +void RecordingScriptingInterface::pausePlayer() { + _player->pause(); +} + +void RecordingScriptingInterface::stopPlaying() { + _player->stop(); +} + +bool RecordingScriptingInterface::isRecording() { + return _recorder->isRecording(); +} + +float RecordingScriptingInterface::recorderElapsed() { + return _recorder->position(); +} + +void RecordingScriptingInterface::startRecording() { + if (_recorder->isRecording()) { + qCWarning(interfaceapp) << "Recorder is already running"; + return; + } + + if (QThread::currentThread() != thread()) { + QMetaObject::invokeMethod(this, "startRecording", Qt::BlockingQueuedConnection); + return; + } + + _recordingEpoch = 
Frame::epochForFrameTime(0); + + auto myAvatar = DependencyManager::get()->getMyAvatar(); + myAvatar->setRecordingBasis(); + _recorder->start(); +} + +void RecordingScriptingInterface::stopRecording() { + _recorder->stop(); + + _lastClip = _recorder->getClip(); + // post-process the audio into discrete chunks based on times of received samples + _lastClip->seek(0); + Frame::ConstPointer frame; + while (frame = _lastClip->nextFrame()) { + qDebug() << "Frame time " << frame->timeOffset << " size " << frame->data.size(); + } + _lastClip->seek(0); + + auto myAvatar = DependencyManager::get()->getMyAvatar(); + myAvatar->clearRecordingBasis(); +} + +void RecordingScriptingInterface::saveRecording(const QString& filename) { + if (QThread::currentThread() != thread()) { + QMetaObject::invokeMethod(this, "saveRecording", Qt::BlockingQueuedConnection, + Q_ARG(QString, filename)); + return; + } + + if (!_lastClip) { + qWarning() << "There is no recording to save"; + return; + } + + recording::Clip::toFile(filename, _lastClip); +} + +void RecordingScriptingInterface::loadLastRecording() { + if (QThread::currentThread() != thread()) { + QMetaObject::invokeMethod(this, "loadLastRecording", Qt::BlockingQueuedConnection); + return; + } + + if (!_lastClip) { + qCDebug(interfaceapp) << "There is no recording to load"; + return; + } + + _player->queueClip(_lastClip); + _player->play(); +} + +void RecordingScriptingInterface::processAvatarFrame(const Frame::ConstPointer& frame) { + Q_ASSERT(QThread::currentThread() == thread()); + + AvatarData::fromFrame(frame->data, _dummyAvatar); + + auto myAvatar = DependencyManager::get()->getMyAvatar(); + if (_useHeadModel && _dummyAvatar.getFaceModelURL().isValid() && + (_dummyAvatar.getFaceModelURL() != myAvatar->getFaceModelURL())) { + // FIXME + //myAvatar->setFaceModelURL(_dummyAvatar.getFaceModelURL()); + } + + if (_useSkeletonModel && _dummyAvatar.getSkeletonModelURL().isValid() && + (_dummyAvatar.getSkeletonModelURL() != 
myAvatar->getSkeletonModelURL())) { + // FIXME + //myAvatar->useFullAvatarURL() + } + + if (_useDisplayName && _dummyAvatar.getDisplayName() != myAvatar->getDisplayName()) { + myAvatar->setDisplayName(_dummyAvatar.getDisplayName()); + } + + myAvatar->setPosition(_dummyAvatar.getPosition()); + myAvatar->setOrientation(_dummyAvatar.getOrientation()); + + // FIXME attachments + // FIXME joints + // FIXME head lean + // FIXME head orientation +} + +void RecordingScriptingInterface::processAudioInput(const QByteArray& audio) { + if (_recorder->isRecording()) { + static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AUDIO_FRAME_NAME); + _recorder->recordFrame(AUDIO_FRAME_TYPE, audio); + } +} + +void RecordingScriptingInterface::processAudioFrame(const recording::FrameConstPointer& frame) { + auto audioClient = DependencyManager::get(); + audioClient->handleRecordedAudioInput(frame->data); +} diff --git a/interface/src/scripting/RecordingScriptingInterface.h b/interface/src/scripting/RecordingScriptingInterface.h new file mode 100644 index 0000000000..510a4b6898 --- /dev/null +++ b/interface/src/scripting/RecordingScriptingInterface.h @@ -0,0 +1,79 @@ +// +// Created by Bradley Austin Davis on 2015/11/13 +// Copyright 2015 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_RecordingScriptingInterface_h +#define hifi_RecordingScriptingInterface_h + +#include + +#include + +#include +#include +#include +#include + +class RecordingScriptingInterface : public QObject, public Dependency { + Q_OBJECT + +public: + RecordingScriptingInterface(); + +public slots: + bool isPlaying(); + bool isPaused(); + float playerElapsed(); + float playerLength(); + void loadRecording(const QString& filename); + void startPlaying(); + void setPlayerVolume(float volume); + void setPlayerAudioOffset(float audioOffset); + void setPlayerTime(float time); + void setPlayFromCurrentLocation(bool playFromCurrentLocation); + void setPlayerLoop(bool loop); + void setPlayerUseDisplayName(bool useDisplayName); + void setPlayerUseAttachments(bool useAttachments); + void setPlayerUseHeadModel(bool useHeadModel); + void setPlayerUseSkeletonModel(bool useSkeletonModel); + void play(); + void pausePlayer(); + void stopPlaying(); + bool isRecording(); + float recorderElapsed(); + void startRecording(); + void stopRecording(); + void saveRecording(const QString& filename); + void loadLastRecording(); + +signals: + void playbackStateChanged(); + // Should this occur for any frame or just for seek calls? 
+ void playbackPositionChanged(); + void looped(); + +private: + using Mutex = std::recursive_mutex; + using Locker = std::unique_lock; + using Flag = std::atomic; + void processAvatarFrame(const recording::FrameConstPointer& frame); + void processAudioFrame(const recording::FrameConstPointer& frame); + void processAudioInput(const QByteArray& audioData); + QSharedPointer _player; + QSharedPointer _recorder; + quint64 _recordingEpoch { 0 }; + + Flag _playFromCurrentLocation { true }; + Flag _useDisplayName { false }; + Flag _useHeadModel { false }; + Flag _useAttachments { false }; + Flag _useSkeletonModel { false }; + recording::ClipPointer _lastClip; + AvatarData _dummyAvatar; +}; + +#endif // hifi_RecordingScriptingInterface_h diff --git a/interface/src/ui/RecorderDialog.cpp b/interface/src/ui/RecorderDialog.cpp new file mode 100644 index 0000000000..ddefa9fbd9 --- /dev/null +++ b/interface/src/ui/RecorderDialog.cpp @@ -0,0 +1,22 @@ +// +// Created by Bradley Austin Davis on 2015/11/14 +// Copyright 2015 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include "RecorderDialog.h" + +#include + +#include "DependencyManager.h" + +HIFI_QML_DEF(RecorderDialog) + +RecorderDialog::RecorderDialog(QQuickItem* parent) : OffscreenQmlDialog(parent) { +} + +void RecorderDialog::hide() { + ((QQuickItem*)parent())->setEnabled(false); +} diff --git a/interface/src/ui/RecorderDialog.h b/interface/src/ui/RecorderDialog.h new file mode 100644 index 0000000000..f4f0a7c2d8 --- /dev/null +++ b/interface/src/ui/RecorderDialog.h @@ -0,0 +1,28 @@ +// +// Created by Bradley Austin Davis on 2015/11/14 +// Copyright 2015 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#pragma once +#ifndef hifi_RecorderDialog_h +#define hifi_RecorderDialog_h + +#include + +class RecorderDialog : public OffscreenQmlDialog { + Q_OBJECT + HIFI_QML_DECL + +public: + RecorderDialog(QQuickItem* parent = nullptr); + +signals: + +protected: + void hide(); +}; + +#endif diff --git a/libraries/animation/src/Rig.cpp b/libraries/animation/src/Rig.cpp index 7926b268b5..9b6221a370 100644 --- a/libraries/animation/src/Rig.cpp +++ b/libraries/animation/src/Rig.cpp @@ -407,6 +407,24 @@ void Rig::calcAnimAlpha(float speed, const std::vector& referenceSpeeds, *alphaOut = alpha; } +void Rig::computeEyesInRootFrame(const AnimPoseVec& poses) { + // TODO: use cached eye/hips indices for these calculations + int numPoses = poses.size(); + int rightEyeIndex = _animSkeleton->nameToJointIndex(QString("RightEye")); + int leftEyeIndex = _animSkeleton->nameToJointIndex(QString("LeftEye")); + if (numPoses > rightEyeIndex && numPoses > leftEyeIndex + && rightEyeIndex > 0 && leftEyeIndex > 0) { + int hipsIndex = _animSkeleton->nameToJointIndex(QString("Hips")); + int headIndex = _animSkeleton->nameToJointIndex(QString("Head")); + if (hipsIndex >= 0 && headIndex > 0) { + glm::vec3 rightEye = _animSkeleton->getAbsolutePose(rightEyeIndex, poses).trans; + glm::vec3 leftEye = _animSkeleton->getAbsolutePose(leftEyeIndex, poses).trans; + glm::vec3 hips = _animSkeleton->getAbsolutePose(hipsIndex, poses).trans; + _eyesInRootFrame = 0.5f * (rightEye + leftEye) - hips; + } + } +} + // animation reference speeds. static const std::vector FORWARD_SPEEDS = { 0.4f, 1.4f, 4.5f }; // m/s static const std::vector BACKWARD_SPEEDS = { 0.6f, 1.45f }; // m/s @@ -730,6 +748,7 @@ void Rig::updateAnimations(float deltaTime, glm::mat4 rootTransform) { setJointTranslation((int)i, true, poses[i].trans, PRIORITY); } + computeEyesInRootFrame(poses); } else { // First normalize the fades so that they sum to 1.0. 
@@ -1124,14 +1143,14 @@ void Rig::updateLeanJoint(int index, float leanSideways, float leanForward, floa static AnimPose avatarToBonePose(AnimPose pose, AnimSkeleton::ConstPointer skeleton) { AnimPose rootPose = skeleton->getAbsoluteBindPose(skeleton->nameToJointIndex("Hips")); - AnimPose rotY180(glm::vec3(1), glm::angleAxis((float)PI, glm::vec3(0.0f, 1.0f, 0.0f)), glm::vec3(0)); + AnimPose rotY180(glm::vec3(1.0f), glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f)), glm::vec3(0)); return rootPose * rotY180 * pose; } #ifdef DEBUG_RENDERING static AnimPose boneToAvatarPose(AnimPose pose, AnimSkeleton::ConstPointer skeleton) { AnimPose rootPose = skeleton->getAbsoluteBindPose(skeleton->nameToJointIndex("Hips")); - AnimPose rotY180(glm::vec3(1), glm::angleAxis((float)PI, glm::vec3(0.0f, 1.0f, 0.0f)), glm::vec3(0)); + AnimPose rotY180(glm::vec3(1.0f), glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f)), glm::vec3(0)); return (rootPose * rotY180).inverse() * pose; } #endif @@ -1342,6 +1361,7 @@ void Rig::updateFromHandParameters(const HandParameters& params, float dt) { void Rig::makeAnimSkeleton(const FBXGeometry& fbxGeometry) { if (!_animSkeleton) { _animSkeleton = std::make_shared(fbxGeometry); + computeEyesInRootFrame(_animSkeleton->getRelativeBindPoses()); } } diff --git a/libraries/animation/src/Rig.h b/libraries/animation/src/Rig.h index 98847b9915..98d3a30392 100644 --- a/libraries/animation/src/Rig.h +++ b/libraries/animation/src/Rig.h @@ -214,6 +214,8 @@ public: bool getModelOffset(glm::vec3& modelOffsetOut) const; + const glm::vec3& getEyesInRootFrame() const { return _eyesInRootFrame; } + protected: void updateAnimationStateHandlers(); @@ -222,6 +224,8 @@ public: void updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm::quat& modelRotation, const glm::quat& worldHeadOrientation, const glm::vec3& lookAt, const glm::vec3& saccade); void calcAnimAlpha(float speed, const std::vector& referenceSpeeds, float* alphaOut) const; + void 
computeEyesInRootFrame(const AnimPoseVec& poses); + QVector _jointStates; int _rootJointIndex = -1; @@ -241,6 +245,7 @@ public: glm::vec3 _lastFront; glm::vec3 _lastPosition; glm::vec3 _lastVelocity; + glm::vec3 _eyesInRootFrame { Vectors::ZERO }; std::shared_ptr _animNode; std::shared_ptr _animSkeleton; diff --git a/libraries/audio-client/src/AudioClient.cpp b/libraries/audio-client/src/AudioClient.cpp index 72e47073f2..a506fe217c 100644 --- a/libraries/audio-client/src/AudioClient.cpp +++ b/libraries/audio-client/src/AudioClient.cpp @@ -904,6 +904,39 @@ void AudioClient::handleAudioInput() { } } +void AudioClient::handleRecordedAudioInput(const QByteArray& audio) { + if (!_audioPacket) { + // we don't have an audioPacket yet - set that up now + _audioPacket = NLPacket::create(PacketType::MicrophoneAudioWithEcho); + } + // FIXME either discard stereo in the recording or record a stereo flag + const int numNetworkBytes = _isStereoInput + ? AudioConstants::NETWORK_FRAME_BYTES_STEREO + : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL; + const int numNetworkSamples = _isStereoInput + ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO + : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; + + auto nodeList = DependencyManager::get(); + SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer); + if (audioMixer && audioMixer->getActiveSocket()) { + glm::vec3 headPosition = _positionGetter(); + glm::quat headOrientation = _orientationGetter(); + quint8 isStereo = _isStereoInput ? 
1 : 0; + _audioPacket->reset(); + _audioPacket->setType(PacketType::MicrophoneAudioWithEcho); + _audioPacket->writePrimitive(_outgoingAvatarAudioSequenceNumber); + _audioPacket->writePrimitive(isStereo); + _audioPacket->writePrimitive(headPosition); + _audioPacket->writePrimitive(headOrientation); + _audioPacket->write(audio); + _stats.sentPacket(); + nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket); + nodeList->sendUnreliablePacket(*_audioPacket, *audioMixer); + _outgoingAvatarAudioSequenceNumber++; + } +} + void AudioClient::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) { const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t); const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount()) diff --git a/libraries/audio-client/src/AudioClient.h b/libraries/audio-client/src/AudioClient.h index e699ee9266..7d2b5a783f 100644 --- a/libraries/audio-client/src/AudioClient.h +++ b/libraries/audio-client/src/AudioClient.h @@ -147,6 +147,7 @@ public slots: void sendDownstreamAudioStatsPacket() { _stats.sendDownstreamAudioStatsPacket(); } void handleAudioInput(); + void handleRecordedAudioInput(const QByteArray& audio); void reset(); void audioMixerKilled(); void toggleMute(); diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index a47d5f663e..017ef7578a 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -33,8 +33,7 @@ #include #include #include -#include -#include +#include #include "AvatarLogging.h" @@ -45,6 +44,9 @@ using namespace std; const glm::vec3 DEFAULT_LOCAL_AABOX_CORNER(-0.5f); const glm::vec3 DEFAULT_LOCAL_AABOX_SCALE(1.0f); +const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; +static std::once_flag frameTypeRegistration; + AvatarData::AvatarData() : _sessionUUID(), _position(0.0f), @@ -791,155 +793,10 @@ 
bool AvatarData::hasReferential() { return _referential != NULL; } -bool AvatarData::isPlaying() { - return _player && _player->isPlaying(); -} - -bool AvatarData::isPaused() { - return _player && _player->isPaused(); -} - -float AvatarData::playerElapsed() { - if (!_player) { - return 0; - } - if (QThread::currentThread() != thread()) { - float result; - QMetaObject::invokeMethod(this, "playerElapsed", Qt::BlockingQueuedConnection, - Q_RETURN_ARG(float, result)); - return result; - } - return (float)_player->position() / (float) MSECS_PER_SECOND; -} - -float AvatarData::playerLength() { - if (!_player) { - return 0; - } - if (QThread::currentThread() != thread()) { - float result; - QMetaObject::invokeMethod(this, "playerLength", Qt::BlockingQueuedConnection, - Q_RETURN_ARG(float, result)); - return result; - } - return (float)_player->length() / (float) MSECS_PER_SECOND; -} - -void AvatarData::loadRecording(const QString& filename) { - if (QThread::currentThread() != thread()) { - QMetaObject::invokeMethod(this, "loadRecording", Qt::BlockingQueuedConnection, - Q_ARG(QString, filename)); - return; - } - using namespace recording; - - ClipPointer clip = Clip::fromFile(filename); - if (!clip) { - qWarning() << "Unable to load clip data from " << filename; - } - - _player = std::make_shared(); - _player->queueClip(clip); -} - -void AvatarData::startPlaying() { - if (QThread::currentThread() != thread()) { - QMetaObject::invokeMethod(this, "startPlaying", Qt::BlockingQueuedConnection); - return; - } - - if (!_player) { - qWarning() << "No clip loaded for playback"; - return; - } - setRecordingBasis(); - _player->play(); -} - -void AvatarData::setPlayerVolume(float volume) { - // FIXME -} - -void AvatarData::setPlayerAudioOffset(float audioOffset) { - // FIXME -} - -void AvatarData::setPlayerTime(float time) { - if (!_player) { - qWarning() << "No player active"; - return; - } - - _player->seek(time * MSECS_PER_SECOND); -} - -void 
AvatarData::setPlayFromCurrentLocation(bool playFromCurrentLocation) { - // FIXME -} - -void AvatarData::setPlayerLoop(bool loop) { - if (_player) { - _player->loop(loop); - } -} - -void AvatarData::setPlayerUseDisplayName(bool useDisplayName) { - // FIXME -} - -void AvatarData::setPlayerUseAttachments(bool useAttachments) { - // FIXME -} - -void AvatarData::setPlayerUseHeadModel(bool useHeadModel) { - // FIXME -} - -void AvatarData::setPlayerUseSkeletonModel(bool useSkeletonModel) { - // FIXME -} - -void AvatarData::play() { - if (isPlaying()) { - if (QThread::currentThread() != thread()) { - QMetaObject::invokeMethod(this, "play", Qt::BlockingQueuedConnection); - return; - } - - _player->play(); - } -} - std::shared_ptr AvatarData::getRecordingBasis() const { return _recordingBasis; } -void AvatarData::pausePlayer() { - if (!_player) { - return; - } - if (QThread::currentThread() != thread()) { - QMetaObject::invokeMethod(this, "pausePlayer", Qt::BlockingQueuedConnection); - return; - } - if (_player) { - _player->pause(); - } -} - -void AvatarData::stopPlaying() { - if (!_player) { - return; - } - if (QThread::currentThread() != thread()) { - QMetaObject::invokeMethod(this, "stopPlaying", Qt::BlockingQueuedConnection); - return; - } - if (_player) { - _player->stop(); - } -} - void AvatarData::changeReferential(Referential* ref) { delete _referential; _referential = ref; @@ -1568,26 +1425,26 @@ JointData jointDataFromJsonValue(const QJsonValue& json) { // This allows the application to decide whether playback should be relative to an avatar's // transform at the start of playback, or relative to the transform of the recorded // avatar -QByteArray avatarStateToFrame(const AvatarData* _avatar) { +QByteArray AvatarData::toFrame(const AvatarData& avatar) { QJsonObject root; - if (!_avatar->getFaceModelURL().isEmpty()) { - root[JSON_AVATAR_HEAD_MODEL] = _avatar->getFaceModelURL().toString(); + if (!avatar.getFaceModelURL().isEmpty()) { + root[JSON_AVATAR_HEAD_MODEL] 
= avatar.getFaceModelURL().toString(); } - if (!_avatar->getSkeletonModelURL().isEmpty()) { - root[JSON_AVATAR_BODY_MODEL] = _avatar->getSkeletonModelURL().toString(); + if (!avatar.getSkeletonModelURL().isEmpty()) { + root[JSON_AVATAR_BODY_MODEL] = avatar.getSkeletonModelURL().toString(); } - if (!_avatar->getDisplayName().isEmpty()) { - root[JSON_AVATAR_DISPLAY_NAME] = _avatar->getDisplayName(); + if (!avatar.getDisplayName().isEmpty()) { + root[JSON_AVATAR_DISPLAY_NAME] = avatar.getDisplayName(); } - if (!_avatar->getAttachmentData().isEmpty()) { + if (!avatar.getAttachmentData().isEmpty()) { // FIXME serialize attachment data } - auto recordingBasis = _avatar->getRecordingBasis(); + auto recordingBasis = avatar.getRecordingBasis(); if (recordingBasis) { // Find the relative transform - auto relativeTransform = recordingBasis->relativeTransform(_avatar->getTransform()); + auto relativeTransform = recordingBasis->relativeTransform(avatar.getTransform()); // if the resulting relative basis is identity, we shouldn't record anything if (!relativeTransform.isIdentity()) { @@ -1595,17 +1452,17 @@ QByteArray avatarStateToFrame(const AvatarData* _avatar) { root[JSON_AVATAR_BASIS] = Transform::toJson(*recordingBasis); } } else { - root[JSON_AVATAR_RELATIVE] = Transform::toJson(_avatar->getTransform()); + root[JSON_AVATAR_RELATIVE] = Transform::toJson(avatar.getTransform()); } // Skeleton pose QJsonArray jointArray; - for (const auto& joint : _avatar->getRawJointData()) { + for (const auto& joint : avatar.getRawJointData()) { jointArray.push_back(toJsonValue(joint)); } root[JSON_AVATAR_JOINT_ARRAY] = jointArray; - const HeadData* head = _avatar->getHeadData(); + const HeadData* head = avatar.getHeadData(); if (head) { QJsonObject headJson; QJsonArray blendshapeCoefficients; @@ -1616,8 +1473,8 @@ QByteArray avatarStateToFrame(const AvatarData* _avatar) { headJson[JSON_AVATAR_HEAD_ROTATION] = toJsonValue(head->getRawOrientation()); headJson[JSON_AVATAR_HEAD_LEAN_FORWARD] = 
QJsonValue(head->getLeanForward()); headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS] = QJsonValue(head->getLeanSideways()); - vec3 relativeLookAt = glm::inverse(_avatar->getOrientation()) * - (head->getLookAtPosition() - _avatar->getPosition()); + vec3 relativeLookAt = glm::inverse(avatar.getOrientation()) * + (head->getLookAtPosition() - avatar.getPosition()); headJson[JSON_AVATAR_HEAD_LOOKAT] = toJsonValue(relativeLookAt); root[JSON_AVATAR_HEAD] = headJson; } @@ -1625,26 +1482,29 @@ QByteArray avatarStateToFrame(const AvatarData* _avatar) { return QJsonDocument(root).toBinaryData(); } -void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) { +void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) { QJsonDocument doc = QJsonDocument::fromBinaryData(frameData); QJsonObject root = doc.object(); if (root.contains(JSON_AVATAR_HEAD_MODEL)) { auto faceModelURL = root[JSON_AVATAR_HEAD_MODEL].toString(); - if (faceModelURL != _avatar->getFaceModelURL().toString()) { - _avatar->setFaceModelURL(faceModelURL); + if (faceModelURL != result.getFaceModelURL().toString()) { + QUrl faceModel(faceModelURL); + if (faceModel.isValid()) { + result.setFaceModelURL(faceModel); + } } } if (root.contains(JSON_AVATAR_BODY_MODEL)) { auto bodyModelURL = root[JSON_AVATAR_BODY_MODEL].toString(); - if (bodyModelURL != _avatar->getSkeletonModelURL().toString()) { - _avatar->setSkeletonModelURL(bodyModelURL); + if (bodyModelURL != result.getSkeletonModelURL().toString()) { + result.setSkeletonModelURL(bodyModelURL); } } if (root.contains(JSON_AVATAR_DISPLAY_NAME)) { auto newDisplayName = root[JSON_AVATAR_DISPLAY_NAME].toString(); - if (newDisplayName != _avatar->getDisplayName()) { - _avatar->setDisplayName(newDisplayName); + if (newDisplayName != result.getDisplayName()) { + result.setDisplayName(newDisplayName); } } @@ -1656,18 +1516,18 @@ void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) { // The first is more useful for playing back 
recordings on your own avatar, while // the latter is more useful for playing back other avatars within your scene. - auto currentBasis = _avatar->getRecordingBasis(); + auto currentBasis = result.getRecordingBasis(); if (!currentBasis) { currentBasis = std::make_shared(Transform::fromJson(root[JSON_AVATAR_BASIS])); } auto relativeTransform = Transform::fromJson(root[JSON_AVATAR_RELATIVE]); auto worldTransform = currentBasis->worldTransform(relativeTransform); - _avatar->setPosition(worldTransform.getTranslation()); - _avatar->setOrientation(worldTransform.getRotation()); + result.setPosition(worldTransform.getTranslation()); + result.setOrientation(worldTransform.getRotation()); // TODO: find a way to record/playback the Scale of the avatar - //_avatar->setTargetScale(worldTransform.getScale().x); + //result.setTargetScale(worldTransform.getScale().x); } @@ -1689,13 +1549,13 @@ void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) { for (const auto& joint : jointArray) { jointRotations.push_back(joint.rotation); } - _avatar->setJointRotations(jointRotations); + result.setJointRotations(jointRotations); } #if 0 // Most head data is relative to the avatar, and needs no basis correction, // but the lookat vector does need correction - HeadData* head = _avatar->_headData; + HeadData* head = result._headData; if (head && root.contains(JSON_AVATAR_HEAD)) { QJsonObject headJson = root[JSON_AVATAR_HEAD].toObject(); if (headJson.contains(JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS)) { @@ -1718,7 +1578,7 @@ void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) { if (headJson.contains(JSON_AVATAR_HEAD_LOOKAT)) { auto relativeLookAt = vec3FromJsonValue(headJson[JSON_AVATAR_HEAD_LOOKAT]); if (glm::length2(relativeLookAt) > 0.01) { - head->setLookAtPosition((_avatar->getOrientation() * relativeLookAt) + _avatar->getPosition()); + head->setLookAtPosition((result.getOrientation() * relativeLookAt) + result.getPosition()); } } } diff --git 
a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 26bc9d83ff..e79c0be80a 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -50,13 +50,12 @@ typedef unsigned long long quint64; #include #include #include +#include #include "AABox.h" #include "HandData.h" #include "HeadData.h" #include "PathUtils.h" -#include "Player.h" -#include "Recorder.h" #include "Referential.h" using AvatarSharedPointer = std::shared_ptr; @@ -165,7 +164,13 @@ class AvatarData : public QObject { Q_PROPERTY(QStringList jointNames READ getJointNames) Q_PROPERTY(QUuid sessionUUID READ getSessionUUID) + public: + static const QString FRAME_NAME; + + static void fromFrame(const QByteArray& frameData, AvatarData& avatar); + static QByteArray toFrame(const AvatarData& avatar); + AvatarData(); virtual ~AvatarData(); @@ -348,25 +353,6 @@ public slots: void setJointMappingsFromNetworkReply(); void setSessionUUID(const QUuid& sessionUUID) { _sessionUUID = sessionUUID; } bool hasReferential(); - - bool isPlaying(); - bool isPaused(); - float playerElapsed(); - float playerLength(); - void loadRecording(const QString& filename); - void startPlaying(); - void setPlayerVolume(float volume); - void setPlayerAudioOffset(float audioOffset); - void setPlayerTime(float time); - void setPlayFromCurrentLocation(bool playFromCurrentLocation); - void setPlayerLoop(bool loop); - void setPlayerUseDisplayName(bool useDisplayName); - void setPlayerUseAttachments(bool useAttachments); - void setPlayerUseHeadModel(bool useHeadModel); - void setPlayerUseSkeletonModel(bool useSkeletonModel); - void play(); - void pausePlayer(); - void stopPlaying(); protected: QUuid _sessionUUID; @@ -421,8 +407,6 @@ protected: QWeakPointer _owningAvatarMixer; - recording::DeckPointer _player; - /// Loads the joint indices, names from the FST file (if any) virtual void updateJointMappings(); void changeReferential(Referential* ref); @@ -437,7 +421,7 @@ protected: QMutex 
avatarLock; // Name is redundant, but it aids searches. // During recording, this holds the starting position, orientation & scale of the recorded avatar - // During playback, it holds the + // During playback, it holds the origin from which to play the relative positions in the clip TransformPointer _recordingBasis; private: diff --git a/libraries/gl/src/gl/OffscreenQmlSurface.cpp b/libraries/gl/src/gl/OffscreenQmlSurface.cpp index b6603deb62..e8a950a16b 100644 --- a/libraries/gl/src/gl/OffscreenQmlSurface.cpp +++ b/libraries/gl/src/gl/OffscreenQmlSurface.cpp @@ -26,7 +26,7 @@ #include "OffscreenGlCanvas.h" // FIXME move to threaded rendering with Qt 5.5 -// #define QML_THREADED +//#define QML_THREADED // Time between receiving a request to render the offscreen UI actually triggering // the render. Could possibly be increased depending on the framerate we expect to @@ -72,7 +72,7 @@ public: OffscreenGlCanvas::create(shareContext); #ifdef QML_THREADED // Qt 5.5 - // _renderControl->prepareThread(_renderThread); + _renderControl->prepareThread(_renderThread); _context->moveToThread(&_thread); moveToThread(&_thread); _thread.setObjectName("QML Thread"); diff --git a/libraries/networking/src/DomainHandler.cpp b/libraries/networking/src/DomainHandler.cpp index 9f411c59f1..afb2dde266 100644 --- a/libraries/networking/src/DomainHandler.cpp +++ b/libraries/networking/src/DomainHandler.cpp @@ -46,7 +46,13 @@ DomainHandler::DomainHandler(QObject* parent) : connect(this, &DomainHandler::completedSocketDiscovery, &_icePeer, &NetworkPeer::stopPingTimer); } -void DomainHandler::clearConnectionInfo() { +void DomainHandler::disconnect() { + // if we're currently connected to a domain, send a disconnect packet on our way out + if (_isConnected) { + sendDisconnectPacket(); + } + + // clear member variables that hold the connection state to a domain _uuid = QUuid(); _connectionToken = QUuid(); @@ -60,6 +66,18 @@ void DomainHandler::clearConnectionInfo() { setIsConnected(false); } 
+void DomainHandler::sendDisconnectPacket() { + // The DomainDisconnectRequest packet is not verified - we're relying on the eventual addition of DTLS to the + // domain-server connection to stop griefing here + + // construct the disconnect packet once (an empty packet but sourced with our current session UUID) + static auto disconnectPacket = NLPacket::create(PacketType::DomainDisconnectRequest, 0); + + // send the disconnect packet to the current domain server + auto nodeList = DependencyManager::get(); + nodeList->sendUnreliablePacket(*disconnectPacket, _sockAddr); +} + void DomainHandler::clearSettings() { _settingsObject = QJsonObject(); _failedSettingsRequests = 0; @@ -67,7 +85,7 @@ void DomainHandler::softReset() { qCDebug(networking) << "Resetting current domain connection information."; - clearConnectionInfo(); + disconnect(); clearSettings(); } diff --git a/libraries/networking/src/DomainHandler.h b/libraries/networking/src/DomainHandler.h index 9dd4254c30..49bab6dc28 100644 --- a/libraries/networking/src/DomainHandler.h +++ b/libraries/networking/src/DomainHandler.h @@ -35,7 +35,7 @@ class DomainHandler : public QObject { public: DomainHandler(QObject* parent = 0); - void clearConnectionInfo(); + void disconnect(); void clearSettings(); const QUuid& getUUID() const { return _uuid; } @@ -113,6 +113,7 @@ signals: void settingsReceiveFail(); private: + void sendDisconnectPacket(); void hardReset(); QUuid _uuid; diff --git a/libraries/networking/src/LimitedNodeList.cpp b/libraries/networking/src/LimitedNodeList.cpp index 75d42f55cb..fdb5049f00 100644 --- a/libraries/networking/src/LimitedNodeList.cpp +++ b/libraries/networking/src/LimitedNodeList.cpp @@ -441,7 +441,7 @@ void LimitedNodeList::reset() { _nodeSocket.clearConnections(); } -void LimitedNodeList::killNodeWithUUID(const QUuid& nodeUUID) { +bool LimitedNodeList::killNodeWithUUID(const QUuid& nodeUUID) { QReadLocker readLocker(&_nodeMutex); NodeHash::iterator it = 
_nodeHash.find(nodeUUID); @@ -456,7 +456,10 @@ void LimitedNodeList::killNodeWithUUID(const QUuid& nodeUUID) { } handleNodeKill(matchingNode); + return true; } + + return false; } void LimitedNodeList::processKillNode(NLPacket& packet) { diff --git a/libraries/networking/src/LimitedNodeList.h b/libraries/networking/src/LimitedNodeList.h index 2488b0cf8c..1aacd27572 100644 --- a/libraries/networking/src/LimitedNodeList.h +++ b/libraries/networking/src/LimitedNodeList.h @@ -230,7 +230,7 @@ public slots: virtual void sendSTUNRequest(); void sendPingPackets(); - void killNodeWithUUID(const QUuid& nodeUUID); + bool killNodeWithUUID(const QUuid& nodeUUID); signals: void dataSent(quint8 channelType, int bytes); diff --git a/libraries/networking/src/NodeList.cpp b/libraries/networking/src/NodeList.cpp index b262904c63..e03ac47854 100644 --- a/libraries/networking/src/NodeList.cpp +++ b/libraries/networking/src/NodeList.cpp @@ -103,6 +103,7 @@ NodeList::NodeList(char newOwnerType, unsigned short socketListenPort, unsigned packetReceiver.registerListener(PacketType::DomainServerRequireDTLS, &_domainHandler, "processDTLSRequirementPacket"); packetReceiver.registerListener(PacketType::ICEPingReply, &_domainHandler, "processICEPingReplyPacket"); packetReceiver.registerListener(PacketType::DomainServerPathResponse, this, "processDomainServerPathResponse"); + packetReceiver.registerListener(PacketType::DomainServerRemovedNode, this, "processDomainServerRemovedNode"); } qint64 NodeList::sendStats(const QJsonObject& statsObject, const HifiSockAddr& destination) { @@ -218,6 +219,10 @@ void NodeList::addSetOfNodeTypesToNodeInterestSet(const NodeSet& setOfNodeTypes) } void NodeList::sendDomainServerCheckIn() { + if (_isShuttingDown) { + qCDebug(networking) << "Refusing to send a domain-server check in while shutting down."; + } + if (_publicSockAddr.isNull()) { // we don't know our public socket and we need to send it to the domain server qCDebug(networking) << "Waiting for inital 
public socket from STUN. Will not send domain-server check in."; @@ -513,6 +518,13 @@ void NodeList::processDomainServerAddedNode(QSharedPointer packet) { parseNodeFromPacketStream(packetStream); } +void NodeList::processDomainServerRemovedNode(QSharedPointer packet) { + // read the UUID from the packet, remove it if it exists + QUuid nodeUUID = QUuid::fromRfc4122(packet->readWithoutCopy(NUM_BYTES_RFC4122_UUID)); + qDebug() << "Received packet from domain-server to remove node with UUID" << uuidStringWithoutCurlyBraces(nodeUUID); + killNodeWithUUID(nodeUUID); +} + void NodeList::parseNodeFromPacketStream(QDataStream& packetStream) { // setup variables to read into from QDataStream qint8 nodeType; diff --git a/libraries/networking/src/NodeList.h b/libraries/networking/src/NodeList.h index 3aae3e3dfc..5b9a4e5ae5 100644 --- a/libraries/networking/src/NodeList.h +++ b/libraries/networking/src/NodeList.h @@ -66,6 +66,8 @@ public: void setAssignmentServerSocket(const HifiSockAddr& serverSocket) { _assignmentServerSocket = serverSocket; } void sendAssignment(Assignment& assignment); + + void setIsShuttingDown(bool isShuttingDown) { _isShuttingDown = isShuttingDown; } public slots: void reset(); @@ -74,6 +76,7 @@ public slots: void processDomainServerList(QSharedPointer packet); void processDomainServerAddedNode(QSharedPointer packet); + void processDomainServerRemovedNode(QSharedPointer packet); void processDomainServerPathResponse(QSharedPointer packet); void processDomainServerConnectionTokenPacket(QSharedPointer packet); @@ -114,6 +117,7 @@ private: DomainHandler _domainHandler; int _numNoReplyDomainCheckIns; HifiSockAddr _assignmentServerSocket; + bool _isShuttingDown { false }; }; #endif // hifi_NodeList_h diff --git a/libraries/networking/src/ThreadedAssignment.cpp b/libraries/networking/src/ThreadedAssignment.cpp index 992b3be2b4..b204982896 100644 --- a/libraries/networking/src/ThreadedAssignment.cpp +++ b/libraries/networking/src/ThreadedAssignment.cpp @@ -33,14 
+33,19 @@ void ThreadedAssignment::setFinished(bool isFinished) { if (_isFinished) { qDebug() << "ThreadedAssignment::setFinished(true) called - finishing up."; - - auto& packetReceiver = DependencyManager::get()->getPacketReceiver(); + + auto nodeList = DependencyManager::get(); + + auto& packetReceiver = nodeList->getPacketReceiver(); // we should de-register immediately for any of our packets packetReceiver.unregisterListener(this); // we should also tell the packet receiver to drop packets while we're cleaning up packetReceiver.setShouldDropPackets(true); + + // send a disconnect packet to the domain + nodeList->getDomainHandler().disconnect(); if (_domainServerTimer) { // stop the domain-server check in timer by calling deleteLater so it gets cleaned up on NL thread diff --git a/libraries/networking/src/udt/PacketHeaders.cpp b/libraries/networking/src/udt/PacketHeaders.cpp index 24034ff9b3..f5c66617a8 100644 --- a/libraries/networking/src/udt/PacketHeaders.cpp +++ b/libraries/networking/src/udt/PacketHeaders.cpp @@ -14,11 +14,13 @@ #include #include +#include const QSet NON_VERIFIED_PACKETS = QSet() << PacketType::NodeJsonStats << PacketType::EntityQuery << PacketType::OctreeDataNack << PacketType::EntityEditNack - << PacketType::DomainListRequest << PacketType::StopNode; + << PacketType::DomainListRequest << PacketType::StopNode + << PacketType::DomainDisconnectRequest; const QSet NON_SOURCED_PACKETS = QSet() << PacketType::StunResponse << PacketType::CreateAssignment << PacketType::RequestAssignment @@ -29,7 +31,8 @@ const QSet NON_SOURCED_PACKETS = QSet() << PacketType::DomainSettingsRequest << PacketType::DomainSettings << PacketType::ICEServerPeerInformation << PacketType::ICEServerQuery << PacketType::ICEServerHeartbeat << PacketType::ICEPing << PacketType::ICEPingReply - << PacketType::AssignmentClientStatus << PacketType::StopNode; + << PacketType::AssignmentClientStatus << PacketType::StopNode + << PacketType::DomainServerRemovedNode; const QSet 
RELIABLE_PACKETS = QSet(); @@ -46,66 +49,17 @@ PacketVersion versionForPacketType(PacketType packetType) { } } -#define PACKET_TYPE_NAME_LOOKUP(x) case x: return QString(#x); - -QString nameForPacketType(PacketType packetType) { - switch (packetType) { - PACKET_TYPE_NAME_LOOKUP(PacketType::Unknown); - PACKET_TYPE_NAME_LOOKUP(PacketType::StunResponse); - PACKET_TYPE_NAME_LOOKUP(PacketType::DomainList); - PACKET_TYPE_NAME_LOOKUP(PacketType::Ping); - PACKET_TYPE_NAME_LOOKUP(PacketType::PingReply); - PACKET_TYPE_NAME_LOOKUP(PacketType::KillAvatar); - PACKET_TYPE_NAME_LOOKUP(PacketType::AvatarData); - PACKET_TYPE_NAME_LOOKUP(PacketType::InjectAudio); - PACKET_TYPE_NAME_LOOKUP(PacketType::MixedAudio); - PACKET_TYPE_NAME_LOOKUP(PacketType::MicrophoneAudioNoEcho); - PACKET_TYPE_NAME_LOOKUP(PacketType::MicrophoneAudioWithEcho); - PACKET_TYPE_NAME_LOOKUP(PacketType::BulkAvatarData); - PACKET_TYPE_NAME_LOOKUP(PacketType::SilentAudioFrame); - PACKET_TYPE_NAME_LOOKUP(PacketType::DomainListRequest); - PACKET_TYPE_NAME_LOOKUP(PacketType::RequestAssignment); - PACKET_TYPE_NAME_LOOKUP(PacketType::CreateAssignment); - PACKET_TYPE_NAME_LOOKUP(PacketType::DomainConnectionDenied); - PACKET_TYPE_NAME_LOOKUP(PacketType::MuteEnvironment); - PACKET_TYPE_NAME_LOOKUP(PacketType::AudioStreamStats); - PACKET_TYPE_NAME_LOOKUP(PacketType::OctreeStats); - PACKET_TYPE_NAME_LOOKUP(PacketType::Jurisdiction); - PACKET_TYPE_NAME_LOOKUP(PacketType::JurisdictionRequest); - PACKET_TYPE_NAME_LOOKUP(PacketType::AvatarIdentity); - PACKET_TYPE_NAME_LOOKUP(PacketType::AvatarBillboard); - PACKET_TYPE_NAME_LOOKUP(PacketType::DomainConnectRequest); - PACKET_TYPE_NAME_LOOKUP(PacketType::DomainServerRequireDTLS); - PACKET_TYPE_NAME_LOOKUP(PacketType::NodeJsonStats); - PACKET_TYPE_NAME_LOOKUP(PacketType::EntityQuery); - PACKET_TYPE_NAME_LOOKUP(PacketType::EntityData); - PACKET_TYPE_NAME_LOOKUP(PacketType::EntityErase); - PACKET_TYPE_NAME_LOOKUP(PacketType::OctreeDataNack); - 
PACKET_TYPE_NAME_LOOKUP(PacketType::StopNode); - PACKET_TYPE_NAME_LOOKUP(PacketType::AudioEnvironment); - PACKET_TYPE_NAME_LOOKUP(PacketType::EntityEditNack); - PACKET_TYPE_NAME_LOOKUP(PacketType::ICEServerHeartbeat); - PACKET_TYPE_NAME_LOOKUP(PacketType::DomainServerAddedNode); - PACKET_TYPE_NAME_LOOKUP(PacketType::ICEServerQuery); - PACKET_TYPE_NAME_LOOKUP(PacketType::ICEServerPeerInformation); - PACKET_TYPE_NAME_LOOKUP(PacketType::ICEPing); - PACKET_TYPE_NAME_LOOKUP(PacketType::ICEPingReply); - PACKET_TYPE_NAME_LOOKUP(PacketType::EntityAdd); - PACKET_TYPE_NAME_LOOKUP(PacketType::EntityEdit); - PACKET_TYPE_NAME_LOOKUP(PacketType::DomainServerConnectionToken); - default: - return QString("Type: ") + QString::number((int)packetType); - } - return QString("unexpected"); -} - uint qHash(const PacketType& key, uint seed) { - // seems odd that Qt couldn't figure out this cast itself, but this fixes a compile error after switch to - // strongly typed enum for PacketType + // seems odd that Qt couldn't figure out this cast itself, but this fixes a compile error after switch + // to strongly typed enum for PacketType return qHash((quint8) key, seed); } QDebug operator<<(QDebug debug, const PacketType& type) { - debug.nospace() << (uint8_t) type << " (" << qPrintable(nameForPacketType(type)) << ")"; + QMetaObject metaObject = PacketTypeEnum::staticMetaObject; + QMetaEnum metaEnum = metaObject.enumerator(metaObject.enumeratorOffset()); + QString typeName = metaEnum.valueToKey((int) type); + + debug.nospace().noquote() << (uint8_t) type << " (" << typeName << ")"; return debug.space(); } diff --git a/libraries/networking/src/udt/PacketHeaders.h b/libraries/networking/src/udt/PacketHeaders.h index 099e842c27..d1287d4a08 100644 --- a/libraries/networking/src/udt/PacketHeaders.h +++ b/libraries/networking/src/udt/PacketHeaders.h @@ -18,71 +18,82 @@ #include #include +#include #include #include -// If adding a new packet packetType, you can replace one marked usable or add at 
the end. -// If you want the name of the packet packetType to be available for debugging or logging, update nameForPacketType() as well -// This enum must hold 256 or fewer packet types (so the value is <= 255) since it is statically typed as a uint8_t -enum class PacketType : uint8_t { - Unknown, - StunResponse, - DomainList, - Ping, - PingReply, - KillAvatar, - AvatarData, - InjectAudio, - MixedAudio, - MicrophoneAudioNoEcho, - MicrophoneAudioWithEcho, - BulkAvatarData, - SilentAudioFrame, - DomainListRequest, - RequestAssignment, - CreateAssignment, - DomainConnectionDenied, - MuteEnvironment, - AudioStreamStats, - DomainServerPathQuery, - DomainServerPathResponse, - DomainServerAddedNode, - ICEServerPeerInformation, - ICEServerQuery, - OctreeStats, - Jurisdiction, - JurisdictionRequest, - AssignmentClientStatus, - NoisyMute, - AvatarIdentity, - AvatarBillboard, - DomainConnectRequest, - DomainServerRequireDTLS, - NodeJsonStats, - OctreeDataNack, - StopNode, - AudioEnvironment, - EntityEditNack, - ICEServerHeartbeat, - ICEPing, - ICEPingReply, - EntityData, - EntityQuery, - EntityAdd, - EntityErase, - EntityEdit, - DomainServerConnectionToken, - DomainSettingsRequest, - DomainSettings, - AssetGet, - AssetGetReply, - AssetUpload, - AssetUploadReply, - AssetGetInfo, - AssetGetInfoReply, - MessagesData +// The enums are inside this PacketTypeEnum for run-time conversion of enum value to string via +// Q_ENUMS, without requiring a macro that is called for each enum value. +class PacketTypeEnum { + Q_GADGET + Q_ENUMS(Value) +public: + // If adding a new packet packetType, you can replace one marked usable or add at the end. 
+ // This enum must hold 256 or fewer packet types (so the value is <= 255) since it is statically typed as a uint8_t + enum class Value : uint8_t { + Unknown, + StunResponse, + DomainList, + Ping, + PingReply, + KillAvatar, + AvatarData, + InjectAudio, + MixedAudio, + MicrophoneAudioNoEcho, + MicrophoneAudioWithEcho, + BulkAvatarData, + SilentAudioFrame, + DomainListRequest, + RequestAssignment, + CreateAssignment, + DomainConnectionDenied, + MuteEnvironment, + AudioStreamStats, + DomainServerPathQuery, + DomainServerPathResponse, + DomainServerAddedNode, + ICEServerPeerInformation, + ICEServerQuery, + OctreeStats, + Jurisdiction, + JurisdictionRequest, + AssignmentClientStatus, + NoisyMute, + AvatarIdentity, + AvatarBillboard, + DomainConnectRequest, + DomainServerRequireDTLS, + NodeJsonStats, + OctreeDataNack, + StopNode, + AudioEnvironment, + EntityEditNack, + ICEServerHeartbeat, + ICEPing, + ICEPingReply, + EntityData, + EntityQuery, + EntityAdd, + EntityErase, + EntityEdit, + DomainServerConnectionToken, + DomainSettingsRequest, + DomainSettings, + AssetGet, + AssetGetReply, + AssetUpload, + AssetUploadReply, + AssetGetInfo, + AssetGetInfoReply, + DomainDisconnectRequest, + DomainServerRemovedNode, + MessagesData + }; }; +using PacketType = PacketTypeEnum::Value; + const int NUM_BYTES_MD5_HASH = 16; typedef char PacketVersion; @@ -91,7 +102,6 @@ extern const QSet NON_VERIFIED_PACKETS; extern const QSet NON_SOURCED_PACKETS; extern const QSet RELIABLE_PACKETS; -QString nameForPacketType(PacketType packetType); PacketVersion versionForPacketType(PacketType packetType); uint qHash(const PacketType& key, uint seed); diff --git a/libraries/octree/src/Octree.cpp b/libraries/octree/src/Octree.cpp index cceb3ba706..fe92fe7745 100644 --- a/libraries/octree/src/Octree.cpp +++ b/libraries/octree/src/Octree.cpp @@ -1890,8 +1890,8 @@ bool Octree::readSVOFromStream(unsigned long streamLength, QDataStream& inputStr versionForPacketType(expectedDataPacketType()), gotVersion); 
} } else { - qCDebug(octree) << "SVO file type mismatch. Expected: " << nameForPacketType(expectedType) - << " Got: " << nameForPacketType(gotType); + qCDebug(octree) << "SVO file type mismatch. Expected: " << expectedType + << " Got: " << gotType; } } else { diff --git a/libraries/recording/src/recording/Clip.cpp b/libraries/recording/src/recording/Clip.cpp index 28e4211fe3..abe66ccb2e 100644 --- a/libraries/recording/src/recording/Clip.cpp +++ b/libraries/recording/src/recording/Clip.cpp @@ -23,7 +23,7 @@ Clip::Pointer Clip::fromFile(const QString& filePath) { return result; } -void Clip::toFile(const QString& filePath, Clip::Pointer clip) { +void Clip::toFile(const QString& filePath, const Clip::ConstPointer& clip) { FileClip::write(filePath, clip->duplicate()); } @@ -31,19 +31,10 @@ Clip::Pointer Clip::newClip() { return std::make_shared(); } -Clip::Pointer Clip::duplicate() { - Clip::Pointer result = std::make_shared(); - - Locker lock(_mutex); - Time currentPosition = position(); - seek(0); - - auto frame = nextFrame(); - while (frame) { - result->addFrame(frame); - frame = nextFrame(); - } - - seek(currentPosition); - return result; +void Clip::seek(float offset) { + seekFrameTime(Frame::secondsToFrameTime(offset)); } + +float Clip::position() const { + return Frame::frameTimeToSeconds(positionFrameTime()); +}; diff --git a/libraries/recording/src/recording/Clip.h b/libraries/recording/src/recording/Clip.h index a00ab72c98..722fadf0b2 100644 --- a/libraries/recording/src/recording/Clip.h +++ b/libraries/recording/src/recording/Clip.h @@ -16,6 +16,8 @@ #include +#include "Frame.h" + class QIODevice; namespace recording { @@ -23,16 +25,22 @@ namespace recording { class Clip { public: using Pointer = std::shared_ptr; + using ConstPointer = std::shared_ptr; virtual ~Clip() {} - Pointer duplicate(); + virtual Pointer duplicate() const = 0; - virtual Time duration() const = 0; + virtual QString getName() const = 0; + + virtual float duration() const = 0; virtual 
size_t frameCount() const = 0; - virtual void seek(Time offset) = 0; - virtual Time position() const = 0; + virtual void seek(float offset) final; + virtual float position() const final; + + virtual void seekFrameTime(Frame::Time offset) = 0; + virtual Frame::Time positionFrameTime() const = 0; virtual FrameConstPointer peekFrame() const = 0; virtual FrameConstPointer nextFrame() = 0; @@ -40,7 +48,7 @@ public: virtual void addFrame(FrameConstPointer) = 0; static Pointer fromFile(const QString& filePath); - static void toFile(const QString& filePath, Pointer clip); + static void toFile(const QString& filePath, const ConstPointer& clip); static Pointer newClip(); protected: diff --git a/libraries/recording/src/recording/Deck.cpp b/libraries/recording/src/recording/Deck.cpp index 10209c26d7..e52fcc16e6 100644 --- a/libraries/recording/src/recording/Deck.cpp +++ b/libraries/recording/src/recording/Deck.cpp @@ -14,31 +14,46 @@ #include "Clip.h" #include "Frame.h" #include "Logging.h" +#include "impl/OffsetClip.h" using namespace recording; -void Deck::queueClip(ClipPointer clip, Time timeOffset) { +Deck::Deck(QObject* parent) + : QObject(parent) {} + +void Deck::queueClip(ClipPointer clip, float timeOffset) { + Locker lock(_mutex); + if (!clip) { qCWarning(recordingLog) << "Clip invalid, ignoring"; return; } - // FIXME if the time offset is not zero, wrap the clip in a OffsetClip wrapper + // FIXME disabling multiple clips for now + _clips.clear(); + + // if the time offset is not zero, wrap in an OffsetClip + if (timeOffset != 0.0f) { + clip = std::make_shared(clip, timeOffset); + } + _clips.push_back(clip); _length = std::max(_length, clip->duration()); } void Deck::play() { + Locker lock(_mutex); if (_pause) { _pause = false; - _startEpoch = usecTimestampNow() - (_position * USECS_PER_MSEC); + _startEpoch = Frame::epochForFrameTime(_position); emit playbackStateChanged(); processFrames(); } } void Deck::pause() { + Locker lock(_mutex); if (!_pause) { _pause = true; 
emit playbackStateChanged(); @@ -47,9 +62,9 @@ void Deck::pause() { Clip::Pointer Deck::getNextClip() { Clip::Pointer result; - Time soonestFramePosition = INVALID_TIME; + auto soonestFramePosition = Frame::INVALID_TIME; for (const auto& clip : _clips) { - Time nextFramePosition = clip->position(); + auto nextFramePosition = clip->positionFrameTime(); if (nextFramePosition < soonestFramePosition) { result = clip; soonestFramePosition = nextFramePosition; @@ -58,11 +73,16 @@ Clip::Pointer Deck::getNextClip() { return result; } -void Deck::seek(Time position) { - _position = position; - // FIXME reset the frames to the appropriate spot +void Deck::seek(float position) { + Locker lock(_mutex); + _position = Frame::secondsToFrameTime(position); + + // Recompute the start epoch + _startEpoch = Frame::epochForFrameTime(_position); + + // reset the clips to the appropriate spot for (auto& clip : _clips) { - clip->seek(position); + clip->seekFrameTime(_position); } if (!_pause) { @@ -71,35 +91,46 @@ void Deck::seek(Time position) { } } -Time Deck::position() const { - if (_pause) { - return _position; +float Deck::position() const { + Locker lock(_mutex); + auto currentPosition = _position; + if (!_pause) { + currentPosition = Frame::frameTimeFromEpoch(_startEpoch); } - return (usecTimestampNow() - _startEpoch) / USECS_PER_MSEC; + return Frame::frameTimeToSeconds(currentPosition); } -static const Time MIN_FRAME_WAIT_INTERVAL_MS = 1; +static const Frame::Time MIN_FRAME_WAIT_INTERVAL = Frame::secondsToFrameTime(0.001f); +static const Frame::Time MAX_FRAME_PROCESSING_TIME = Frame::secondsToFrameTime(0.002f); void Deck::processFrames() { + Locker lock(_mutex); if (_pause) { return; } - _position = position(); - auto triggerPosition = _position + MIN_FRAME_WAIT_INTERVAL_MS; + auto startingPosition = Frame::frameTimeFromEpoch(_startEpoch); + auto triggerPosition = startingPosition + MIN_FRAME_WAIT_INTERVAL; Clip::Pointer nextClip; + // FIXME add code to start dropping frames if 
we fall behind. + // Alternatively, add code to cache frames here and then process only the last frame of a given type + // ... the latter will work for Avatar, but not well for audio I suspect. for (nextClip = getNextClip(); nextClip; nextClip = getNextClip()) { - // If the clip is too far in the future, just break out of the handling loop - Time framePosition = nextClip->position(); - if (framePosition > triggerPosition) { + auto currentPosition = Frame::frameTimeFromEpoch(_startEpoch); + if ((currentPosition - startingPosition) >= MAX_FRAME_PROCESSING_TIME) { + qCWarning(recordingLog) << "Exceeded maximum frame processing time, breaking early"; break; } + // If the clip is too far in the future, just break out of the handling loop + Frame::Time framePosition = nextClip->positionFrameTime(); + if (framePosition > triggerPosition) { + break; + } // Handle the frame and advance the clip Frame::handleFrame(nextClip->nextFrame()); } - if (!nextClip) { qCDebug(recordingLog) << "No more frames available"; // No more frames available, so handle the end of playback @@ -107,6 +138,9 @@ void Deck::processFrames() { qCDebug(recordingLog) << "Looping enabled, seeking back to beginning"; // If we have looping enabled, start the playback over seek(0); + // FIXME configure the recording scripting interface to reset the avatar basis on a loop + // if doing relative movement + emit looped(); } else { // otherwise pause playback pause(); @@ -115,9 +149,67 @@ void Deck::processFrames() { } // If we have more clip frames available, set the timer for the next one - Time nextClipPosition = nextClip->position(); - Time interval = nextClipPosition - _position; + _position = Frame::frameTimeFromEpoch(_startEpoch); + auto nextFrameTime = nextClip->positionFrameTime(); + auto interval = Frame::frameTimeToMilliseconds(nextFrameTime - _position); _timer.singleShot(interval, [this] { processFrames(); }); } + +void Deck::removeClip(const ClipConstPointer& clip) { + Locker lock(_mutex); + 
std::remove_if(_clips.begin(), _clips.end(), [&](const Clip::ConstPointer& testClip)->bool { + return (clip == testClip); + }); +} + +void Deck::removeClip(const QString& clipName) { + Locker lock(_mutex); + std::remove_if(_clips.begin(), _clips.end(), [&](const Clip::ConstPointer& clip)->bool { + return (clip->getName() == clipName); + }); +} + +void Deck::removeAllClips() { + Locker lock(_mutex); + _clips.clear(); +} + +Deck::ClipList Deck::getClips(const QString& clipName) const { + Locker lock(_mutex); + ClipList result = _clips; + return result; +} + + +bool Deck::isPlaying() { + Locker lock(_mutex); + return !_pause; +} + +bool Deck::isPaused() const { + Locker lock(_mutex); + return _pause; +} + +void Deck::stop() { + Locker lock(_mutex); + pause(); + seek(0.0f); +} + +float Deck::length() const { + Locker lock(_mutex); + return _length; +} + +void Deck::loop(bool enable) { + Locker lock(_mutex); + _loop = enable; +} + +bool Deck::isLooping() const { + Locker lock(_mutex); + return _loop; +} diff --git a/libraries/recording/src/recording/Deck.h b/libraries/recording/src/recording/Deck.h index 7086e9759d..1f8d58d5e1 100644 --- a/libraries/recording/src/recording/Deck.h +++ b/libraries/recording/src/recording/Deck.h @@ -12,56 +12,70 @@ #include #include +#include #include #include +#include + +#include #include "Forward.h" +#include "Frame.h" namespace recording { -class Deck : public QObject { +class Deck : public QObject, public ::Dependency { Q_OBJECT public: + using ClipList = std::list; using Pointer = std::shared_ptr; - Deck(QObject* parent = nullptr) : QObject(parent) {} + + Deck(QObject* parent = nullptr); // Place a clip on the deck for recording or playback - void queueClip(ClipPointer clip, Time timeOffset = 0.0f); + void queueClip(ClipPointer clip, float timeOffset = 0.0f); + void removeClip(const ClipConstPointer& clip); + void removeClip(const QString& clipName); + void removeAllClips(); + ClipList getClips(const QString& clipName) const; void 
play(); - bool isPlaying() { return !_pause; } + bool isPlaying(); void pause(); - bool isPaused() const { return _pause; } + bool isPaused() const; - void stop() { pause(); seek(0.0f); } + void stop(); - Time length() const { return _length; } + float length() const; - void loop(bool enable = true) { _loop = enable; } - bool isLooping() const { return _loop; } + void loop(bool enable = true); + bool isLooping() const; - Time position() const; - void seek(Time position); + float position() const; + void seek(float position); signals: void playbackStateChanged(); + void looped(); private: - using Clips = std::list; + using Mutex = std::recursive_mutex; + using Locker = std::unique_lock; ClipPointer getNextClip(); void processFrames(); + mutable Mutex _mutex; QTimer _timer; - Clips _clips; + ClipList _clips; quint64 _startEpoch { 0 }; - Time _position { 0 }; + Frame::Time _position { 0 }; bool _pause { true }; bool _loop { false }; - Time _length { 0 }; + float _length { 0 }; }; } diff --git a/libraries/recording/src/recording/Forward.h b/libraries/recording/src/recording/Forward.h index 4ba54e23a3..1bc9b31ea9 100644 --- a/libraries/recording/src/recording/Forward.h +++ b/libraries/recording/src/recording/Forward.h @@ -16,10 +16,6 @@ namespace recording { -using Time = uint32_t; - -static const Time INVALID_TIME = std::numeric_limits::max(); - using FrameType = uint16_t; using FrameSize = uint16_t; @@ -36,16 +32,14 @@ class Clip; using ClipPointer = std::shared_ptr; +using ClipConstPointer = std::shared_ptr; + // An interface for playing back clips class Deck; -using DeckPointer = std::shared_ptr; - // An interface for recording a single clip class Recorder; -using RecorderPointer = std::shared_ptr; - } #endif diff --git a/libraries/recording/src/recording/Frame.cpp b/libraries/recording/src/recording/Frame.cpp index 5b0116519f..bff85ea872 100644 --- a/libraries/recording/src/recording/Frame.cpp +++ b/libraries/recording/src/recording/Frame.cpp @@ -12,6 +12,9 @@ 
#include +#include +#include + using namespace recording; // FIXME move to shared @@ -73,7 +76,31 @@ using Locker = std::unique_lock; static Mutex mutex; static std::once_flag once; +float FrameHeader::frameTimeToSeconds(Frame::Time frameTime) { + float result = frameTime; + result /= MSECS_PER_SECOND; + return result; +} +uint32_t FrameHeader::frameTimeToMilliseconds(Frame::Time frameTime) { + return frameTime; +} + +Frame::Time FrameHeader::frameTimeFromEpoch(quint64 epoch) { + auto intervalMicros = (usecTimestampNow() - epoch); + intervalMicros /= USECS_PER_MSEC; + return (Frame::Time)(intervalMicros); +} + +quint64 FrameHeader::epochForFrameTime(Time frameTime) { + auto epoch = usecTimestampNow(); + epoch -= (frameTime * USECS_PER_MSEC); + return epoch; +} + +Frame::Time FrameHeader::secondsToFrameTime(float seconds) { + return (Time)(seconds * MSECS_PER_SECOND); +} FrameType Frame::registerFrameType(const QString& frameTypeName) { Locker lock(mutex); diff --git a/libraries/recording/src/recording/Frame.h b/libraries/recording/src/recording/Frame.h index f0f53ce144..3cc999f505 100644 --- a/libraries/recording/src/recording/Frame.h +++ b/libraries/recording/src/recording/Frame.h @@ -13,26 +13,46 @@ #include "Forward.h" #include +#include #include namespace recording { -struct Frame { +struct FrameHeader { + using Time = uint32_t; + + static const Time INVALID_TIME = UINT32_MAX; + static const FrameType TYPE_INVALID = 0xFFFF; + static const FrameType TYPE_HEADER = 0x0; + + static Time secondsToFrameTime(float seconds); + static float frameTimeToSeconds(Time frameTime); + + static uint32_t frameTimeToMilliseconds(Time frameTime); + + static Time frameTimeFromEpoch(quint64 epoch); + static quint64 epochForFrameTime(Time frameTime); + + FrameType type { TYPE_INVALID }; + Time timeOffset { 0 }; // milliseconds + + FrameHeader() {} + FrameHeader(FrameType type, Time timeOffset) + : type(type), timeOffset(timeOffset) { } +}; + +struct Frame : public FrameHeader { 
public: using Pointer = std::shared_ptr; using ConstPointer = std::shared_ptr; using Handler = std::function; - static const FrameType TYPE_INVALID = 0xFFFF; - static const FrameType TYPE_HEADER = 0x0; - FrameType type { TYPE_INVALID }; - Time timeOffset { 0 }; // milliseconds QByteArray data; Frame() {} Frame(FrameType type, float timeOffset, const QByteArray& data) - : type(type), timeOffset(timeOffset), data(data) {} + : FrameHeader(type, timeOffset), data(data) { } static FrameType registerFrameType(const QString& frameTypeName); static QMap getFrameTypes(); diff --git a/libraries/recording/src/recording/Recorder.cpp b/libraries/recording/src/recording/Recorder.cpp index f007367cae..aae31f8ec0 100644 --- a/libraries/recording/src/recording/Recorder.cpp +++ b/libraries/recording/src/recording/Recorder.cpp @@ -16,20 +16,23 @@ using namespace recording; -Recorder::~Recorder() { +Recorder::Recorder(QObject* parent) + : QObject(parent) {} -} - -Time Recorder::position() { +float Recorder::position() { + Locker lock(_mutex); + if (_clip) { + return _clip->duration(); + } return 0.0f; } void Recorder::start() { + Locker lock(_mutex); if (!_recording) { _recording = true; - if (!_clip) { - _clip = std::make_shared(); - } + // FIXME for now just record a new clip every time + _clip = std::make_shared(); _startEpoch = usecTimestampNow(); _timer.start(); emit recordingStateChanged(); @@ -37,6 +40,7 @@ void Recorder::start() { } void Recorder::stop() { + Locker lock(_mutex); if (_recording) { _recording = false; _elapsed = _timer.elapsed(); @@ -45,14 +49,17 @@ void Recorder::stop() { } bool Recorder::isRecording() { + Locker lock(_mutex); return _recording; } void Recorder::clear() { + Locker lock(_mutex); _clip.reset(); } void Recorder::recordFrame(FrameType type, QByteArray frameData) { + Locker lock(_mutex); if (!_recording || !_clip) { return; } @@ -65,6 +72,7 @@ void Recorder::recordFrame(FrameType type, QByteArray frameData) { } ClipPointer Recorder::getClip() { + 
Locker lock(_mutex); return _clip; } diff --git a/libraries/recording/src/recording/Recorder.h b/libraries/recording/src/recording/Recorder.h index f8346456d4..abbc964389 100644 --- a/libraries/recording/src/recording/Recorder.h +++ b/libraries/recording/src/recording/Recorder.h @@ -10,24 +10,25 @@ #ifndef hifi_Recording_Recorder_h #define hifi_Recording_Recorder_h -#include "Forward.h" +#include #include #include +#include + +#include "Forward.h" + namespace recording { // An interface for interacting with clips, creating them by recording or // playing them back. Also serialization to and from files / network sources -class Recorder : public QObject { +class Recorder : public QObject, public Dependency { Q_OBJECT public: - using Pointer = std::shared_ptr; + Recorder(QObject* parent = nullptr); - Recorder(QObject* parent = nullptr) : QObject(parent) {} - virtual ~Recorder(); - - Time position(); + float position(); // Start recording frames void start(); @@ -49,6 +50,10 @@ signals: void recordingStateChanged(); private: + using Mutex = std::recursive_mutex; + using Locker = std::unique_lock; + + Mutex _mutex; QElapsedTimer _timer; ClipPointer _clip; quint64 _elapsed { 0 }; diff --git a/libraries/recording/src/recording/impl/ArrayClip.h b/libraries/recording/src/recording/impl/ArrayClip.h new file mode 100644 index 0000000000..10b3580228 --- /dev/null +++ b/libraries/recording/src/recording/impl/ArrayClip.h @@ -0,0 +1,100 @@ +// +// Created by Bradley Austin Davis 2015/11/05 +// Copyright 2015 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#pragma once +#ifndef hifi_Recording_Impl_ArrayClip_h +#define hifi_Recording_Impl_ArrayClip_h + +#include "../Clip.h" + +#include + +namespace recording { + +template +class ArrayClip : public Clip { +public: + virtual float duration() const override { + Locker lock(_mutex); + if (_frames.empty()) { + return 0; + } + return Frame::frameTimeToSeconds((*_frames.rbegin()).timeOffset); + } + + virtual size_t frameCount() const override { + Locker lock(_mutex); + return _frames.size(); + } + + Clip::Pointer duplicate() const { + auto result = newClip(); + Locker lock(_mutex); + for (size_t i = 0; i < _frames.size(); ++i) { + result->addFrame(readFrame(i)); + } + return result; + } + + virtual void seekFrameTime(Frame::Time offset) { + Locker lock(_mutex); + auto itr = std::lower_bound(_frames.begin(), _frames.end(), offset, + [](const T& a, Frame::Time b)->bool { + return a.timeOffset < b; + } + ); + _frameIndex = itr - _frames.begin(); + } + + virtual Frame::Time positionFrameTime() const override { + Locker lock(_mutex); + Frame::Time result = Frame::INVALID_TIME; + if (_frameIndex < _frames.size()) { + result = _frames[_frameIndex].timeOffset; + } + return result; + } + + virtual FrameConstPointer peekFrame() const override { + Locker lock(_mutex); + FrameConstPointer result; + if (_frameIndex < _frames.size()) { + result = readFrame(_frameIndex); + } + return result; + } + + virtual FrameConstPointer nextFrame() override { + Locker lock(_mutex); + FrameConstPointer result; + if (_frameIndex < _frames.size()) { + result = readFrame(_frameIndex++); + } + return result; + } + + virtual void skipFrame() override { + Locker lock(_mutex); + if (_frameIndex < _frames.size()) { + ++_frameIndex; + } + } + +protected: + virtual void reset() override { + _frameIndex = 0; + } + + virtual FrameConstPointer readFrame(size_t index) const = 0; + std::vector _frames; + mutable size_t 
_frameIndex { 0 }; +}; + +} + +#endif diff --git a/libraries/recording/src/recording/impl/BufferClip.cpp b/libraries/recording/src/recording/impl/BufferClip.cpp index 87bbfbfef7..c40d9dd42a 100644 --- a/libraries/recording/src/recording/impl/BufferClip.cpp +++ b/libraries/recording/src/recording/impl/BufferClip.cpp @@ -8,85 +8,40 @@ #include "BufferClip.h" -#include +#include +#include #include "../Frame.h" using namespace recording; - -void BufferClip::seek(Time offset) { - Locker lock(_mutex); - auto itr = std::lower_bound(_frames.begin(), _frames.end(), offset, - [](Frame::ConstPointer a, Time b)->bool { - return a->timeOffset < b; - } - ); - _frameIndex = itr - _frames.begin(); +QString BufferClip::getName() const { + return _name; } -Time BufferClip::position() const { - Locker lock(_mutex); - Time result = INVALID_TIME; - if (_frameIndex < _frames.size()) { - result = _frames[_frameIndex]->timeOffset; - } - return result; -} - -FrameConstPointer BufferClip::peekFrame() const { - Locker lock(_mutex); - FrameConstPointer result; - if (_frameIndex < _frames.size()) { - result = _frames[_frameIndex]; - } - return result; -} - -FrameConstPointer BufferClip::nextFrame() { - Locker lock(_mutex); - FrameConstPointer result; - if (_frameIndex < _frames.size()) { - result = _frames[_frameIndex]; - ++_frameIndex; - } - return result; -} void BufferClip::addFrame(FrameConstPointer newFrame) { if (newFrame->timeOffset < 0.0f) { throw std::runtime_error("Frames may not have negative time offsets"); } - auto currentPosition = position(); - seek(newFrame->timeOffset); - { - Locker lock(_mutex); - _frames.insert(_frames.begin() + _frameIndex, newFrame); - } - seek(currentPosition); -} - -void BufferClip::skipFrame() { Locker lock(_mutex); - if (_frameIndex < _frames.size()) { - ++_frameIndex; + auto itr = std::lower_bound(_frames.begin(), _frames.end(), newFrame->timeOffset, + [](const Frame& a, Frame::Time b)->bool { + return a.timeOffset < b; + } + ); + + auto newFrameIndex 
= itr - _frames.begin(); + //qDebug() << "Adding frame with time offset " << newFrame->timeOffset << " @ index " << newFrameIndex; + _frames.insert(_frames.begin() + newFrameIndex, Frame(*newFrame)); +} + +// Internal only function, needs no locking +FrameConstPointer BufferClip::readFrame(size_t frameIndex) const { + FramePointer result; + if (frameIndex < _frames.size()) { + result = std::make_shared(_frames[frameIndex]); } + return result; } - -void BufferClip::reset() { - Locker lock(_mutex); - _frameIndex = 0; -} - -Time BufferClip::duration() const { - if (_frames.empty()) { - return 0; - } - return (*_frames.rbegin())->timeOffset; -} - -size_t BufferClip::frameCount() const { - return _frames.size(); -} - diff --git a/libraries/recording/src/recording/impl/BufferClip.h b/libraries/recording/src/recording/impl/BufferClip.h index ce81dac730..af8a64716b 100644 --- a/libraries/recording/src/recording/impl/BufferClip.h +++ b/libraries/recording/src/recording/impl/BufferClip.h @@ -10,33 +10,22 @@ #ifndef hifi_Recording_Impl_BufferClip_h #define hifi_Recording_Impl_BufferClip_h -#include "../Clip.h" +#include "ArrayClip.h" -#include +#include namespace recording { -class BufferClip : public Clip { +class BufferClip : public ArrayClip { public: using Pointer = std::shared_ptr; - virtual ~BufferClip() {} - - virtual Time duration() const override; - virtual size_t frameCount() const override; - - virtual void seek(Time offset) override; - virtual Time position() const override; - - virtual FrameConstPointer peekFrame() const override; - virtual FrameConstPointer nextFrame() override; - virtual void skipFrame() override; + virtual QString getName() const override; virtual void addFrame(FrameConstPointer) override; private: - virtual void reset() override; - - std::vector _frames; + virtual FrameConstPointer readFrame(size_t index) const override; + QString _name { QUuid().toString() }; mutable size_t _frameIndex { 0 }; }; diff --git 
a/libraries/recording/src/recording/impl/FileClip.cpp b/libraries/recording/src/recording/impl/FileClip.cpp index b8e1eb26fa..80aaac4c87 100644 --- a/libraries/recording/src/recording/impl/FileClip.cpp +++ b/libraries/recording/src/recording/impl/FileClip.cpp @@ -18,15 +18,15 @@ #include "../Frame.h" #include "../Logging.h" +#include "BufferClip.h" using namespace recording; -static const qint64 MINIMUM_FRAME_SIZE = sizeof(FrameType) + sizeof(Time) + sizeof(FrameSize); - +static const qint64 MINIMUM_FRAME_SIZE = sizeof(FrameType) + sizeof(Frame::Time) + sizeof(FrameSize); static const QString FRAME_TYPE_MAP = QStringLiteral("frameTypes"); +static const QString FRAME_COMREPSSION_FLAG = QStringLiteral("compressed"); -using FrameHeaderList = std::list; using FrameTranslationMap = QMap; FrameTranslationMap parseTranslationMap(const QJsonDocument& doc) { @@ -49,19 +49,18 @@ FrameTranslationMap parseTranslationMap(const QJsonDocument& doc) { } -FrameHeaderList parseFrameHeaders(uchar* const start, const qint64& size) { - using FrameHeader = FileClip::FrameHeader; - FrameHeaderList results; +FileFrameHeaderList parseFrameHeaders(uchar* const start, const qint64& size) { + FileFrameHeaderList results; auto current = start; auto end = current + size; // Read all the frame headers // FIXME move to Frame::readHeader? 
while (end - current >= MINIMUM_FRAME_SIZE) { - FrameHeader header; + FileFrameHeader header; memcpy(&(header.type), current, sizeof(FrameType)); current += sizeof(FrameType); - memcpy(&(header.timeOffset), current, sizeof(Time)); - current += sizeof(Time); + memcpy(&(header.timeOffset), current, sizeof(Frame::Time)); + current += sizeof(Frame::Time); memcpy(&(header.size), current, sizeof(FrameSize)); current += sizeof(FrameSize); header.fileOffset = current - start; @@ -72,6 +71,11 @@ FrameHeaderList parseFrameHeaders(uchar* const start, const qint64& size) { current += header.size; results.push_back(header); } + qDebug() << "Parsed source data into " << results.size() << " frames"; + int i = 0; + for (const auto& frameHeader : results) { + qDebug() << "Frame " << i++ << " time " << frameHeader.timeOffset; + } return results; } @@ -89,7 +93,7 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) { return; } - FrameHeaderList parsedFrameHeaders = parseFrameHeaders(_map, size); + auto parsedFrameHeaders = parseFrameHeaders(_map, size); // Verify that at least one frame exists and that the first frame is a header if (0 == parsedFrameHeaders.size()) { @@ -110,6 +114,11 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) { _fileHeader = QJsonDocument::fromBinaryData(fileHeaderData); } + // Check for compression + { + _compressed = _fileHeader.object()[FRAME_COMREPSSION_FLAG].toBool(); + } + // Find the type enum translation map and fix up the frame headers { FrameTranslationMap translationMap = parseTranslationMap(_fileHeader); @@ -120,19 +129,25 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) { qDebug() << translationMap; // Update the loaded headers with the frame data - _frameHeaders.reserve(parsedFrameHeaders.size()); + _frames.reserve(parsedFrameHeaders.size()); for (auto& frameHeader : parsedFrameHeaders) { if (!translationMap.contains(frameHeader.type)) { continue; } frameHeader.type = translationMap[frameHeader.type]; - 
_frameHeaders.push_back(frameHeader); + _frames.push_back(frameHeader); } } + +} + + +QString FileClip::getName() const { + return _file.fileName(); } // FIXME move to frame? -bool writeFrame(QIODevice& output, const Frame& frame) { +bool writeFrame(QIODevice& output, const Frame& frame, bool compressed = true) { if (frame.type == Frame::TYPE_INVALID) { qWarning() << "Attempting to write invalid frame"; return true; @@ -142,17 +157,24 @@ bool writeFrame(QIODevice& output, const Frame& frame) { if (written != sizeof(FrameType)) { return false; } - written = output.write((char*)&(frame.timeOffset), sizeof(Time)); - if (written != sizeof(Time)) { + //qDebug() << "Writing frame with time offset " << frame.timeOffset; + written = output.write((char*)&(frame.timeOffset), sizeof(Frame::Time)); + if (written != sizeof(Frame::Time)) { return false; } - uint16_t dataSize = frame.data.size(); + QByteArray frameData = frame.data; + if (compressed) { + frameData = qCompress(frameData); + } + + uint16_t dataSize = frameData.size(); written = output.write((char*)&dataSize, sizeof(FrameSize)); if (written != sizeof(uint16_t)) { return false; } + if (dataSize != 0) { - written = output.write(frame.data); + written = output.write(frameData); if (written != dataSize) { return false; } @@ -161,7 +183,8 @@ bool writeFrame(QIODevice& output, const Frame& frame) { } bool FileClip::write(const QString& fileName, Clip::Pointer clip) { - qCDebug(recordingLog) << "Writing clip to file " << fileName; + // FIXME need to move this to a different thread + //qCDebug(recordingLog) << "Writing clip to file " << fileName << " with " << clip->frameCount() << " frames"; if (0 == clip->frameCount()) { return false; @@ -182,10 +205,14 @@ bool FileClip::write(const QString& fileName, Clip::Pointer clip) { QJsonObject rootObject; rootObject.insert(FRAME_TYPE_MAP, frameTypeObj); + // Always mark new files as compressed + rootObject.insert(FRAME_COMREPSSION_FLAG, true); QByteArray headerFrameData = 
QJsonDocument(rootObject).toBinaryData(); - if (!writeFrame(outputFile, Frame({ Frame::TYPE_HEADER, 0, headerFrameData }))) { + // Never compress the header frame + if (!writeFrame(outputFile, Frame({ Frame::TYPE_HEADER, 0, headerFrameData }), false)) { return false; } + } clip->seek(0); @@ -207,73 +234,24 @@ FileClip::~FileClip() { } } -void FileClip::seek(Time offset) { - Locker lock(_mutex); - auto itr = std::lower_bound(_frameHeaders.begin(), _frameHeaders.end(), offset, - [](const FrameHeader& a, Time b)->bool { - return a.timeOffset < b; - } - ); - _frameIndex = itr - _frameHeaders.begin(); -} - -Time FileClip::position() const { - Locker lock(_mutex); - Time result = INVALID_TIME; - if (_frameIndex < _frameHeaders.size()) { - result = _frameHeaders[_frameIndex].timeOffset; - } - return result; -} - -FramePointer FileClip::readFrame(uint32_t frameIndex) const { +// Internal only function, needs no locking +FrameConstPointer FileClip::readFrame(size_t frameIndex) const { FramePointer result; - if (frameIndex < _frameHeaders.size()) { + if (frameIndex < _frames.size()) { result = std::make_shared(); - const FrameHeader& header = _frameHeaders[frameIndex]; + const auto& header = _frames[frameIndex]; result->type = header.type; result->timeOffset = header.timeOffset; if (header.size) { result->data.insert(0, reinterpret_cast(_map)+header.fileOffset, header.size); + if (_compressed) { + result->data = qUncompress(result->data); + } } } return result; } -FrameConstPointer FileClip::peekFrame() const { - Locker lock(_mutex); - return readFrame(_frameIndex); -} - -FrameConstPointer FileClip::nextFrame() { - Locker lock(_mutex); - auto result = readFrame(_frameIndex); - if (_frameIndex < _frameHeaders.size()) { - ++_frameIndex; - } - return result; -} - -void FileClip::skipFrame() { - ++_frameIndex; -} - -void FileClip::reset() { - _frameIndex = 0; -} - void FileClip::addFrame(FrameConstPointer) { throw std::runtime_error("File clips are read only"); } - -Time 
FileClip::duration() const { - if (_frameHeaders.empty()) { - return 0; - } - return _frameHeaders.rbegin()->timeOffset; -} - -size_t FileClip::frameCount() const { - return _frameHeaders.size(); -} - diff --git a/libraries/recording/src/recording/impl/FileClip.h b/libraries/recording/src/recording/impl/FileClip.h index 18c62936c1..f103a9aca6 100644 --- a/libraries/recording/src/recording/impl/FileClip.h +++ b/libraries/recording/src/recording/impl/FileClip.h @@ -10,31 +10,34 @@ #ifndef hifi_Recording_Impl_FileClip_h #define hifi_Recording_Impl_FileClip_h -#include "../Clip.h" +#include "ArrayClip.h" + +#include #include #include -#include +#include "../Frame.h" namespace recording { -class FileClip : public Clip { +struct FileFrameHeader : public FrameHeader { + FrameType type; + Frame::Time timeOffset; + uint16_t size; + quint64 fileOffset; +}; + +using FileFrameHeaderList = std::list; + +class FileClip : public ArrayClip { public: using Pointer = std::shared_ptr; FileClip(const QString& file); virtual ~FileClip(); - virtual Time duration() const override; - virtual size_t frameCount() const override; - - virtual void seek(Time offset) override; - virtual Time position() const override; - - virtual FrameConstPointer peekFrame() const override; - virtual FrameConstPointer nextFrame() override; - virtual void skipFrame() override; + virtual QString getName() const override; virtual void addFrame(FrameConstPointer) override; const QJsonDocument& getHeader() { @@ -43,27 +46,12 @@ public: static bool write(const QString& filePath, Clip::Pointer clip); - struct FrameHeader { - FrameType type; - Time timeOffset; - uint16_t size; - quint64 fileOffset; - }; - private: - - virtual void reset() override; - - - using FrameHeaderVector = std::vector; - - FramePointer readFrame(uint32_t frameIndex) const; - + virtual FrameConstPointer readFrame(size_t index) const override; QJsonDocument _fileHeader; QFile _file; - uint32_t _frameIndex { 0 }; uchar* _map { nullptr }; - 
FrameHeaderVector _frameHeaders; + bool _compressed { true }; }; } diff --git a/libraries/recording/src/recording/impl/OffsetClip.cpp b/libraries/recording/src/recording/impl/OffsetClip.cpp index bccd48d6c8..afca9e0b7a 100644 --- a/libraries/recording/src/recording/impl/OffsetClip.cpp +++ b/libraries/recording/src/recording/impl/OffsetClip.cpp @@ -22,15 +22,15 @@ using namespace recording; -OffsetClip::OffsetClip(const Clip::Pointer& wrappedClip, Time offset) - : WrapperClip(wrappedClip), _offset(offset) { } +OffsetClip::OffsetClip(const Clip::Pointer& wrappedClip, float offset) + : WrapperClip(wrappedClip), _offset(Frame::secondsToFrameTime(offset)) { } -void OffsetClip::seek(Time offset) { - _wrappedClip->seek(offset - _offset); +void OffsetClip::seekFrameTime(Frame::Time offset) { + _wrappedClip->seekFrameTime(offset - _offset); } -Time OffsetClip::position() const { - return _wrappedClip->position() + _offset; +Frame::Time OffsetClip::positionFrameTime() const { + return _wrappedClip->positionFrameTime() + _offset; } FrameConstPointer OffsetClip::peekFrame() const { @@ -45,7 +45,18 @@ FrameConstPointer OffsetClip::nextFrame() { return result; } -Time OffsetClip::duration() const { +float OffsetClip::duration() const { return _wrappedClip->duration() + _offset; } +QString OffsetClip::getName() const { + return _wrappedClip->getName(); +} + +Clip::Pointer OffsetClip::duplicate() const { + return std::make_shared( + _wrappedClip->duplicate(), Frame::frameTimeToSeconds(_offset)); +} + + + diff --git a/libraries/recording/src/recording/impl/OffsetClip.h b/libraries/recording/src/recording/impl/OffsetClip.h index 1c6b005b65..40301adf59 100644 --- a/libraries/recording/src/recording/impl/OffsetClip.h +++ b/libraries/recording/src/recording/impl/OffsetClip.h @@ -18,18 +18,20 @@ class OffsetClip : public WrapperClip { public: using Pointer = std::shared_ptr; - OffsetClip(const Clip::Pointer& wrappedClip, Time offset); - virtual ~OffsetClip(); + OffsetClip(const 
Clip::Pointer& wrappedClip, float offset); - virtual Time duration() const override; - virtual void seek(Time offset) override; - virtual Time position() const override; + virtual QString getName() const override; + + virtual Clip::Pointer duplicate() const override; + virtual float duration() const override; + virtual void seekFrameTime(Frame::Time offset) override; + virtual Frame::Time positionFrameTime() const override; virtual FrameConstPointer peekFrame() const override; virtual FrameConstPointer nextFrame() override; protected: - const Time _offset; + const Frame::Time _offset; }; } diff --git a/libraries/recording/src/recording/impl/WrapperClip.cpp b/libraries/recording/src/recording/impl/WrapperClip.cpp index f2bbacabf1..955dd47a5e 100644 --- a/libraries/recording/src/recording/impl/WrapperClip.cpp +++ b/libraries/recording/src/recording/impl/WrapperClip.cpp @@ -22,11 +22,11 @@ using namespace recording; WrapperClip::WrapperClip(const Clip::Pointer& wrappedClip) : _wrappedClip(wrappedClip) { } -void WrapperClip::seek(Time offset) { - _wrappedClip->seek(offset); +void WrapperClip::seekFrameTime(Frame::Time offset) { + _wrappedClip->seekFrameTime(offset); } -Time WrapperClip::position() const { +Frame::Time WrapperClip::positionFrameTime() const { return _wrappedClip->position(); } @@ -50,7 +50,7 @@ void WrapperClip::addFrame(FrameConstPointer) { throw std::runtime_error("Wrapper clips are read only"); } -Time WrapperClip::duration() const { +float WrapperClip::duration() const { return _wrappedClip->duration(); } diff --git a/libraries/recording/src/recording/impl/WrapperClip.h b/libraries/recording/src/recording/impl/WrapperClip.h index 3fe013e0ed..77a484b5f7 100644 --- a/libraries/recording/src/recording/impl/WrapperClip.h +++ b/libraries/recording/src/recording/impl/WrapperClip.h @@ -24,13 +24,12 @@ public: using Pointer = std::shared_ptr; WrapperClip(const Clip::Pointer& wrappedClip); - virtual ~WrapperClip(); - virtual Time duration() const override; + 
virtual float duration() const override; virtual size_t frameCount() const override; - virtual void seek(Time offset) override; - virtual Time position() const override; + virtual void seekFrameTime(Frame::Time offset) override; + virtual Frame::Time positionFrameTime() const override; virtual FrameConstPointer peekFrame() const override; virtual FrameConstPointer nextFrame() override;