Mirror of https://github.com/overte-org/overte.git (synced 2025-04-12 19:58:15 +02:00)
Make recording and playback work in interface, playback in agent
commit 5b8047ded4 (parent 2636e43fab)
12 changed files with 202 additions and 203 deletions
@@ -27,12 +27,14 @@
#include <recording/Deck.h>
#include <recording/Recorder.h>
#include <recording/Frame.h>

#include <WebSocketServerClass.h>
#include <EntityScriptingInterface.h> // TODO: consider moving to scriptengine.h

#include "avatars/ScriptableAvatar.h"
#include "RecordingScriptingInterface.h"
#include "AbstractAudioInterface.h"

#include "Agent.h"
@@ -183,8 +185,22 @@ void Agent::run() {
    scriptedAvatar.setSkeletonModelURL(QUrl());

    // give this AvatarData object to the script engine
    auto scriptedAvatarPtr = &scriptedAvatar;
    setAvatarData(&scriptedAvatar, "Avatar");

    using namespace recording;
    static const FrameType AUDIO_FRAME_TYPE = Frame::registerFrameType(AudioConstants::AUDIO_FRAME_NAME);
    Frame::registerFrameHandler(AUDIO_FRAME_TYPE, [this, &scriptedAvatar](Frame::ConstPointer frame) {
        const QByteArray& audio = frame->data;
        static quint16 audioSequenceNumber{ 0 };
        Transform audioTransform;
        audioTransform.setTranslation(scriptedAvatar.getPosition());
        audioTransform.setRotation(scriptedAvatar.getOrientation());
        AbstractAudioInterface::emitAudioPacket(audio.data(), audio.size(), audioSequenceNumber, audioTransform, PacketType::MicrophoneAudioNoEcho);
    });

    auto avatarHashMap = DependencyManager::set<AvatarHashMap>();
    _scriptEngine->registerGlobalObject("AvatarList", avatarHashMap.data());
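Editor's note: a minimal sketch of how agent playback would exercise the handler registered above, assuming the recording library's Deck and Clip API (Clip::fromFile and Deck::queueClip are assumed names, not shown in this diff). Once a clip is queued and playing, each frame of AUDIO_FRAME_TYPE reaches the lambda, which forwards it to the audio mixer.

// Hedged sketch, not part of the commit: Clip::fromFile and Deck::queueClip
// are assumptions about the recording library's API.
#include <recording/Deck.h>
#include <recording/Clip.h>
#include <DependencyManager.h>

void playRecordingInAgent(const QString& filename) {
    using namespace recording;
    auto deck = DependencyManager::get<Deck>();
    deck->queueClip(Clip::fromFile(filename)); // frames keep their registered FrameType ids
    deck->play();                              // audio frames now hit the handler in Agent::run()
}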
@@ -223,6 +239,9 @@ void Agent::run() {
    QObject::connect(_scriptEngine.get(), &ScriptEngine::update, this, &Agent::processAgentAvatarAndAudio);

    _scriptEngine->run();

    Frame::registerFrameHandler(AUDIO_FRAME_TYPE, [](Frame::ConstPointer frame) {});

    setFinished(true);
}
@@ -244,7 +263,6 @@ void Agent::setIsAvatar(bool isAvatar) {
    }

    if (!_isAvatar) {
        DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(nullptr);

        if (_avatarIdentityTimer) {
            _avatarIdentityTimer->stop();
@@ -263,7 +281,13 @@ void Agent::setIsAvatar(bool isAvatar) {
void Agent::setAvatarData(AvatarData* avatarData, const QString& objectName) {
    _avatarData = avatarData;
    _scriptEngine->registerGlobalObject(objectName, avatarData);
    DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(avatarData);

    using namespace recording;
    static const FrameType AVATAR_FRAME_TYPE = Frame::registerFrameType(AvatarData::FRAME_NAME);
    // FIXME how to deal with driving multiple avatars locally?
    Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this](Frame::ConstPointer frame) {
        AvatarData::fromFrame(frame->data, *_avatarData);
    });
}

void Agent::sendAvatarIdentityPacket() {
@@ -454,6 +454,17 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
    audioIO->setOrientationGetter([this]{ return getMyAvatar()->getOrientationForAudio(); });

    audioIO->moveToThread(audioThread);
    recording::Frame::registerFrameHandler(AudioConstants::AUDIO_FRAME_NAME, [=](recording::Frame::ConstPointer frame) {
        audioIO->handleRecordedAudioInput(frame->data);
    });

    connect(audioIO.data(), &AudioClient::inputReceived, [](const QByteArray& audio){
        static auto recorder = DependencyManager::get<recording::Recorder>();
        if (recorder->isRecording()) {
            static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AudioConstants::AUDIO_FRAME_NAME);
            recorder->recordFrame(AUDIO_FRAME_TYPE, audio);
        }
    });

    auto& audioScriptingInterface = AudioScriptingInterface::getInstance();
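The two connections above close the loop between live microphone input and the recorder and deck. A sketch of the resulting round trip, using only methods this commit declares on RecordingScriptingInterface:

// Hedged sketch of the record/replay round trip wired up above.
void recordThenReplay() {
    auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
    recordingInterface->startRecording();    // inputReceived -> recordFrame(AUDIO_FRAME_TYPE, audio)
    // ... capture microphone input for a while ...
    recordingInterface->stopRecording();
    recordingInterface->loadLastRecording(); // Deck plays the clip; each audio frame re-enters
                                             // AudioClient::handleRecordedAudioInput via the handler above
}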
@@ -743,10 +754,6 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
    connect(applicationUpdater.data(), &AutoUpdater::newVersionIsAvailable, dialogsManager.data(), &DialogsManager::showUpdateDialog);
    applicationUpdater->checkForUpdate();

    // Assign MyAvatar to the Recording singleton
    DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(getMyAvatar());

    // Now that the menu is initialized we can sync myAvatar with its state.
    getMyAvatar()->updateMotionBehaviorFromMenu();
@@ -841,8 +848,6 @@ void Application::cleanupBeforeQuit() {
#ifdef HAVE_IVIEWHMD
    DependencyManager::get<EyeTracker>()->setEnabled(false, true);
#endif
    DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(nullptr);

    AnimDebugDraw::getInstance().shutdown();

    // FIXME: once we move to a shared pointer for the InputDevice we should remove this naked delete:
@@ -39,10 +39,10 @@
#include <recording/Recorder.h>
#include <recording/Clip.h>
#include <recording/Frame.h>
#include "devices/Faceshift.h"

#include <RecordingScriptingInterface.h>

#include "Application.h"
#include "devices/Faceshift.h"
#include "AvatarManager.h"
#include "Environment.h"
#include "Menu.h"
@@ -127,6 +127,65 @@ MyAvatar::MyAvatar(RigPointer rig) :
    _characterController.setEnabled(true);

    _bodySensorMatrix = deriveBodyFromHMDSensor();

    using namespace recording;

    auto player = DependencyManager::get<Deck>();
    auto recorder = DependencyManager::get<Recorder>();
    connect(player.data(), &Deck::playbackStateChanged, [=] {
        if (player->isPlaying()) {
            auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
            if (recordingInterface->getPlayFromCurrentLocation()) {
                setRecordingBasis();
            }
        } else {
            clearRecordingBasis();
        }
    });

    connect(recorder.data(), &Recorder::recordingStateChanged, [=] {
        if (recorder->isRecording()) {
            setRecordingBasis();
        } else {
            clearRecordingBasis();
        }
    });

    static const recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::registerFrameType(AvatarData::FRAME_NAME);
    Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [=](Frame::ConstPointer frame) {
        static AvatarData dummyAvatar;
        AvatarData::fromFrame(frame->data, dummyAvatar);
        if (getRecordingBasis()) {
            dummyAvatar.setRecordingBasis(getRecordingBasis());
        } else {
            dummyAvatar.clearRecordingBasis();
        }

        auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
        if (recordingInterface->getPlayerUseHeadModel() && dummyAvatar.getFaceModelURL().isValid() &&
            (dummyAvatar.getFaceModelURL() != getFaceModelURL())) {
            // FIXME
            //myAvatar->setFaceModelURL(_dummyAvatar.getFaceModelURL());
        }

        if (recordingInterface->getPlayerUseSkeletonModel() && dummyAvatar.getSkeletonModelURL().isValid() &&
            (dummyAvatar.getSkeletonModelURL() != getSkeletonModelURL())) {
            // FIXME
            //myAvatar->useFullAvatarURL()
        }

        if (recordingInterface->getPlayerUseDisplayName() && dummyAvatar.getDisplayName() != getDisplayName()) {
            setDisplayName(dummyAvatar.getDisplayName());
        }

        setPosition(dummyAvatar.getPosition());
        setOrientation(dummyAvatar.getOrientation());

        // FIXME attachments
        // FIXME joints
        // FIXME head lean
        // FIXME head orientation
    });
}

MyAvatar::~MyAvatar() {
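The recording basis set and cleared in this constructor is what lets a clip recorded in one place play back at the avatar's current location when getPlayFromCurrentLocation() is true. A sketch of the intended composition; the real work happens inside AvatarData::fromFrame, and Transform::mult is an assumption about the shared Transform API:

// Hedged sketch: recorded frames are interpreted relative to the basis captured
// when playback starts, i.e. world = recordingBasis * recordedRelative.
#include <Transform.h>

Transform worldFromRecorded(const Transform& recordingBasis, const Transform& recordedRelative) {
    Transform result;
    Transform::mult(result, recordingBasis, recordedRelative); // assumed result/left/right convention
    return result;
}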
@@ -62,6 +62,7 @@ extern "C" {
#include <SettingHandle.h>
#include <SharedUtil.h>
#include <UUID.h>
#include <Transform.h>

#include "AudioInjector.h"
#include "AudioConstants.h"
@@ -839,93 +840,27 @@ void AudioClient::handleAudioInput() {
        _inputRingBuffer.shiftReadPosition(inputSamplesRequired);
    }

    emitAudioPacket(networkAudioSamples);
    }
}
    auto packetType = _shouldEchoToServer ?
        PacketType::MicrophoneAudioWithEcho : PacketType::MicrophoneAudioNoEcho;

void AudioClient::emitAudioPacket(const int16_t* audioData, PacketType packetType) {
    static std::mutex _mutex;
    using Locker = std::unique_lock<std::mutex>;

    // FIXME recorded audio isn't guaranteed to have the same stereo state
    // as the current system
    const int numNetworkBytes = _isStereoInput
        ? AudioConstants::NETWORK_FRAME_BYTES_STEREO
        : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
    const int numNetworkSamples = _isStereoInput
        ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
        : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;

    auto nodeList = DependencyManager::get<NodeList>();
    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);

    if (audioMixer && audioMixer->getActiveSocket()) {
        Locker lock(_mutex);
        if (!_audioPacket) {
            // we don't have an audioPacket yet - set that up now
            _audioPacket = NLPacket::create(PacketType::MicrophoneAudioWithEcho);
        if (_lastInputLoudness == 0) {
            packetType = PacketType::SilentAudioFrame;
        }

        glm::vec3 headPosition = _positionGetter();
        glm::quat headOrientation = _orientationGetter();
        quint8 isStereo = _isStereoInput ? 1 : 0;

        if (packetType == PacketType::Unknown) {
            if (_lastInputLoudness == 0) {
                _audioPacket->setType(PacketType::SilentAudioFrame);
            } else {
                if (_shouldEchoToServer) {
                    _audioPacket->setType(PacketType::MicrophoneAudioWithEcho);
                } else {
                    _audioPacket->setType(PacketType::MicrophoneAudioNoEcho);
                }
            }
        } else {
            _audioPacket->setType(packetType);
        }

        // reset the audio packet so we can start writing
        _audioPacket->reset();

        // write sequence number
        _audioPacket->writePrimitive(_outgoingAvatarAudioSequenceNumber);

        if (_audioPacket->getType() == PacketType::SilentAudioFrame) {
            // pack num silent samples
            quint16 numSilentSamples = numNetworkSamples;
            _audioPacket->writePrimitive(numSilentSamples);
        } else {
            // set the mono/stereo byte
            _audioPacket->writePrimitive(isStereo);
        }

        // pack the three float positions
        _audioPacket->writePrimitive(headPosition);

        // pack the orientation
        _audioPacket->writePrimitive(headOrientation);

        if (_audioPacket->getType() != PacketType::SilentAudioFrame) {
            // audio samples have already been packed (written to networkAudioSamples)
            _audioPacket->setPayloadSize(_audioPacket->getPayloadSize() + numNetworkBytes);
        }

        static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
        int16_t* const networkAudioSamples = (int16_t*)(_audioPacket->getPayload() + leadingBytes);
        memcpy(networkAudioSamples, audioData, numNetworkBytes);

        Transform audioTransform;
        audioTransform.setTranslation(_positionGetter());
        audioTransform.setRotation(_orientationGetter());
        // FIXME find a way to properly handle both playback audio and user audio concurrently
        emitAudioPacket(networkAudioSamples, numNetworkBytes, _outgoingAvatarAudioSequenceNumber, audioTransform, packetType);
        _stats.sentPacket();

        nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);

        nodeList->sendUnreliablePacket(*_audioPacket, *audioMixer);

        _outgoingAvatarAudioSequenceNumber++;
    }
}

void AudioClient::handleRecordedAudioInput(const QByteArray& audio) {
    emitAudioPacket((int16_t*)audio.data(), PacketType::MicrophoneAudioWithEcho);
    Transform audioTransform;
    audioTransform.setTranslation(_positionGetter());
    audioTransform.setRotation(_orientationGetter());
    // FIXME check a flag to see if we should echo audio?
    emitAudioPacket(audio.data(), audio.size(), _outgoingAvatarAudioSequenceNumber, audioTransform, PacketType::MicrophoneAudioWithEcho);
}

void AudioClient::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
@@ -74,9 +74,10 @@ class QAudioInput;
class QAudioOutput;
class QIODevice;

typedef struct ty_gverb ty_gverb;

class Transform;
class NLPacket;

class AudioClient : public AbstractAudioInterface, public Dependency {
@@ -212,7 +213,6 @@ protected:
    }

private:
    void emitAudioPacket(const int16_t* audioData, PacketType packetType = PacketType::Unknown);
    void outputFormatChanged();

    QByteArray firstInputFrame;
@@ -319,8 +319,6 @@ private:
    void checkDevices();

    bool _hasReceivedFirstPacket = false;

    std::unique_ptr<NLPacket> _audioPacket;
};
libraries/audio/src/AbstractAudioInterface.cpp (new file, 61 lines)

@@ -0,0 +1,61 @@
//
//  Created by Bradley Austin Davis on 2015/11/18
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "AbstractAudioInterface.h"

#include <QtCore/QSharedPointer>

#include <Node.h>
#include <NodeType.h>
#include <DependencyManager.h>
#include <NodeList.h>
#include <NLPacket.h>
#include <Transform.h>

#include "AudioConstants.h"

void AbstractAudioInterface::emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber, const Transform& transform, PacketType packetType) {
    static std::mutex _mutex;
    using Locker = std::unique_lock<std::mutex>;
    auto nodeList = DependencyManager::get<NodeList>();
    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
    if (audioMixer && audioMixer->getActiveSocket()) {
        Locker lock(_mutex);
        static std::unique_ptr<NLPacket> audioPacket = NLPacket::create(PacketType::Unknown);
        quint8 isStereo = bytes == AudioConstants::NETWORK_FRAME_BYTES_STEREO ? 1 : 0;
        audioPacket->setType(packetType);
        // reset the audio packet so we can start writing
        audioPacket->reset();
        // write sequence number
        audioPacket->writePrimitive(sequenceNumber++);
        if (audioPacket->getType() == PacketType::SilentAudioFrame) {
            // pack num silent samples
            quint16 numSilentSamples = isStereo ?
                AudioConstants::NETWORK_FRAME_SAMPLES_STEREO :
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
            audioPacket->writePrimitive(numSilentSamples);
        } else {
            // set the mono/stereo byte
            audioPacket->writePrimitive(isStereo);
        }

        // pack the three float positions
        audioPacket->writePrimitive(transform.getTranslation());
        // pack the orientation
        audioPacket->writePrimitive(transform.getRotation());

        if (audioPacket->getType() != PacketType::SilentAudioFrame) {
            // audio samples have already been packed (written to networkAudioSamples)
            audioPacket->setPayloadSize(audioPacket->getPayloadSize() + bytes);
            static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
            memcpy(audioPacket->getPayload() + leadingBytes, audioData, bytes);
        }
        nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
        nodeList->sendUnreliablePacket(*audioPacket, *audioMixer);
    }
}
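For reference, the header written ahead of the samples above comes to 31 bytes, matching the leadingBytes constant; a sketch of the payload layout in write order for a non-silent frame:

// Hedged sketch (editor's note), derived from the writePrimitive calls above:
//   offset  0: quint16   sequence number   (2 bytes)
//   offset  2: quint8    mono/stereo flag  (1 byte)
//   offset  3: glm::vec3 position          (3 floats, 12 bytes)
//   offset 15: glm::quat orientation       (4 floats, 16 bytes)
//   offset 31: int16_t   audio samples
#include <QtGlobal>
static_assert(sizeof(quint16) + sizeof(quint8) + 3 * sizeof(float) + 4 * sizeof(float) == 31,
              "sums to leadingBytes as computed in AbstractAudioInterface.cpp");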
@@ -15,16 +15,21 @@
#include <QtCore/QObject>
#include <QtMultimedia/qaudiooutput.h>

#include <udt/PacketHeaders.h>

#include "AudioInjectorOptions.h"

class AudioInjector;
class AudioInjectorLocalBuffer;
class Transform;

class AbstractAudioInterface : public QObject {
    Q_OBJECT
public:
    AbstractAudioInterface(QObject* parent = 0) : QObject(parent) {};

    static void emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber, const Transform& transform, PacketType packetType);

public slots:
    virtual bool outputLocalInjector(bool isStereo, AudioInjector* injector) = 0;
@@ -20,7 +20,9 @@ namespace AudioConstants {
    const int SAMPLE_RATE = 24000;

    typedef int16_t AudioSample;

    static const char* AUDIO_FRAME_NAME = "com.highfidelity.recording.Audio";

    const int NETWORK_FRAME_BYTES_STEREO = 1024;
    const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / sizeof(AudioSample);
    const int NETWORK_FRAME_BYTES_PER_CHANNEL = 512;
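The stereo and per-channel constants above are linked by the 2-byte sample size; a quick sanity check of the arithmetic:

// Hedged sketch: arithmetic implied by the constants above (AudioSample = int16_t).
#include <cstdint>
static_assert(sizeof(int16_t) == 2, "network audio uses 16-bit PCM samples");
static_assert(1024 / sizeof(int16_t) == 512, "a 1024-byte stereo frame holds 512 samples");
static_assert(512 / sizeof(int16_t) == 256, "a 512-byte mono frame holds 256 samples");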
@@ -131,6 +131,11 @@ Frame::Handler Frame::registerFrameHandler(FrameType type, Handler handler) {
    return result;
}

Frame::Handler Frame::registerFrameHandler(const QString& frameTypeName, Handler handler) {
    auto frameType = registerFrameType(frameTypeName);
    return registerFrameHandler(frameType, handler);
}

void Frame::handleFrame(const Frame::ConstPointer& frame) {
    Handler handler;
    {
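The overload added here collapses the register-type-then-register-handler two-step into one call; Application.cpp in this same commit uses it with AudioConstants::AUDIO_FRAME_NAME. A minimal usage sketch:

// Hedged sketch: registering a handler directly by frame-type name.
auto previous = recording::Frame::registerFrameHandler(
    "com.highfidelity.recording.Audio",
    [](recording::Frame::ConstPointer frame) {
        // frame->data holds the recorded payload for this frame type
    });
// the returned Handler is presumably the one previously installed
// (see `return result` in the FrameType overload above)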
@@ -55,9 +55,10 @@ public:
        : FrameHeader(type, timeOffset), data(data) { }

    static FrameType registerFrameType(const QString& frameTypeName);
    static Handler registerFrameHandler(FrameType type, Handler handler);
    static Handler registerFrameHandler(const QString& frameTypeName, Handler handler);
    static QMap<QString, FrameType> getFrameTypes();
    static QMap<FrameType, QString> getFrameTypeNames();
    static Handler registerFrameHandler(FrameType type, Handler handler);
    static void handleFrame(const ConstPointer& frame);
};
@@ -15,43 +15,16 @@
#include <recording/Clip.h>
#include <recording/Frame.h>
#include <NumericalConstants.h>
// FIXME
//#include <AudioClient.h>
#include <AudioConstants.h>
#include <Transform.h>

#include "ScriptEngineLogging.h"

typedef int16_t AudioSample;

using namespace recording;

// FIXME move to somewhere audio related?
static const QString AUDIO_FRAME_NAME = "com.highfidelity.recording.Audio";

RecordingScriptingInterface::RecordingScriptingInterface() {
    static const recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::registerFrameType(AvatarData::FRAME_NAME);
    // FIXME how to deal with driving multiple avatars locally?
    Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this](Frame::ConstPointer frame) {
        processAvatarFrame(frame);
    });

    static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AUDIO_FRAME_NAME);
    Frame::registerFrameHandler(AUDIO_FRAME_TYPE, [this](Frame::ConstPointer frame) {
        processAudioFrame(frame);
    });

    _player = DependencyManager::get<Deck>();
    _recorder = DependencyManager::get<Recorder>();

    // FIXME: disabling sound
    // auto audioClient = DependencyManager::get<AudioClient>();
    // connect(audioClient.data(), &AudioClient::inputReceived, this, &RecordingScriptingInterface::processAudioInput);
}

void RecordingScriptingInterface::setControlledAvatar(AvatarData* avatar) {
    _controlledAvatar = avatar;
}

bool RecordingScriptingInterface::isPlaying() const {
@@ -92,12 +65,6 @@ void RecordingScriptingInterface::startPlaying() {
        return;
    }

    // Playback from the current position
    if (_playFromCurrentLocation && _controlledAvatar) {
        _dummyAvatar.setRecordingBasis(std::make_shared<Transform>(_controlledAvatar->getTransform()));
    } else {
        _dummyAvatar.clearRecordingBasis();
    }
    _player->play();
}
@@ -176,12 +143,6 @@ void RecordingScriptingInterface::startRecording() {
        return;
    }

    _recordingEpoch = Frame::epochForFrameTime(0);

    if (_controlledAvatar) {
        _controlledAvatar->setRecordingBasis();
    }

    _recorder->start();
}
@@ -189,10 +150,6 @@ void RecordingScriptingInterface::stopRecording() {
    _recorder->stop();
    _lastClip = _recorder->getClip();
    _lastClip->seek(0);

    if (_controlledAvatar) {
        _controlledAvatar->clearRecordingBasis();
    }
}

void RecordingScriptingInterface::saveRecording(const QString& filename) {
@@ -225,50 +182,3 @@ void RecordingScriptingInterface::loadLastRecording() {
    _player->play();
}

void RecordingScriptingInterface::processAvatarFrame(const Frame::ConstPointer& frame) {
    Q_ASSERT(QThread::currentThread() == thread());

    if (!_controlledAvatar) {
        return;
    }

    AvatarData::fromFrame(frame->data, _dummyAvatar);

    if (_useHeadModel && _dummyAvatar.getFaceModelURL().isValid() &&
        (_dummyAvatar.getFaceModelURL() != _controlledAvatar->getFaceModelURL())) {
        // FIXME
        //myAvatar->setFaceModelURL(_dummyAvatar.getFaceModelURL());
    }

    if (_useSkeletonModel && _dummyAvatar.getSkeletonModelURL().isValid() &&
        (_dummyAvatar.getSkeletonModelURL() != _controlledAvatar->getSkeletonModelURL())) {
        // FIXME
        //myAvatar->useFullAvatarURL()
    }

    if (_useDisplayName && _dummyAvatar.getDisplayName() != _controlledAvatar->getDisplayName()) {
        _controlledAvatar->setDisplayName(_dummyAvatar.getDisplayName());
    }

    _controlledAvatar->setPosition(_dummyAvatar.getPosition());
    _controlledAvatar->setOrientation(_dummyAvatar.getOrientation());

    // FIXME attachments
    // FIXME joints
    // FIXME head lean
    // FIXME head orientation
}

void RecordingScriptingInterface::processAudioInput(const QByteArray& audio) {
    if (_recorder->isRecording()) {
        static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AUDIO_FRAME_NAME);
        _recorder->recordFrame(AUDIO_FRAME_TYPE, audio);
    }
}

void RecordingScriptingInterface::processAudioFrame(const recording::FrameConstPointer& frame) {
    // auto audioClient = DependencyManager::get<AudioClient>();
    // audioClient->handleRecordedAudioInput(frame->data);
}
@@ -10,13 +10,13 @@
#define hifi_RecordingScriptingInterface_h

#include <atomic>
#include <mutex>

#include <QObject>

#include <DependencyManager.h>
#include <recording/Forward.h>
#include <recording/Frame.h>
#include <AvatarData.h>

class RecordingScriptingInterface : public QObject, public Dependency {
    Q_OBJECT
@@ -24,8 +24,6 @@ class RecordingScriptingInterface : public QObject, public Dependency {
public:
    RecordingScriptingInterface();

    void setControlledAvatar(AvatarData* avatar);

public slots:
    void loadRecording(const QString& filename);
@@ -41,12 +39,19 @@ public slots:
    void setPlayerVolume(float volume);
    void setPlayerAudioOffset(float audioOffset);
    void setPlayerTime(float time);
    void setPlayFromCurrentLocation(bool playFromCurrentLocation);
    void setPlayerLoop(bool loop);

    void setPlayerUseDisplayName(bool useDisplayName);
    void setPlayerUseAttachments(bool useAttachments);
    void setPlayerUseHeadModel(bool useHeadModel);
    void setPlayerUseSkeletonModel(bool useSkeletonModel);
    void setPlayFromCurrentLocation(bool playFromCurrentLocation);

    bool getPlayerUseDisplayName() { return _useDisplayName; }
    bool getPlayerUseAttachments() { return _useAttachments; }
    bool getPlayerUseHeadModel() { return _useHeadModel; }
    bool getPlayerUseSkeletonModel() { return _useSkeletonModel; }
    bool getPlayFromCurrentLocation() { return _playFromCurrentLocation; }

    void startRecording();
    void stopRecording();
@@ -57,22 +62,13 @@ public slots:
    void saveRecording(const QString& filename);
    void loadLastRecording();

signals:
    void playbackStateChanged();
    // Should this occur for any frame or just for seek calls?
    void playbackPositionChanged();
    void looped();

private:
protected:
    using Mutex = std::recursive_mutex;
    using Locker = std::unique_lock<Mutex>;
    using Flag = std::atomic<bool>;
    void processAvatarFrame(const recording::FrameConstPointer& frame);
    void processAudioFrame(const recording::FrameConstPointer& frame);
    void processAudioInput(const QByteArray& audioData);

    QSharedPointer<recording::Deck> _player;
    QSharedPointer<recording::Recorder> _recorder;
    quint64 _recordingEpoch { 0 };

    Flag _playFromCurrentLocation { true };
    Flag _useDisplayName { false };
@@ -80,8 +76,6 @@ private:
    Flag _useAttachments { false };
    Flag _useSkeletonModel { false };
    recording::ClipPointer _lastClip;
    AvatarData _dummyAvatar;
    AvatarData* _controlledAvatar;
};

#endif // hifi_RecordingScriptingInterface_h