
Merge pull request from jherico/dargo

Make recording and playback work in interface, playback in agent
samcake 2015-11-18 17:52:09 -08:00
commit 74beeefe06
19 changed files with 274 additions and 241 deletions
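The core of the change: audio packet assembly moves out of AudioClient into the static AbstractAudioInterface::emitAudioPacket(), so the interface client and the assignment-client agent share one send path for live and recorded audio. A minimal caller sketch, using only names that appear in this diff (scriptedAvatar and audio are placeholders for the caller's state):

// Sketch: sending one frame of recorded audio through the new shared helper.
quint16 audioSequenceNumber = 0;    // incremented by the helper on each send
Transform audioTransform;
audioTransform.setTranslation(scriptedAvatar->getPosition());
audioTransform.setRotation(scriptedAvatar->getOrientation());
AbstractAudioInterface::emitAudioPacket(audio.data(), audio.size(),
    audioSequenceNumber, audioTransform, PacketType::MicrophoneAudioNoEcho);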


@@ -27,12 +27,14 @@
#include <recording/Deck.h>
#include <recording/Recorder.h>
#include <recording/Frame.h>
#include <WebSocketServerClass.h>
#include <EntityScriptingInterface.h> // TODO: consider moving to scriptengine.h
#include "avatars/ScriptableAvatar.h"
#include "RecordingScriptingInterface.h"
#include "AbstractAudioInterface.h"
#include "Agent.h"
@@ -170,15 +172,37 @@ void Agent::run() {
_scriptEngine->setParent(this); // be the parent of the script engine so it gets moved when we do
// setup an Avatar for the script to use
ScriptableAvatar scriptedAvatar(_scriptEngine.get());
scriptedAvatar.setForceFaceTrackerConnected(true);
auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
connect(_scriptEngine.get(), SIGNAL(update(float)), scriptedAvatar.data(), SLOT(update(float)), Qt::ConnectionType::QueuedConnection);
scriptedAvatar->setForceFaceTrackerConnected(true);
// call model URL setters with empty URLs so our avatar, if user, will have the default models
scriptedAvatar.setFaceModelURL(QUrl());
scriptedAvatar.setSkeletonModelURL(QUrl());
scriptedAvatar->setFaceModelURL(QUrl());
scriptedAvatar->setSkeletonModelURL(QUrl());
// give this AvatarData object to the script engine
setAvatarData(&scriptedAvatar, "Avatar");
_scriptEngine->registerGlobalObject("Avatar", scriptedAvatar.data());
using namespace recording;
static const FrameType AVATAR_FRAME_TYPE = Frame::registerFrameType(AvatarData::FRAME_NAME);
// FIXME how to deal with driving multiple avatars locally?
Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this, scriptedAvatar](Frame::ConstPointer frame) {
AvatarData::fromFrame(frame->data, *scriptedAvatar);
});
using namespace recording;
static const FrameType AUDIO_FRAME_TYPE = Frame::registerFrameType(AudioConstants::AUDIO_FRAME_NAME);
Frame::registerFrameHandler(AUDIO_FRAME_TYPE, [this, &scriptedAvatar](Frame::ConstPointer frame) {
const QByteArray& audio = frame->data;
static quint16 audioSequenceNumber{ 0 };
Transform audioTransform;
audioTransform.setTranslation(scriptedAvatar->getPosition());
audioTransform.setRotation(scriptedAvatar->getOrientation());
AbstractAudioInterface::emitAudioPacket(audio.data(), audio.size(), audioSequenceNumber, audioTransform, PacketType::MicrophoneAudioNoEcho);
});
auto avatarHashMap = DependencyManager::set<AvatarHashMap>();
_scriptEngine->registerGlobalObject("AvatarList", avatarHashMap.data());
@@ -218,6 +242,10 @@ void Agent::run() {
QObject::connect(_scriptEngine.get(), &ScriptEngine::update, this, &Agent::processAgentAvatarAndAudio);
_scriptEngine->run();
Frame::clearFrameHandler(AUDIO_FRAME_TYPE);
Frame::clearFrameHandler(AVATAR_FRAME_TYPE);
setFinished(true);
}
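For reference, the frame-handler lifecycle Agent::run() relies on above, as a compact sketch (types come from libraries/recording, shown later in this diff; the lambda body is whatever the consumer needs):

using namespace recording;
const FrameType type = Frame::registerFrameType(AvatarData::FRAME_NAME); // name -> type id
Frame::registerFrameHandler(type, [](Frame::ConstPointer frame) {
    // decode frame->data and drive the locally controlled avatar
});
// ... the script runs while the Deck streams frames into the handler ...
Frame::clearFrameHandler(type); // unhook before the consumer goes away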
@@ -239,7 +267,6 @@ void Agent::setIsAvatar(bool isAvatar) {
}
if (!_isAvatar) {
DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(nullptr);
if (_avatarIdentityTimer) {
_avatarIdentityTimer->stop();
@@ -255,34 +282,30 @@ void Agent::setIsAvatar(bool isAvatar) {
}
}
void Agent::setAvatarData(AvatarData* avatarData, const QString& objectName) {
_avatarData = avatarData;
_scriptEngine->registerGlobalObject(objectName, avatarData);
DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(avatarData);
}
void Agent::sendAvatarIdentityPacket() {
if (_isAvatar && _avatarData) {
_avatarData->sendIdentityPacket();
if (_isAvatar) {
auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
scriptedAvatar->sendIdentityPacket();
}
}
void Agent::sendAvatarBillboardPacket() {
if (_isAvatar && _avatarData) {
_avatarData->sendBillboardPacket();
if (_isAvatar) {
auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
scriptedAvatar->sendBillboardPacket();
}
}
void Agent::processAgentAvatarAndAudio(float deltaTime) {
if (!_scriptEngine->isFinished() && _isAvatar && _avatarData) {
if (!_scriptEngine->isFinished() && _isAvatar) {
auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
const int SCRIPT_AUDIO_BUFFER_SAMPLES = floor(((SCRIPT_DATA_CALLBACK_USECS * AudioConstants::SAMPLE_RATE)
/ (1000 * 1000)) + 0.5);
const int SCRIPT_AUDIO_BUFFER_BYTES = SCRIPT_AUDIO_BUFFER_SAMPLES * sizeof(int16_t);
QByteArray avatarByteArray = _avatarData->toByteArray(true, randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO);
_avatarData->doneEncoding(true);
QByteArray avatarByteArray = scriptedAvatar->toByteArray(true, randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO);
scriptedAvatar->doneEncoding(true);
static AvatarDataSequenceNumber sequenceNumber = 0;
auto avatarPacket = NLPacket::create(PacketType::AvatarData, avatarByteArray.size() + sizeof(sequenceNumber));
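A quick worked example of the audio buffer sizing above. SCRIPT_DATA_CALLBACK_USECS is not shown in this diff, so the ~60 Hz callback period below is an assumption for illustration only:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    const int SCRIPT_DATA_CALLBACK_USECS = 16667; // assumed ~60 Hz script tick
    const int SAMPLE_RATE = 24000;                // AudioConstants::SAMPLE_RATE
    // same round-to-nearest expression as the agent code above
    const int samples = (int)floor(((double)SCRIPT_DATA_CALLBACK_USECS * SAMPLE_RATE
                                    / (1000 * 1000)) + 0.5);
    printf("%d samples, %d bytes per callback\n",
           samples, (int)(samples * sizeof(int16_t))); // 400 samples, 800 bytes
    return 0;
}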
@@ -347,8 +370,8 @@ void Agent::processAgentAvatarAndAudio(float deltaTime) {
audioPacket->writePrimitive(SCRIPT_AUDIO_BUFFER_SAMPLES);
// use the orientation and position of this avatar for the source of this audio
audioPacket->writePrimitive(_avatarData->getPosition());
glm::quat headOrientation = _avatarData->getHeadOrientation();
audioPacket->writePrimitive(scriptedAvatar->getPosition());
glm::quat headOrientation = scriptedAvatar->getHeadOrientation();
audioPacket->writePrimitive(headOrientation);
} else if (nextSoundOutput) {
@@ -356,8 +379,8 @@ void Agent::processAgentAvatarAndAudio(float deltaTime) {
audioPacket->writePrimitive((quint8)0);
// use the orientation and position of this avatar for the source of this audio
audioPacket->writePrimitive(_avatarData->getPosition());
glm::quat headOrientation = _avatarData->getHeadOrientation();
audioPacket->writePrimitive(scriptedAvatar->getPosition());
glm::quat headOrientation = scriptedAvatar->getHeadOrientation();
audioPacket->writePrimitive(headOrientation);
// write the raw audio data


@@ -68,13 +68,11 @@ private:
MixedAudioStream _receivedAudioStream;
float _lastReceivedAudioLoudness;
void setAvatarData(AvatarData* avatarData, const QString& objectName);
void setAvatarSound(Sound* avatarSound) { _avatarSound = avatarSound; }
void sendAvatarIdentityPacket();
void sendAvatarBillboardPacket();
AvatarData* _avatarData = nullptr;
bool _isListeningToAudioStream = false;
Sound* _avatarSound = nullptr;
int _numAvatarSoundSentBytes = 0;


@@ -35,6 +35,7 @@
#include "AssignmentActionFactory.h"
#include "AssignmentClient.h"
#include "avatars/ScriptableAvatar.h"
const QString ASSIGNMENT_CLIENT_TARGET_NAME = "assignment-client";
const long long ASSIGNMENT_REQUEST_INTERVAL_MSECS = 1 * 1000;
@@ -48,6 +49,7 @@ AssignmentClient::AssignmentClient(Assignment::Type requestAssignmentType, QStri
QSettings::setDefaultFormat(QSettings::IniFormat);
auto scriptableAvatar = DependencyManager::set<ScriptableAvatar>();
auto addressManager = DependencyManager::set<AddressManager>();
// create a NodeList as an unassigned client, must be after addressManager


@@ -15,10 +15,6 @@
#include "ScriptableAvatar.h"
ScriptableAvatar::ScriptableAvatar(ScriptEngine* scriptEngine) : _scriptEngine(scriptEngine), _animation(NULL) {
connect(_scriptEngine, SIGNAL(update(float)), this, SLOT(update(float)));
}
// hold and priority are unused but kept so that client-side JS can run.
void ScriptableAvatar::startAnimation(const QString& url, float fps, float priority,
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) {


@@ -16,11 +16,10 @@
#include <AvatarData.h>
#include <ScriptEngine.h>
class ScriptableAvatar : public AvatarData {
class ScriptableAvatar : public AvatarData, public Dependency {
Q_OBJECT
public:
ScriptableAvatar(ScriptEngine* scriptEngine);
/// Allows scripts to run animations.
Q_INVOKABLE void startAnimation(const QString& url, float fps = 30.0f, float priority = 1.0f, bool loop = false,
bool hold = false, float firstFrame = 0.0f, float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList());
@@ -31,7 +30,6 @@ private slots:
void update(float deltatime);
private:
ScriptEngine* _scriptEngine;
AnimationPointer _animation;
AnimationDetails _animationDetails;
QStringList _maskedJoints;
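Making ScriptableAvatar a Dependency is what lets the agent code above drop its stored AvatarData* and fetch the one shared instance wherever it is needed. The pattern, pieced together from this diff (set once in the AssignmentClient constructor, get everywhere else):

auto scriptableAvatar = DependencyManager::set<ScriptableAvatar>(); // registered at startup
// ... later, from Agent::run(), the identity/billboard senders, etc.:
auto avatar = DependencyManager::get<ScriptableAvatar>();
avatar->sendIdentityPacket();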


@@ -0,0 +1,24 @@
"use strict";
var origin = {x: 512, y: 512, z: 512};
var millisecondsToWaitBeforeStarting = 2 * 1000; // To give the various servers a chance to start.
var millisecondsToWaitBeforeEnding = 30 * 1000;
Avatar.skeletonModelURL = "https://hifi-public.s3.amazonaws.com/marketplace/contents/dd03b8e3-52fb-4ab3-9ac9-3b17e00cd85d/98baa90b3b66803c5d7bd4537fca6993.fst"; //lovejoy
Avatar.displayName = "AC Avatar";
Agent.isAvatar = true;
Script.setTimeout(function () {
Avatar.position = origin;
Recording.loadRecording("d:/hifi.rec");
Recording.setPlayerLoop(true);
Recording.startPlaying();
}, millisecondsToWaitBeforeStarting);
Script.setTimeout(function () {
print("Stopping script");
Agent.isAvatar = false;
Recording.stopPlaying();
Script.stop();
}, millisecondsToWaitBeforeEnding);


@@ -454,6 +454,17 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
audioIO->setOrientationGetter([this]{ return getMyAvatar()->getOrientationForAudio(); });
audioIO->moveToThread(audioThread);
recording::Frame::registerFrameHandler(AudioConstants::AUDIO_FRAME_NAME, [=](recording::Frame::ConstPointer frame) {
audioIO->handleRecordedAudioInput(frame->data);
});
connect(audioIO.data(), &AudioClient::inputReceived, [](const QByteArray& audio){
static auto recorder = DependencyManager::get<recording::Recorder>();
if (recorder->isRecording()) {
static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AudioConstants::AUDIO_FRAME_NAME);
recorder->recordFrame(AUDIO_FRAME_TYPE, audio);
}
});
auto& audioScriptingInterface = AudioScriptingInterface::getInstance();
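One idiom worth noting in the capture hookup above: the lambda stashes the Recorder lookup and the frame-type registration in function-local statics, so each cost is paid once rather than on every audio callback. A self-contained sketch of the idiom, not the project code itself:

#include <cstdio>

static int expensiveLookup() {
    puts("lookup runs once");
    return 42;
}

void perFrameCallback() {
    static const int cached = expensiveLookup(); // initialized on first call only
    (void)cached;                                // ...then reused on every call
}

int main() {
    perFrameCallback();
    perFrameCallback(); // "lookup runs once" prints a single time
    return 0;
}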
@@ -743,10 +754,6 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
connect(applicationUpdater.data(), &AutoUpdater::newVersionIsAvailable, dialogsManager.data(), &DialogsManager::showUpdateDialog);
applicationUpdater->checkForUpdate();
// Assign MyAvatar to the Recording Singleton
DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(getMyAvatar());
// Now that the menu is initialized we can sync myAvatar with its state.
getMyAvatar()->updateMotionBehaviorFromMenu();
@@ -841,8 +848,6 @@ void Application::cleanupBeforeQuit() {
#ifdef HAVE_IVIEWHMD
DependencyManager::get<EyeTracker>()->setEnabled(false, true);
#endif
DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(nullptr);
AnimDebugDraw::getInstance().shutdown();
// FIXME: once we move to a shared pointer for the InputDevice we should remove this naked delete:


@@ -39,10 +39,10 @@
#include <recording/Recorder.h>
#include <recording/Clip.h>
#include <recording/Frame.h>
#include "devices/Faceshift.h"
#include <RecordingScriptingInterface.h>
#include "Application.h"
#include "devices/Faceshift.h"
#include "AvatarManager.h"
#include "Environment.h"
#include "Menu.h"
@@ -127,6 +127,65 @@ MyAvatar::MyAvatar(RigPointer rig) :
_characterController.setEnabled(true);
_bodySensorMatrix = deriveBodyFromHMDSensor();
using namespace recording;
auto player = DependencyManager::get<Deck>();
auto recorder = DependencyManager::get<Recorder>();
connect(player.data(), &Deck::playbackStateChanged, [=] {
if (player->isPlaying()) {
auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
if (recordingInterface->getPlayFromCurrentLocation()) {
setRecordingBasis();
}
} else {
clearRecordingBasis();
}
});
connect(recorder.data(), &Recorder::recordingStateChanged, [=] {
if (recorder->isRecording()) {
setRecordingBasis();
} else {
clearRecordingBasis();
}
});
static const recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::registerFrameType(AvatarData::FRAME_NAME);
Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [=](Frame::ConstPointer frame) {
static AvatarData dummyAvatar;
AvatarData::fromFrame(frame->data, dummyAvatar);
if (getRecordingBasis()) {
dummyAvatar.setRecordingBasis(getRecordingBasis());
} else {
dummyAvatar.clearRecordingBasis();
}
auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
if (recordingInterface->getPlayerUseHeadModel() && dummyAvatar.getFaceModelURL().isValid() &&
(dummyAvatar.getFaceModelURL() != getFaceModelURL())) {
// FIXME
//myAvatar->setFaceModelURL(_dummyAvatar.getFaceModelURL());
}
if (recordingInterface->getPlayerUseSkeletonModel() && dummyAvatar.getSkeletonModelURL().isValid() &&
(dummyAvatar.getSkeletonModelURL() != getSkeletonModelURL())) {
// FIXME
//myAvatar->useFullAvatarURL()
}
if (recordingInterface->getPlayerUseDisplayName() && dummyAvatar.getDisplayName() != getDisplayName()) {
setDisplayName(dummyAvatar.getDisplayName());
}
setPosition(dummyAvatar.getPosition());
setOrientation(dummyAvatar.getOrientation());
// FIXME attachments
// FIXME joints
// FIXME head lean
// FIXME head orientation
});
}
MyAvatar::~MyAvatar() {
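The recording-basis handling above is the heart of playback: clips store the avatar's transforms relative to a basis captured when recording starts, and swapping in the avatar's current transform as the basis at play time makes the clip run "from here". A minimal illustration of that math in glm; AvatarData does this through its recording-basis Transform, not literally this code:

#include <glm/glm.hpp>

// Re-root a recorded world transform from the record-time basis onto a
// playback-time basis (illustration of the idea only).
glm::mat4 rerootFrame(const glm::mat4& recordBasis,
                      const glm::mat4& playbackBasis,
                      const glm::mat4& frameWorldTransform) {
    glm::mat4 frameLocal = glm::inverse(recordBasis) * frameWorldTransform;
    return playbackBasis * frameLocal; // world transform to apply this frame
}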


@@ -62,6 +62,7 @@ extern "C" {
#include <SettingHandle.h>
#include <SharedUtil.h>
#include <UUID.h>
#include <Transform.h>
#include "AudioInjector.h"
#include "AudioConstants.h"
@@ -839,93 +840,27 @@ void AudioClient::handleAudioInput() {
_inputRingBuffer.shiftReadPosition(inputSamplesRequired);
}
emitAudioPacket(networkAudioSamples);
}
}
auto packetType = _shouldEchoToServer ?
PacketType::MicrophoneAudioWithEcho : PacketType::MicrophoneAudioNoEcho;
void AudioClient::emitAudioPacket(const int16_t* audioData, PacketType packetType) {
static std::mutex _mutex;
using Locker = std::unique_lock<std::mutex>;
// FIXME recorded audio isn't guaranteed to have the same stereo state
// as the current system
const int numNetworkBytes = _isStereoInput
? AudioConstants::NETWORK_FRAME_BYTES_STEREO
: AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
const int numNetworkSamples = _isStereoInput
? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
: AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
auto nodeList = DependencyManager::get<NodeList>();
SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
if (audioMixer && audioMixer->getActiveSocket()) {
Locker lock(_mutex);
if (!_audioPacket) {
// we don't have an audioPacket yet - set that up now
_audioPacket = NLPacket::create(PacketType::MicrophoneAudioWithEcho);
if (_lastInputLoudness == 0) {
packetType = PacketType::SilentAudioFrame;
}
glm::vec3 headPosition = _positionGetter();
glm::quat headOrientation = _orientationGetter();
quint8 isStereo = _isStereoInput ? 1 : 0;
if (packetType == PacketType::Unknown) {
if (_lastInputLoudness == 0) {
_audioPacket->setType(PacketType::SilentAudioFrame);
} else {
if (_shouldEchoToServer) {
_audioPacket->setType(PacketType::MicrophoneAudioWithEcho);
} else {
_audioPacket->setType(PacketType::MicrophoneAudioNoEcho);
}
}
} else {
_audioPacket->setType(packetType);
}
// reset the audio packet so we can start writing
_audioPacket->reset();
// write sequence number
_audioPacket->writePrimitive(_outgoingAvatarAudioSequenceNumber);
if (_audioPacket->getType() == PacketType::SilentAudioFrame) {
// pack num silent samples
quint16 numSilentSamples = numNetworkSamples;
_audioPacket->writePrimitive(numSilentSamples);
} else {
// set the mono/stereo byte
_audioPacket->writePrimitive(isStereo);
}
// pack the three float positions
_audioPacket->writePrimitive(headPosition);
// pack the orientation
_audioPacket->writePrimitive(headOrientation);
if (_audioPacket->getType() != PacketType::SilentAudioFrame) {
// audio samples have already been packed (written to networkAudioSamples)
_audioPacket->setPayloadSize(_audioPacket->getPayloadSize() + numNetworkBytes);
}
static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
int16_t* const networkAudioSamples = (int16_t*)(_audioPacket->getPayload() + leadingBytes);
memcpy(networkAudioSamples, audioData, numNetworkBytes);
Transform audioTransform;
audioTransform.setTranslation(_positionGetter());
audioTransform.setRotation(_orientationGetter());
// FIXME find a way to properly handle both playback audio and user audio concurrently
emitAudioPacket(networkAudioSamples, numNetworkBytes, _outgoingAvatarAudioSequenceNumber, audioTransform, packetType);
_stats.sentPacket();
nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
nodeList->sendUnreliablePacket(*_audioPacket, *audioMixer);
_outgoingAvatarAudioSequenceNumber++;
}
}
void AudioClient::handleRecordedAudioInput(const QByteArray& audio) {
emitAudioPacket((int16_t*)audio.data(), PacketType::MicrophoneAudioWithEcho);
Transform audioTransform;
audioTransform.setTranslation(_positionGetter());
audioTransform.setRotation(_orientationGetter());
// FIXME check a flag to see if we should echo audio?
emitAudioPacket(audio.data(), audio.size(), _outgoingAvatarAudioSequenceNumber, audioTransform, PacketType::MicrophoneAudioWithEcho);
}
void AudioClient::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {


@@ -74,9 +74,10 @@ class QAudioInput;
class QAudioOutput;
class QIODevice;
typedef struct ty_gverb ty_gverb;
class Transform;
class NLPacket;
class AudioClient : public AbstractAudioInterface, public Dependency {
@@ -212,7 +213,6 @@ protected:
}
private:
void emitAudioPacket(const int16_t* audioData, PacketType packetType = PacketType::Unknown);
void outputFormatChanged();
QByteArray firstInputFrame;
@@ -319,8 +319,6 @@ private:
void checkDevices();
bool _hasReceivedFirstPacket = false;
std::unique_ptr<NLPacket> _audioPacket;
};


@@ -0,0 +1,61 @@
//
// Created by Bradley Austin Davis on 2015/11/18
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "AbstractAudioInterface.h"
#include <QtCore/QSharedPointer>
#include <Node.h>
#include <NodeType.h>
#include <DependencyManager.h>
#include <NodeList.h>
#include <NLPacket.h>
#include <Transform.h>
#include "AudioConstants.h"
void AbstractAudioInterface::emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber, const Transform& transform, PacketType packetType) {
static std::mutex _mutex;
using Locker = std::unique_lock<std::mutex>;
auto nodeList = DependencyManager::get<NodeList>();
SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
if (audioMixer && audioMixer->getActiveSocket()) {
Locker lock(_mutex);
static std::unique_ptr<NLPacket> audioPacket = NLPacket::create(PacketType::Unknown);
quint8 isStereo = bytes == AudioConstants::NETWORK_FRAME_BYTES_STEREO ? 1 : 0;
audioPacket->setType(packetType);
// reset the audio packet so we can start writing
audioPacket->reset();
// write sequence number
audioPacket->writePrimitive(sequenceNumber++);
if (audioPacket->getType() == PacketType::SilentAudioFrame) {
// pack num silent samples
quint16 numSilentSamples = isStereo ?
AudioConstants::NETWORK_FRAME_SAMPLES_STEREO :
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
audioPacket->writePrimitive(numSilentSamples);
} else {
// set the mono/stereo byte
audioPacket->writePrimitive(isStereo);
}
// pack the three float positions
audioPacket->writePrimitive(transform.getTranslation());
// pack the orientation
audioPacket->writePrimitive(transform.getRotation());
if (audioPacket->getType() != PacketType::SilentAudioFrame) {
// audio samples have already been packed (written to networkAudioSamples)
audioPacket->setPayloadSize(audioPacket->getPayloadSize() + bytes);
static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
memcpy(audioPacket->getPayload() + leadingBytes, audioData, bytes);
}
nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
nodeList->sendUnreliablePacket(*audioPacket, *audioMixer);
}
}
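For reference, the wire layout the helper writes for a non-silent frame, reconstructed from the writePrimitive calls above (offsets assume the usual tightly packed glm types):

#include <QtGlobal>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

//   offset  0: quint16   sequence number  (2 bytes)
//   offset  2: quint8    isStereo flag    (1 byte)
//   offset  3: glm::vec3 position         (12 bytes)
//   offset 15: glm::quat orientation      (16 bytes)
//   offset 31: int16_t   audio samples    (`bytes` bytes)
static_assert(sizeof(quint16) + sizeof(quint8)
              + sizeof(glm::vec3) + sizeof(glm::quat) == 31,
              "audio packet header is 31 bytes");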


@@ -15,16 +15,21 @@
#include <QtCore/QObject>
#include <QtMultimedia/qaudiooutput.h>
#include <udt/PacketHeaders.h>
#include "AudioInjectorOptions.h"
class AudioInjector;
class AudioInjectorLocalBuffer;
class Transform;
class AbstractAudioInterface : public QObject {
Q_OBJECT
public:
AbstractAudioInterface(QObject* parent = 0) : QObject(parent) {};
static void emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber, const Transform& transform, PacketType packetType);
public slots:
virtual bool outputLocalInjector(bool isStereo, AudioInjector* injector) = 0;


@@ -20,7 +20,9 @@ namespace AudioConstants {
const int SAMPLE_RATE = 24000;
typedef int16_t AudioSample;
static const char* AUDIO_FRAME_NAME = "com.highfidelity.recording.Audio";
const int NETWORK_FRAME_BYTES_STEREO = 1024;
const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / sizeof(AudioSample);
const int NETWORK_FRAME_BYTES_PER_CHANNEL = 512;
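A quick sanity check on the new constants: 1024 stereo bytes is 512 int16_t samples, i.e. still 256 samples per channel, so the network frame duration at 24 kHz is unchanged (standalone sketch):

#include <cstdio>

int main() {
    const int SAMPLE_RATE = 24000;        // AudioConstants::SAMPLE_RATE
    const int BYTES_PER_CHANNEL = 512;    // NETWORK_FRAME_BYTES_PER_CHANNEL
    const int samplesPerChannel = BYTES_PER_CHANNEL / 2; // int16_t samples
    printf("samples/channel: %d, frame: %.3f ms\n",
           samplesPerChannel, samplesPerChannel * 1000.0 / SAMPLE_RATE);
    return 0; // prints: samples/channel: 256, frame: 10.667 ms
}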


@@ -131,6 +131,25 @@ Frame::Handler Frame::registerFrameHandler(FrameType type, Handler handler) {
return result;
}
Frame::Handler Frame::registerFrameHandler(const QString& frameTypeName, Handler handler) {
auto frameType = registerFrameType(frameTypeName);
return registerFrameHandler(frameType, handler);
}
void Frame::clearFrameHandler(FrameType type) {
Locker lock(mutex);
auto iterator = handlerMap.find(type);
if (iterator != handlerMap.end()) {
handlerMap.erase(iterator);
}
}
void Frame::clearFrameHandler(const QString& frameTypeName) {
auto frameType = registerFrameType(frameTypeName);
clearFrameHandler(frameType);
}
void Frame::handleFrame(const Frame::ConstPointer& frame) {
Handler handler;
{


@@ -55,9 +55,12 @@ public:
: FrameHeader(type, timeOffset), data(data) { }
static FrameType registerFrameType(const QString& frameTypeName);
static Handler registerFrameHandler(FrameType type, Handler handler);
static Handler registerFrameHandler(const QString& frameTypeName, Handler handler);
static void clearFrameHandler(FrameType type);
static void clearFrameHandler(const QString& frameTypeName);
static QMap<QString, FrameType> getFrameTypes();
static QMap<FrameType, QString> getFrameTypeNames();
static Handler registerFrameHandler(FrameType type, Handler handler);
static void handleFrame(const ConstPointer& frame);
};


@@ -72,16 +72,17 @@ FileFrameHeaderList parseFrameHeaders(uchar* const start, const qint64& size) {
results.push_back(header);
}
qDebug() << "Parsed source data into " << results.size() << " frames";
int i = 0;
for (const auto& frameHeader : results) {
qDebug() << "Frame " << i++ << " time " << frameHeader.timeOffset;
}
// int i = 0;
// for (const auto& frameHeader : results) {
// qDebug() << "Frame " << i++ << " time " << frameHeader.timeOffset << " Type " << frameHeader.type;
// }
return results;
}
FileClip::FileClip(const QString& fileName) : _file(fileName) {
auto size = _file.size();
qDebug() << "Opening file of size: " << size;
bool opened = _file.open(QIODevice::ReadOnly);
if (!opened) {
qCWarning(recordingLog) << "Unable to open file " << fileName;


@@ -15,43 +15,16 @@
#include <recording/Clip.h>
#include <recording/Frame.h>
#include <NumericalConstants.h>
// FIXME
//#include <AudioClient.h>
#include <AudioConstants.h>
#include <Transform.h>
#include "ScriptEngineLogging.h"
typedef int16_t AudioSample;
using namespace recording;
// FIXME move to somewhere audio related?
static const QString AUDIO_FRAME_NAME = "com.highfidelity.recording.Audio";
RecordingScriptingInterface::RecordingScriptingInterface() {
static const recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::registerFrameType(AvatarData::FRAME_NAME);
// FIXME how to deal with driving multiple avatars locally?
Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this](Frame::ConstPointer frame) {
processAvatarFrame(frame);
});
static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AUDIO_FRAME_NAME);
Frame::registerFrameHandler(AUDIO_FRAME_TYPE, [this](Frame::ConstPointer frame) {
processAudioFrame(frame);
});
_player = DependencyManager::get<Deck>();
_recorder = DependencyManager::get<Recorder>();
// FIXME : Disabling Sound
// auto audioClient = DependencyManager::get<AudioClient>();
// connect(audioClient.data(), &AudioClient::inputReceived, this, &RecordingScriptingInterface::processAudioInput);
}
void RecordingScriptingInterface::setControlledAvatar(AvatarData* avatar) {
_controlledAvatar = avatar;
}
bool RecordingScriptingInterface::isPlaying() const {
@@ -92,12 +65,6 @@ void RecordingScriptingInterface::startPlaying() {
return;
}
// Playback from the current position
if (_playFromCurrentLocation && _controlledAvatar) {
_dummyAvatar.setRecordingBasis(std::make_shared<Transform>(_controlledAvatar->getTransform()));
} else {
_dummyAvatar.clearRecordingBasis();
}
_player->play();
}
@@ -176,12 +143,6 @@ void RecordingScriptingInterface::startRecording() {
return;
}
_recordingEpoch = Frame::epochForFrameTime(0);
if (_controlledAvatar) {
_controlledAvatar->setRecordingBasis();
}
_recorder->start();
}
@@ -189,10 +150,6 @@ void RecordingScriptingInterface::stopRecording() {
_recorder->stop();
_lastClip = _recorder->getClip();
_lastClip->seek(0);
if (_controlledAvatar) {
_controlledAvatar->clearRecordingBasis();
}
}
void RecordingScriptingInterface::saveRecording(const QString& filename) {
@@ -225,50 +182,3 @@ void RecordingScriptingInterface::loadLastRecording() {
_player->play();
}
void RecordingScriptingInterface::processAvatarFrame(const Frame::ConstPointer& frame) {
Q_ASSERT(QThread::currentThread() == thread());
if (!_controlledAvatar) {
return;
}
AvatarData::fromFrame(frame->data, _dummyAvatar);
if (_useHeadModel && _dummyAvatar.getFaceModelURL().isValid() &&
(_dummyAvatar.getFaceModelURL() != _controlledAvatar->getFaceModelURL())) {
// FIXME
//myAvatar->setFaceModelURL(_dummyAvatar.getFaceModelURL());
}
if (_useSkeletonModel && _dummyAvatar.getSkeletonModelURL().isValid() &&
(_dummyAvatar.getSkeletonModelURL() != _controlledAvatar->getSkeletonModelURL())) {
// FIXME
//myAvatar->useFullAvatarURL()
}
if (_useDisplayName && _dummyAvatar.getDisplayName() != _controlledAvatar->getDisplayName()) {
_controlledAvatar->setDisplayName(_dummyAvatar.getDisplayName());
}
_controlledAvatar->setPosition(_dummyAvatar.getPosition());
_controlledAvatar->setOrientation(_dummyAvatar.getOrientation());
// FIXME attachments
// FIXME joints
// FIXME head lean
// FIXME head orientation
}
void RecordingScriptingInterface::processAudioInput(const QByteArray& audio) {
if (_recorder->isRecording()) {
static const recording::FrameType AUDIO_FRAME_TYPE = recording::Frame::registerFrameType(AUDIO_FRAME_NAME);
_recorder->recordFrame(AUDIO_FRAME_TYPE, audio);
}
}
void RecordingScriptingInterface::processAudioFrame(const recording::FrameConstPointer& frame) {
// auto audioClient = DependencyManager::get<AudioClient>();
// audioClient->handleRecordedAudioInput(frame->data);
}


@@ -10,13 +10,13 @@
#define hifi_RecordingScriptingInterface_h
#include <atomic>
#include <mutex>
#include <QObject>
#include <DependencyManager.h>
#include <recording/Forward.h>
#include <recording/Frame.h>
#include <AvatarData.h>
class RecordingScriptingInterface : public QObject, public Dependency {
Q_OBJECT
@@ -24,8 +24,6 @@ class RecordingScriptingInterface : public QObject, public Dependency {
public:
RecordingScriptingInterface();
void setControlledAvatar(AvatarData* avatar);
public slots:
void loadRecording(const QString& filename);
@@ -41,12 +39,19 @@ public slots:
void setPlayerVolume(float volume);
void setPlayerAudioOffset(float audioOffset);
void setPlayerTime(float time);
void setPlayFromCurrentLocation(bool playFromCurrentLocation);
void setPlayerLoop(bool loop);
void setPlayerUseDisplayName(bool useDisplayName);
void setPlayerUseAttachments(bool useAttachments);
void setPlayerUseHeadModel(bool useHeadModel);
void setPlayerUseSkeletonModel(bool useSkeletonModel);
void setPlayFromCurrentLocation(bool playFromCurrentLocation);
bool getPlayerUseDisplayName() { return _useDisplayName; }
bool getPlayerUseAttachments() { return _useAttachments; }
bool getPlayerUseHeadModel() { return _useHeadModel; }
bool getPlayerUseSkeletonModel() { return _useSkeletonModel; }
bool getPlayFromCurrentLocation() { return _playFromCurrentLocation; }
void startRecording();
void stopRecording();
@@ -57,22 +62,13 @@ public slots:
void saveRecording(const QString& filename);
void loadLastRecording();
signals:
void playbackStateChanged();
// Should this occur for any frame or just for seek calls?
void playbackPositionChanged();
void looped();
private:
protected:
using Mutex = std::recursive_mutex;
using Locker = std::unique_lock<Mutex>;
using Flag = std::atomic<bool>;
void processAvatarFrame(const recording::FrameConstPointer& frame);
void processAudioFrame(const recording::FrameConstPointer& frame);
void processAudioInput(const QByteArray& audioData);
QSharedPointer<recording::Deck> _player;
QSharedPointer<recording::Recorder> _recorder;
quint64 _recordingEpoch { 0 };
Flag _playFromCurrentLocation { true };
Flag _useDisplayName { false };
@@ -80,8 +76,6 @@ private:
Flag _useAttachments { false };
Flag _useSkeletonModel { false };
recording::ClipPointer _lastClip;
AvatarData _dummyAvatar;
AvatarData* _controlledAvatar;
};
#endif // hifi_RecordingScriptingInterface_h


@@ -117,7 +117,7 @@ Transform Transform::fromJson(const QJsonValue& json) {
result.setTranslation(vec3FromJsonValue(obj[JSON_TRANSLATION]));
}
if (obj.contains(JSON_SCALE)) {
result.setScale(vec3FromJsonValue(obj[JSON_TRANSLATION]));
result.setScale(vec3FromJsonValue(obj[JSON_SCALE]));
}
return result;
}