Mirror of https://github.com/overte-org/overte.git (synced 2025-08-10 16:23:17 +02:00)

Commit 368f19b4e2: "Merging with todays upstream and checking"
10 changed files with 180 additions and 151 deletions
Agent.cpp

@@ -244,6 +244,8 @@ void Agent::setIsAvatar(bool isAvatar) {
     }

     if (!_isAvatar) {
+        DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(nullptr);
+
        if (_avatarIdentityTimer) {
            _avatarIdentityTimer->stop();
            delete _avatarIdentityTimer;
Application.cpp

@@ -841,6 +841,7 @@ void Application::cleanupBeforeQuit() {
 #ifdef HAVE_IVIEWHMD
     DependencyManager::get<EyeTracker>()->setEnabled(false, true);
 #endif
+    DependencyManager::get<RecordingScriptingInterface>()->setControlledAvatar(nullptr);

     AnimDebugDraw::getInstance().shutdown();

AudioClient.cpp

@@ -743,19 +743,9 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
 }

 void AudioClient::handleAudioInput() {
-    if (!_audioPacket) {
-        // we don't have an audioPacket yet - set that up now
-        _audioPacket = NLPacket::create(PacketType::MicrophoneAudioNoEcho);
-    }
-
     const float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio();

     const int inputSamplesRequired = (int)((float)AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio);
     const auto inputAudioSamples = std::unique_ptr<int16_t[]>(new int16_t[inputSamplesRequired]);

-    static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
-    int16_t* const networkAudioSamples = (int16_t*)(_audioPacket->getPayload() + leadingBytes);
-
     QByteArray inputByteArray = _inputDevice->readAll();

     // Add audio source injection if enabled
@@ -784,30 +774,30 @@ void AudioClient::handleAudioInput() {
     float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
     _stats.updateInputMsecsRead(audioInputMsecsRead);

-    while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {
-        const int numNetworkBytes = _isStereoInput
-            ? AudioConstants::NETWORK_FRAME_BYTES_STEREO
-            : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
-        const int numNetworkSamples = _isStereoInput
-            ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
-            : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
+    const int numNetworkBytes = _isStereoInput
+        ? AudioConstants::NETWORK_FRAME_BYTES_STEREO
+        : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
+    const int numNetworkSamples = _isStereoInput
+        ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
+        : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;

+    static int16_t networkAudioSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
+
+    while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {
         if (!_muted) {
-            // zero out the monoAudioSamples array and the locally injected audio
-            memset(networkAudioSamples, 0, numNetworkBytes);
-
             // Increment the time since the last clip
             if (_timeSinceLastClip >= 0.0f) {
-                _timeSinceLastClip += (float) numNetworkSamples / (float) AudioConstants::SAMPLE_RATE;
+                _timeSinceLastClip += (float)numNetworkSamples / (float)AudioConstants::SAMPLE_RATE;
             }

             _inputRingBuffer.readSamples(inputAudioSamples.get(), inputSamplesRequired);
             possibleResampling(_inputToNetworkResampler,
                 inputAudioSamples.get(), networkAudioSamples,
                 inputSamplesRequired, numNetworkSamples,
                 _inputFormat, _desiredInputFormat);

             // Remove DC offset
             if (!_isStereoInput && !_audioSourceInjectEnabled) {
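In the hunk above, the channel-dependent byte and sample counts move out of the while loop (they are loop-invariant), and the network samples are now staged in a function-local static buffer sized for the worst case (stereo) instead of being written through a pointer into the packet payload. A minimal sketch of the staging idea; the constant values are illustrative stand-ins for AudioConstants, not taken from the diff:

    // Sketch: compute channel-dependent sizes once, stage samples in a
    // worst-case (stereo) scratch buffer decoupled from the packet layout.
    #include <cstdint>

    namespace AudioConstants {
        const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = 240;  // assumed value
        const int NETWORK_FRAME_SAMPLES_STEREO = 2 * NETWORK_FRAME_SAMPLES_PER_CHANNEL;
    }

    void stageInputFrame(bool isStereo) {
        const int numNetworkSamples = isStereo
            ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
            : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;

        // Sized for stereo, so the mono case can never overflow it; static
        // avoids re-reserving a network frame's worth of samples per call.
        static int16_t networkAudioSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];

        // ... resample from the input ring buffer into networkAudioSamples,
        // then hand numNetworkSamples of them to the packet writer ...
        (void)numNetworkSamples;
        (void)networkAudioSamples;
    }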
@@ -829,7 +819,7 @@ void AudioClient::handleAudioInput() {

             for (int i = 0; i < numNetworkSamples; i++) {
                 int thisSample = std::abs(networkAudioSamples[i]);
-                loudness += (float) thisSample;
+                loudness += (float)thisSample;

                 if (thisSample > (AudioConstants::MAX_SAMPLE_VALUE * AudioNoiseGate::CLIPPING_THRESHOLD)) {
                     _timeSinceLastClip = 0.0f;
@@ -839,7 +829,7 @@ void AudioClient::handleAudioInput() {
             _lastInputLoudness = fabs(loudness / numNetworkSamples);
         }

-            emit inputReceived({reinterpret_cast<char*>(networkAudioSamples), numNetworkBytes});
+            emit inputReceived({ reinterpret_cast<char*>(networkAudioSamples), numNetworkBytes });

         } else {
             // our input loudness is 0, since we're muted
@@ -849,14 +839,38 @@ void AudioClient::handleAudioInput() {
             _inputRingBuffer.shiftReadPosition(inputSamplesRequired);
         }

-        auto nodeList = DependencyManager::get<NodeList>();
-        SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
-
-        if (audioMixer && audioMixer->getActiveSocket()) {
-            glm::vec3 headPosition = _positionGetter();
-            glm::quat headOrientation = _orientationGetter();
-            quint8 isStereo = _isStereoInput ? 1 : 0;
-
+        emitAudioPacket(networkAudioSamples);
+    }
+}
+
+void AudioClient::emitAudioPacket(const int16_t* audioData, PacketType packetType) {
+    static std::mutex _mutex;
+    using Locker = std::unique_lock<std::mutex>;
+
+    // FIXME recorded audio isn't guaranteed to have the same stereo state
+    // as the current system
+    const int numNetworkBytes = _isStereoInput
+        ? AudioConstants::NETWORK_FRAME_BYTES_STEREO
+        : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
+    const int numNetworkSamples = _isStereoInput
+        ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
+        : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
+
+    auto nodeList = DependencyManager::get<NodeList>();
+    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
+
+    if (audioMixer && audioMixer->getActiveSocket()) {
+        Locker lock(_mutex);
+        if (!_audioPacket) {
+            // we don't have an audioPacket yet - set that up now
+            _audioPacket = NLPacket::create(PacketType::MicrophoneAudioWithEcho);
+        }
+
+        glm::vec3 headPosition = _positionGetter();
+        glm::quat headOrientation = _orientationGetter();
+        quint8 isStereo = _isStereoInput ? 1 : 0;
+
+        if (packetType == PacketType::Unknown) {
             if (_lastInputLoudness == 0) {
                 _audioPacket->setType(PacketType::SilentAudioFrame);
             } else {
@@ -866,70 +880,52 @@ void AudioClient::handleAudioInput() {
                 _audioPacket->setType(PacketType::MicrophoneAudioNoEcho);
             }
+        } else {
+            _audioPacket->setType(packetType);
+        }

-            // reset the audio packet so we can start writing
-            _audioPacket->reset();
-
-            // write sequence number
-            _audioPacket->writePrimitive(_outgoingAvatarAudioSequenceNumber);
-
-            if (_audioPacket->getType() == PacketType::SilentAudioFrame) {
-                // pack num silent samples
-                quint16 numSilentSamples = numNetworkSamples;
-                _audioPacket->writePrimitive(numSilentSamples);
-            } else {
-                // set the mono/stereo byte
-                _audioPacket->writePrimitive(isStereo);
-            }
-
-            // pack the three float positions
-            _audioPacket->writePrimitive(headPosition);
-
-            // pack the orientation
-            _audioPacket->writePrimitive(headOrientation);
-
-            if (_audioPacket->getType() != PacketType::SilentAudioFrame) {
-                // audio samples have already been packed (written to networkAudioSamples)
-                _audioPacket->setPayloadSize(_audioPacket->getPayloadSize() + numNetworkBytes);
-            }
-
-            _stats.sentPacket();
-
-            nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
-
-            nodeList->sendUnreliablePacket(*_audioPacket, *audioMixer);
-
-            _outgoingAvatarAudioSequenceNumber++;
-        }
+        // reset the audio packet so we can start writing
+        _audioPacket->reset();
+
+        // write sequence number
+        _audioPacket->writePrimitive(_outgoingAvatarAudioSequenceNumber);
+
+        if (_audioPacket->getType() == PacketType::SilentAudioFrame) {
+            // pack num silent samples
+            quint16 numSilentSamples = numNetworkSamples;
+            _audioPacket->writePrimitive(numSilentSamples);
+        } else {
+            // set the mono/stereo byte
+            _audioPacket->writePrimitive(isStereo);
+        }
+
+        // pack the three float positions
+        _audioPacket->writePrimitive(headPosition);
+
+        // pack the orientation
+        _audioPacket->writePrimitive(headOrientation);
+
+        if (_audioPacket->getType() != PacketType::SilentAudioFrame) {
+            // audio samples have already been packed (written to networkAudioSamples)
+            _audioPacket->setPayloadSize(_audioPacket->getPayloadSize() + numNetworkBytes);
+        }
+
+        static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
+        int16_t* const networkAudioSamples = (int16_t*)(_audioPacket->getPayload() + leadingBytes);
+        memcpy(networkAudioSamples, audioData, numNetworkBytes);
+
+        _stats.sentPacket();
+
+        nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
+
+        nodeList->sendUnreliablePacket(*_audioPacket, *audioMixer);
+
+        _outgoingAvatarAudioSequenceNumber++;
     }
 }

 void AudioClient::handleRecordedAudioInput(const QByteArray& audio) {
-    if (!_audioPacket) {
-        // we don't have an audioPacket yet - set that up now
-        _audioPacket = NLPacket::create(PacketType::MicrophoneAudioWithEcho);
-    }
-
-    // FIXME either discard stereo in the recording or record a stereo flag
-
-    auto nodeList = DependencyManager::get<NodeList>();
-    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
-    if (audioMixer && audioMixer->getActiveSocket()) {
-        glm::vec3 headPosition = _positionGetter();
-        glm::quat headOrientation = _orientationGetter();
-        quint8 isStereo = _isStereoInput ? 1 : 0;
-        _audioPacket->reset();
-        _audioPacket->setType(PacketType::MicrophoneAudioWithEcho);
-        _audioPacket->writePrimitive(_outgoingAvatarAudioSequenceNumber);
-        _audioPacket->writePrimitive(isStereo);
-        _audioPacket->writePrimitive(headPosition);
-        _audioPacket->writePrimitive(headOrientation);
-        _audioPacket->write(audio);
-        _stats.sentPacket();
-        nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
-        nodeList->sendUnreliablePacket(*_audioPacket, *audioMixer);
-        _outgoingAvatarAudioSequenceNumber++;
-    }
+    emitAudioPacket((int16_t*)audio.data(), PacketType::MicrophoneAudioWithEcho);
 }

 void AudioClient::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
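Taken together, the two hunks above collapse the live-microphone path and the recorded-audio path into a single packet-writing routine. A compact sketch of the pattern, with Packet standing in for NLPacket and the header fields (sequence number, head pose, mono/stereo flag) elided; note that the diff declares the mutex as a function-local static, which serializes every AudioClient instance through the same lock:

    // Sketch: two producers funnel into one serialized packet writer.
    #include <cstdint>
    #include <mutex>
    #include <vector>

    struct Packet { std::vector<uint8_t> payload; };   // stand-in for NLPacket

    class AudioSender {
    public:
        // Live path: the packet type is derived from loudness inside the writer.
        void handleAudioInput(const int16_t* samples, size_t count) {
            emitAudioPacket(samples, count);
        }
        // Playback path: collapses to a one-line forward, as in the diff.
        void handleRecordedAudioInput(const int16_t* samples, size_t count) {
            emitAudioPacket(samples, count);
        }

    private:
        void emitAudioPacket(const int16_t* samples, size_t count) {
            static std::mutex mutex;                   // function-local, as in the diff
            std::unique_lock<std::mutex> lock(mutex);  // one writer at a time
            // ... write sequence number, head pose, mono/stereo flag ...
            _packet.payload.assign(reinterpret_cast<const uint8_t*>(samples),
                                   reinterpret_cast<const uint8_t*>(samples + count));
            // ... send _packet to the audio mixer ...
        }
        Packet _packet;
    };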
AudioClient.h

@@ -212,6 +212,7 @@ protected:
     }

 private:
+    void emitAudioPacket(const int16_t* audioData, PacketType packetType = PacketType::Unknown);
     void outputFormatChanged();

     QByteArray firstInputFrame;
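The PacketType::Unknown default argument acts as a sentinel: when the caller omits the type, emitAudioPacket derives it from the current input loudness (SilentAudioFrame versus MicrophoneAudioNoEcho), while an explicit argument overrides that choice. The two call shapes, as they appear in the hunks above:

    // live input - packet type derived from loudness inside emitAudioPacket()
    emitAudioPacket(networkAudioSamples);

    // recorded playback - packet type forced by the caller
    emitAudioPacket((int16_t*)audio.data(), PacketType::MicrophoneAudioWithEcho);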
CMakeLists.txt (avatars library)

@@ -1,3 +1,3 @@
 set(TARGET_NAME avatars)
 setup_hifi_library(Network Script)
-link_hifi_libraries(audio shared networking recording)
+link_hifi_libraries(shared networking)
AvatarData.cpp

@@ -33,7 +33,6 @@
 #include <StreamUtils.h>
 #include <UUID.h>
 #include <shared/JSONHelpers.h>
-#include <recording/Frame.h>

 #include "AvatarLogging.h"

@@ -1443,14 +1442,10 @@ QByteArray AvatarData::toFrame(const AvatarData& avatar) {

     auto recordingBasis = avatar.getRecordingBasis();
     if (recordingBasis) {
+        root[JSON_AVATAR_BASIS] = Transform::toJson(*recordingBasis);
         // Find the relative transform
         auto relativeTransform = recordingBasis->relativeTransform(avatar.getTransform());
-        // if the resulting relative basis is identity, we shouldn't record anything
-        if (!relativeTransform.isIdentity()) {
-            root[JSON_AVATAR_RELATIVE] = Transform::toJson(relativeTransform);
-            root[JSON_AVATAR_BASIS] = Transform::toJson(*recordingBasis);
-        }
+        root[JSON_AVATAR_RELATIVE] = Transform::toJson(relativeTransform);
     } else {
         root[JSON_AVATAR_RELATIVE] = Transform::toJson(avatar.getTransform());
     }
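After this change a recorded frame always stores both the recording basis and the pose relative to it; the previous code skipped both keys whenever the relative transform was the identity, which also dropped the basis needed to re-root playback somewhere else. A sketch of the relationship being recorded, under the assumption (suggested by the name relativeTransform) that the relative pose is the avatar pose expressed in basis space; glm matrices stand in for the Transform class:

    #include <glm/glm.hpp>

    // Pose expressed in the recording basis: t_rel = basis^-1 * t.
    glm::mat4 relativeTransform(const glm::mat4& basis, const glm::mat4& pose) {
        return glm::inverse(basis) * pose;
    }

    // Playback can then re-root the recorded motion under a new basis,
    // e.g. the avatar's current location when playback starts.
    glm::mat4 applyBasis(const glm::mat4& newBasis, const glm::mat4& relative) {
        return newBasis * relative;
    }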
@@ -1484,6 +1479,9 @@ QByteArray AvatarData::toFrame(const AvatarData& avatar) {

 void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
     QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
+#ifdef WANT_JSON_DEBUG
+    qDebug() << doc.toJson(QJsonDocument::JsonFormat::Indented);
+#endif
     QJsonObject root = doc.object();

     if (root.contains(JSON_AVATAR_HEAD_MODEL)) {
AvatarData.h

@@ -50,7 +50,6 @@ typedef unsigned long long quint64;
 #include <Node.h>
 #include <RegisteredMetaTypes.h>
 #include <SimpleMovingAverage.h>
-#include <recording/Forward.h>

 #include "AABox.h"
 #include "HandData.h"
Deck.cpp

@@ -8,6 +8,8 @@

 #include "Deck.h"

+#include <QtCore/QThread>
+
 #include <NumericalConstants.h>
 #include <SharedUtil.h>

@@ -101,9 +103,13 @@ float Deck::position() const {
 }

 static const Frame::Time MIN_FRAME_WAIT_INTERVAL = Frame::secondsToFrameTime(0.001f);
-static const Frame::Time MAX_FRAME_PROCESSING_TIME = Frame::secondsToFrameTime(0.002f);
+static const Frame::Time MAX_FRAME_PROCESSING_TIME = Frame::secondsToFrameTime(0.004f);

 void Deck::processFrames() {
+    if (qApp->thread() != QThread::currentThread()) {
+        qWarning() << "Processing frames must only happen on the main thread.";
+        return;
+    }
     Locker lock(_mutex);
     if (_pause) {
         return;
@@ -115,10 +121,17 @@ void Deck::processFrames() {
     // FIXME add code to start dropping frames if we fall behind.
     // Alternatively, add code to cache frames here and then process only the last frame of a given type
     // ... the latter will work for Avatar, but not well for audio I suspect.
+    bool overLimit = false;
     for (nextClip = getNextClip(); nextClip; nextClip = getNextClip()) {
         auto currentPosition = Frame::frameTimeFromEpoch(_startEpoch);
         if ((currentPosition - startingPosition) >= MAX_FRAME_PROCESSING_TIME) {
             qCWarning(recordingLog) << "Exceeded maximum frame processing time, breaking early";
+#ifdef WANT_RECORDING_DEBUG
+            qCDebug(recordingLog) << "Starting: " << currentPosition;
+            qCDebug(recordingLog) << "Current: " << startingPosition;
+            qCDebug(recordingLog) << "Trigger: " << triggerPosition;
+#endif
+            overLimit = true;
             break;
         }

@@ -150,9 +163,19 @@ void Deck::processFrames() {

     // If we have more clip frames available, set the timer for the next one
     _position = Frame::frameTimeFromEpoch(_startEpoch);
-    auto nextFrameTime = nextClip->positionFrameTime();
-    auto interval = Frame::frameTimeToMilliseconds(nextFrameTime - _position);
-    _timer.singleShot(interval, [this] {
+    int nextInterval = 1;
+    if (!overLimit) {
+        auto nextFrameTime = nextClip->positionFrameTime();
+        nextInterval = (int)Frame::frameTimeToMilliseconds(nextFrameTime - _position);
+#ifdef WANT_RECORDING_DEBUG
+        qCDebug(recordingLog) << "Now " << _position;
+        qCDebug(recordingLog) << "Next frame time " << nextInterval;
+#endif
+    }
+#ifdef WANT_RECORDING_DEBUG
+    qCDebug(recordingLog) << "Setting timer for next processing " << nextInterval;
+#endif
+    _timer.singleShot(nextInterval, [this] {
         processFrames();
     });
 }
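The rescheduling above makes the single-shot timer adaptive: if the frame loop broke early because it exceeded its processing budget (overLimit), the next pass fires after roughly 1 ms to drain the backlog, instead of sleeping until the next frame's timestamp. A condensed sketch of that decision; nextFrameMs stands in for the Frame time arithmetic, and the functor overload of QTimer::singleShot used here requires Qt 5.4 or later:

    #include <QtCore/QTimer>
    #include <functional>

    void scheduleNextPass(bool overLimit, int nextFrameMs,
                          std::function<void()> processFrames) {
        int nextInterval = 1;            // near-immediate retry after an over-budget pass
        if (!overLimit) {
            nextInterval = nextFrameMs;  // otherwise wait until the next frame is due
        }
        QTimer::singleShot(nextInterval, processFrames);
    }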
RecordingScriptingInterface.cpp

@@ -15,11 +15,11 @@
 #include <recording/Clip.h>
 #include <recording/Frame.h>
 #include <NumericalConstants.h>
+// FiXME
 //#include <AudioClient.h>
 #include <AudioConstants.h>
 #include <Transform.h>
-//#include "avatar/AvatarManager.h"
-//#include "Application.h"
 #include "ScriptEngineLogging.h"

 typedef int16_t AudioSample;
@@ -45,6 +45,7 @@ RecordingScriptingInterface::RecordingScriptingInterface() {
     _player = DependencyManager::get<Deck>();
     _recorder = DependencyManager::get<Recorder>();

+    // FIXME : Disabling Sound
     // auto audioClient = DependencyManager::get<AudioClient>();
     // connect(audioClient.data(), &AudioClient::inputReceived, this, &RecordingScriptingInterface::processAudioInput);
 }
@@ -53,19 +54,19 @@ void RecordingScriptingInterface::setControlledAvatar(AvatarData* avatar) {
     _controlledAvatar = avatar;
 }

-bool RecordingScriptingInterface::isPlaying() {
+bool RecordingScriptingInterface::isPlaying() const {
     return _player->isPlaying();
 }

-bool RecordingScriptingInterface::isPaused() {
+bool RecordingScriptingInterface::isPaused() const {
     return _player->isPaused();
 }

-float RecordingScriptingInterface::playerElapsed() {
+float RecordingScriptingInterface::playerElapsed() const {
     return _player->position();
 }

-float RecordingScriptingInterface::playerLength() {
+float RecordingScriptingInterface::playerLength() const {
     return _player->length();
 }

@@ -90,11 +91,10 @@ void RecordingScriptingInterface::startPlaying() {
         QMetaObject::invokeMethod(this, "startPlaying", Qt::BlockingQueuedConnection);
         return;
     }
-    //auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
-    auto myAvatar = _controlledAvatar;
+
     // Playback from the current position
-    if (_playFromCurrentLocation) {
-        _dummyAvatar.setRecordingBasis(std::make_shared<Transform>(myAvatar->getTransform()));
+    if (_playFromCurrentLocation && _controlledAvatar) {
+        _dummyAvatar.setRecordingBasis(std::make_shared<Transform>(_controlledAvatar->getTransform()));
     } else {
         _dummyAvatar.clearRecordingBasis();
     }
@@ -110,6 +110,10 @@ void RecordingScriptingInterface::setPlayerAudioOffset(float audioOffset) {
 }

 void RecordingScriptingInterface::setPlayerTime(float time) {
+    if (QThread::currentThread() != thread()) {
+        QMetaObject::invokeMethod(this, "setPlayerTime", Qt::BlockingQueuedConnection, Q_ARG(float, time));
+        return;
+    }
     _player->seek(time);
 }

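This hunk introduces the thread-guard idiom that the following hunks repeat for pausePlayer and stopPlaying: a slot invoked from the wrong thread re-posts itself to the object's owning thread with Qt::BlockingQueuedConnection and returns. A generic sketch of the idiom (the Player class is hypothetical, needs moc like any QObject subclass, and the method must be a slot or Q_INVOKABLE for the string-based invokeMethod to resolve it):

    #include <QtCore/QMetaObject>
    #include <QtCore/QObject>
    #include <QtCore/QThread>

    class Player : public QObject {
        Q_OBJECT
    public slots:
        void seekTo(float time) {
            if (QThread::currentThread() != thread()) {
                // Block until the owning thread has executed the call, which
                // preserves the synchronous semantics a script caller expects.
                QMetaObject::invokeMethod(this, "seekTo", Qt::BlockingQueuedConnection,
                                          Q_ARG(float, time));
                return;
            }
            // ... now safe to touch thread-affine state ...
        }
    };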
@@ -137,23 +141,27 @@ void RecordingScriptingInterface::setPlayerUseSkeletonModel(bool useSkeletonMode
     _useSkeletonModel = useSkeletonModel;
 }

-void RecordingScriptingInterface::play() {
-    _player->play();
-}
-
 void RecordingScriptingInterface::pausePlayer() {
+    if (QThread::currentThread() != thread()) {
+        QMetaObject::invokeMethod(this, "pausePlayer", Qt::BlockingQueuedConnection);
+        return;
+    }
     _player->pause();
 }

 void RecordingScriptingInterface::stopPlaying() {
+    if (QThread::currentThread() != thread()) {
+        QMetaObject::invokeMethod(this, "stopPlaying", Qt::BlockingQueuedConnection);
+        return;
+    }
     _player->stop();
 }

-bool RecordingScriptingInterface::isRecording() {
+bool RecordingScriptingInterface::isRecording() const {
     return _recorder->isRecording();
 }

-float RecordingScriptingInterface::recorderElapsed() {
+float RecordingScriptingInterface::recorderElapsed() const {
     return _recorder->position();
 }

|
@ -170,29 +178,21 @@ void RecordingScriptingInterface::startRecording() {
|
||||||
|
|
||||||
_recordingEpoch = Frame::epochForFrameTime(0);
|
_recordingEpoch = Frame::epochForFrameTime(0);
|
||||||
|
|
||||||
//auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
|
if (_controlledAvatar) {
|
||||||
//auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
|
_controlledAvatar->setRecordingBasis();
|
||||||
auto myAvatar = _controlledAvatar;
|
}
|
||||||
myAvatar->setRecordingBasis();
|
|
||||||
_recorder->start();
|
_recorder->start();
|
||||||
}
|
}
|
||||||
|
|
||||||
void RecordingScriptingInterface::stopRecording() {
|
void RecordingScriptingInterface::stopRecording() {
|
||||||
_recorder->stop();
|
_recorder->stop();
|
||||||
|
|
||||||
_lastClip = _recorder->getClip();
|
_lastClip = _recorder->getClip();
|
||||||
// post-process the audio into discreet chunks based on times of received samples
|
|
||||||
_lastClip->seek(0);
|
|
||||||
Frame::ConstPointer frame;
|
|
||||||
while (frame = _lastClip->nextFrame()) {
|
|
||||||
qDebug() << "Frame time " << frame->timeOffset << " size " << frame->data.size();
|
|
||||||
}
|
|
||||||
_lastClip->seek(0);
|
_lastClip->seek(0);
|
||||||
|
|
||||||
//auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
|
if (_controlledAvatar) {
|
||||||
//auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
|
_controlledAvatar->clearRecordingBasis();
|
||||||
auto myAvatar = _controlledAvatar;
|
}
|
||||||
myAvatar->clearRecordingBasis();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void RecordingScriptingInterface::saveRecording(const QString& filename) {
|
void RecordingScriptingInterface::saveRecording(const QString& filename) {
|
||||||
|
@@ -228,28 +228,32 @@ void RecordingScriptingInterface::loadLastRecording() {
 void RecordingScriptingInterface::processAvatarFrame(const Frame::ConstPointer& frame) {
     Q_ASSERT(QThread::currentThread() == thread());

+    if (!_controlledAvatar) {
+        return;
+    }
+
     AvatarData::fromFrame(frame->data, _dummyAvatar);

-    //auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
-    auto myAvatar = _controlledAvatar;
     if (_useHeadModel && _dummyAvatar.getFaceModelURL().isValid() &&
-        (_dummyAvatar.getFaceModelURL() != myAvatar->getFaceModelURL())) {
+        (_dummyAvatar.getFaceModelURL() != _controlledAvatar->getFaceModelURL())) {
         // FIXME
         //myAvatar->setFaceModelURL(_dummyAvatar.getFaceModelURL());
     }

     if (_useSkeletonModel && _dummyAvatar.getSkeletonModelURL().isValid() &&
-        (_dummyAvatar.getSkeletonModelURL() != myAvatar->getSkeletonModelURL())) {
+        (_dummyAvatar.getSkeletonModelURL() != _controlledAvatar->getSkeletonModelURL())) {
         // FIXME
         //myAvatar->useFullAvatarURL()
     }

-    if (_useDisplayName && _dummyAvatar.getDisplayName() != myAvatar->getDisplayName()) {
-        myAvatar->setDisplayName(_dummyAvatar.getDisplayName());
+    if (_useDisplayName && _dummyAvatar.getDisplayName() != _controlledAvatar->getDisplayName()) {
+        _controlledAvatar->setDisplayName(_dummyAvatar.getDisplayName());
     }

-    myAvatar->setPosition(_dummyAvatar.getPosition());
-    myAvatar->setOrientation(_dummyAvatar.getOrientation());
+    _controlledAvatar->setPosition(_dummyAvatar.getPosition());
+    _controlledAvatar->setOrientation(_dummyAvatar.getOrientation());

     // FIXME attachments
     // FIXME joints
RecordingScriptingInterface.h

@@ -27,12 +27,17 @@ public:
     void setControlledAvatar(AvatarData* avatar);

 public slots:
-    bool isPlaying();
-    bool isPaused();
-    float playerElapsed();
-    float playerLength();
     void loadRecording(const QString& filename);

     void startPlaying();
+    void pausePlayer();
+    void stopPlaying();
+    bool isPlaying() const;
+    bool isPaused() const;
+
+    float playerElapsed() const;
+    float playerLength() const;
+
     void setPlayerVolume(float volume);
     void setPlayerAudioOffset(float audioOffset);
     void setPlayerTime(float time);
@@ -42,13 +47,13 @@ public slots:
     void setPlayerUseAttachments(bool useAttachments);
     void setPlayerUseHeadModel(bool useHeadModel);
     void setPlayerUseSkeletonModel(bool useSkeletonModel);
-    void play();
-    void pausePlayer();
-    void stopPlaying();
-    bool isRecording();
-    float recorderElapsed();
     void startRecording();
     void stopRecording();
+    bool isRecording() const;
+
+    float recorderElapsed() const;
+
     void saveRecording(const QString& filename);
     void loadLastRecording();
