Recording fixes

parent 2b07daa976
commit 900f425f35
4 changed files with 94 additions and 109 deletions
@@ -162,26 +162,15 @@ void RecordingScriptingInterface::startRecording() {
     }
 
     _recordingEpoch = Frame::epochForFrameTime(0);
-
-    auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
-    myAvatar->setRecordingBasis();
+    DependencyManager::get<AvatarManager>()->getMyAvatar()->setRecordingBasis();
     _recorder->start();
 }
 
 void RecordingScriptingInterface::stopRecording() {
     _recorder->stop();
-
     _lastClip = _recorder->getClip();
-    // post-process the audio into discreet chunks based on times of received samples
-    _lastClip->seek(0);
-    Frame::ConstPointer frame;
-    while (frame = _lastClip->nextFrame()) {
-        qDebug() << "Frame time " << frame->timeOffset << " size " << frame->data.size();
-    }
     _lastClip->seek(0);
-
-    auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
-    myAvatar->clearRecordingBasis();
+    DependencyManager::get<AvatarManager>()->getMyAvatar()->clearRecordingBasis();
 }
 
 void RecordingScriptingInterface::saveRecording(const QString& filename) {
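The debug loop deleted from stopRecording() doubles as a reference for the Clip API's cursor-style iteration: seek(0) to rewind, nextFrame() until it returns null, then seek(0) again so the clip is left rewound for playback or saving. A minimal sketch of that pattern, assuming the recording::Clip/Frame types match what the diff shows (dumpClip itself is a hypothetical helper):

// Hypothetical helper: dump every frame in a clip, then rewind it.
// Uses only the Clip/Frame API visible in the diff above.
void dumpClip(const recording::ClipPointer& clip) {
    clip->seek(0);                        // rewind to the first frame
    recording::Frame::ConstPointer frame;
    while ((frame = clip->nextFrame())) { // nextFrame() yields null at the end
        qDebug() << "Frame time " << frame->timeOffset
                 << " size " << frame->data.size();
    }
    clip->seek(0);                        // leave the cursor rewound for replay
}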
@@ -743,19 +743,9 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
 }
 
 void AudioClient::handleAudioInput() {
-    if (!_audioPacket) {
-        // we don't have an audioPacket yet - set that up now
-        _audioPacket = NLPacket::create(PacketType::MicrophoneAudioNoEcho);
-    }
-
     const float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio();
-
     const int inputSamplesRequired = (int)((float)AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio);
+    const auto inputAudioSamples = std::unique_ptr<int16_t[]>(new int16_t[inputSamplesRequired]);
-
-    static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
-    int16_t* const networkAudioSamples = (int16_t*)(_audioPacket->getPayload() + leadingBytes);
-
     QByteArray inputByteArray = _inputDevice->readAll();
 
     // Add audio source injection if enabled
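The leadingBytes expression removed here reappears inside emitAudioPacket() further down; it is the size of the avatar-audio packet header that precedes the samples: a quint16 sequence number, a glm::vec3 position, a glm::quat orientation, and a quint8 stereo flag. Assuming glm's default layout (vec3 = 3 floats, quat = 4 floats), the arithmetic is:

// Byte layout of the avatar-audio packet header, per the sizeof() sum in the diff.
// Assumes glm::vec3 = 3 floats and glm::quat = 4 floats (glm's default layout).
static const int leadingBytes =
      sizeof(quint16)     //  2 bytes: outgoing sequence number
    + sizeof(glm::vec3)   // 12 bytes: head position
    + sizeof(glm::quat)   // 16 bytes: head orientation
    + sizeof(quint8);     //  1 byte:  mono/stereo flag
// leadingBytes == 31, so the int16_t samples start at getPayload() + 31.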
@@ -784,8 +774,6 @@
     float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
     _stats.updateInputMsecsRead(audioInputMsecsRead);
 
-    while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {
-
     const int numNetworkBytes = _isStereoInput
         ? AudioConstants::NETWORK_FRAME_BYTES_STEREO
         : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
@@ -793,10 +781,12 @@
         ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
         : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
 
-    static int16_t networkAudioSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
-
+    while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {
+
         if (!_muted) {
-
             // zero out the monoAudioSamples array and the locally injected audio
             memset(networkAudioSamples, 0, numNetworkBytes);
 
             // Increment the time since the last clip
             if (_timeSinceLastClip >= 0.0f) {
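These two hunks move the per-frame constants out of the loop and push the while itself down, so each iteration consumes exactly one network frame of input and (after the next hunk) hands it to emitAudioPacket(). The resulting loop shape, sketched below; readSamples() is an assumed ring-buffer read call, while samplesAvailable() and shiftReadPosition() appear in the diff itself:

// Sketch of the drain-loop shape after this change: one network frame of
// input is consumed per iteration, then emitted as a packet. Names other
// than the sketch's own come from the diff above.
while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {
    if (!_muted) {
        // read one frame and process it into networkAudioSamples
        _inputRingBuffer.readSamples(inputAudioSamples.get(), inputSamplesRequired);
    } else {
        // muted: discard the frame without processing it
        _inputRingBuffer.shiftReadPosition(inputSamplesRequired);
    }
    emitAudioPacket(networkAudioSamples);
}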
@@ -849,14 +839,38 @@
             _inputRingBuffer.shiftReadPosition(inputSamplesRequired);
         }
 
+        emitAudioPacket(networkAudioSamples);
+    }
+}
+
+void AudioClient::emitAudioPacket(const int16_t* audioData, PacketType packetType) {
+    static std::mutex _mutex;
+    using Locker = std::unique_lock<std::mutex>;
+
+    // FIXME recorded audio isn't guaranteed to have the same stereo state
+    // as the current system
+    const int numNetworkBytes = _isStereoInput
+        ? AudioConstants::NETWORK_FRAME_BYTES_STEREO
+        : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
+    const int numNetworkSamples = _isStereoInput
+        ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
+        : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
+
     auto nodeList = DependencyManager::get<NodeList>();
     SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
 
     if (audioMixer && audioMixer->getActiveSocket()) {
+        Locker lock(_mutex);
+        if (!_audioPacket) {
+            // we don't have an audioPacket yet - set that up now
+            _audioPacket = NLPacket::create(PacketType::MicrophoneAudioWithEcho);
+        }
+
         glm::vec3 headPosition = _positionGetter();
         glm::quat headOrientation = _orientationGetter();
         quint8 isStereo = _isStereoInput ? 1 : 0;
 
+        if (packetType == PacketType::Unknown) {
             if (_lastInputLoudness == 0) {
                 _audioPacket->setType(PacketType::SilentAudioFrame);
             } else {
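emitAudioPacket() can now be reached from two paths, live microphone input and recorded-audio playback, so it guards the lazily created _audioPacket with a function-local static mutex. The pattern in isolation, as a minimal self-contained sketch (emitPacket and _packet are stand-in names):

#include <memory>
#include <mutex>

// Minimal sketch of the locking pattern in emitAudioPacket(): a
// function-local static mutex serializes callers from different threads
// around a lazily created shared resource.
void emitPacket() {
    static std::mutex _mutex;
    using Locker = std::unique_lock<std::mutex>;

    static std::unique_ptr<int> _packet; // stands in for _audioPacket

    Locker lock(_mutex);                 // held until the function returns
    if (!_packet) {
        _packet.reset(new int(0));       // create-on-first-use, race-free
    }
    // ...fill and send the packet while still holding the lock...
}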
@@ -866,6 +880,9 @@
                 _audioPacket->setType(PacketType::MicrophoneAudioNoEcho);
             }
         }
+        } else {
+            _audioPacket->setType(packetType);
+        }
 
         // reset the audio packet so we can start writing
         _audioPacket->reset();
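The added else branch completes the type dispatch: a caller that passes an explicit type (the recorded-audio path below passes MicrophoneAudioWithEcho) gets it verbatim, while the default PacketType::Unknown keeps the old loudness-based choice. Restated as a sketch (choosePacketType is a hypothetical helper; the PacketType values are the ones in the diff):

// Hypothetical helper restating the packet-type decision in these hunks.
PacketType choosePacketType(PacketType requested, float lastInputLoudness) {
    if (requested != PacketType::Unknown) {
        return requested;                    // explicit request, e.g. recorded audio
    }
    return (lastInputLoudness == 0.0f)
        ? PacketType::SilentAudioFrame       // nothing audible: send a silent frame
        : PacketType::MicrophoneAudioNoEcho; // live microphone input
}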
@@ -893,6 +910,10 @@
         _audioPacket->setPayloadSize(_audioPacket->getPayloadSize() + numNetworkBytes);
     }
 
+        static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
+        int16_t* const networkAudioSamples = (int16_t*)(_audioPacket->getPayload() + leadingBytes);
+        memcpy(networkAudioSamples, audioData, numNetworkBytes);
+
         _stats.sentPacket();
 
         nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
@@ -902,34 +923,9 @@
         _outgoingAvatarAudioSequenceNumber++;
     }
 }
-}
 
 void AudioClient::handleRecordedAudioInput(const QByteArray& audio) {
-    if (!_audioPacket) {
-        // we don't have an audioPacket yet - set that up now
-        _audioPacket = NLPacket::create(PacketType::MicrophoneAudioWithEcho);
-    }
-
-    // FIXME either discard stereo in the recording or record a stereo flag
-
-    auto nodeList = DependencyManager::get<NodeList>();
-    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
-    if (audioMixer && audioMixer->getActiveSocket()) {
-        glm::vec3 headPosition = _positionGetter();
-        glm::quat headOrientation = _orientationGetter();
-        quint8 isStereo = _isStereoInput ? 1 : 0;
-        _audioPacket->reset();
-        _audioPacket->setType(PacketType::MicrophoneAudioWithEcho);
-        _audioPacket->writePrimitive(_outgoingAvatarAudioSequenceNumber);
-        _audioPacket->writePrimitive(isStereo);
-        _audioPacket->writePrimitive(headPosition);
-        _audioPacket->writePrimitive(headOrientation);
-        _audioPacket->write(audio);
-        _stats.sentPacket();
-        nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
-        nodeList->sendUnreliablePacket(*_audioPacket, *audioMixer);
-        _outgoingAvatarAudioSequenceNumber++;
-    }
+    emitAudioPacket((int16_t*)audio.data(), PacketType::MicrophoneAudioWithEcho);
 }
 
 void AudioClient::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
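handleRecordedAudioInput() collapses to a single emitAudioPacket() call, so both audio paths now share one packet writer. The FIXME carried over into emitAudioPacket() flags the remaining gap: a clip does not record a stereo flag, so playback trusts the current _isStereoInput, and the frame size it implies may not match the recorded data:

// Why the stereo FIXME matters: bytes consumed per network frame follow
// the current input state, not how the clip was recorded (constants as
// referenced in the diff).
const int numNetworkBytes = _isStereoInput
    ? AudioConstants::NETWORK_FRAME_BYTES_STEREO       // stereo frame
    : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL; // mono frame
// A mono clip replayed while _isStereoInput is true would be memcpy'd as
// stereo, reading past the end of the recorded frame.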
@@ -212,6 +212,7 @@ protected:
     }
 
 private:
+    void emitAudioPacket(const int16_t* audioData, PacketType packetType = PacketType::Unknown);
     void outputFormatChanged();
 
     QByteArray firstInputFrame;
@@ -1443,14 +1443,10 @@ QByteArray AvatarData::toFrame(const AvatarData& avatar) {
 
     auto recordingBasis = avatar.getRecordingBasis();
     if (recordingBasis) {
+        root[JSON_AVATAR_BASIS] = Transform::toJson(*recordingBasis);
         // Find the relative transform
         auto relativeTransform = recordingBasis->relativeTransform(avatar.getTransform());
-
-        // if the resulting relative basis is identity, we shouldn't record anything
-        if (!relativeTransform.isIdentity()) {
-            root[JSON_AVATAR_RELATIVE] = Transform::toJson(relativeTransform);
-            root[JSON_AVATAR_BASIS] = Transform::toJson(*recordingBasis);
-        }
+        root[JSON_AVATAR_RELATIVE] = Transform::toJson(relativeTransform);
     } else {
         root[JSON_AVATAR_RELATIVE] = Transform::toJson(avatar.getTransform());
     }
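The reordering makes toFrame() write the basis and the relative transform unconditionally whenever a recording basis is set; previously an identity relative transform caused both keys to be skipped, which lost the basis on replay. The underlying math (relativeTransform() is the engine's API; this glm sketch assumes Transform is equivalent to a 4x4 matrix):

#include <glm/glm.hpp>

// Sketch of recordingBasis->relativeTransform(current): express the
// avatar's current transform in the frame captured at record start, so
// replay can re-root the motion under a new basis.
glm::mat4 relativeTransform(const glm::mat4& basis, const glm::mat4& current) {
    return glm::inverse(basis) * current;
}
// On replay: absolute = newBasis * relative.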
@@ -1484,6 +1480,9 @@ QByteArray AvatarData::toFrame(const AvatarData& avatar) {
 
 void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
     QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
+#ifdef WANT_JSON_DEBUG
+    qDebug() << doc.toJson(QJsonDocument::JsonFormat::Indented);
+#endif
     QJsonObject root = doc.object();
 
     if (root.contains(JSON_AVATAR_HEAD_MODEL)) {
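fromFrame() decodes the QJsonDocument binary representation produced by toFrame(), with the new debug dump compiled in only when WANT_JSON_DEBUG is defined. A minimal round trip of that Qt 5 serialization, for reference (encode/decode are illustrative names):

#include <QJsonDocument>
#include <QJsonObject>
#include <QDebug>

// Minimal round trip of the Qt 5 binary JSON format used for avatar frames.
QByteArray encode() {
    QJsonObject root;
    root["example"] = 1.0;                     // stand-in for the avatar fields
    return QJsonDocument(root).toBinaryData(); // what toFrame() returns
}

void decode(const QByteArray& frameData) {
    QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
#ifdef WANT_JSON_DEBUG
    qDebug() << doc.toJson(QJsonDocument::JsonFormat::Indented);
#endif
    QJsonObject root = doc.object();
    // ...read avatar fields out of root...
}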