mirror of https://github.com/JulianGro/overte.git
move valid position check to packet processing

parent 371de312cc
commit 7d8b15ed75

10 changed files with 83 additions and 42 deletions
@@ -13,6 +13,8 @@
 
 #include <random>
 
+#include <glm/detail/func_common.hpp>
+
 #include <QtCore/QDebug>
 #include <QtCore/QJsonArray>
 
@@ -332,18 +334,65 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
     return 0;
 }
 
+bool AudioMixerClientData::containsValidPosition(ReceivedMessage& message) const {
+    static const int SEQUENCE_NUMBER_BYTES = sizeof(quint16);
+
+    auto posBefore = message.getPosition();
+
+    message.seek(SEQUENCE_NUMBER_BYTES);
+
+    // skip over the codec string
+    message.readString();
+
+    switch (message.getType()) {
+        case PacketType::MicrophoneAudioNoEcho:
+        case PacketType::MicrophoneAudioWithEcho: {
+            // skip over the stereo flag
+            message.seek(message.getPosition() + sizeof(ChannelFlag));
+            break;
+        }
+        case PacketType::SilentAudioFrame: {
+            // skip the number of silent samples
+            message.seek(message.getPosition() + sizeof(SilentSamplesBytes));
+            break;
+        }
+        case PacketType::InjectAudio: {
+            // skip the stream ID, stereo flag, and loopback flag
+            message.seek(message.getPosition() + NUM_STREAM_ID_BYTES + sizeof(ChannelFlag) + sizeof(LoopbackFlag));
+        }
+        default:
+            Q_UNREACHABLE();
+            break;
+    }
+
+    glm::vec3 peekPosition;
+    message.readPrimitive(&peekPosition);
+
+    // reset the position the message was at before we were called
+    message.seek(posBefore);
+
+    if (glm::any(glm::isnan(peekPosition))) {
+        return false;
+    }
+
+    return true;
+}
+
 void AudioMixerClientData::processStreamPacket(ReceivedMessage& message, ConcurrentAddedStreams &addedStreams) {
+
+    if (!containsValidPosition(message)) {
+        qDebug() << "Refusing to process audio stream from" << message.getSourceID() << "with invalid position";
+        return;
+    }
+
     SharedStreamPointer matchingStream;
 
     auto packetType = message.getType();
     bool newStream = false;
 
     if (packetType == PacketType::MicrophoneAudioWithEcho
-        || packetType == PacketType::ReplicatedMicrophoneAudioWithEcho
         || packetType == PacketType::MicrophoneAudioNoEcho
-        || packetType == PacketType::ReplicatedMicrophoneAudioNoEcho
-        || packetType == PacketType::SilentAudioFrame
-        || packetType == PacketType::ReplicatedSilentAudioFrame) {
+        || packetType == PacketType::SilentAudioFrame) {
 
         QWriteLocker writeLocker { &_streamsLock };
 
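The new containsValidPosition guard peeks at the positional payload without consuming it: it records the read cursor, seeks past the header fields it does not need (sequence number, codec string, and the per-packet-type flags), reads the position, and then restores the cursor so the normal parsing path sees an untouched message. A minimal, self-contained sketch of that peek-and-restore idea, using a plain byte buffer in place of the project's ReceivedMessage class (ByteReader, Vec3, and the 4-byte pretend header are illustrative only):

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Hypothetical stand-in for ReceivedMessage: a byte buffer with a movable read cursor.
struct ByteReader {
    std::vector<uint8_t> bytes;
    size_t cursor = 0;

    void seek(size_t pos) { cursor = pos; }
    size_t position() const { return cursor; }

    template <typename T>
    void readPrimitive(T* out) {                    // copy sizeof(T) bytes and advance the cursor
        std::memcpy(out, bytes.data() + cursor, sizeof(T));
        cursor += sizeof(T);
    }
};

struct Vec3 { float x, y, z; };

// Peek at the position that sits headerBytes into the payload, then restore the cursor,
// mirroring the save / seek / read / restore shape of the function above.
bool containsValidPosition(ByteReader& message, size_t headerBytes) {
    const size_t before = message.position();       // remember where the caller's cursor was

    message.seek(headerBytes);                      // hop over sequence number, codec string, flags
    Vec3 position;
    message.readPrimitive(&position);

    message.seek(before);                           // leave the message exactly as we found it

    return !(std::isnan(position.x) || std::isnan(position.y) || std::isnan(position.z));
}

int main() {
    ByteReader message;
    message.bytes.resize(4 + sizeof(Vec3));         // 4 pretend header bytes, then the position
    Vec3 bad { NAN, 0.0f, 0.0f };
    std::memcpy(message.bytes.data() + 4, &bad, sizeof(bad));

    std::cout << std::boolalpha
              << containsValidPosition(message, 4)  // false: the position contains NaN
              << " cursor=" << message.position()   // 0: the cursor was restored
              << "\n";
}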
@@ -355,7 +404,7 @@ void AudioMixerClientData::processStreamPacket(ReceivedMessage& message, ConcurrentAddedStreams &addedStreams) {
             // we don't have a mic stream yet, so add it
 
             // hop past the sequence number that leads the packet
-            message.seek(sizeof(quint16));
+            message.seek(sizeof(StreamSequenceNumber));
 
             // pull the codec string from the packet
             auto codecString = message.readString();
@@ -363,11 +412,11 @@ void AudioMixerClientData::processStreamPacket(ReceivedMessage& message, ConcurrentAddedStreams &addedStreams) {
             // determine if the stream is stereo or not
             bool isStereo;
             if (packetType == PacketType::SilentAudioFrame || packetType == PacketType::ReplicatedSilentAudioFrame) {
-                quint16 numSilentSamples;
+                SilentSamplesBytes numSilentSamples;
                 message.readPrimitive(&numSilentSamples);
                 isStereo = numSilentSamples == AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
             } else {
-                quint8 channelFlag;
+                ChannelFlag channelFlag;
                 message.readPrimitive(&channelFlag);
                 isStereo = channelFlag == 1;
             }
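The raw quint16/quint8 literals give way to named aliases (StreamSequenceNumber, SilentSamplesBytes, ChannelFlag, LoopbackFlag, introduced in the header hunks below), so the sizeof() expressions used for seeking in containsValidPosition and the types read here cannot drift apart. A small sketch of the idea, assuming only the aliases shown in this diff; the offset names are hypothetical, and the real packets also carry a variable-length codec string between these fields, which is why the guard skips it with readString():

#include <cstddef>
#include <cstdint>

// Local equivalents of the aliases added in this diff; the project spells them
// with Qt's quint16 / quint8 / uchar.
using StreamSequenceNumber = uint16_t;   // quint16
using SilentSamplesBytes   = uint16_t;   // quint16
using ChannelFlag          = uint8_t;    // quint8
using LoopbackFlag         = unsigned char;

// Illustrative offsets only: every byte count is derived from the alias, so a
// width change in one place cannot silently desynchronize a seek() elsewhere.
constexpr size_t kSilentSamplesFieldOffset = sizeof(StreamSequenceNumber);
constexpr size_t kSilentSamplesFieldEnd    = kSilentSamplesFieldOffset + sizeof(SilentSamplesBytes);
constexpr size_t kStereoFlagFieldEnd       = sizeof(StreamSequenceNumber) + sizeof(ChannelFlag);

static_assert(kSilentSamplesFieldEnd == 4, "sequence number + silent-sample count");
static_assert(kStereoFlagFieldEnd == 3, "sequence number + stereo flag");
static_assert(sizeof(LoopbackFlag) == 1, "loopback flag travels as a single byte");

int main() { return 0; }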
@@ -395,17 +444,15 @@ void AudioMixerClientData::processStreamPacket(ReceivedMessage& message, ConcurrentAddedStreams &addedStreams) {
         }
 
         writeLocker.unlock();
-    } else if (packetType == PacketType::InjectAudio
-               || packetType == PacketType::ReplicatedInjectAudio) {
+    } else if (packetType == PacketType::InjectAudio) {
         // this is injected audio
-        // grab the stream identifier for this injected audio
-        message.seek(sizeof(quint16));
+        // skip the sequence number and codec string and grab the stream identifier for this injected audio
+        message.seek(sizeof(StreamSequenceNumber));
+        message.readString();
 
         QUuid streamIdentifier = QUuid::fromRfc4122(message.readWithoutCopy(NUM_BYTES_RFC4122_UUID));
 
-        bool isStereo;
-        message.readPrimitive(&isStereo);
 
         QWriteLocker writeLock { &_streamsLock };
 
         auto streamIt = std::find_if(_audioStreams.begin(), _audioStreams.end(), [&streamIdentifier](const SharedStreamPointer& stream) {
@@ -413,6 +460,9 @@ void AudioMixerClientData::processStreamPacket(ReceivedMessage& message, ConcurrentAddedStreams &addedStreams) {
         });
 
         if (streamIt == _audioStreams.end()) {
+            bool isStereo;
+            message.readPrimitive(&isStereo);
+
             // we don't have this injected stream yet, so add it
             auto injectorStream = new InjectedAudioStream(streamIdentifier, isStereo, AudioMixer::getStaticJitterFrames());
 
@@ -172,6 +172,8 @@ private:
     void setGainForAvatar(QUuid nodeID, uint8_t gain);
 
+    bool containsValidPosition(ReceivedMessage& message) const;
+
     MixableStreamsVector _mixableStreams;
 
     quint16 _outgoingMixedAudioSequenceNumber;
@@ -135,12 +135,6 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
     AvatarAudioStream* listenerAudioStream = static_cast<AudioMixerClientData*>(listener->getLinkedData())->getAvatarAudioStream();
     AudioMixerClientData* listenerData = static_cast<AudioMixerClientData*>(listener->getLinkedData());
 
-    // if we received an invalid position from this listener, then refuse to make them a mix
-    // because we don't know how to do it properly
-    if (!listenerAudioStream->hasValidPosition()) {
-        return false;
-    }
-
     // zero out the mix for this listener
     memset(_mixSamples, 0, sizeof(_mixSamples));
 
@@ -195,6 +189,7 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
     auto it = mixableStreams.begin();
     auto end = mixableStreams.end();
     while (it != end) {
+
         // check if this node (and therefore all of the node's streams) has been removed
         auto& nodeIDStreamID = it->nodeStreamID;
         auto matchedRemovedNode = std::find(_sharedData.removedNodes.cbegin(), _sharedData.removedNodes.cend(),
@@ -279,7 +274,7 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
         } else {
             // we're throttling, so we need to update the approximate volume for any un-skipped streams
             // unless this is simply for an echo (in which case the approx volume is 1.0)
-            if (!it->skippedStream) {
+            if (!it->skippedStream && it->positionalStream->getLastPopOutputTrailingLoudness() > 0.0f) {
                 if (it->positionalStream != listenerAudioStream) {
                     // approximate the gain
                     float gain = approximateGain(*listenerAudioStream, *(it->positionalStream));
@@ -23,9 +23,9 @@ int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray&
 
     if (type == PacketType::SilentAudioFrame) {
         const char* dataAt = packetAfterSeqNum.constData();
-        quint16 numSilentSamples = *(reinterpret_cast<const quint16*>(dataAt));
-        readBytes += sizeof(quint16);
-        numAudioSamples = (int)numSilentSamples;
+        SilentSamplesBytes numSilentSamples = *(reinterpret_cast<const quint16*>(dataAt));
+        readBytes += sizeof(SilentSamplesBytes);
+        numAudioSamples = (int) numSilentSamples;
 
         // read the positional data
         readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));
@@ -34,9 +34,9 @@ int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray&
         _shouldLoopbackForNode = (type == PacketType::MicrophoneAudioWithEcho);
 
         // read the channel flag
-        quint8 channelFlag = packetAfterSeqNum.at(readBytes);
+        ChannelFlag channelFlag = packetAfterSeqNum.at(readBytes);
         bool isStereo = channelFlag == 1;
-        readBytes += sizeof(quint8);
+        readBytes += sizeof(ChannelFlag);
 
         // if isStereo value has changed, restart the ring buffer with new frame size
         if (isStereo != _isStereo) {
@@ -16,6 +16,8 @@
 
 #include "PositionalAudioStream.h"
 
+using SilentSamplesBytes = quint16;
+
 class AvatarAudioStream : public PositionalAudioStream {
 public:
     AvatarAudioStream(bool isStereo, int numStaticJitterFrames = -1);
@@ -30,6 +30,8 @@
 // Audio Env bitset
 const int HAS_REVERB_BIT = 0; // 1st bit
 
+using StreamSequenceNumber = quint16;
+
 class InboundAudioStream : public NodeData {
     Q_OBJECT
 
@@ -50,7 +50,7 @@ int InjectedAudioStream::parseStreamProperties(PacketType type,
     }
 
     // pull the loopback flag and set our boolean
-    uchar shouldLoopback;
+    LoopbackFlag shouldLoopback;
     packetStream >> shouldLoopback;
     _shouldLoopbackForNode = (shouldLoopback == 1);
 
@@ -16,6 +16,8 @@
 
 #include "PositionalAudioStream.h"
 
+using LoopbackFlag = uchar;
+
 class InjectedAudioStream : public PositionalAudioStream {
 public:
     InjectedAudioStream(const QUuid& streamIdentifier, bool isStereo, int numStaticJitterFrames = -1);
@@ -14,7 +14,6 @@
 
 #include <cstring>
 
-#include <glm/detail/func_common.hpp>
 #include <QtCore/QDataStream>
 #include <QtCore/QLoggingCategory>
 
@@ -78,16 +77,6 @@ int PositionalAudioStream::parsePositionalData(const QByteArray& positionalByteArray) {
     QDataStream packetStream(positionalByteArray);
 
     packetStream.readRawData(reinterpret_cast<char*>(&_position), sizeof(_position));
-
-    // if the client sends us a bad position, flag it so that we don't consider this stream for mixing
-    if (glm::isnan(_position.x) || glm::isnan(_position.y) || glm::isnan(_position.z)) {
-        HIFI_FDEBUG("PositionalAudioStream unpacked invalid position for node" << uuidStringWithoutCurlyBraces(getNodeID()) );
-
-        _hasValidPosition = false;
-    } else {
-        _hasValidPosition = true;
-    }
-
     packetStream.readRawData(reinterpret_cast<char*>(&_orientation), sizeof(_orientation));
     packetStream.readRawData(reinterpret_cast<char*>(&_avatarBoundingBoxCorner), sizeof(_avatarBoundingBoxCorner));
     packetStream.readRawData(reinterpret_cast<char*>(&_avatarBoundingBoxScale), sizeof(_avatarBoundingBoxScale));
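The per-component NaN test removed here is the same check that containsValidPosition now performs up front with glm::any(glm::isnan(...)) on the whole vector, before a stream is ever created. A tiny standalone sketch showing the two spellings agree, assuming glm is available via <glm/glm.hpp> rather than the project's own includes:

#include <cassert>
#include <cmath>
#include <glm/glm.hpp>

int main() {
    glm::vec3 good(1.0f, 2.0f, 3.0f);
    glm::vec3 bad(0.0f, std::nanf(""), 0.0f);

    // Component-wise spelling, as in the code removed above ...
    bool badComponentWise = std::isnan(bad.x) || std::isnan(bad.y) || std::isnan(bad.z);

    // ... and the whole-vector spelling used by containsValidPosition.
    bool badAnyNan = glm::any(glm::isnan(bad));

    assert(badComponentWise == badAnyNan);
    assert(!glm::any(glm::isnan(good)));
    return 0;
}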
@@ -20,6 +20,7 @@
 const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
 
 using StreamID = QUuid;
+const int NUM_STREAM_ID_BYTES = NUM_BYTES_RFC4122_UUID;
 
 struct NodeIDStreamID {
     QUuid nodeID;
|
@ -34,6 +35,8 @@ struct NodeIDStreamID {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
using ChannelFlag = quint8;
|
||||||
|
|
||||||
class PositionalAudioStream : public InboundAudioStream {
|
class PositionalAudioStream : public InboundAudioStream {
|
||||||
Q_OBJECT
|
Q_OBJECT
|
||||||
public:
|
public:
|
||||||
|
@@ -66,8 +69,6 @@ public:
     const glm::vec3& getAvatarBoundingBoxCorner() const { return _avatarBoundingBoxCorner; }
     const glm::vec3& getAvatarBoundingBoxScale() const { return _avatarBoundingBoxScale; }
 
-    bool hasValidPosition() const { return _hasValidPosition; }
-
     using IgnoreBox = AABox;
 
     // called from single AudioMixerSlave while processing packets for node
@@ -106,8 +107,6 @@ protected:
     float _quietestFrameLoudness;
     int _frameCounter;
 
-    bool _hasValidPosition { false };
-
     bool _isIgnoreBoxEnabled { false };
     IgnoreBox _ignoreBox;
 };