Mirror of https://github.com/lubosz/overte.git

Commit a3b44a6a73 (parent 4825457f4d): downstream silent packets seem to be working

11 changed files with 76 additions and 45 deletions
@@ -146,7 +146,7 @@ void Agent::readPendingDatagrams() {
             _voxelViewer.processDatagram(mutablePacket, sourceNode);
         }

-    } else if (datagramPacketType == PacketTypeMixedAudio) {
+    } else if (datagramPacketType == PacketTypeMixedAudio || datagramPacketType == PacketTypeSilentAudioFrame) {

         _receivedAudioStream.parseData(receivedPacket);

@@ -93,7 +93,7 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
 const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
 const float ATTENUATION_EPSILON_DISTANCE = 0.1f;

-void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
                                                           AvatarAudioStream* listeningNodeStream) {
     float bearingRelativeAngleToSource = 0.0f;
     float attenuationCoefficient = 1.0f;

@@ -116,7 +116,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
     if (streamToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
         // according to mixer performance we have decided this does not get to be mixed in
         // bail out
-        return;
+        return 0;
     }

     ++_sumMixes;

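The early return above is the mixer's audibility cull: a stream whose trailing loudness, divided by its distance from the listener, falls at or below _minAudibilityThreshold is skipped, and the new int return value lets the caller count the streams that survive. A minimal worked example with hypothetical numbers:

    // All values hypothetical, for illustration only.
    float trailingLoudness = 0.02f;        // recent average loudness, in [0, 1]
    float distanceBetween = 5.0f;          // listener-to-source distance
    float _minAudibilityThreshold = 0.01f; // mixer cutoff

    // 0.02f / 5.0f = 0.004f <= 0.01f, so this stream is culled: return 0.
    bool culled = (trailingLoudness / distanceBetween) <= _minAudibilityThreshold;
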
@@ -261,36 +261,39 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
                                                  MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
         }
     }

+    return 1;
 }

-void AudioMixer::prepareMixForListeningNode(Node* node) {
+int AudioMixer::prepareMixForListeningNode(Node* node) {
     AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();

     // zero out the client mix for this node
     memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);

     // loop through all other nodes that have sufficient audio to mix
+    int streamsMixed = 0;
     foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
         if (otherNode->getLinkedData()) {

             AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

             // enumerate the ARBs attached to the otherNode and add all that should be added to mix

             const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
             QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-            for (i = otherNodeAudioStreams.begin(); i != otherNodeAudioStreams.constEnd(); i++) {
+            for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
                 PositionalAudioStream* otherNodeStream = i.value();

                 if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode())
                     && otherNodeStream->lastPopSucceeded()
-                    && otherNodeStream->getNextOutputTrailingLoudness() > 0.0f) {
+                    && otherNodeStream->getLastPopOutputFrameLoudness() > 0.0f) {

-                    addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
+                    streamsMixed += addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
                 }
             }
         }
     }
+    return streamsMixed;
 }

 void AudioMixer::readPendingDatagrams() {

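prepareMixForListeningNode() now reports how many streams actually made it into the mix. As the run() hunks below show, that count is what decides between a full mixed-audio packet and the new two-byte silent frame. A sketch of the calling pattern, condensed from the diff itself:

    int streamsMixed = prepareMixForListeningNode(node.data());
    if (streamsMixed > 0) {
        // pack and send PacketTypeMixedAudio with _clientSamples
    } else {
        // pack and send PacketTypeSilentAudioFrame with a sample count only
    }
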
@@ -474,9 +477,8 @@ void AudioMixer::run() {
     int nextFrame = 0;
     QElapsedTimer timer;
     timer.start();

-    char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO + sizeof(quint16)
-                                     + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];
+    char clientMixBuffer[MAX_PACKET_SIZE];

     int usecToSleep = BUFFER_SEND_INTERVAL_USECS;

@@ -555,20 +557,37 @@ void AudioMixer::run() {
             if (node->getType() == NodeType::Agent
                 && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioStream()) {

-                prepareMixForListeningNode(node.data());
+                int streamsMixed = prepareMixForListeningNode(node.data());

-                // pack header
-                int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
-                char* dataAt = clientMixBuffer + numBytesPacketHeader;
+                char* dataAt;
+                if (streamsMixed > 0) {
+                    // pack header
+                    int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
+                    dataAt = clientMixBuffer + numBytesPacketHeader;

                     // pack sequence number
                     quint16 sequence = nodeData->getOutgoingSequenceNumber();
                     memcpy(dataAt, &sequence, sizeof(quint16));
                     dataAt += sizeof(quint16);

                     // pack mixed audio samples
                     memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
                     dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
+                } else {
+                    // pack header
+                    int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeSilentAudioFrame);
+                    dataAt = clientMixBuffer + numBytesPacketHeader;
+
+                    // pack sequence number
+                    quint16 sequence = nodeData->getOutgoingSequenceNumber();
+                    memcpy(dataAt, &sequence, sizeof(quint16));
+                    dataAt += sizeof(quint16);
+
+                    // pack number of silent audio samples
+                    quint16 numSilentSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
+                    memcpy(dataAt, &numSilentSamples, sizeof(quint16));
+                    dataAt += sizeof(quint16);
+                }

                 // send mixed audio packet
                 nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node);

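Note that clientMixBuffer also switches from a heap allocation sized exactly for a mixed-audio payload to a MAX_PACKET_SIZE stack buffer, which accommodates either layout. Reading the packing code above together with the client-side routing below, the two downstream payloads appear to be (a hedged reading of the diff, not an authoritative wire spec):

    // PacketTypeMixedAudio:
    //   [packet header][quint16 sequence][NETWORK_BUFFER_LENGTH_BYTES_STEREO bytes of mixed samples]
    //
    // PacketTypeSilentAudioFrame:
    //   [packet header][quint16 sequence][quint16 numSilentSamples]

The silent frame keeps the sequence number flowing for jitter-buffer accounting while replacing a full stereo frame of zeros with a two-byte sample count.
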
@@ -41,11 +41,11 @@ public slots:

 private:
     /// adds one stream to the mix for a listening node
-    void addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+    int addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
                                                   AvatarAudioStream* listeningNodeStream);

     /// prepares and sends a mix to one Node
-    void prepareMixForListeningNode(Node* node);
+    int prepareMixForListeningNode(Node* node);

     // client samples capacity is larger than what will be sent to optimize mixing
     // we are MMX adding 4 samples at a time so we need client samples to have an extra 4

@@ -101,6 +101,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
 void AudioMixerClientData::audioStreamsPopFrameForMixing() {
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
+        i.value()->updateNextOutputTrailingLoudness();
         i.value()->popFrames(1);
     }
 }

@@ -48,6 +48,7 @@ void DatagramProcessor::processDatagrams() {
             // only process this packet if we have a match on the packet version
             switch (packetTypeForPacket(incomingPacket)) {
                 case PacketTypeMixedAudio:
+                case PacketTypeSilentAudioFrame:
                     QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToStream", Qt::QueuedConnection,
                                               Q_ARG(QByteArray, incomingPacket));
                     break;

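The case fallthrough queues a silent frame exactly like mixed audio, so both packet types share one path into the Interface's receive code. How the stream expands the silent frame is not part of this diff; a purely hypothetical sketch of the parse-side branch, using only names visible elsewhere in this commit (addSilentFrame is declared as context in the AudioRingBuffer.h hunk below):

    // Hypothetical parse-side handling; not shown in this commit's diff.
    if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {
        quint16 numSilentSamples;
        memcpy(&numSilentSamples, dataAt, sizeof(quint16)); // payload after the sequence number
        _ringBuffer.addSilentFrame(numSilentSamples);       // declared in AudioRingBuffer.h
    }
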
@@ -218,17 +218,24 @@ int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int
     }
 }

-float AudioRingBuffer::getNextOutputFrameLoudness() const {
+float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
     float loudness = 0.0f;
-    int16_t* sampleAt = _nextOutput;
-    int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
-    if (samplesAvailable() >= _numFrameSamples) {
-        for (int i = 0; i < _numFrameSamples; ++i) {
-            loudness += fabsf(*sampleAt);
-            sampleAt = sampleAt == _bufferLastAt ? _buffer : sampleAt + 1;
-        }
-        loudness /= _numFrameSamples;
-        loudness /= MAX_SAMPLE_VALUE;
+    const int16_t* sampleAt = frameStart;
+    const int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
+    for (int i = 0; i < _numFrameSamples; ++i) {
+        loudness += fabsf(*sampleAt);
+        sampleAt = sampleAt == _bufferLastAt ? _buffer : sampleAt + 1;
     }
+    loudness /= _numFrameSamples;
+    loudness /= MAX_SAMPLE_VALUE;
+
     return loudness;
 }

+float AudioRingBuffer::getFrameLoudness(ConstIterator frameStart) const {
+    return getFrameLoudness(&(*frameStart));
+}
+
+float AudioRingBuffer::getNextOutputFrameLoudness() const {
+    return getFrameLoudness(_nextOutput);
+}

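The refactor above splits the loudness math out of getNextOutputFrameLoudness() so it can run over any frame start, including the last-popped one. A self-contained sketch of the technique, assuming MAX_SAMPLE_VALUE is the int16 maximum as in typical PCM code:

    #include <cmath>
    #include <cstdint>

    // Mean absolute amplitude of one frame in a circular buffer, normalized
    // into [0, 1]; the read pointer wraps from the last slot back to the first.
    float frameLoudness(const int16_t* buffer, int sampleCapacity,
                        const int16_t* frameStart, int numFrameSamples) {
        const int16_t* bufferLast = buffer + sampleCapacity - 1;
        const int16_t* sampleAt = frameStart;
        float loudness = 0.0f;
        for (int i = 0; i < numFrameSamples; ++i) {
            loudness += fabsf(*sampleAt);
            sampleAt = (sampleAt == bufferLast) ? buffer : sampleAt + 1; // wrap
        }
        loudness /= numFrameSamples;
        loudness /= 32767.0f; // assumed MAX_SAMPLE_VALUE
        return loudness;
    }
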
@@ -75,6 +75,10 @@ public:
     int getOverflowCount() const { return _overflowCount; } /// how many times has the ring buffer has overwritten old data

     int addSilentFrame(int numSilentSamples);

+private:
+    float getFrameLoudness(const int16_t* frameStart) const;
+
 protected:
     // disallow copying of AudioRingBuffer objects
     AudioRingBuffer(const AudioRingBuffer&);

@@ -110,7 +114,7 @@ public:

     bool operator==(const ConstIterator& rhs) { return _at == rhs._at; }
     bool operator!=(const ConstIterator& rhs) { return _at != rhs._at; }
-    int16_t operator*() { return *_at; }
+    const int16_t& operator*() { return *_at; }

     ConstIterator& operator=(const ConstIterator& rhs) {
         _capacity = rhs._capacity;

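Returning a const reference instead of a copy is presumably what makes the new ConstIterator overload legal: getFrameLoudness(ConstIterator) in the .cpp hunk above takes &(*frameStart), which needs dereferencing to yield an lvalue inside the ring buffer rather than a temporary.

    // With `int16_t operator*()` this would take the address of a temporary;
    // with `const int16_t& operator*()` it points into the ring buffer.
    const int16_t* frameStart = &(*iterator); // `iterator` is a hypothetical ConstIterator
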
@@ -179,6 +183,7 @@ public:
     };

     ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); }
+    float getFrameLoudness(ConstIterator frameStart) const;
 };

 #endif // hifi_AudioRingBuffer_h

@@ -174,6 +174,10 @@ void InboundAudioStream::starved() {
     _framesAvailableStats.reset();
 }

+float InboundAudioStream::getLastPopOutputFrameLoudness() const {
+    return _ringBuffer.getFrameLoudness(_lastPopOutput);
+}
+
 void InboundAudioStream::overrideDesiredJitterBufferFramesTo(int desired) {
     _dynamicJitterBuffersOverride = true;
     _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(desired);

@@ -80,6 +80,8 @@ public:

     /// returns the desired number of jitter buffer frames using Freddy's method
     int getCalculatedJitterBufferFramesUsingMaxGap() const { return _calculatedJitterBufferFramesUsingMaxGap; }

+    float getLastPopOutputFrameLoudness() const;
+
     int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; }
     int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); }

@@ -34,12 +34,6 @@ PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, b
 {
 }

-int PositionalAudioStream::parseData(const QByteArray& packet) {
-    int bytesRead = InboundAudioStream::parseData(packet);
-    updateNextOutputTrailingLoudness();
-    return bytesRead;
-}
-
 void PositionalAudioStream::updateNextOutputTrailingLoudness() {
     float nextLoudness = _ringBuffer.getNextOutputFrameLoudness();

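Deleting PositionalAudioStream::parseData() is consistent with the AudioMixerClientData hunk earlier: the trailing-loudness refresh moves from packet-parse time to mix time. A hedged before/after sketch of the call flow implied by this commit:

    // Before: every parsed audio packet also refreshed loudness.
    //   PositionalAudioStream::parseData()
    //       -> InboundAudioStream::parseData()
    //       -> updateNextOutputTrailingLoudness()
    //
    // After: loudness is refreshed once per mix cycle, next to the frame pop.
    //   AudioMixerClientData::audioStreamsPopFrameForMixing()
    //       -> updateNextOutputTrailingLoudness()
    //       -> popFrames(1)
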
@@ -29,8 +29,6 @@ public:

     PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);

-    int parseData(const QByteArray& packet);
-
     virtual AudioStreamStats getAudioStreamStats() const;

     void updateNextOutputTrailingLoudness();
