Mirror of https://github.com/lubosz/overte.git (synced 2025-04-23 01:04:06 +02:00)
downstream silent packets seem to be working

parent 4825457f4d, commit a3b44a6a73
11 changed files with 76 additions and 45 deletions
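In short: the mixer's per-listener mix methods now return how many streams they actually mixed. When a listener's mix comes back empty, the audio mixer sends a compact PacketTypeSilentAudioFrame (a sequence number plus a count of silent samples) instead of a full PacketTypeMixedAudio payload, and the downstream datagram handlers accept the new packet type. Frame loudness is now computed from the last popped frame via new AudioRingBuffer::getFrameLoudness helpers, replacing the parse-time update in PositionalAudioStream.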
Agent.cpp:
@@ -146,7 +146,7 @@ void Agent::readPendingDatagrams() {
 
             _voxelViewer.processDatagram(mutablePacket, sourceNode);
         }
-    } else if (datagramPacketType == PacketTypeMixedAudio) {
+    } else if (datagramPacketType == PacketTypeMixedAudio || datagramPacketType == PacketTypeSilentAudioFrame) {
 
         _receivedAudioStream.parseData(receivedPacket);
AudioMixer.cpp:
@@ -93,7 +93,7 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
 const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
 const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
 
-void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
                                                           AvatarAudioStream* listeningNodeStream) {
     float bearingRelativeAngleToSource = 0.0f;
     float attenuationCoefficient = 1.0f;
@@ -116,7 +116,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
     if (streamToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
         // according to mixer performance we have decided this does not get to be mixed in
         // bail out
-        return;
+        return 0;
     }
 
     ++_sumMixes;
@@ -261,36 +261,39 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
                                                           MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
         }
     }
 
+    return 1;
 }
 
-void AudioMixer::prepareMixForListeningNode(Node* node) {
+int AudioMixer::prepareMixForListeningNode(Node* node) {
     AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
 
     // zero out the client mix for this node
     memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
 
     // loop through all other nodes that have sufficient audio to mix
+    int streamsMixed = 0;
    foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
         if (otherNode->getLinkedData()) {
 
             AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
 
             // enumerate the ARBs attached to the otherNode and add all that should be added to mix
             const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
             QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-            for (i = otherNodeAudioStreams.begin(); i != otherNodeAudioStreams.constEnd(); i++) {
+            for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
                 PositionalAudioStream* otherNodeStream = i.value();
 
                 if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode())
                     && otherNodeStream->lastPopSucceeded()
-                    && otherNodeStream->getNextOutputTrailingLoudness() > 0.0f) {
+                    && otherNodeStream->getLastPopOutputFrameLoudness() > 0.0f) {
 
-                    addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
+                    streamsMixed += addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
                 }
             }
         }
     }
+    return streamsMixed;
 }
 
 void AudioMixer::readPendingDatagrams() {
@@ -474,9 +477,8 @@ void AudioMixer::run() {
     int nextFrame = 0;
     QElapsedTimer timer;
     timer.start();
 
-    char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO + sizeof(quint16)
-        + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];
+    char clientMixBuffer[MAX_PACKET_SIZE];
 
     int usecToSleep = BUFFER_SEND_INTERVAL_USECS;
@@ -555,20 +557,37 @@ void AudioMixer::run() {
             if (node->getType() == NodeType::Agent
                 && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioStream()) {
 
-                prepareMixForListeningNode(node.data());
+                int streamsMixed = prepareMixForListeningNode(node.data());
 
-                // pack header
-                int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
-                char* dataAt = clientMixBuffer + numBytesPacketHeader;
+                char* dataAt;
+                if (streamsMixed > 0) {
+                    // pack header
+                    int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
+                    dataAt = clientMixBuffer + numBytesPacketHeader;
 
-                // pack sequence number
-                quint16 sequence = nodeData->getOutgoingSequenceNumber();
-                memcpy(dataAt, &sequence, sizeof(quint16));
-                dataAt += sizeof(quint16);
+                    // pack sequence number
+                    quint16 sequence = nodeData->getOutgoingSequenceNumber();
+                    memcpy(dataAt, &sequence, sizeof(quint16));
+                    dataAt += sizeof(quint16);
 
-                // pack mixed audio samples
-                memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
-                dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
+                    // pack mixed audio samples
+                    memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
+                    dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
+                } else {
+                    // pack header
+                    int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeSilentAudioFrame);
+                    dataAt = clientMixBuffer + numBytesPacketHeader;
+
+                    // pack sequence number
+                    quint16 sequence = nodeData->getOutgoingSequenceNumber();
+                    memcpy(dataAt, &sequence, sizeof(quint16));
+                    dataAt += sizeof(quint16);
+
+                    // pack number of silent audio samples
+                    quint16 numSilentSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
+                    memcpy(dataAt, &numSilentSamples, sizeof(quint16));
+                    dataAt += sizeof(quint16);
+                }
 
                 // send mixed audio packet
                 nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node);
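For reference, the silent-frame payload packed above is just two quint16 fields after the packet header, memcpy'd in host byte order: the outgoing sequence number, then the number of silent samples. A minimal receiver-side sketch, not part of this commit (the struct and function names here are hypothetical, and it assumes the packet header has already been stripped):

    // Hypothetical sketch: unpack the PacketTypeSilentAudioFrame payload written
    // by AudioMixer::run() above. Field order and types mirror the packing code.
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct SilentAudioFrame {
        uint16_t sequence;          // nodeData->getOutgoingSequenceNumber() on the mixer
        uint16_t numSilentSamples;  // NETWORK_BUFFER_LENGTH_SAMPLES_STEREO in this commit
    };

    SilentAudioFrame parseSilentFramePayload(const char* payload) {
        SilentAudioFrame frame;
        std::memcpy(&frame.sequence, payload, sizeof(uint16_t));
        std::memcpy(&frame.numSilentSamples, payload + sizeof(uint16_t), sizeof(uint16_t));
        return frame;
    }

    // The receiver can then synthesize the silence locally instead of the mixer
    // shipping a full frame of zeroed samples over the wire:
    std::vector<int16_t> silenceFor(const SilentAudioFrame& frame) {
        return std::vector<int16_t>(frame.numSilentSamples, 0);
    }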
AudioMixer.h:
@@ -41,11 +41,11 @@ public slots:
 
 private:
     /// adds one stream to the mix for a listening node
-    void addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+    int addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
                                                   AvatarAudioStream* listeningNodeStream);
 
     /// prepares and sends a mix to one Node
-    void prepareMixForListeningNode(Node* node);
+    int prepareMixForListeningNode(Node* node);
 
     // client samples capacity is larger than what will be sent to optimize mixing
     // we are MMX adding 4 samples at a time so we need client samples to have an extra 4
AudioMixerClientData.cpp:
@@ -101,6 +101,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
 void AudioMixerClientData::audioStreamsPopFrameForMixing() {
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
+        i.value()->updateNextOutputTrailingLoudness();
         i.value()->popFrames(1);
     }
 }
DatagramProcessor.cpp:
@@ -48,6 +48,7 @@ void DatagramProcessor::processDatagrams() {
         // only process this packet if we have a match on the packet version
         switch (packetTypeForPacket(incomingPacket)) {
             case PacketTypeMixedAudio:
+            case PacketTypeSilentAudioFrame:
                 QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToStream", Qt::QueuedConnection,
                                           Q_ARG(QByteArray, incomingPacket));
                 break;
|
@ -218,17 +218,24 @@ int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int
|
|||
}
|
||||
}
|
||||
|
||||
float AudioRingBuffer::getNextOutputFrameLoudness() const {
|
||||
float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
|
||||
float loudness = 0.0f;
|
||||
int16_t* sampleAt = _nextOutput;
|
||||
int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
|
||||
if (samplesAvailable() >= _numFrameSamples) {
|
||||
for (int i = 0; i < _numFrameSamples; ++i) {
|
||||
loudness += fabsf(*sampleAt);
|
||||
sampleAt = sampleAt == _bufferLastAt ? _buffer : sampleAt + 1;
|
||||
}
|
||||
loudness /= _numFrameSamples;
|
||||
loudness /= MAX_SAMPLE_VALUE;
|
||||
const int16_t* sampleAt = frameStart;
|
||||
const int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
|
||||
for (int i = 0; i < _numFrameSamples; ++i) {
|
||||
loudness += fabsf(*sampleAt);
|
||||
sampleAt = sampleAt == _bufferLastAt ? _buffer : sampleAt + 1;
|
||||
}
|
||||
loudness /= _numFrameSamples;
|
||||
loudness /= MAX_SAMPLE_VALUE;
|
||||
|
||||
return loudness;
|
||||
}
|
||||
|
||||
float AudioRingBuffer::getFrameLoudness(ConstIterator frameStart) const {
|
||||
return getFrameLoudness(&(*frameStart));
|
||||
}
|
||||
|
||||
float AudioRingBuffer::getNextOutputFrameLoudness() const {
|
||||
return getFrameLoudness(_nextOutput);
|
||||
}
|
||||
|
|
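The loudness value computed above is the frame's mean absolute amplitude, normalized into [0, 1] by the maximum sample value. A standalone sketch of the same math on a flat array, assuming MAX_SAMPLE_VALUE is the largest int16_t value and ignoring the ring-buffer wrap handling that getFrameLoudness does with _bufferLastAt:

    // Sketch of the loudness metric only; not the library code.
    #include <cmath>
    #include <cstdint>
    #include <limits>

    float frameLoudness(const int16_t* samples, int numFrameSamples) {
        float loudness = 0.0f;
        for (int i = 0; i < numFrameSamples; ++i) {
            loudness += std::fabs(static_cast<float>(samples[i]));  // sum of magnitudes
        }
        loudness /= numFrameSamples;                        // mean absolute amplitude
        loudness /= std::numeric_limits<int16_t>::max();    // normalize to [0, 1]
        return loudness;
    }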
|
@ -75,6 +75,10 @@ public:
|
|||
int getOverflowCount() const { return _overflowCount; } /// how many times has the ring buffer has overwritten old data
|
||||
|
||||
int addSilentFrame(int numSilentSamples);
|
||||
|
||||
private:
|
||||
float getFrameLoudness(const int16_t* frameStart) const;
|
||||
|
||||
protected:
|
||||
// disallow copying of AudioRingBuffer objects
|
||||
AudioRingBuffer(const AudioRingBuffer&);
|
||||
|
@ -110,7 +114,7 @@ public:
|
|||
|
||||
bool operator==(const ConstIterator& rhs) { return _at == rhs._at; }
|
||||
bool operator!=(const ConstIterator& rhs) { return _at != rhs._at; }
|
||||
int16_t operator*() { return *_at; }
|
||||
const int16_t& operator*() { return *_at; }
|
||||
|
||||
ConstIterator& operator=(const ConstIterator& rhs) {
|
||||
_capacity = rhs._capacity;
|
||||
|
@@ -179,6 +183,7 @@ public:
     };
 
     ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); }
+    float getFrameLoudness(ConstIterator frameStart) const;
 };
 
 #endif // hifi_AudioRingBuffer_h
|
@ -174,6 +174,10 @@ void InboundAudioStream::starved() {
|
|||
_framesAvailableStats.reset();
|
||||
}
|
||||
|
||||
float InboundAudioStream::getLastPopOutputFrameLoudness() const {
|
||||
return _ringBuffer.getFrameLoudness(_lastPopOutput);
|
||||
}
|
||||
|
||||
void InboundAudioStream::overrideDesiredJitterBufferFramesTo(int desired) {
|
||||
_dynamicJitterBuffersOverride = true;
|
||||
_desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(desired);
|
||||
|
|
|
@ -80,6 +80,8 @@ public:
|
|||
|
||||
/// returns the desired number of jitter buffer frames using Freddy's method
|
||||
int getCalculatedJitterBufferFramesUsingMaxGap() const { return _calculatedJitterBufferFramesUsingMaxGap; }
|
||||
|
||||
float getLastPopOutputFrameLoudness() const;
|
||||
|
||||
int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; }
|
||||
int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); }
|
||||
|
|
|
@ -34,12 +34,6 @@ PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, b
|
|||
{
|
||||
}
|
||||
|
||||
int PositionalAudioStream::parseData(const QByteArray& packet) {
|
||||
int bytesRead = InboundAudioStream::parseData(packet);
|
||||
updateNextOutputTrailingLoudness();
|
||||
return bytesRead;
|
||||
}
|
||||
|
||||
void PositionalAudioStream::updateNextOutputTrailingLoudness() {
|
||||
float nextLoudness = _ringBuffer.getNextOutputFrameLoudness();
|
||||
|
||||
|
|
|
PositionalAudioStream.h:
@@ -29,8 +29,6 @@ public:
 
     PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);
 
-    int parseData(const QByteArray& packet);
-
     virtual AudioStreamStats getAudioStreamStats() const;
 
     void updateNextOutputTrailingLoudness();