handle trivial case of not mixing silent audio streams

This commit is contained in:
Stephen Birarda 2014-03-17 14:29:53 -07:00
parent 2be8dec399
commit bc9deb5db7
5 changed files with 39 additions and 7 deletions

View file

@ -301,7 +301,8 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
if ((*otherNode != *node if ((*otherNode != *node
|| otherNodeBuffer->shouldLoopbackForNode()) || otherNodeBuffer->shouldLoopbackForNode())
&& otherNodeBuffer->willBeAddedToMix()) { && otherNodeBuffer->willBeAddedToMix()
&& otherNodeClientData->getNextOutputLoudness() != 0) {
addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer); addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
} }
} }
@ -355,12 +356,6 @@ void AudioMixer::run() {
while (!_isFinished) { while (!_isFinished) {
QCoreApplication::processEvents();
if (_isFinished) {
break;
}
foreach (const SharedNodePointer& node, nodeList->getNodeHash()) { foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
if (node->getLinkedData()) { if (node->getLinkedData()) {
((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(JITTER_BUFFER_SAMPLES); ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(JITTER_BUFFER_SAMPLES);
@ -384,6 +379,12 @@ void AudioMixer::run() {
} }
} }
QCoreApplication::processEvents();
if (_isFinished) {
break;
}
int usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow(); int usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow();
if (usecToSleep > 0) { if (usecToSleep > 0) {

View file

@ -13,6 +13,13 @@
#include "AudioMixerClientData.h" #include "AudioMixerClientData.h"
// Constructs client data with no attached ring buffers and a next-output
// loudness of zero (nothing heard from this client yet).
// Note: _ringBuffers needs no explicit mention in the initializer list —
// a std::vector data member is default-constructed empty either way.
AudioMixerClientData::AudioMixerClientData() :
    _nextOutputLoudness(0.0f)
{
}
AudioMixerClientData::~AudioMixerClientData() { AudioMixerClientData::~AudioMixerClientData() {
for (unsigned int i = 0; i < _ringBuffers.size(); i++) { for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
// delete this attached PositionalAudioRingBuffer // delete this attached PositionalAudioRingBuffer
@ -80,6 +87,10 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(int jitterBufferLengthSam
// this is a ring buffer that is ready to go // this is a ring buffer that is ready to go
// set its flag so we know to push its buffer when all is said and done // set its flag so we know to push its buffer when all is said and done
_ringBuffers[i]->setWillBeAddedToMix(true); _ringBuffers[i]->setWillBeAddedToMix(true);
// calculate the average loudness for the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
// that would be mixed in
_nextOutputLoudness = _ringBuffers[i]->averageLoudnessForBoundarySamples(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
} }
} }
} }

View file

@ -18,16 +18,20 @@
class AudioMixerClientData : public NodeData { class AudioMixerClientData : public NodeData {
public: public:
AudioMixerClientData();
~AudioMixerClientData(); ~AudioMixerClientData();
const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; } const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const; AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
float getNextOutputLoudness() const { return _nextOutputLoudness; }
int parseData(const QByteArray& packet); int parseData(const QByteArray& packet);
void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples); void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples);
void pushBuffersAfterFrameSend(); void pushBuffersAfterFrameSend();
private: private:
std::vector<PositionalAudioRingBuffer*> _ringBuffers; std::vector<PositionalAudioRingBuffer*> _ringBuffers;
float _nextOutputLoudness;
}; };
#endif /* defined(__hifi__AudioMixerClientData__) */ #endif /* defined(__hifi__AudioMixerClientData__) */

View file

@ -55,6 +55,20 @@ int AudioRingBuffer::parseData(const QByteArray& packet) {
return writeData(packet.data() + numBytesPacketHeader, packet.size() - numBytesPacketHeader); return writeData(packet.data() + numBytesPacketHeader, packet.size() - numBytesPacketHeader);
} }
float AudioRingBuffer::averageLoudnessForBoundarySamples(int numSamples) {
    // ForBoundarySamples means that we expect the number of samples not to roll off the end of the ring buffer
    //
    // Returns the average sample magnitude of the next numSamples to be output,
    // normalized to [0, 1] by MAX_SAMPLE_VALUE. Loudness must be computed from
    // the magnitude of each sample: summing raw signed samples lets positive
    // and negative half-cycles cancel, reporting near-silence for any
    // symmetric waveform.
    if (numSamples <= 0) {
        // no samples to examine — treat as silent and avoid dividing by zero
        return 0.0f;
    }
    float averageLoudness = 0;
    for (int i = 0; i < numSamples; ++i) {
        // int16_t promotes to int here, so negating the minimum sample value is safe
        int sample = _nextOutput[i];
        averageLoudness += (sample < 0) ? -sample : sample;
    }
    averageLoudness /= numSamples;
    averageLoudness /= MAX_SAMPLE_VALUE;
    return averageLoudness;
}
qint64 AudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) { qint64 AudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) {
return readData((char*) destination, maxSamples * sizeof(int16_t)); return readData((char*) destination, maxSamples * sizeof(int16_t));
} }

View file

@ -50,6 +50,8 @@ public:
const int16_t* getNextOutput() { return _nextOutput; } const int16_t* getNextOutput() { return _nextOutput; }
const int16_t* getBuffer() { return _buffer; } const int16_t* getBuffer() { return _buffer; }
float averageLoudnessForBoundarySamples(int numSamples);
qint64 readSamples(int16_t* destination, qint64 maxSamples); qint64 readSamples(int16_t* destination, qint64 maxSamples);
qint64 writeSamples(const int16_t* source, qint64 maxSamples); qint64 writeSamples(const int16_t* source, qint64 maxSamples);