use a trailing average for sleep time, move loudness to RB
commit 5ae63c5b0f
parent 5515141792
6 changed files with 69 additions and 63 deletions
AudioMixer.cpp

@@ -63,6 +63,7 @@ void attachNewBufferToNode(Node *newNode) {
 AudioMixer::AudioMixer(const QByteArray& packet) :
     ThreadedAssignment(packet),
+    _trailingSleepRatio(1.0f),
     _minSourceLoudnessInFrame(1.0f),
     _maxSourceLoudnessInFrame(0.0f),
     _loudnessCutoffRatio(0.0f),
@@ -305,7 +306,7 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
             if ((*otherNode != *node
                  || otherNodeBuffer->shouldLoopbackForNode())
                 && otherNodeBuffer->willBeAddedToMix()
-                && otherNodeClientData->getNextOutputLoudness() > _minRequiredLoudness) {
+                && otherNodeBuffer->getAverageLoudness() > _minRequiredLoudness) {
                 addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
             }
         }
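With the loudness bookkeeping moved into the ring buffer, the gate above reads the trailing average that each PositionalAudioRingBuffer now maintains for itself and compares it against the mixer's current cutoff. A minimal sketch of that decision, detached from the real classes; shouldMixBuffer() and FakeRingBuffer are illustrative names only, the member semantics follow this diff:

// Sketch only: mirrors the gate in prepareMixForListeningNode() with stand-in types.
#include <cstdio>

struct FakeRingBuffer {                        // stand-in for PositionalAudioRingBuffer
    bool loopback;                             // shouldLoopbackForNode()
    bool willBeAddedToMix;                     // set by checkBuffersBeforeFrameSend()
    float averageLoudness;                     // trailing average kept by the buffer
};

static bool shouldMixBuffer(const FakeRingBuffer& buffer, bool isListenersOwnBuffer,
                            float minRequiredLoudness) {
    // a listener only hears its own stream when the buffer asks for loopback
    if (isListenersOwnBuffer && !buffer.loopback) {
        return false;
    }
    // only buffers that passed the jitter-buffer check and are loud enough get mixed
    return buffer.willBeAddedToMix && buffer.averageLoudness > minRequiredLoudness;
}

int main() {
    FakeRingBuffer quiet = { false, true, 0.001f };
    FakeRingBuffer loud = { false, true, 0.2f };
    printf("quiet mixed: %d\n", shouldMixBuffer(quiet, false, 0.05f));   // 0
    printf("loud mixed:  %d\n", shouldMixBuffer(loud, false, 0.05f));    // 1
}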
@@ -355,8 +356,7 @@ void AudioMixer::run() {
     char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO
                                      + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];
 
-    int usecToSleep = 0;
-    bool isFirstRun = true;
+    int usecToSleep = BUFFER_SEND_INTERVAL_USECS;
 
     while (!_isFinished) {
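Seeding usecToSleep with the full BUFFER_SEND_INTERVAL_USECS, together with _trailingSleepRatio starting at 1.0f in the constructor, lets the first loop iteration behave as if the mixer had been fully idle, which is presumably why the isFirstRun special case can go away. A quick arithmetic check of that first update, using the 1/100 blend from the next hunk; the interval value here is an illustrative stand-in, not the real constant:

// Arithmetic sketch of the first trailing-average update under the new initial values.
#include <cstdio>

int main() {
    const int BUFFER_SEND_INTERVAL_USECS = 10000;   // illustrative stand-in value
    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
    const float PREVIOUS_FRAMES_RATIO = 1 - CURRENT_FRAME_RATIO;

    float trailingSleepRatio = 1.0f;                // constructor default from this commit
    int usecToSleep = BUFFER_SEND_INTERVAL_USECS;   // new initial value from this hunk

    trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * trailingSleepRatio)
        + (usecToSleep * CURRENT_FRAME_RATIO / (float) BUFFER_SEND_INTERVAL_USECS);

    printf("after first frame: %f\n", trailingSleepRatio);  // stays at 1.0, so no cutoff change
}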
@@ -371,46 +371,48 @@ void AudioMixer::run() {
             }
         }
 
-        if (!isFirstRun) {
-            const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10;
-            const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.30;
-            const float CUTOFF_EPSILON = 0.0001;
-
-            float percentageSleep = (usecToSleep / (float) BUFFER_SEND_INTERVAL_USECS);
-
-            float lastCutoffRatio = _loudnessCutoffRatio;
-            bool hasRatioChanged = false;
-
-            if (percentageSleep <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD || usecToSleep < 0) {
-                // we're struggling - change our min required loudness to reduce some load
-                _loudnessCutoffRatio += (1 - _loudnessCutoffRatio) / 2;
-
-                qDebug() << "Mixer is struggling, sleeping" << percentageSleep * 100 << "% of frame time. Old cutoff was"
-                    << lastCutoffRatio << "and is now" << _loudnessCutoffRatio;
-                hasRatioChanged = true;
-            } else if (percentageSleep >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _loudnessCutoffRatio != 0) {
-                // we've recovered and can back off the required loudness
-                _loudnessCutoffRatio -= _loudnessCutoffRatio / 2;
-
-                if (_loudnessCutoffRatio < CUTOFF_EPSILON) {
-                    _loudnessCutoffRatio = 0;
-                }
-
-                qDebug() << "Mixer is recovering, sleeping" << percentageSleep * 100 << "% of frame time. Old cutoff was"
-                    << lastCutoffRatio << "and is now" << _loudnessCutoffRatio;
-                hasRatioChanged = true;
-            }
-
-            if (hasRatioChanged) {
-                // set out min required loudness from the new ratio
-                _minRequiredLoudness = _loudnessCutoffRatio * (_maxSourceLoudnessInFrame - _minSourceLoudnessInFrame);
-                qDebug() << "Minimum loudness required to be mixed is now" << _minRequiredLoudness;
-            }
-
-
-
-        } else {
-            isFirstRun = false;
-        }
+        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10;
+        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.30;
+        const float CUTOFF_EPSILON = 0.0001;
+
+        const int TRAILING_AVERAGE_FRAMES = 100;
+        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
+        const float PREVIOUS_FRAMES_RATIO = 1 - CURRENT_FRAME_RATIO;
+
+        if (usecToSleep < 0) {
+            usecToSleep = 0;
+        }
+
+        _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
+            + (usecToSleep * CURRENT_FRAME_RATIO / (float) BUFFER_SEND_INTERVAL_USECS);
+
+        float lastCutoffRatio = _loudnessCutoffRatio;
+        bool hasRatioChanged = false;
+
+        if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
+            // we're struggling - change our min required loudness to reduce some load
+            _loudnessCutoffRatio += (1 - _loudnessCutoffRatio) / 2;
+
+            qDebug() << "Mixer is struggling, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
+                << lastCutoffRatio << "and is now" << _loudnessCutoffRatio;
+            hasRatioChanged = true;
+        } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _loudnessCutoffRatio != 0) {
+            // we've recovered and can back off the required loudness
+            _loudnessCutoffRatio -= _loudnessCutoffRatio / 2;
+
+            if (_loudnessCutoffRatio < CUTOFF_EPSILON) {
+                _loudnessCutoffRatio = 0;
+            }
+
+            qDebug() << "Mixer is recovering, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
+                << lastCutoffRatio << "and is now" << _loudnessCutoffRatio;
+            hasRatioChanged = true;
+        }
+
+        if (hasRatioChanged) {
+            // set out min required loudness from the new ratio
+            _minRequiredLoudness = _loudnessCutoffRatio * (_maxSourceLoudnessInFrame - _minSourceLoudnessInFrame);
+            qDebug() << "Minimum loudness required to be mixed is now" << _minRequiredLoudness;
+        }
 
         foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
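The per-frame percentageSleep check is replaced by an exponentially weighted trailing average: each frame contributes 1/100 of the new sleep ratio, so one slow frame can no longer flip the cutoff, and the cutoff ratio itself adapts by halving steps, toward 1 while struggling and toward 0 while recovering. A self-contained sketch of that adaptation loop, using the thresholds from the diff and a synthetic load pattern in place of real frame timings:

// Sketch of the trailing-average / cutoff-ratio adaptation; the load pattern is synthetic.
#include <cstdio>

int main() {
    const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
    const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.30f;
    const float CUTOFF_EPSILON = 0.0001f;
    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
    const float PREVIOUS_FRAMES_RATIO = 1 - CURRENT_FRAME_RATIO;

    float trailingSleepRatio = 1.0f;
    float loudnessCutoffRatio = 0.0f;

    for (int frame = 0; frame < 600; frame++) {
        // pretend the mixer is overloaded for the first 300 frames, then mostly idle again
        float sleepFractionThisFrame = (frame < 300) ? 0.02f : 0.9f;

        trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * trailingSleepRatio)
            + (CURRENT_FRAME_RATIO * sleepFractionThisFrame);

        if (trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
            loudnessCutoffRatio += (1 - loudnessCutoffRatio) / 2;   // tighten: halve the gap to 1
        } else if (trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD
                   && loudnessCutoffRatio != 0) {
            loudnessCutoffRatio -= loudnessCutoffRatio / 2;         // relax: halve toward 0
            if (loudnessCutoffRatio < CUTOFF_EPSILON) {
                loudnessCutoffRatio = 0;
            }
        }

        if (frame % 100 == 99) {
            printf("frame %3d: trailing sleep %.3f cutoff %.4f\n",
                   frame, trailingSleepRatio, loudnessCutoffRatio);
        }
    }
}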
AudioMixer.h

@@ -40,6 +40,7 @@ private:
     // we are MMX adding 4 samples at a time so we need client samples to have an extra 4
     int16_t _clientSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)];
 
+    float _trailingSleepRatio;
     float _minSourceLoudnessInFrame;
     float _maxSourceLoudnessInFrame;
     float _loudnessCutoffRatio;
AudioMixerClientData.cpp

@@ -16,8 +16,7 @@
 #include "AudioMixerClientData.h"
 
 AudioMixerClientData::AudioMixerClientData() :
-    _ringBuffers(),
-    _nextOutputLoudness(0)
+    _ringBuffers()
 {
 
 }

@@ -93,20 +92,19 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(int jitterBufferLengthSam
             // set its flag so we know to push its buffer when all is said and done
             _ringBuffers[i]->setWillBeAddedToMix(true);
 
-            const int TRAILING_AVERAGE_FRAMES = 100;
-            const float CURRENT_FRAME_RATIO = 1 / TRAILING_AVERAGE_FRAMES;
-            const float PREVIOUS_FRAMES_RATIO = 1 - CURRENT_FRAME_RATIO;
 
             // calculate the average loudness for the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
             // that would be mixed in
-            _nextOutputLoudness = (_nextOutputLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * _ringBuffers[i]->averageLoudnessForBoundarySamples(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL));
+            _ringBuffers[i]->updateAverageLoudnessForBoundarySamples(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
 
-            if (_nextOutputLoudness != 0 && _nextOutputLoudness < currentMinLoudness) {
-                currentMinLoudness = _nextOutputLoudness;
+            float ringBufferLoudness = _ringBuffers[i]->getAverageLoudness();
+
+            if (ringBufferLoudness != 0 && ringBufferLoudness < currentMinLoudness) {
+                currentMinLoudness = ringBufferLoudness;
             }
 
-            if (_nextOutputLoudness > currentMaxLoudness) {
-                currentMaxLoudness = _nextOutputLoudness;
+            if (ringBufferLoudness > currentMaxLoudness) {
+                currentMaxLoudness = ringBufferLoudness;
             }
         }
     }
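Two things change here: the loudness state moves into the ring buffer itself (AudioMixerClientData only asks each buffer to update and then reads the stored value), and the blend constant becomes 1.0f / TRAILING_AVERAGE_FRAMES, where the removed 1 / TRAILING_AVERAGE_FRAMES was an integer division that evaluated to 0, so the old _nextOutputLoudness never moved off its initial value of 0. What remains in this method is a per-frame min/max scan; a minimal sketch of that scan with plain floats standing in for the ring buffers, and starting values taken from the constructor defaults in this diff:

// Sketch of the per-frame min/max loudness scan; plain floats stand in for ring buffers.
#include <cstdio>
#include <vector>

int main() {
    // trailing average loudness values as the ring buffers would report them (0 = silent)
    std::vector<float> bufferLoudness = { 0.0f, 0.04f, 0.25f, 0.11f };

    float currentMinLoudness = 1.0f;   // starting values follow _minSourceLoudnessInFrame / _maxSourceLoudnessInFrame
    float currentMaxLoudness = 0.0f;

    for (float loudness : bufferLoudness) {
        if (loudness != 0 && loudness < currentMinLoudness) {
            currentMinLoudness = loudness;     // silent buffers don't drag the minimum to zero
        }
        if (loudness > currentMaxLoudness) {
            currentMaxLoudness = loudness;
        }
    }

    printf("min %.2f max %.2f\n", currentMinLoudness, currentMaxLoudness);  // min 0.04 max 0.25
}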
AudioMixerClientData.h

@@ -24,14 +24,11 @@ public:
     const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
     AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
 
-    float getNextOutputLoudness() const { return _nextOutputLoudness; }
-
     int parseData(const QByteArray& packet);
     void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples, float& currentMinLoudness, float& currentMaxLoudness);
     void pushBuffersAfterFrameSend();
 private:
     std::vector<PositionalAudioRingBuffer*> _ringBuffers;
-    float _nextOutputLoudness;
 };
 
 #endif /* defined(__hifi__AudioMixerClientData__) */
AudioRingBuffer.cpp

@@ -19,7 +19,8 @@ AudioRingBuffer::AudioRingBuffer(int numFrameSamples) :
     NodeData(),
     _sampleCapacity(numFrameSamples * RING_BUFFER_LENGTH_FRAMES),
     _isStarved(true),
-    _hasStarted(false)
+    _hasStarted(false),
+    _averageLoudness(0)
 {
     if (numFrameSamples) {
         _buffer = new int16_t[_sampleCapacity];

@@ -55,18 +56,22 @@ int AudioRingBuffer::parseData(const QByteArray& packet) {
     return writeData(packet.data() + numBytesPacketHeader, packet.size() - numBytesPacketHeader);
 }
 
-float AudioRingBuffer::averageLoudnessForBoundarySamples(int numSamples) {
+void AudioRingBuffer::updateAverageLoudnessForBoundarySamples(int numSamples) {
     // ForBoundarySamples means that we expect the number of samples not to roll of the end of the ring buffer
-    float averageLoudness = 0;
+    float nextLoudness = 0;
 
     for (int i = 0; i < numSamples; ++i) {
-        averageLoudness += fabsf(_nextOutput[i]);
+        nextLoudness += fabsf(_nextOutput[i]);
    }
 
-    averageLoudness /= numSamples;
-    averageLoudness /= MAX_SAMPLE_VALUE;
+    nextLoudness /= numSamples;
+    nextLoudness /= MAX_SAMPLE_VALUE;
 
-    return averageLoudness;
+    const int TRAILING_AVERAGE_FRAMES = 100;
+    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
+    const float PREVIOUS_FRAMES_RATIO = 1 - CURRENT_FRAME_RATIO;
+
+    _averageLoudness = (_averageLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * nextLoudness);
 }
 
 qint64 AudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) {
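updateAverageLoudnessForBoundarySamples() now folds each frame's mean absolute sample value, normalized by MAX_SAMPLE_VALUE, into a stored trailing average using the same 1/100 blend as the sleep ratio. A standalone sketch of that smoothing, detached from the real class; the LoudnessTracker name, the 32767 value for MAX_SAMPLE_VALUE, and the sample data are assumptions for illustration:

// Sketch of the ring buffer's trailing-average loudness, detached from the real class.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

class LoudnessTracker {
public:
    void update(const int16_t* samples, int numSamples) {
        const float MAX_SAMPLE_VALUE = 32767.0f;        // assumed value of the real constant
        const int TRAILING_AVERAGE_FRAMES = 100;
        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
        const float PREVIOUS_FRAMES_RATIO = 1 - CURRENT_FRAME_RATIO;

        float nextLoudness = 0;
        for (int i = 0; i < numSamples; ++i) {
            nextLoudness += fabsf(samples[i]);          // mean absolute amplitude for this frame
        }
        nextLoudness /= numSamples;
        nextLoudness /= MAX_SAMPLE_VALUE;               // normalize to [0, 1]

        _averageLoudness = (_averageLoudness * PREVIOUS_FRAMES_RATIO)
            + (CURRENT_FRAME_RATIO * nextLoudness);
    }
    float getAverageLoudness() const { return _averageLoudness; }

private:
    float _averageLoudness = 0;
};

int main() {
    LoudnessTracker tracker;
    std::vector<int16_t> frame(480, 3277);              // a constant frame at ~10% of full scale
    for (int i = 0; i < 100; ++i) {
        tracker.update(frame.data(), (int) frame.size());
    }
    // after 100 frames the trailing average has moved about 63% of the way toward 0.10
    printf("average loudness: %.3f\n", tracker.getAverageLoudness());
}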
AudioRingBuffer.h

@@ -50,7 +50,8 @@ public:
     const int16_t* getNextOutput() { return _nextOutput; }
     const int16_t* getBuffer() { return _buffer; }
 
-    float averageLoudnessForBoundarySamples(int numSamples);
+    void updateAverageLoudnessForBoundarySamples(int numSamples);
+    float getAverageLoudness() const { return _averageLoudness; }
 
     qint64 readSamples(int16_t* destination, qint64 maxSamples);
     qint64 writeSamples(const int16_t* source, qint64 maxSamples);

@@ -85,6 +86,8 @@ protected:
     int16_t* _buffer;
     bool _isStarved;
    bool _hasStarted;
+
+    float _averageLoudness;
 };
 
 #endif /* defined(__interface__AudioRingBuffer__) */