Merge pull request #3166 from wangyix/quick_audio_PR

Made audio time gap stats more readable with varying units; fixed AudioMixer NaN error on Windows.
Brad Hefta-Gaub 2014-07-15 08:28:48 -07:00
commit dcd3110266
6 changed files with 77 additions and 43 deletions

AudioMixer.cpp

@@ -380,11 +380,11 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
// enumerate the ARBs attached to the otherNode and add all that should be added to mix
for (int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i];
if ((*otherNode != *node
|| otherNodeBuffer->shouldLoopbackForNode())
&& otherNodeBuffer->willBeAddedToMix()
- && otherNodeBuffer->getNextOutputTrailingLoudness() > 0) {
+ && otherNodeBuffer->getNextOutputTrailingLoudness() > 0.0f) {
addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
}
}
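Note on the "> 0.0f" gate above: it interacts with the NaN fix in PositionalAudioRingBuffer.cpp further down. Every ordered comparison involving NaN evaluates to false, so a stream whose trailing loudness became NaN would silently drop out of every mix with no error. A minimal standalone illustration of that IEEE 754 behavior (not part of this commit):

#include <cassert>
#include <limits>

int main() {
    float loudness = std::numeric_limits<float>::quiet_NaN();
    // NaN compares false against everything, including itself, so a
    // NaN trailing loudness would never pass the "> 0.0f" mix gate.
    assert(!(loudness > 0.0f));
    assert(loudness != loudness);
    return 0;
}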

AudioMixerClientData.cpp

@@ -280,12 +280,12 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
+ " silents_dropped: ?"
+ " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
+ " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
+ " min_gap:" + QString::number(streamStats._timeGapMin)
+ " max_gap:" + QString::number(streamStats._timeGapMax)
+ " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
+ " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
+ " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
+ " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
+ " min_gap:" + formatUsecTime(streamStats._timeGapMin)
+ " max_gap:" + formatUsecTime(streamStats._timeGapMax)
+ " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
+ " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
+ " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
+ " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
if (avatarRingBuffer) {
@@ -299,12 +299,12 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
+ " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
+ " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
+ " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
+ " min_gap:" + QString::number(streamStats._timeGapMin)
+ " max_gap:" + QString::number(streamStats._timeGapMax)
+ " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
+ " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
+ " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
+ " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
+ " min_gap:" + formatUsecTime(streamStats._timeGapMin)
+ " max_gap:" + formatUsecTime(streamStats._timeGapMax)
+ " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
+ " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
+ " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
+ " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
} else {
result = "mic unknown";
}
@@ -321,12 +321,12 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
+ " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
+ " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
+ " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
+ " min_gap:" + QString::number(streamStats._timeGapMin)
+ " max_gap:" + QString::number(streamStats._timeGapMax)
+ " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
+ " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
+ " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
+ " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
+ " min_gap:" + formatUsecTime(streamStats._timeGapMin)
+ " max_gap:" + formatUsecTime(streamStats._timeGapMax)
+ " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
+ " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
+ " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
+ " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
}
}
return result;
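For a sense of the readability gain: given a hypothetical stream whose minimum gap is 11 usecs and whose maximum gap is 1,500,000 usecs, the two formatting approaches would produce roughly:

 min_gap:11 max_gap:1500000        (old: raw microsecond counts via QString::number)
 min_gap:11.000us max_gap:1.500s   (new: unit-scaled via formatUsecTime, 3-decimal default)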

Stats.cpp

@@ -335,26 +335,28 @@ void Stats::display(
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
- char downstreamAudioStatsString[30];
+ char downstreamAudioStatsString[512];
AudioStreamStats downstreamAudioStreamStats = audio->getDownstreamAudioStreamStats();
- sprintf(downstreamAudioStatsString, " mix: %.1f%%/%.1f%%, %u/?/%u", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
+ sprintf(downstreamAudioStatsString, " mix: %.2f%%/%.2f%%, %u/?/%u", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
downstreamAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
downstreamAudioStreamStats._ringBufferFramesAvailable, downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames);
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
- sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", downstreamAudioStreamStats._timeGapMin,
- downstreamAudioStreamStats._timeGapMax, downstreamAudioStreamStats._timeGapAverage,
+ sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(downstreamAudioStreamStats._timeGapMin).toLatin1().data(),
+ formatUsecTime(downstreamAudioStreamStats._timeGapMax).toLatin1().data(),
+ formatUsecTime(downstreamAudioStreamStats._timeGapAverage).toLatin1().data(),
downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
- sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/?", downstreamAudioStreamStats._timeGapWindowMin,
- downstreamAudioStreamStats._timeGapWindowMax, downstreamAudioStreamStats._timeGapWindowAverage,
+ sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/?", formatUsecTime(downstreamAudioStreamStats._timeGapWindowMin).toLatin1().data(),
+ formatUsecTime(downstreamAudioStreamStats._timeGapWindowMax).toLatin1().data(),
+ formatUsecTime(downstreamAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
downstreamAudioStreamStats._ringBufferConsecutiveNotMixedCount);
verticalOffset += STATS_PELS_PER_LINE;
@@ -365,11 +367,11 @@ void Stats::display(
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamLabelString, color);
- char upstreamAudioStatsString[30];
+ char upstreamAudioStatsString[512];
const AudioStreamStats& audioMixerAvatarAudioStreamStats = audio->getAudioMixerAvatarStreamAudioStats();
- sprintf(upstreamAudioStatsString, " mic: %.1f%%/%.1f%%, %u/%u/%u", audioMixerAvatarAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
+ sprintf(upstreamAudioStatsString, " mic: %.2f%%/%.2f%%, %u/%u/%u", audioMixerAvatarAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
audioMixerAvatarAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
audioMixerAvatarAudioStreamStats._ringBufferFramesAvailable, audioMixerAvatarAudioStreamStats._ringBufferCurrentJitterBufferFrames,
audioMixerAvatarAudioStreamStats._ringBufferDesiredJitterBufferFrames);
@@ -377,15 +379,17 @@ void Stats::display(
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
- sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapMin,
- audioMixerAvatarAudioStreamStats._timeGapMax, audioMixerAvatarAudioStreamStats._timeGapAverage,
+ sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMin).toLatin1().data(),
+ formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMax).toLatin1().data(),
+ formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapAverage).toLatin1().data(),
audioMixerAvatarAudioStreamStats._ringBufferStarveCount, audioMixerAvatarAudioStreamStats._ringBufferOverflowCount);
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
- sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapWindowMin,
- audioMixerAvatarAudioStreamStats._timeGapWindowMax, audioMixerAvatarAudioStreamStats._timeGapWindowAverage,
+ sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMin).toLatin1().data(),
+ formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMax).toLatin1().data(),
+ formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
audioMixerAvatarAudioStreamStats._ringBufferConsecutiveNotMixedCount, audioMixerAvatarAudioStreamStats._ringBufferSilentFramesDropped);
verticalOffset += STATS_PELS_PER_LINE;
@@ -393,7 +397,7 @@ void Stats::display(
foreach(const AudioStreamStats& injectedStreamAudioStats, audioMixerInjectedStreamAudioStatsMap) {
- sprintf(upstreamAudioStatsString, " inj: %.1f%%/%.1f%%, %u/%u/%u", injectedStreamAudioStats._packetStreamStats.getLostRate()*100.0f,
+ sprintf(upstreamAudioStatsString, " inj: %.2f%%/%.2f%%, %u/%u/%u", injectedStreamAudioStats._packetStreamStats.getLostRate()*100.0f,
injectedStreamAudioStats._packetStreamWindowStats.getLostRate() * 100.0f,
injectedStreamAudioStats._ringBufferFramesAvailable, injectedStreamAudioStats._ringBufferCurrentJitterBufferFrames,
injectedStreamAudioStats._ringBufferDesiredJitterBufferFrames);
@@ -401,15 +405,17 @@ void Stats::display(
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
- sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapMin,
- injectedStreamAudioStats._timeGapMax, injectedStreamAudioStats._timeGapAverage,
+ sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapMin).toLatin1().data(),
+ formatUsecTime(injectedStreamAudioStats._timeGapMax).toLatin1().data(),
+ formatUsecTime(injectedStreamAudioStats._timeGapAverage).toLatin1().data(),
injectedStreamAudioStats._ringBufferStarveCount, injectedStreamAudioStats._ringBufferOverflowCount);
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
- sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapWindowMin,
- injectedStreamAudioStats._timeGapWindowMax, injectedStreamAudioStats._timeGapWindowAverage,
+ sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapWindowMin).toLatin1().data(),
+ formatUsecTime(injectedStreamAudioStats._timeGapWindowMax).toLatin1().data(),
+ formatUsecTime(injectedStreamAudioStats._timeGapWindowAverage).toLatin1().data(),
injectedStreamAudioStats._ringBufferConsecutiveNotMixedCount, injectedStreamAudioStats._ringBufferSilentFramesDropped);
verticalOffset += STATS_PELS_PER_LINE;
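Two things worth noting about these Stats changes. First, the chained formatUsecTime(...).toLatin1().data() calls are safe here because each temporary QByteArray lives until the end of the full sprintf expression. Second, growing the buffers from 30 to 512 bytes matters because three formatted time strings plus counters easily exceed 30 characters, and sprintf performs no bounds checking. A hedged alternative sketch (not what this commit does) that stays safe even if the stats grow again:

#include <cstdio>

int main() {
    // Placeholder strings standing in for the formatted gap stats.
    const char* minGap = "11.000us";
    const char* maxGap = "1.500s";
    const char* avgGap = "35.250ms";
    unsigned starves = 2, overflows = 0;

    char stats[512];
    // Unlike sprintf, snprintf never writes past the buffer; its return
    // value is the length the full string would have required.
    int needed = snprintf(stats, sizeof(stats), " %s/%s/%s, %u/%u",
                          minGap, maxGap, avgGap, starves, overflows);
    if (needed >= (int)sizeof(stats)) {
        // output was truncated; enlarge the buffer or shorten the format
    }
    return 0;
}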

PositionalAudioRingBuffer.cpp

@@ -32,6 +32,7 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::
_shouldLoopbackForNode(false),
_shouldOutputStarveDebug(true),
_isStereo(isStereo),
+ _nextOutputTrailingLoudness(0.0f),
_listenerUnattenuatedZone(NULL),
_lastFrameReceivedTime(0),
_interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS),
@@ -121,27 +122,35 @@ void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() {
// ForBoundarySamples means that we expect the number of samples not to roll off the end of the ring buffer
float nextLoudness = 0;
-     for (int i = 0; i < _numFrameSamples; ++i) {
-         nextLoudness += fabsf(_nextOutput[i]);
+     if (samplesAvailable() >= _numFrameSamples) {
+         for (int i = 0; i < _numFrameSamples; ++i) {
+             nextLoudness += fabsf(_nextOutput[i]);
+         }
+         nextLoudness /= _numFrameSamples;
+         nextLoudness /= MAX_SAMPLE_VALUE;
      }
-     nextLoudness /= _numFrameSamples;
-     nextLoudness /= MAX_SAMPLE_VALUE;
const int TRAILING_AVERAGE_FRAMES = 100;
const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
const float LOUDNESS_EPSILON = 0.000001f;
+ float oldNextOutputTrailingLoudness = _nextOutputTrailingLoudness;
if (nextLoudness >= _nextOutputTrailingLoudness) {
_nextOutputTrailingLoudness = nextLoudness;
} else {
_nextOutputTrailingLoudness = (_nextOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * nextLoudness);
if (_nextOutputTrailingLoudness < LOUDNESS_EPSILON) {
_nextOutputTrailingLoudness = 0;
}
}
+ // fixes bug on Windows where _nextOutputTrailingLoudness sometimes becomes NaN. In that case,
+ // revert _nextOutputTrailingLoudness to its previous value
+ if (isNaN(_nextOutputTrailingLoudness)) {
+     _nextOutputTrailingLoudness = oldNextOutputTrailingLoudness;
+ }
}
bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
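The isNaN() helper used in the fix is declared in SharedUtil.h (see the header hunk at the end of this commit). A minimal sketch of the self-comparison idiom such a helper typically relies on, assuming the SharedUtil implementation follows it:

// NaN is the only float value that compares unequal to itself, so a
// self-comparison detects it without pulling in <cmath>'s std::isnan.
bool isNaN(float value) {
    return value != value;
}

One caveat: this idiom (like std::isnan) can be optimized away under -ffast-math, which lets the compiler assume NaN never occurs.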

SharedUtil.cpp

@@ -837,3 +837,20 @@ bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, f
QByteArray createByteArray(const glm::vec3& vector) {
return QByteArray::number(vector.x) + ',' + QByteArray::number(vector.y) + ',' + QByteArray::number(vector.z);
}
+ QString formatUsecTime(float usecs, int prec) {
+     static const quint64 SECONDS_PER_MINUTE = 60;
+     static const quint64 USECS_PER_MINUTE = USECS_PER_SECOND * SECONDS_PER_MINUTE;
+     QString result;
+     if (usecs > USECS_PER_MINUTE) {
+         result = QString::number(usecs / USECS_PER_MINUTE, 'f', prec) + "min";
+     } else if (usecs > USECS_PER_SECOND) {
+         result = QString::number(usecs / USECS_PER_SECOND, 'f', prec) + 's';
+     } else if (usecs > USECS_PER_MSEC) {
+         result = QString::number(usecs / USECS_PER_MSEC, 'f', prec) + "ms";
+     } else {
+         result = QString::number(usecs, 'f', prec) + "us";
+     }
+     return result;
+ }
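A quick illustration of the unit thresholds, with outputs derived from the branches above (the quint64 gap stats convert implicitly to float at the call sites, which is accurate enough at these magnitudes):

formatUsecTime(42.0f);          // "42.000us"  (1 ms and below)
formatUsecTime(1234.0f);        // "1.234ms"   (above 1 ms)
formatUsecTime(2500000.0f);     // "2.500s"    (above 1 s)
formatUsecTime(90000000.0f, 1); // "1.5min"    (above 1 min)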

SharedUtil.h

@@ -189,4 +189,6 @@ bool isNaN(float value);
QByteArray createByteArray(const glm::vec3& vector);
+ QString formatUsecTime(float usecs, int prec = 3);
#endif // hifi_SharedUtil_h