diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp
index 459f8a4b59..c86d37e283 100644
--- a/assignment-client/src/audio/AudioMixer.cpp
+++ b/assignment-client/src/audio/AudioMixer.cpp
@@ -380,11 +380,11 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
 
             // enumerate the ARBs attached to the otherNode and add all that should be added to mix
             for (int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
                 PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i];
-
+                
                 if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode())
                     && otherNodeBuffer->willBeAddedToMix()
-                    && otherNodeBuffer->getNextOutputTrailingLoudness() > 0) {
+                    && otherNodeBuffer->getNextOutputTrailingLoudness() > 0.0f) {
                     addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
                 }
             }
diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp
index 94bbdc6a6b..6559b57959 100644
--- a/assignment-client/src/audio/AudioMixerClientData.cpp
+++ b/assignment-client/src/audio/AudioMixerClientData.cpp
@@ -280,12 +280,12 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
             + " silents_dropped: ?"
             + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
             + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
-            + " min_gap:" + QString::number(streamStats._timeGapMin)
-            + " max_gap:" + QString::number(streamStats._timeGapMax)
-            + " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
-            + " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
-            + " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
-            + " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
+            + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
+            + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
+            + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
+            + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
+            + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
+            + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
 
     AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
     if (avatarRingBuffer) {
@@ -299,12 +299,12 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
             + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
             + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
             + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
-            + " min_gap:" + QString::number(streamStats._timeGapMin)
-            + " max_gap:" + QString::number(streamStats._timeGapMax)
-            + " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
-            + " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
-            + " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
-            + " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
+            + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
+            + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
+            + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
+            + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
+            + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
+            + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
     } else {
         result = "mic unknown";
     }
@@ -321,12 +321,12 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
                 + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
                 + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
                 + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
-                + " min_gap:" + QString::number(streamStats._timeGapMin)
-                + " max_gap:" + QString::number(streamStats._timeGapMax)
-                + " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
-                + " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
-                + " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
-                + " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
+                + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
+                + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
+                + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
+                + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
+                + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
+                + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
         }
     }
     return result;
diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp
index b40db71132..d865fc8004 100644
--- a/interface/src/ui/Stats.cpp
+++ b/interface/src/ui/Stats.cpp
@@ -335,26 +335,28 @@ void Stats::display(
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
 
-        char downstreamAudioStatsString[30];
+        char downstreamAudioStatsString[512];
 
         AudioStreamStats downstreamAudioStreamStats = audio->getDownstreamAudioStreamStats();
 
-        sprintf(downstreamAudioStatsString, " mix: %.1f%%/%.1f%%, %u/?/%u", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
+        sprintf(downstreamAudioStatsString, " mix: %.2f%%/%.2f%%, %u/?/%u", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
             downstreamAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
             downstreamAudioStreamStats._ringBufferFramesAvailable, downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames);
 
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
 
-        sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", downstreamAudioStreamStats._timeGapMin,
-            downstreamAudioStreamStats._timeGapMax, downstreamAudioStreamStats._timeGapAverage,
+        sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(downstreamAudioStreamStats._timeGapMin).toLatin1().data(),
+            formatUsecTime(downstreamAudioStreamStats._timeGapMax).toLatin1().data(),
+            formatUsecTime(downstreamAudioStreamStats._timeGapAverage).toLatin1().data(),
            downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);
 
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
 
-        sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/?", downstreamAudioStreamStats._timeGapWindowMin,
-            downstreamAudioStreamStats._timeGapWindowMax, downstreamAudioStreamStats._timeGapWindowAverage,
+        sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/?", formatUsecTime(downstreamAudioStreamStats._timeGapWindowMin).toLatin1().data(),
+            formatUsecTime(downstreamAudioStreamStats._timeGapWindowMax).toLatin1().data(),
+            formatUsecTime(downstreamAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
            downstreamAudioStreamStats._ringBufferConsecutiveNotMixedCount);
 
         verticalOffset += STATS_PELS_PER_LINE;
@@ -365,11 +367,11 @@ void Stats::display(
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamLabelString, color);
 
-        char upstreamAudioStatsString[30];
+        char upstreamAudioStatsString[512];
 
         const AudioStreamStats& audioMixerAvatarAudioStreamStats = audio->getAudioMixerAvatarStreamAudioStats();
 
-        sprintf(upstreamAudioStatsString, " mic: %.1f%%/%.1f%%, %u/%u/%u", audioMixerAvatarAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
+        sprintf(upstreamAudioStatsString, " mic: %.2f%%/%.2f%%, %u/%u/%u", audioMixerAvatarAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
            audioMixerAvatarAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
            audioMixerAvatarAudioStreamStats._ringBufferFramesAvailable, audioMixerAvatarAudioStreamStats._ringBufferCurrentJitterBufferFrames,
            audioMixerAvatarAudioStreamStats._ringBufferDesiredJitterBufferFrames);
@@ -377,15 +379,17 @@ void Stats::display(
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
 
-        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapMin,
-            audioMixerAvatarAudioStreamStats._timeGapMax, audioMixerAvatarAudioStreamStats._timeGapAverage,
+        sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMin).toLatin1().data(),
+            formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMax).toLatin1().data(),
+            formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapAverage).toLatin1().data(),
            audioMixerAvatarAudioStreamStats._ringBufferStarveCount, audioMixerAvatarAudioStreamStats._ringBufferOverflowCount);
 
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
 
-        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapWindowMin,
-            audioMixerAvatarAudioStreamStats._timeGapWindowMax, audioMixerAvatarAudioStreamStats._timeGapWindowAverage,
+        sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMin).toLatin1().data(),
+            formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMax).toLatin1().data(),
+            formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
            audioMixerAvatarAudioStreamStats._ringBufferConsecutiveNotMixedCount, audioMixerAvatarAudioStreamStats._ringBufferSilentFramesDropped);
 
         verticalOffset += STATS_PELS_PER_LINE;
@@ -393,7 +397,7 @@ void Stats::display(
 
         foreach(const AudioStreamStats& injectedStreamAudioStats, audioMixerInjectedStreamAudioStatsMap) {
 
-            sprintf(upstreamAudioStatsString, " inj: %.1f%%/%.1f%%, %u/%u/%u", injectedStreamAudioStats._packetStreamStats.getLostRate()*100.0f,
+            sprintf(upstreamAudioStatsString, " inj: %.2f%%/%.2f%%, %u/%u/%u", injectedStreamAudioStats._packetStreamStats.getLostRate()*100.0f,
                injectedStreamAudioStats._packetStreamWindowStats.getLostRate() * 100.0f,
                injectedStreamAudioStats._ringBufferFramesAvailable, injectedStreamAudioStats._ringBufferCurrentJitterBufferFrames,
                injectedStreamAudioStats._ringBufferDesiredJitterBufferFrames);
@@ -401,15 +405,17 @@ void Stats::display(
            verticalOffset += STATS_PELS_PER_LINE;
            drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
 
-            sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapMin,
-                injectedStreamAudioStats._timeGapMax, injectedStreamAudioStats._timeGapAverage,
+            sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapMin).toLatin1().data(),
+                formatUsecTime(injectedStreamAudioStats._timeGapMax).toLatin1().data(),
+                formatUsecTime(injectedStreamAudioStats._timeGapAverage).toLatin1().data(),
                injectedStreamAudioStats._ringBufferStarveCount, injectedStreamAudioStats._ringBufferOverflowCount);
 
            verticalOffset += STATS_PELS_PER_LINE;
            drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
 
-            sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapWindowMin,
-                injectedStreamAudioStats._timeGapWindowMax, injectedStreamAudioStats._timeGapWindowAverage,
+            sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapWindowMin).toLatin1().data(),
+                formatUsecTime(injectedStreamAudioStats._timeGapWindowMax).toLatin1().data(),
+                formatUsecTime(injectedStreamAudioStats._timeGapWindowAverage).toLatin1().data(),
                injectedStreamAudioStats._ringBufferConsecutiveNotMixedCount, injectedStreamAudioStats._ringBufferSilentFramesDropped);
 
            verticalOffset += STATS_PELS_PER_LINE;
diff --git a/libraries/audio/src/PositionalAudioRingBuffer.cpp b/libraries/audio/src/PositionalAudioRingBuffer.cpp
index 411b02400d..8cba6d72b0 100644
--- a/libraries/audio/src/PositionalAudioRingBuffer.cpp
+++ b/libraries/audio/src/PositionalAudioRingBuffer.cpp
@@ -32,6 +32,7 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::
     _shouldLoopbackForNode(false),
     _shouldOutputStarveDebug(true),
     _isStereo(isStereo),
+    _nextOutputTrailingLoudness(0.0f),
     _listenerUnattenuatedZone(NULL),
     _lastFrameReceivedTime(0),
     _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS),
@@ -121,27 +122,35 @@ void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() {
 
     // ForBoundarySamples means that we expect the number of samples not to roll of the end of the ring buffer
     float nextLoudness = 0;
-    for (int i = 0; i < _numFrameSamples; ++i) {
-        nextLoudness += fabsf(_nextOutput[i]);
+    if (samplesAvailable() >= _numFrameSamples) {
+        for (int i = 0; i < _numFrameSamples; ++i) {
+            nextLoudness += fabsf(_nextOutput[i]);
+        }
+        nextLoudness /= _numFrameSamples;
+        nextLoudness /= MAX_SAMPLE_VALUE;
     }
 
-    nextLoudness /= _numFrameSamples;
-    nextLoudness /= MAX_SAMPLE_VALUE;
-
     const int TRAILING_AVERAGE_FRAMES = 100;
     const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
     const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
     const float LOUDNESS_EPSILON = 0.000001f;
 
+    float oldNextOutputTrailingLoudness = _nextOutputTrailingLoudness;
     if (nextLoudness >= _nextOutputTrailingLoudness) {
         _nextOutputTrailingLoudness = nextLoudness;
     } else {
         _nextOutputTrailingLoudness = (_nextOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * nextLoudness);
-
+        
         if (_nextOutputTrailingLoudness < LOUDNESS_EPSILON) {
             _nextOutputTrailingLoudness = 0;
         }
     }
+
+    // fixes bug on Windows where _nextOutputTrailingLoudness sometimes becomes NaN. In that case,
+    // revert _nextOutputTrailingLoudness to its previous value
+    if (isNaN(_nextOutputTrailingLoudness)) {
+        _nextOutputTrailingLoudness = oldNextOutputTrailingLoudness;
+    }
 }
 
 bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
diff --git a/libraries/shared/src/SharedUtil.cpp b/libraries/shared/src/SharedUtil.cpp
index e4d2e1c835..b5be502ed5 100644
--- a/libraries/shared/src/SharedUtil.cpp
+++ b/libraries/shared/src/SharedUtil.cpp
@@ -837,3 +837,20 @@ bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, f
 QByteArray createByteArray(const glm::vec3& vector) {
     return QByteArray::number(vector.x) + ',' + QByteArray::number(vector.y) + ',' + QByteArray::number(vector.z);
 }
+
+QString formatUsecTime(float usecs, int prec) {
+    static const quint64 SECONDS_PER_MINUTE = 60;
+    static const quint64 USECS_PER_MINUTE = USECS_PER_SECOND * SECONDS_PER_MINUTE;
+
+    QString result;
+    if (usecs > USECS_PER_MINUTE) {
+        result = QString::number(usecs / USECS_PER_MINUTE, 'f', prec) + "min";
+    } else if (usecs > USECS_PER_SECOND) {
+        result = QString::number(usecs / USECS_PER_SECOND, 'f', prec) + 's';
+    } else if (usecs > USECS_PER_MSEC) {
+        result = QString::number(usecs / USECS_PER_MSEC, 'f', prec) + "ms";
+    } else {
+        result = QString::number(usecs, 'f', prec) + "us";
+    }
+    return result;
+}
diff --git a/libraries/shared/src/SharedUtil.h b/libraries/shared/src/SharedUtil.h
index e5c2a0afc9..6bb39f7e12 100644
--- a/libraries/shared/src/SharedUtil.h
+++ b/libraries/shared/src/SharedUtil.h
@@ -189,4 +189,6 @@ bool isNaN(float value);
 
 QByteArray createByteArray(const glm::vec3& vector);
 
+QString formatUsecTime(float usecs, int prec = 3);
+
 #endif // hifi_SharedUtil_h
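Note on the new helper: the unit cascade in formatUsecTime() can be exercised in isolation. The snippet below is a minimal, Qt-free sketch of the same logic, not part of the patch. It assumes the usual microsecond constants (USECS_PER_MSEC = 1000, USECS_PER_SECOND = 1000000) behind the names referenced in SharedUtil.cpp, and substitutes std::snprintf for QString::number; formatUsecTimeSketch and the sample values in main() are hypothetical and exist only for illustration.

// Standalone sketch approximating formatUsecTime() without Qt (illustration only).
#include <cstdio>
#include <string>

static const float USECS_PER_MSEC = 1000.0f;                   // assumed value
static const float USECS_PER_SECOND = 1000.0f * 1000.0f;       // assumed value
static const float USECS_PER_MINUTE = 60.0f * USECS_PER_SECOND;

std::string formatUsecTimeSketch(float usecs, int prec = 3) {
    char buffer[64];
    // pick the largest unit the value exceeds, mirroring the if/else chain in the patch
    if (usecs > USECS_PER_MINUTE) {
        std::snprintf(buffer, sizeof(buffer), "%.*fmin", prec, usecs / USECS_PER_MINUTE);
    } else if (usecs > USECS_PER_SECOND) {
        std::snprintf(buffer, sizeof(buffer), "%.*fs", prec, usecs / USECS_PER_SECOND);
    } else if (usecs > USECS_PER_MSEC) {
        std::snprintf(buffer, sizeof(buffer), "%.*fms", prec, usecs / USECS_PER_MSEC);
    } else {
        std::snprintf(buffer, sizeof(buffer), "%.*fus", prec, usecs);
    }
    return buffer;
}

int main() {
    // Expected output: 250.000us, 2.500ms, 1.500s, 2.000min
    std::printf("%s, %s, %s, %s\n",
                formatUsecTimeSketch(250.0f).c_str(),
                formatUsecTimeSketch(2500.0f).c_str(),
                formatUsecTimeSketch(1.5e6f).c_str(),
                formatUsecTimeSketch(1.2e8f).c_str());
    return 0;
}

Under those assumed constants, the four calls print 250.000us, 2.500ms, 1.500s and 2.000min, which is the unit behavior the stats strings above rely on when replacing the raw %llu microsecond gaps.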