From 9792d025fa1186ab80e9ebcd871b17c088b5ed9a Mon Sep 17 00:00:00 2001 From: Zach Pomerantz Date: Tue, 13 Sep 2016 15:14:59 -0700 Subject: [PATCH 1/6] clean up audio stats --- interface/src/ui/AudioStatsDialog.cpp | 148 +++++++++--------- interface/src/ui/AudioStatsDialog.h | 2 +- libraries/audio-client/src/AudioClient.cpp | 2 + libraries/audio/src/AudioLogging.cpp | 4 +- libraries/audio/src/InboundAudioStream.cpp | 19 ++- libraries/audio/src/InboundAudioStream.h | 1 + .../audio/src/MixedProcessedAudioStream.cpp | 2 + 7 files changed, 93 insertions(+), 85 deletions(-) diff --git a/interface/src/ui/AudioStatsDialog.cpp b/interface/src/ui/AudioStatsDialog.cpp index 95792cf79d..5938add8c2 100644 --- a/interface/src/ui/AudioStatsDialog.cpp +++ b/interface/src/ui/AudioStatsDialog.cpp @@ -82,7 +82,7 @@ AudioStatsDialog::AudioStatsDialog(QWidget* parent) : _upstreamInjectedID = addChannel(_form, _upstreamInjectedStats, COLOR0); connect(averageUpdateTimer, SIGNAL(timeout()), this, SLOT(updateTimerTimeout())); - averageUpdateTimer->start(1000); + averageUpdateTimer->start(200); } int AudioStatsDialog::addChannel(QFormLayout* form, QVector& stats, const unsigned color) { @@ -110,137 +110,135 @@ void AudioStatsDialog::renderStats() { // Clear current stats from all vectors clearAllChannels(); + double mixerRingBufferFrames = 0.0, + outputRingBufferFrames = 0.0; double audioInputBufferLatency = 0.0, - inputRingBufferLatency = 0.0, - networkRoundtripLatency = 0.0, - mixerRingBufferLatency = 0.0, - outputRingBufferLatency = 0.0, - audioOutputBufferLatency = 0.0; + inputRingBufferLatency = 0.0, + networkRoundtripLatency = 0.0, + mixerRingBufferLatency = 0.0, + outputRingBufferLatency = 0.0, + audioOutputBufferLatency = 0.0; - AudioStreamStats downstreamAudioStreamStats = _stats->getMixerDownstreamStats(); - SharedNodePointer audioMixerNodePointer = DependencyManager::get()->soloNodeOfType(NodeType::AudioMixer); - - if (!audioMixerNodePointer.isNull()) { + if (SharedNodePointer audioMixerNodePointer = DependencyManager::get()->soloNodeOfType(NodeType::AudioMixer)) { + mixerRingBufferFrames = (double)_stats->getMixerAvatarStreamStats()._framesAvailableAverage; + outputRingBufferFrames = (double)_stats->getMixerDownstreamStats()._framesAvailableAverage; + audioInputBufferLatency = (double)_stats->getAudioInputMsecsReadStats().getWindowAverage(); inputRingBufferLatency = (double)_stats->getInputRungBufferMsecsAvailableStats().getWindowAverage(); networkRoundtripLatency = (double) audioMixerNodePointer->getPingMs(); - mixerRingBufferLatency = (double)_stats->getMixerAvatarStreamStats()._framesAvailableAverage * - (double)AudioConstants::NETWORK_FRAME_MSECS; - outputRingBufferLatency = (double)downstreamAudioStreamStats._framesAvailableAverage * - (double)AudioConstants::NETWORK_FRAME_MSECS; + mixerRingBufferLatency = mixerRingBufferFrames * (double)AudioConstants::NETWORK_FRAME_MSECS; + outputRingBufferLatency = outputRingBufferFrames * (double)AudioConstants::NETWORK_FRAME_MSECS; audioOutputBufferLatency = (double)_stats->getAudioOutputMsecsUnplayedStats().getWindowAverage(); } - double totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency - + outputRingBufferLatency + audioOutputBufferLatency; - - QString stats = "Audio input buffer: %1ms - avg msecs of samples read to the audio input buffer in last 10s"; - _audioMixerStats.push_back(stats.arg(QString::number(audioInputBufferLatency, 'f', 2))); - - stats = "Input ring buffer: %1ms - avg msecs of 
samples read to the input ring buffer in last 10s"; - _audioMixerStats.push_back(stats.arg(QString::number(inputRingBufferLatency, 'f', 2))); - stats = "Network to mixer: %1ms - half of last ping value calculated by the node list"; - _audioMixerStats.push_back(stats.arg(QString::number((networkRoundtripLatency / 2.0), 'f', 2))); - stats = "Network to client: %1ms - half of last ping value calculated by the node list"; - _audioMixerStats.push_back(stats.arg(QString::number((mixerRingBufferLatency / 2.0),'f', 2))); - stats = "Output ring buffer: %1ms - avg msecs of samples in output ring buffer in last 10s"; - _audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency,'f', 2))); - stats = "Audio output buffer: %1ms - avg msecs of samples in audio output buffer in last 10s"; - _audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency,'f', 2))); - stats = "TOTAL: %1ms - avg msecs of samples in audio output buffer in last 10s"; - _audioMixerStats.push_back(stats.arg(QString::number(totalLatency, 'f', 2))); + double totalLatency = audioInputBufferLatency + inputRingBufferLatency + mixerRingBufferLatency + + outputRingBufferLatency + audioOutputBufferLatency + networkRoundtripLatency; + QString stats; + _audioMixerStats.push_back("PIPELINE (averaged over the past 10s)"); + stats = "Input Read:\t%1 ms"; + _audioMixerStats.push_back(stats.arg(QString::number(audioInputBufferLatency, 'f', 0))); + stats = "Input Ring:\t%1 ms"; + _audioMixerStats.push_back(stats.arg(QString::number(inputRingBufferLatency, 'f', 0))); + stats = "Network (client->mixer):\t%1 ms"; + _audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0))); + stats = "Mixer Ring:\t%1 ms (%2 frames)"; + _audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency, 'f', 0), + QString::number(mixerRingBufferFrames, 'f', 0))); + stats = "Network (mixer->client):\t%1 ms"; + _audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0))); + stats = "Output Ring:\t%1 ms (%2 frames)"; + _audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency, 'f', 0), + QString::number(outputRingBufferFrames, 'f', 0))); + stats = "Output Read:\t%1 ms"; + _audioMixerStats.push_back(stats.arg(QString::number(audioOutputBufferLatency, 'f', 0))); + stats = "TOTAL:\t%1 ms"; + _audioMixerStats.push_back(stats.arg(QString::number(totalLatency, 'f', 0))); const MovingMinMaxAvg& packetSentTimeGaps = _stats->getPacketSentTimeGaps(); _upstreamClientStats.push_back("\nUpstream Mic Audio Packets Sent Gaps (by client):"); - stats = "Inter-packet timegaps (overall) | min: %1, max: %2, avg: %3"; + stats = "Inter-packet timegaps"; + _upstreamClientStats.push_back(stats); + stats = "overall min:\t%1, max:\t%2, avg:\t%3"; stats = stats.arg(formatUsecTime(packetSentTimeGaps.getMin()), formatUsecTime(packetSentTimeGaps.getMax()), formatUsecTime(packetSentTimeGaps.getAverage())); _upstreamClientStats.push_back(stats); - stats = "Inter-packet timegaps (last 30s) | min: %1, max: %2, avg: %3"; + stats = "last window min:\t%1, max:\t%2, avg:\t%3"; stats = stats.arg(formatUsecTime(packetSentTimeGaps.getWindowMin()), formatUsecTime(packetSentTimeGaps.getWindowMax()), formatUsecTime(packetSentTimeGaps.getWindowAverage())); _upstreamClientStats.push_back(stats); - _upstreamMixerStats.push_back("\nUpstream mic audio stats (received and reported by audio-mixer):"); + _upstreamMixerStats.push_back("\nMIXER STREAM"); + _upstreamMixerStats.push_back("(this client's 
remote mixer stream performance)"); - renderAudioStreamStats(&_stats->getMixerAvatarStreamStats(), &_upstreamMixerStats, true); + renderAudioStreamStats(&_stats->getMixerAvatarStreamStats(), &_upstreamMixerStats); - _downstreamStats.push_back("\nDownstream mixed audio stats:"); + _downstreamStats.push_back("\nCLIENT STREAM"); AudioStreamStats downstreamStats = _stats->getMixerDownstreamStats(); - renderAudioStreamStats(&downstreamStats, &_downstreamStats, true); + renderAudioStreamStats(&downstreamStats, &_downstreamStats); if (_shouldShowInjectedStreams) { foreach(const AudioStreamStats& injectedStreamAudioStats, _stats->getMixerInjectedStreamStatsMap()) { - stats = "\nUpstream injected audio stats: stream ID: %1"; + stats = "\nINJECTED STREAM (ID: %1)"; stats = stats.arg(injectedStreamAudioStats._streamIdentifier.toString()); _upstreamInjectedStats.push_back(stats); - renderAudioStreamStats(&injectedStreamAudioStats, &_upstreamInjectedStats, true); + renderAudioStreamStats(&injectedStreamAudioStats, &_upstreamInjectedStats); } } } -void AudioStatsDialog::renderAudioStreamStats(const AudioStreamStats* streamStats, QVector* audioStreamStats, bool isDownstreamStats) { +void AudioStatsDialog::renderAudioStreamStats(const AudioStreamStats* streamStats, QVector* audioStreamStats) { - QString stats = "Packet loss | overall: %1% (%2 lost), last_30s: %3% (%4 lost)"; + QString stats = "Packet Loss"; + audioStreamStats->push_back(stats); + stats = "overall:\t%1%\t(%2 lost), window:\t%3%\t(%4 lost)"; stats = stats.arg(QString::number((int)(streamStats->_packetStreamStats.getLostRate() * 100.0f)), - QString::number((int)(streamStats->_packetStreamStats._lost)), - QString::number((int)(streamStats->_packetStreamWindowStats.getLostRate() * 100.0f)), - QString::number((int)(streamStats->_packetStreamWindowStats._lost))); + QString::number((int)(streamStats->_packetStreamStats._lost)), + QString::number((int)(streamStats->_packetStreamWindowStats.getLostRate() * 100.0f)), + QString::number((int)(streamStats->_packetStreamWindowStats._lost))); audioStreamStats->push_back(stats); - if (isDownstreamStats) { - stats = "Ringbuffer frames | desired: %1, avg_available(10s): %2 + %3, available: %4 + %5"; - stats = stats.arg(QString::number(streamStats->_desiredJitterBufferFrames), - QString::number(streamStats->_framesAvailableAverage), - QString::number((int)((float)_stats->getAudioInputMsecsReadStats().getWindowAverage() / - AudioConstants::NETWORK_FRAME_MSECS)), - QString::number(streamStats->_framesAvailable), - QString::number((int)(_stats->getAudioOutputMsecsUnplayedStats().getCurrentIntervalLastSample() / - AudioConstants::NETWORK_FRAME_MSECS))); - audioStreamStats->push_back(stats); - } else { - stats = "Ringbuffer frames | desired: %1, avg_available(10s): %2, available: %3"; - stats = stats.arg(QString::number(streamStats->_desiredJitterBufferFrames), - QString::number(streamStats->_framesAvailableAverage), - QString::number(streamStats->_framesAvailable)); - audioStreamStats->push_back(stats); - } - - - stats = "Ringbuffer stats | starves: %1, prev_starve_lasted: %2, frames_dropped: %3, overflows: %4"; + stats = "Ringbuffer"; + audioStreamStats->push_back(stats); + stats = "available frames (avg):\t%1\t(%2), desired:\t%3"; + stats = stats.arg(QString::number(streamStats->_framesAvailable), + QString::number(streamStats->_framesAvailableAverage), + QString::number(streamStats->_desiredJitterBufferFrames)); + audioStreamStats->push_back(stats); + stats = "starves:\t%1, last starve duration:\t%2, 
drops:\t%3, overflows:\t%4"; stats = stats.arg(QString::number(streamStats->_starveCount), - QString::number(streamStats->_consecutiveNotMixedCount), - QString::number(streamStats->_framesDropped), - QString::number(streamStats->_overflowCount)); + QString::number(streamStats->_consecutiveNotMixedCount), + QString::number(streamStats->_framesDropped), + QString::number(streamStats->_overflowCount)); audioStreamStats->push_back(stats); + stats = "Inter-packet timegaps"; + audioStreamStats->push_back(stats); - stats = "Inter-packet timegaps (overall) | min: %1, max: %2, avg: %3"; + stats = "overall min:\t%1, max:\t%2, avg:\t%3"; stats = stats.arg(formatUsecTime(streamStats->_timeGapMin), - formatUsecTime(streamStats->_timeGapMax), - formatUsecTime(streamStats->_timeGapAverage)); + formatUsecTime(streamStats->_timeGapMax), + formatUsecTime(streamStats->_timeGapAverage)); audioStreamStats->push_back(stats); - stats = "Inter-packet timegaps (last 30s) | min: %1, max: %2, avg: %3"; + stats = "last window min:\t%1, max:\t%2, avg:\t%3"; stats = stats.arg(formatUsecTime(streamStats->_timeGapWindowMin), - formatUsecTime(streamStats->_timeGapWindowMax), - formatUsecTime(streamStats->_timeGapWindowAverage)); + formatUsecTime(streamStats->_timeGapWindowMax), + formatUsecTime(streamStats->_timeGapWindowAverage)); audioStreamStats->push_back(stats); - } void AudioStatsDialog::clearAllChannels() { diff --git a/interface/src/ui/AudioStatsDialog.h b/interface/src/ui/AudioStatsDialog.h index 3abab258c4..f1c9816a9d 100644 --- a/interface/src/ui/AudioStatsDialog.h +++ b/interface/src/ui/AudioStatsDialog.h @@ -74,7 +74,7 @@ private: void updateStats(QVector& stats, const int channelID); void renderStats(); void clearAllChannels(); - void renderAudioStreamStats(const AudioStreamStats* streamStats, QVector* audioStreamstats, bool isDownstreamStats); + void renderAudioStreamStats(const AudioStreamStats* streamStats, QVector* audioStreamstats); const AudioIOStats* _stats; diff --git a/libraries/audio-client/src/AudioClient.cpp b/libraries/audio-client/src/AudioClient.cpp index 99922284dc..e8471f5dbf 100644 --- a/libraries/audio-client/src/AudioClient.cpp +++ b/libraries/audio-client/src/AudioClient.cpp @@ -47,6 +47,7 @@ #include "PositionalAudioStream.h" #include "AudioClientLogging.h" +#include "AudioLogging.h" #include "AudioClient.h" @@ -1418,6 +1419,7 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) { int bytesWritten; if ((samplesPopped = _receivedAudioStream.popSamples((int)samplesRequested, false)) > 0) { + qCDebug(audiostream, "Read %d samples from buffer (%d available)", samplesPopped, _receivedAudioStream.getSamplesAvailable()); AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput(); lastPopOutput.readSamples((int16_t*)data, samplesPopped); bytesWritten = samplesPopped * sizeof(int16_t); diff --git a/libraries/audio/src/AudioLogging.cpp b/libraries/audio/src/AudioLogging.cpp index 9bb44f5be6..1338bc793d 100644 --- a/libraries/audio/src/AudioLogging.cpp +++ b/libraries/audio/src/AudioLogging.cpp @@ -14,7 +14,7 @@ Q_LOGGING_CATEGORY(audio, "hifi.audio") #if DEV_BUILD || PR_BUILD -Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtDebugMsg) -#else Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtInfoMsg) +#else +Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtWarningMsg) #endif diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 7acefd30b8..7b46cc9565 100644 --- 
a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -163,6 +163,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) { int framesAvailable = _ringBuffer.framesAvailable(); // if this stream was starved, check if we're still starved. if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) { + qCInfo(audiostream, "Starve ended"); _isStarved = false; } // if the ringbuffer exceeds the desired size by more than the threshold specified, @@ -176,8 +177,8 @@ int InboundAudioStream::parseData(ReceivedMessage& message) { _oldFramesDropped += framesToDrop; - qCDebug(audiostream, "Dropped %d frames", framesToDrop); - qCDebug(audiostream, "Resetted current jitter frames"); + qCInfo(audiostream, "Dropped %d frames", framesToDrop); + qCInfo(audiostream, "Reset current jitter frames"); } framesAvailableChanged(); @@ -232,8 +233,8 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) { _currentJitterBufferFrames -= numSilentFramesToDrop; _silentFramesDropped += numSilentFramesToDrop; - qCDebug(audiostream, "Dropped %d silent frames", numSilentFramesToDrop); - qCDebug(audiostream, "Set current jitter frames to %d", _currentJitterBufferFrames); + qCInfo(audiostream, "Dropped %d silent frames", numSilentFramesToDrop); + qCInfo(audiostream, "Set current jitter frames to %d (dropped)", _currentJitterBufferFrames); _framesAvailableStat.reset(); } @@ -315,13 +316,17 @@ void InboundAudioStream::framesAvailableChanged() { if (_framesAvailableStat.getElapsedUsecs() >= FRAMES_AVAILABLE_STAT_WINDOW_USECS) { _currentJitterBufferFrames = (int)ceil(_framesAvailableStat.getAverage()); - qCDebug(audiostream, "Set current jitter frames to %d", _currentJitterBufferFrames); + qCInfo(audiostream, "Set current jitter frames to %d (changed)", _currentJitterBufferFrames); _framesAvailableStat.reset(); } } void InboundAudioStream::setToStarved() { + if (!_isStarved) { + qCInfo(audiostream, "Starved"); + } + _consecutiveNotMixedCount = 0; _starveCount++; // if we have more than the desired frames when setToStarved() is called, then we'll immediately @@ -364,7 +369,7 @@ void InboundAudioStream::setToStarved() { // make sure _desiredJitterBufferFrames does not become lower here if (calculatedJitterBufferFrames >= _desiredJitterBufferFrames) { _desiredJitterBufferFrames = calculatedJitterBufferFrames; - qCDebug(audiostream, "Set desired jitter frames to %d", _desiredJitterBufferFrames); + qCInfo(audiostream, "Set desired jitter frames to %d (starved)", _desiredJitterBufferFrames); } } } @@ -454,7 +459,7 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() { / (float)AudioConstants::NETWORK_FRAME_USECS); if (calculatedJitterBufferFrames < _desiredJitterBufferFrames) { _desiredJitterBufferFrames = calculatedJitterBufferFrames; - qCDebug(audiostream, "Set desired jitter frames to %d", _desiredJitterBufferFrames); + qCInfo(audiostream, "Set desired jitter frames to %d (reduced)", _desiredJitterBufferFrames); } _timeGapStatsForDesiredReduction.clearNewStatsAvailableFlag(); } diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 1290d43ef8..6b1db9d812 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -161,6 +161,7 @@ public: int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); } int getFramesAvailable() const { return _ringBuffer.framesAvailable(); } double getFramesAvailableAverage() const { return 
_framesAvailableStat.getAverage(); } + int getSamplesAvailable() const { return _ringBuffer.samplesAvailable(); } bool isStarved() const { return _isStarved; } bool hasStarted() const { return _hasStarted; } diff --git a/libraries/audio/src/MixedProcessedAudioStream.cpp b/libraries/audio/src/MixedProcessedAudioStream.cpp index 728deae0b1..ca5a670bd4 100644 --- a/libraries/audio/src/MixedProcessedAudioStream.cpp +++ b/libraries/audio/src/MixedProcessedAudioStream.cpp @@ -10,6 +10,7 @@ // #include "MixedProcessedAudioStream.h" +#include "AudioLogging.h" static const int STEREO_FACTOR = 2; @@ -56,6 +57,7 @@ int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& emit processSamples(decodedBuffer, outputBuffer); _ringBuffer.writeData(outputBuffer.data(), outputBuffer.size()); + qCDebug(audiostream, "Wrote %d samples to buffer (%d available)", outputBuffer.size() / (int)sizeof(int16_t), getSamplesAvailable()); return packetAfterStreamProperties.size(); } From 177466e4c72e23ab5391c48508bf9c0b630f13ec Mon Sep 17 00:00:00 2001 From: Zach Pomerantz Date: Thu, 15 Sep 2016 17:54:41 -0700 Subject: [PATCH 2/6] calculate unplayed ms on all streams/buffers as max --- .../src/audio/AudioMixerClientData.cpp | 3 + interface/src/ui/AudioStatsDialog.cpp | 29 +++---- libraries/audio-client/src/AudioClient.cpp | 36 ++++----- libraries/audio-client/src/AudioClient.h | 4 - libraries/audio-client/src/AudioIOStats.cpp | 76 +++++++++++-------- libraries/audio-client/src/AudioIOStats.h | 32 ++++---- libraries/audio/src/AudioStreamStats.h | 1 + libraries/audio/src/InboundAudioStream.cpp | 12 ++- libraries/audio/src/InboundAudioStream.h | 1 + 9 files changed, 102 insertions(+), 92 deletions(-) diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index 0a45137da6..42d385c1f6 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -270,6 +270,7 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() { downstreamStats["desired"] = streamStats._desiredJitterBufferFrames; downstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage; downstreamStats["available"] = (double) streamStats._framesAvailable; + downstreamStats["unplayed"] = (double) streamStats._unplayedMs; downstreamStats["starves"] = (double) streamStats._starveCount; downstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount; downstreamStats["overflows"] = (double) streamStats._overflowCount; @@ -294,6 +295,7 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() { upstreamStats["desired_calc"] = avatarAudioStream->getCalculatedJitterBufferFrames(); upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage; upstreamStats["available"] = (double) streamStats._framesAvailable; + upstreamStats["unplayed"] = (double) streamStats._unplayedMs; upstreamStats["starves"] = (double) streamStats._starveCount; upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount; upstreamStats["overflows"] = (double) streamStats._overflowCount; @@ -323,6 +325,7 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() { upstreamStats["desired_calc"] = injectorPair.second->getCalculatedJitterBufferFrames(); upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage; upstreamStats["available"] = (double) streamStats._framesAvailable; + upstreamStats["unplayed"] = (double) streamStats._unplayedMs; upstreamStats["starves"] = (double) 
streamStats._starveCount; upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount; upstreamStats["overflows"] = (double) streamStats._overflowCount; diff --git a/interface/src/ui/AudioStatsDialog.cpp b/interface/src/ui/AudioStatsDialog.cpp index 5938add8c2..a0a92558af 100644 --- a/interface/src/ui/AudioStatsDialog.cpp +++ b/interface/src/ui/AudioStatsDialog.cpp @@ -120,17 +120,14 @@ void AudioStatsDialog::renderStats() { audioOutputBufferLatency = 0.0; if (SharedNodePointer audioMixerNodePointer = DependencyManager::get()->soloNodeOfType(NodeType::AudioMixer)) { - mixerRingBufferFrames = (double)_stats->getMixerAvatarStreamStats()._framesAvailableAverage; - outputRingBufferFrames = (double)_stats->getMixerDownstreamStats()._framesAvailableAverage; - - audioInputBufferLatency = (double)_stats->getAudioInputMsecsReadStats().getWindowAverage(); - inputRingBufferLatency = (double)_stats->getInputRungBufferMsecsAvailableStats().getWindowAverage(); - networkRoundtripLatency = (double) audioMixerNodePointer->getPingMs(); - mixerRingBufferLatency = mixerRingBufferFrames * (double)AudioConstants::NETWORK_FRAME_MSECS; - outputRingBufferLatency = outputRingBufferFrames * (double)AudioConstants::NETWORK_FRAME_MSECS; - audioOutputBufferLatency = (double)_stats->getAudioOutputMsecsUnplayedStats().getWindowAverage(); + audioInputBufferLatency = (double)_stats->getInputMsRead().getWindowMax(); + inputRingBufferLatency = (double)_stats->getInputMsUnplayed().getWindowMax(); + networkRoundtripLatency = (double)audioMixerNodePointer->getPingMs(); + mixerRingBufferLatency = (double)_stats->getMixerAvatarStreamStats()._unplayedMs; + outputRingBufferLatency = (double)_stats->getMixerDownstreamStats()._unplayedMs; + audioOutputBufferLatency = (double)_stats->getOutputMsUnplayed().getWindowMax(); } - + double totalLatency = audioInputBufferLatency + inputRingBufferLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency + networkRoundtripLatency; @@ -142,20 +139,18 @@ void AudioStatsDialog::renderStats() { _audioMixerStats.push_back(stats.arg(QString::number(inputRingBufferLatency, 'f', 0))); stats = "Network (client->mixer):\t%1 ms"; _audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0))); - stats = "Mixer Ring:\t%1 ms (%2 frames)"; - _audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency, 'f', 0), - QString::number(mixerRingBufferFrames, 'f', 0))); + stats = "Mixer Ring:\t%1 ms"; + _audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency, 'f', 0))); stats = "Network (mixer->client):\t%1 ms"; _audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0))); - stats = "Output Ring:\t%1 ms (%2 frames)"; - _audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency, 'f', 0), - QString::number(outputRingBufferFrames, 'f', 0))); + stats = "Output Ring:\t%1 ms"; + _audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency, 'f', 0))); stats = "Output Read:\t%1 ms"; _audioMixerStats.push_back(stats.arg(QString::number(audioOutputBufferLatency, 'f', 0))); stats = "TOTAL:\t%1 ms"; _audioMixerStats.push_back(stats.arg(QString::number(totalLatency, 'f', 0))); - const MovingMinMaxAvg& packetSentTimeGaps = _stats->getPacketSentTimeGaps(); + const MovingMinMaxAvg& packetSentTimeGaps = _stats->getPacketTimegaps(); _upstreamClientStats.push_back("\nUpstream Mic Audio Packets Sent Gaps (by client):"); diff --git 
a/libraries/audio-client/src/AudioClient.cpp b/libraries/audio-client/src/AudioClient.cpp index e8471f5dbf..1f4268ca37 100644 --- a/libraries/audio-client/src/AudioClient.cpp +++ b/libraries/audio-client/src/AudioClient.cpp @@ -123,12 +123,11 @@ AudioClient::AudioClient() : _outputBufferSizeFrames("audioOutputBufferSizeFrames", DEFAULT_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES), _sessionOutputBufferSizeFrames(_outputBufferSizeFrames.get()), _outputStarveDetectionEnabled("audioOutputBufferStarveDetectionEnabled", - DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_ENABLED), + DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_ENABLED), _outputStarveDetectionPeriodMsec("audioOutputStarveDetectionPeriod", - DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_PERIOD), + DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_PERIOD), _outputStarveDetectionThreshold("audioOutputStarveDetectionThreshold", - DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_THRESHOLD), - _averagedLatency(0.0f), + DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_THRESHOLD), _lastInputLoudness(0.0f), _timeSinceLastClip(-1.0f), _muted(false), @@ -854,7 +853,7 @@ void AudioClient::handleAudioInput() { _inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size()); float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC)); - _stats.updateInputMsecsRead(audioInputMsecsRead); + _stats.updateInputMsRead(audioInputMsecsRead); const int numNetworkBytes = _isStereoInput ? AudioConstants::NETWORK_FRAME_BYTES_STEREO @@ -942,6 +941,10 @@ void AudioClient::handleAudioInput() { emitAudioPacket(encodedBuffer.constData(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, audioTransform, packetType, _selectedCodecName); _stats.sentPacket(); + + int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * sizeof(int16_t); + float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC)); + _stats.updateInputMsUnplayed(msecsInInputRingBuffer); } } @@ -1358,22 +1361,6 @@ int AudioClient::calculateNumberOfFrameSamples(int numBytes) const { return frameSamples; } -float AudioClient::getInputRingBufferMsecsAvailable() const { - int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * sizeof(int16_t); - float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC)); - return msecsInInputRingBuffer; -} - -float AudioClient::getAudioOutputMsecsUnplayed() const { - if (!_audioOutput) { - return 0.0f; - } - int bytesAudioOutputUnplayed = _audioOutput->bufferSize() - _audioOutput->bytesFree(); - float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_outputFormat.bytesForDuration(USECS_PER_MSEC); - return msecsAudioOutputUnplayed; -} - - float AudioClient::azimuthForSource(const glm::vec3& relativePosition) { // copied from AudioMixer, more or less glm::quat inverseOrientation = glm::inverse(_orientationGetter()); @@ -1430,8 +1417,11 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) { bytesWritten = maxSize; } - bool wasBufferStarved = _audio->_audioOutput->bufferSize() == _audio->_audioOutput->bytesFree(); - if (wasBufferStarved) { + int bytesAudioOutputUnplayed = _audio->_audioOutput->bufferSize() - _audio->_audioOutput->bytesFree(); + float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_audio->_outputFormat.bytesForDuration(USECS_PER_MSEC); + _audio->_stats.updateOutputMsUnplayed(msecsAudioOutputUnplayed); + + if (bytesAudioOutputUnplayed == 0) { _unfulfilledReads++; } diff --git 
a/libraries/audio-client/src/AudioClient.h b/libraries/audio-client/src/AudioClient.h index 36cda8050e..c4d32e8694 100644 --- a/libraries/audio-client/src/AudioClient.h +++ b/libraries/audio-client/src/AudioClient.h @@ -121,9 +121,6 @@ public: const AudioIOStats& getStats() const { return _stats; } - float getInputRingBufferMsecsAvailable() const; - float getAudioOutputMsecsUnplayed() const; - int getOutputBufferSize() { return _outputBufferSizeFrames.get(); } bool getOutputStarveDetectionEnabled() { return _outputStarveDetectionEnabled.get(); } @@ -284,7 +281,6 @@ private: StDev _stdev; QElapsedTimer _timeSinceLastReceived; - float _averagedLatency; float _lastInputLoudness; float _timeSinceLastClip; int _totalInputAudioSamples; diff --git a/libraries/audio-client/src/AudioIOStats.cpp b/libraries/audio-client/src/AudioIOStats.cpp index 6896c7fd6b..330854058f 100644 --- a/libraries/audio-client/src/AudioIOStats.cpp +++ b/libraries/audio-client/src/AudioIOStats.cpp @@ -18,54 +18,73 @@ #include "AudioIOStats.h" -const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10; +// This is called 5x/sec (see AudioStatsDialog), and we want it to log the last 5s +static const int INPUT_READS_WINDOW = 25; +static const int INPUT_UNPLAYED_WINDOW = 25; +static const int OUTPUT_UNPLAYED_WINDOW = 25; -const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.0f / AudioConstants::NETWORK_FRAME_MSECS); +static const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.0f / AudioConstants::NETWORK_FRAME_MSECS); AudioIOStats::AudioIOStats(MixedProcessedAudioStream* receivedAudioStream) : _receivedAudioStream(receivedAudioStream), - _audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AudioConstants::NETWORK_FRAME_MSECS * AudioClient::CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS), - _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS), - _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS), - _lastSentAudioPacket(0), - _packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS) + _inputMsRead(0, INPUT_READS_WINDOW), + _inputMsUnplayed(0, INPUT_UNPLAYED_WINDOW), + _outputMsUnplayed(0, OUTPUT_UNPLAYED_WINDOW), + _lastSentPacketTime(0), + _packetTimegaps(0, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS) { - -} - -AudioStreamStats AudioIOStats::getMixerDownstreamStats() const { - return _receivedAudioStream->getAudioStreamStats(); } void AudioIOStats::reset() { _receivedAudioStream->resetStats(); + _inputMsRead.reset(); + _inputMsUnplayed.reset(); + _outputMsUnplayed.reset(); + _packetTimegaps.reset(); + _mixerAvatarStreamStats = AudioStreamStats(); _mixerInjectedStreamStatsMap.clear(); - - _audioInputMsecsReadStats.reset(); - _inputRingBufferMsecsAvailableStats.reset(); - - _audioOutputMsecsUnplayedStats.reset(); - _packetSentTimeGaps.reset(); } void AudioIOStats::sentPacket() { // first time this is 0 - if (_lastSentAudioPacket == 0) { - _lastSentAudioPacket = usecTimestampNow(); + if (_lastSentPacketTime == 0) { + _lastSentPacketTime = usecTimestampNow(); } else { quint64 now = usecTimestampNow(); - quint64 gap = now - _lastSentAudioPacket; - _packetSentTimeGaps.update(gap); - - _lastSentAudioPacket = now; + quint64 gap = now - _lastSentPacketTime; + _lastSentPacketTime = now; + _packetTimegaps.update(gap); } } -void AudioIOStats::processStreamStatsPacket(QSharedPointer message, SharedNodePointer sendingNode) { +const MovingMinMaxAvg& AudioIOStats::getInputMsRead() const { + _inputMsRead.currentIntervalComplete(); + return 
_inputMsRead; +} +const MovingMinMaxAvg& AudioIOStats::getInputMsUnplayed() const { + _inputMsUnplayed.currentIntervalComplete(); + return _inputMsUnplayed; +} + +const MovingMinMaxAvg& AudioIOStats::getOutputMsUnplayed() const { + _outputMsUnplayed.currentIntervalComplete(); + return _outputMsUnplayed; +} + +const MovingMinMaxAvg& AudioIOStats::getPacketTimegaps() const { + _packetTimegaps.currentIntervalComplete(); + return _packetTimegaps; +} + +const AudioStreamStats AudioIOStats::getMixerDownstreamStats() const { + return _receivedAudioStream->getAudioStreamStats(); +} + +void AudioIOStats::processStreamStatsPacket(QSharedPointer message, SharedNodePointer sendingNode) { // parse the appendFlag, clear injected audio stream stats if 0 quint8 appendFlag; message->readPrimitive(&appendFlag); @@ -92,14 +111,9 @@ void AudioIOStats::processStreamStatsPacket(QSharedPointer mess } void AudioIOStats::sendDownstreamAudioStatsPacket() { - auto audioIO = DependencyManager::get(); - // since this function is called every second, we'll sample for some of our stats here - _inputRingBufferMsecsAvailableStats.update(audioIO->getInputRingBufferMsecsAvailable()); - _audioOutputMsecsUnplayedStats.update(audioIO->getAudioOutputMsecsUnplayed()); - - // also, call _receivedAudioStream's per-second callback + // call _receivedAudioStream's per-second callback _receivedAudioStream->perSecondCallbackForUpdatingStats(); auto nodeList = DependencyManager::get(); diff --git a/libraries/audio-client/src/AudioIOStats.h b/libraries/audio-client/src/AudioIOStats.h index 2745deac2c..45217c5af6 100644 --- a/libraries/audio-client/src/AudioIOStats.h +++ b/libraries/audio-client/src/AudioIOStats.h @@ -29,19 +29,20 @@ public: void reset(); - void updateInputMsecsRead(float msecsRead) { _audioInputMsecsReadStats.update(msecsRead); } + void updateInputMsRead(float ms) { _inputMsRead.update(ms); } + void updateInputMsUnplayed(float ms) { _inputMsUnplayed.update(ms); } + void updateOutputMsUnplayed(float ms) { _outputMsUnplayed.update(ms); } void sentPacket(); - AudioStreamStats getMixerDownstreamStats() const; + const MovingMinMaxAvg& getInputMsRead() const; + const MovingMinMaxAvg& getInputMsUnplayed() const; + const MovingMinMaxAvg& getOutputMsUnplayed() const; + const MovingMinMaxAvg& getPacketTimegaps() const; + + const AudioStreamStats getMixerDownstreamStats() const; const AudioStreamStats& getMixerAvatarStreamStats() const { return _mixerAvatarStreamStats; } const QHash& getMixerInjectedStreamStatsMap() const { return _mixerInjectedStreamStatsMap; } - const MovingMinMaxAvg& getAudioInputMsecsReadStats() const { return _audioInputMsecsReadStats; } - const MovingMinMaxAvg& getInputRungBufferMsecsAvailableStats() const { return _inputRingBufferMsecsAvailableStats; } - const MovingMinMaxAvg& getAudioOutputMsecsUnplayedStats() const { return _audioOutputMsecsUnplayedStats; } - - const MovingMinMaxAvg& getPacketSentTimeGaps() const { return _packetSentTimeGaps; } - void sendDownstreamAudioStatsPacket(); public slots: @@ -49,17 +50,16 @@ public slots: private: MixedProcessedAudioStream* _receivedAudioStream; - - MovingMinMaxAvg _audioInputMsecsReadStats; - MovingMinMaxAvg _inputRingBufferMsecsAvailableStats; - - MovingMinMaxAvg _audioOutputMsecsUnplayedStats; + + mutable MovingMinMaxAvg _inputMsRead; + mutable MovingMinMaxAvg _inputMsUnplayed; + mutable MovingMinMaxAvg _outputMsUnplayed; + + quint64 _lastSentPacketTime; + mutable MovingMinMaxAvg _packetTimegaps; AudioStreamStats _mixerAvatarStreamStats; QHash 
_mixerInjectedStreamStatsMap; - - quint64 _lastSentAudioPacket; - MovingMinMaxAvg _packetSentTimeGaps; }; #endif // hifi_AudioIOStats_h diff --git a/libraries/audio/src/AudioStreamStats.h b/libraries/audio/src/AudioStreamStats.h index 148fad6557..046c4e5a47 100644 --- a/libraries/audio/src/AudioStreamStats.h +++ b/libraries/audio/src/AudioStreamStats.h @@ -48,6 +48,7 @@ public: quint32 _framesAvailable; quint16 _framesAvailableAverage; + quint16 _unplayedMs; quint16 _desiredJitterBufferFrames; quint32 _starveCount; quint32 _consecutiveNotMixedCount; diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 7b46cc9565..b36042386a 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -18,7 +18,10 @@ #include "InboundAudioStream.h" #include "AudioLogging.h" -const int STARVE_HISTORY_CAPACITY = 50; +static const int STARVE_HISTORY_CAPACITY = 50; + +// This is called 1x/s, and we want it to log the last 5s +static const int UNPLAYED_MS_WINDOW_SECS = 5; InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings) : _ringBuffer(numFrameSamples, numFramesCapacity), @@ -46,6 +49,7 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit _starveHistory(STARVE_HISTORY_CAPACITY), _starveThreshold(settings._windowStarveThreshold), _framesAvailableStat(), + _unplayedMs(0, UNPLAYED_MS_WINDOW_SECS), _currentJitterBufferFrames(0), _timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS), _repetitionWithFade(settings._repetitionWithFade), @@ -82,6 +86,7 @@ void InboundAudioStream::resetStats() { _framesAvailableStat.reset(); _currentJitterBufferFrames = 0; _timeGapStatsForStatsPacket.reset(); + _unplayedMs.reset(); } void InboundAudioStream::clearBuffer() { @@ -101,6 +106,7 @@ void InboundAudioStream::perSecondCallbackForUpdatingStats() { _timeGapStatsForDesiredCalcOnTooManyStarves.currentIntervalComplete(); _timeGapStatsForDesiredReduction.currentIntervalComplete(); _timeGapStatsForStatsPacket.currentIntervalComplete(); + _unplayedMs.currentIntervalComplete(); } int InboundAudioStream::parseData(ReceivedMessage& message) { @@ -303,6 +309,9 @@ int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing, bool starveI } void InboundAudioStream::popSamplesNoCheck(int samples) { + float unplayedMs = (_ringBuffer.samplesAvailable() / (float)_ringBuffer.getNumFrameSamples()) * AudioConstants::NETWORK_FRAME_MSECS; + _unplayedMs.update(unplayedMs); + _lastPopOutput = _ringBuffer.nextOutput(); _ringBuffer.shiftReadPosition(samples); framesAvailableChanged(); @@ -507,6 +516,7 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const { streamStats._framesAvailable = _ringBuffer.framesAvailable(); streamStats._framesAvailableAverage = _framesAvailableStat.getAverage(); + streamStats._unplayedMs = (quint16)_unplayedMs.getWindowMax(); streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames; streamStats._starveCount = _starveCount; streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount; diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 6b1db9d812..b61a9ed167 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -265,6 +265,7 @@ protected: int _starveThreshold; TimeWeightedAvg _framesAvailableStat; + MovingMinMaxAvg _unplayedMs; // this value is periodically updated with the time-weighted avg 
from _framesAvailableStat. it is only used for // dropping silent frames right now. From 5b03d3e13d75e9777243b720fa8f193345f32a36 Mon Sep 17 00:00:00 2001 From: Zach Pomerantz Date: Thu, 15 Sep 2016 17:58:41 -0700 Subject: [PATCH 3/6] render audio network stats immediately --- interface/src/ui/AudioStatsDialog.cpp | 53 ++++++++++++++------------- interface/src/ui/AudioStatsDialog.h | 10 ++--- 2 files changed, 33 insertions(+), 30 deletions(-) diff --git a/interface/src/ui/AudioStatsDialog.cpp b/interface/src/ui/AudioStatsDialog.cpp index a0a92558af..2ea3559889 100644 --- a/interface/src/ui/AudioStatsDialog.cpp +++ b/interface/src/ui/AudioStatsDialog.cpp @@ -58,21 +58,19 @@ void AudioStatsDisplay::updatedDisplay(QString str) { AudioStatsDialog::AudioStatsDialog(QWidget* parent) : QDialog(parent, Qt::Window | Qt::WindowCloseButtonHint | Qt::WindowStaysOnTopHint) { - _shouldShowInjectedStreams = false; - setWindowTitle("Audio Network Statistics"); - + // Get statistics from the Audio Client _stats = &DependencyManager::get()->getStats(); // Create layout _form = new QFormLayout(); _form->setSizeConstraint(QLayout::SetFixedSize); - QDialog::setLayout(_form); - // Load and initialize all channels - renderStats(); + // Initialize channels' content (needed to correctly size channels) + updateStats(); + // Create channels _audioDisplayChannels = QVector>(1); _audioMixerID = addChannel(_form, _audioMixerStats, COLOR0); @@ -80,9 +78,16 @@ AudioStatsDialog::AudioStatsDialog(QWidget* parent) : _upstreamMixerID = addChannel(_form, _upstreamMixerStats, COLOR2); _downstreamID = addChannel(_form, _downstreamStats, COLOR3); _upstreamInjectedID = addChannel(_form, _upstreamInjectedStats, COLOR0); - - connect(averageUpdateTimer, SIGNAL(timeout()), this, SLOT(updateTimerTimeout())); + + // Initialize channels + updateChannels(); + + // Future renders + connect(averageUpdateTimer, SIGNAL(timeout()), this, SLOT(renderStats())); averageUpdateTimer->start(200); + + // Initial render + QDialog::setLayout(_form); } int AudioStatsDialog::addChannel(QFormLayout* form, QVector& stats, const unsigned color) { @@ -99,13 +104,26 @@ int AudioStatsDialog::addChannel(QFormLayout* form, QVector& stats, con return channelID; } -void AudioStatsDialog::updateStats(QVector& stats, int channelID) { +void AudioStatsDialog::renderStats() { + updateStats(); + updateChannels(); +} + +void AudioStatsDialog::updateChannels() { + updateChannel(_audioMixerStats, _audioMixerID); + updateChannel(_upstreamClientStats, _upstreamClientID); + updateChannel(_upstreamMixerStats, _upstreamMixerID); + updateChannel(_downstreamStats, _downstreamID); + updateChannel(_upstreamInjectedStats, _upstreamInjectedID); +} + +void AudioStatsDialog::updateChannel(QVector& stats, int channelID) { // Update all stat displays at specified channel for (int i = 0; i < stats.size(); i++) _audioDisplayChannels[channelID].at(i)->updatedDisplay(stats.at(i)); } -void AudioStatsDialog::renderStats() { +void AudioStatsDialog::updateStats() { // Clear current stats from all vectors clearAllChannels(); @@ -244,21 +262,6 @@ void AudioStatsDialog::clearAllChannels() { _upstreamInjectedStats.clear(); } - -void AudioStatsDialog::updateTimerTimeout() { - - renderStats(); - - // Update all audio stats - updateStats(_audioMixerStats, _audioMixerID); - updateStats(_upstreamClientStats, _upstreamClientID); - updateStats(_upstreamMixerStats, _upstreamMixerID); - updateStats(_downstreamStats, _downstreamID); - updateStats(_upstreamInjectedStats, _upstreamInjectedID); - -} - - void 
AudioStatsDialog::paintEvent(QPaintEvent* event) { // Repaint each stat in each channel diff --git a/interface/src/ui/AudioStatsDialog.h b/interface/src/ui/AudioStatsDialog.h index f1c9816a9d..59da056de4 100644 --- a/interface/src/ui/AudioStatsDialog.h +++ b/interface/src/ui/AudioStatsDialog.h @@ -70,9 +70,10 @@ private: QVector> _audioDisplayChannels; + void updateStats(); int addChannel(QFormLayout* form, QVector& stats, const unsigned color); - void updateStats(QVector& stats, const int channelID); - void renderStats(); + void updateChannel(QVector& stats, const int channelID); + void updateChannels(); void clearAllChannels(); void renderAudioStreamStats(const AudioStreamStats* streamStats, QVector* audioStreamstats); @@ -80,8 +81,7 @@ private: const AudioIOStats* _stats; QFormLayout* _form; - bool _isEnabled; - bool _shouldShowInjectedStreams; + bool _shouldShowInjectedStreams{ false }; signals: @@ -93,7 +93,7 @@ signals: void reject() override; - void updateTimerTimeout(); + void renderStats(); protected: From bbbe070af10f2dd8262876be0544fc5c22e4db7f Mon Sep 17 00:00:00 2001 From: Zach Pomerantz Date: Thu, 15 Sep 2016 17:58:58 -0700 Subject: [PATCH 4/6] bring audio network latencies to standard --- interface/src/ui/AudioStatsDialog.cpp | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/interface/src/ui/AudioStatsDialog.cpp b/interface/src/ui/AudioStatsDialog.cpp index 2ea3559889..e3cca9f0fe 100644 --- a/interface/src/ui/AudioStatsDialog.cpp +++ b/interface/src/ui/AudioStatsDialog.cpp @@ -128,14 +128,12 @@ void AudioStatsDialog::updateStats() { // Clear current stats from all vectors clearAllChannels(); - double mixerRingBufferFrames = 0.0, - outputRingBufferFrames = 0.0; - double audioInputBufferLatency = 0.0, - inputRingBufferLatency = 0.0, - networkRoundtripLatency = 0.0, - mixerRingBufferLatency = 0.0, - outputRingBufferLatency = 0.0, - audioOutputBufferLatency = 0.0; + double audioInputBufferLatency{ 0.0 }; + double inputRingBufferLatency{ 0.0 }; + double networkRoundtripLatency{ 0.0 }; + double mixerRingBufferLatency{ 0.0 }; + double outputRingBufferLatency{ 0.0 }; + double audioOutputBufferLatency{ 0.0 }; if (SharedNodePointer audioMixerNodePointer = DependencyManager::get()->soloNodeOfType(NodeType::AudioMixer)) { audioInputBufferLatency = (double)_stats->getInputMsRead().getWindowMax(); From 0d3a9af6a6a053d551568b0f65e4b8ef4e9878d9 Mon Sep 17 00:00:00 2001 From: Zach Pomerantz Date: Thu, 15 Sep 2016 14:48:13 -0700 Subject: [PATCH 5/6] wait 10s before growing jitter --- libraries/audio/src/InboundAudioStream.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 7acefd30b8..6e7ab3e613 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -420,7 +420,7 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() { // update our timegap stats and desired jitter buffer frames if necessary // discard the first few packets we receive since they usually have gaps that aren't represensative of normal jitter - const quint32 NUM_INITIAL_PACKETS_DISCARD = 3; + const quint32 NUM_INITIAL_PACKETS_DISCARD = 1000; // 10s quint64 now = usecTimestampNow(); if (_incomingSequenceNumberStats.getReceived() > NUM_INITIAL_PACKETS_DISCARD) { quint64 gap = now - _lastPacketReceivedTime; From b6d093889c2e3baa1562f61242cdd9d1dba1c44d Mon Sep 17 00:00:00 2001 From: Zach Pomerantz Date: Fri, 16 Sep 2016 14:20:50 
-0700 Subject: [PATCH 6/6] add AudioConstants::SAMPLE_SIZE --- libraries/audio-client/src/AudioClient.cpp | 22 +++++++++++----------- libraries/audio/src/AudioConstants.h | 5 +++-- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/libraries/audio-client/src/AudioClient.cpp b/libraries/audio-client/src/AudioClient.cpp index 1f4268ca37..ffafc97298 100644 --- a/libraries/audio-client/src/AudioClient.cpp +++ b/libraries/audio-client/src/AudioClient.cpp @@ -441,7 +441,7 @@ void possibleResampling(AudioSRC* resampler, if (!sampleChannelConversion(sourceSamples, destinationSamples, numSourceSamples, sourceAudioFormat, destinationAudioFormat)) { // no conversion, we can copy the samples directly across - memcpy(destinationSamples, sourceSamples, numSourceSamples * sizeof(int16_t)); + memcpy(destinationSamples, sourceSamples, numSourceSamples * AudioConstants::SAMPLE_SIZE); } } else { @@ -815,10 +815,10 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) { static QByteArray loopBackByteArray; - int numInputSamples = inputByteArray.size() / sizeof(int16_t); + int numInputSamples = inputByteArray.size() / AudioConstants::SAMPLE_SIZE; int numLoopbackSamples = numDestinationSamplesRequired(_inputFormat, _outputFormat, numInputSamples); - loopBackByteArray.resize(numLoopbackSamples * sizeof(int16_t)); + loopBackByteArray.resize(numLoopbackSamples * AudioConstants::SAMPLE_SIZE); int16_t* inputSamples = reinterpret_cast(inputByteArray.data()); int16_t* loopbackSamples = reinterpret_cast(loopBackByteArray.data()); @@ -826,7 +826,7 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) { // upmix mono to stereo if (!sampleChannelConversion(inputSamples, loopbackSamples, numInputSamples, _inputFormat, _outputFormat)) { // no conversion, just copy the samples - memcpy(loopbackSamples, inputSamples, numInputSamples * sizeof(int16_t)); + memcpy(loopbackSamples, inputSamples, numInputSamples * AudioConstants::SAMPLE_SIZE); } // apply stereo reverb at the source, to the loopback audio @@ -942,7 +942,7 @@ void AudioClient::handleAudioInput() { emitAudioPacket(encodedBuffer.constData(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, audioTransform, packetType, _selectedCodecName); _stats.sentPacket(); - int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * sizeof(int16_t); + int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * AudioConstants::SAMPLE_SIZE; float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC)); _stats.updateInputMsUnplayed(msecsInInputRingBuffer); } @@ -1025,7 +1025,7 @@ void AudioClient::processReceivedSamples(const QByteArray& decodedBuffer, QByteA const int16_t* decodedSamples = reinterpret_cast(decodedBuffer.data()); assert(decodedBuffer.size() == AudioConstants::NETWORK_FRAME_BYTES_STEREO); - outputBuffer.resize(_outputFrameSize * sizeof(int16_t)); + outputBuffer.resize(_outputFrameSize * AudioConstants::SAMPLE_SIZE); int16_t* outputSamples = reinterpret_cast(outputBuffer.data()); // convert network audio to float @@ -1283,7 +1283,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDevice // setup our general output device for audio-mixer audio _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this); int osDefaultBufferSize = _audioOutput->bufferSize(); - int requestedSize = _sessionOutputBufferSizeFrames *_outputFrameSize * sizeof(int16_t); + int requestedSize = _sessionOutputBufferSizeFrames 
*_outputFrameSize * AudioConstants::SAMPLE_SIZE; _audioOutput->setBufferSize(requestedSize); connect(_audioOutput, &QAudioOutput::notify, this, &AudioClient::outputNotify); @@ -1295,7 +1295,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDevice _audioOutput->start(&_audioOutputIODevice); lock.unlock(); - qCDebug(audioclient) << "Output Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize << + qCDebug(audioclient) << "Output Buffer capacity in frames: " << _audioOutput->bufferSize() / AudioConstants::SAMPLE_SIZE / (float)_outputFrameSize << "requested bytes:" << requestedSize << "actual bytes:" << _audioOutput->bufferSize() << "os default:" << osDefaultBufferSize << "period size:" << _audioOutput->periodSize(); @@ -1357,7 +1357,7 @@ int AudioClient::calculateNumberOfInputCallbackBytes(const QAudioFormat& format) } int AudioClient::calculateNumberOfFrameSamples(int numBytes) const { - int frameSamples = (int)(numBytes * CALLBACK_ACCELERATOR_RATIO + 0.5f) / sizeof(int16_t); + int frameSamples = (int)(numBytes * CALLBACK_ACCELERATOR_RATIO + 0.5f) / AudioConstants::SAMPLE_SIZE; return frameSamples; } @@ -1401,7 +1401,7 @@ float AudioClient::gainForSource(float distance, float volume) { } qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) { - auto samplesRequested = maxSize / sizeof(int16_t); + auto samplesRequested = maxSize / AudioConstants::SAMPLE_SIZE; int samplesPopped; int bytesWritten; @@ -1409,7 +1409,7 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) { qCDebug(audiostream, "Read %d samples from buffer (%d available)", samplesPopped, _receivedAudioStream.getSamplesAvailable()); AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput(); lastPopOutput.readSamples((int16_t*)data, samplesPopped); - bytesWritten = samplesPopped * sizeof(int16_t); + bytesWritten = samplesPopped * AudioConstants::SAMPLE_SIZE; } else { // nothing on network, don't grab anything from injectors, and just return 0s // this will flood the log: qCDebug(audioclient, "empty/partial network buffer"); diff --git a/libraries/audio/src/AudioConstants.h b/libraries/audio/src/AudioConstants.h index b8ad94e669..353d9ddd9d 100644 --- a/libraries/audio/src/AudioConstants.h +++ b/libraries/audio/src/AudioConstants.h @@ -23,15 +23,16 @@ namespace AudioConstants { typedef int16_t AudioSample; + const int SAMPLE_SIZE = sizeof(AudioSample); inline const char* getAudioFrameName() { return "com.highfidelity.recording.Audio"; } const int MAX_CODEC_NAME_LENGTH = 30; const int MAX_CODEC_NAME_LENGTH_ON_WIRE = MAX_CODEC_NAME_LENGTH + sizeof(uint32_t); const int NETWORK_FRAME_BYTES_STEREO = 960; - const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / sizeof(AudioSample); + const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / SAMPLE_SIZE; const int NETWORK_FRAME_BYTES_PER_CHANNEL = NETWORK_FRAME_BYTES_STEREO / 2; - const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / sizeof(AudioSample); + const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / SAMPLE_SIZE; const float NETWORK_FRAME_SECS = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL / float(AudioConstants::SAMPLE_RATE)); const float NETWORK_FRAME_MSECS = NETWORK_FRAME_SECS * 1000.0f; const float NETWORK_FRAMES_PER_SEC = 1.0f / NETWORK_FRAME_SECS;
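
For illustration only (not part of the patch series): a minimal standalone sketch of the byte/sample conversions that the final patch standardizes on AudioConstants::SAMPLE_SIZE in place of sizeof(int16_t). The namespace below only mirrors the constants shown in the AudioConstants.h hunk so the sketch compiles on its own, and the buffer sizes used in main() are hypothetical values, not ones taken from the patches.

    #include <cstdint>
    #include <cstdio>

    // Mirrors the constants used in the AudioConstants.h hunk above;
    // redefined locally only so this sketch is self-contained.
    namespace AudioConstants {
        typedef int16_t AudioSample;
        const int SAMPLE_SIZE = sizeof(AudioSample);          // 2 bytes per sample
        const int NETWORK_FRAME_BYTES_STEREO = 960;
        const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / SAMPLE_SIZE;
    }

    int main() {
        // bytes -> samples, as done when servicing an output read request
        int maxSize = 1920;                                    // hypothetical read size, in bytes
        int samplesRequested = maxSize / AudioConstants::SAMPLE_SIZE;

        // samples -> bytes, as done when measuring input ring-buffer occupancy
        int samplesAvailable = 480;                            // hypothetical ring-buffer occupancy
        int bytesInInputRingBuffer = samplesAvailable * AudioConstants::SAMPLE_SIZE;

        printf("%d bytes -> %d samples; %d samples -> %d bytes (network frame = %d stereo samples)\n",
               maxSize, samplesRequested, samplesAvailable, bytesInInputRingBuffer,
               AudioConstants::NETWORK_FRAME_SAMPLES_STEREO);
        return 0;
    }

Keeping the divisor as a named constant rather than a repeated sizeof(int16_t) means a future change to the sample type only touches the AudioSample typedef, which is the design choice the last commit applies across AudioClient.cpp.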