clean up audio stats

Zach Pomerantz 2016-09-13 15:14:59 -07:00
parent d83600a22e
commit 9792d025fa
7 changed files with 93 additions and 85 deletions

AudioStatsDialog.cpp

@@ -82,7 +82,7 @@ AudioStatsDialog::AudioStatsDialog(QWidget* parent) :
_upstreamInjectedID = addChannel(_form, _upstreamInjectedStats, COLOR0);
connect(averageUpdateTimer, SIGNAL(timeout()), this, SLOT(updateTimerTimeout()));
averageUpdateTimer->start(1000);
averageUpdateTimer->start(200);
}
int AudioStatsDialog::addChannel(QFormLayout* form, QVector<QString>& stats, const unsigned color) {
@@ -110,137 +110,135 @@ void AudioStatsDialog::renderStats() {
// Clear current stats from all vectors
clearAllChannels();
double mixerRingBufferFrames = 0.0,
outputRingBufferFrames = 0.0;
double audioInputBufferLatency = 0.0,
inputRingBufferLatency = 0.0,
networkRoundtripLatency = 0.0,
mixerRingBufferLatency = 0.0,
outputRingBufferLatency = 0.0,
audioOutputBufferLatency = 0.0;
inputRingBufferLatency = 0.0,
networkRoundtripLatency = 0.0,
mixerRingBufferLatency = 0.0,
outputRingBufferLatency = 0.0,
audioOutputBufferLatency = 0.0;
AudioStreamStats downstreamAudioStreamStats = _stats->getMixerDownstreamStats();
SharedNodePointer audioMixerNodePointer = DependencyManager::get<NodeList>()->soloNodeOfType(NodeType::AudioMixer);
if (!audioMixerNodePointer.isNull()) {
if (SharedNodePointer audioMixerNodePointer = DependencyManager::get<NodeList>()->soloNodeOfType(NodeType::AudioMixer)) {
mixerRingBufferFrames = (double)_stats->getMixerAvatarStreamStats()._framesAvailableAverage;
outputRingBufferFrames = (double)_stats->getMixerDownstreamStats()._framesAvailableAverage;
audioInputBufferLatency = (double)_stats->getAudioInputMsecsReadStats().getWindowAverage();
inputRingBufferLatency = (double)_stats->getInputRungBufferMsecsAvailableStats().getWindowAverage();
networkRoundtripLatency = (double) audioMixerNodePointer->getPingMs();
mixerRingBufferLatency = (double)_stats->getMixerAvatarStreamStats()._framesAvailableAverage *
(double)AudioConstants::NETWORK_FRAME_MSECS;
outputRingBufferLatency = (double)downstreamAudioStreamStats._framesAvailableAverage *
(double)AudioConstants::NETWORK_FRAME_MSECS;
mixerRingBufferLatency = mixerRingBufferFrames * (double)AudioConstants::NETWORK_FRAME_MSECS;
outputRingBufferLatency = outputRingBufferFrames * (double)AudioConstants::NETWORK_FRAME_MSECS;
audioOutputBufferLatency = (double)_stats->getAudioOutputMsecsUnplayedStats().getWindowAverage();
}
double totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency
+ outputRingBufferLatency + audioOutputBufferLatency;
QString stats = "Audio input buffer: %1ms - avg msecs of samples read to the audio input buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(audioInputBufferLatency, 'f', 2)));
stats = "Input ring buffer: %1ms - avg msecs of samples read to the input ring buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(inputRingBufferLatency, 'f', 2)));
stats = "Network to mixer: %1ms - half of last ping value calculated by the node list";
_audioMixerStats.push_back(stats.arg(QString::number((networkRoundtripLatency / 2.0), 'f', 2)));
stats = "Network to client: %1ms - half of last ping value calculated by the node list";
_audioMixerStats.push_back(stats.arg(QString::number((mixerRingBufferLatency / 2.0),'f', 2)));
stats = "Output ring buffer: %1ms - avg msecs of samples in output ring buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency,'f', 2)));
stats = "Audio output buffer: %1ms - avg msecs of samples in audio output buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency,'f', 2)));
stats = "TOTAL: %1ms - avg msecs of samples in audio output buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(totalLatency, 'f', 2)));
double totalLatency = audioInputBufferLatency + inputRingBufferLatency + mixerRingBufferLatency
+ outputRingBufferLatency + audioOutputBufferLatency + networkRoundtripLatency;
QString stats;
_audioMixerStats.push_back("PIPELINE (averaged over the past 10s)");
stats = "Input Read:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(audioInputBufferLatency, 'f', 0)));
stats = "Input Ring:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(inputRingBufferLatency, 'f', 0)));
stats = "Network (client->mixer):\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0)));
stats = "Mixer Ring:\t%1 ms (%2 frames)";
_audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency, 'f', 0),
QString::number(mixerRingBufferFrames, 'f', 0)));
stats = "Network (mixer->client):\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0)));
stats = "Output Ring:\t%1 ms (%2 frames)";
_audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency, 'f', 0),
QString::number(outputRingBufferFrames, 'f', 0)));
stats = "Output Read:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(audioOutputBufferLatency, 'f', 0)));
stats = "TOTAL:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(totalLatency, 'f', 0)));
const MovingMinMaxAvg<quint64>& packetSentTimeGaps = _stats->getPacketSentTimeGaps();
_upstreamClientStats.push_back("\nUpstream Mic Audio Packets Sent Gaps (by client):");
stats = "Inter-packet timegaps (overall) | min: %1, max: %2, avg: %3";
stats = "Inter-packet timegaps";
_upstreamClientStats.push_back(stats);
stats = "overall min:\t%1, max:\t%2, avg:\t%3";
stats = stats.arg(formatUsecTime(packetSentTimeGaps.getMin()),
formatUsecTime(packetSentTimeGaps.getMax()),
formatUsecTime(packetSentTimeGaps.getAverage()));
_upstreamClientStats.push_back(stats);
stats = "Inter-packet timegaps (last 30s) | min: %1, max: %2, avg: %3";
stats = "last window min:\t%1, max:\t%2, avg:\t%3";
stats = stats.arg(formatUsecTime(packetSentTimeGaps.getWindowMin()),
formatUsecTime(packetSentTimeGaps.getWindowMax()),
formatUsecTime(packetSentTimeGaps.getWindowAverage()));
_upstreamClientStats.push_back(stats);
_upstreamMixerStats.push_back("\nUpstream mic audio stats (received and reported by audio-mixer):");
_upstreamMixerStats.push_back("\nMIXER STREAM");
_upstreamMixerStats.push_back("(this client's remote mixer stream performance)");
renderAudioStreamStats(&_stats->getMixerAvatarStreamStats(), &_upstreamMixerStats, true);
renderAudioStreamStats(&_stats->getMixerAvatarStreamStats(), &_upstreamMixerStats);
_downstreamStats.push_back("\nDownstream mixed audio stats:");
_downstreamStats.push_back("\nCLIENT STREAM");
AudioStreamStats downstreamStats = _stats->getMixerDownstreamStats();
renderAudioStreamStats(&downstreamStats, &_downstreamStats, true);
renderAudioStreamStats(&downstreamStats, &_downstreamStats);
if (_shouldShowInjectedStreams) {
foreach(const AudioStreamStats& injectedStreamAudioStats, _stats->getMixerInjectedStreamStatsMap()) {
stats = "\nUpstream injected audio stats: stream ID: %1";
stats = "\nINJECTED STREAM (ID: %1)";
stats = stats.arg(injectedStreamAudioStats._streamIdentifier.toString());
_upstreamInjectedStats.push_back(stats);
renderAudioStreamStats(&injectedStreamAudioStats, &_upstreamInjectedStats, true);
renderAudioStreamStats(&injectedStreamAudioStats, &_upstreamInjectedStats);
}
}
}
void AudioStatsDialog::renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamStats, bool isDownstreamStats) {
void AudioStatsDialog::renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamStats) {
QString stats = "Packet loss | overall: %1% (%2 lost), last_30s: %3% (%4 lost)";
QString stats = "Packet Loss";
audioStreamStats->push_back(stats);
stats = "overall:\t%1%\t(%2 lost), window:\t%3%\t(%4 lost)";
stats = stats.arg(QString::number((int)(streamStats->_packetStreamStats.getLostRate() * 100.0f)),
QString::number((int)(streamStats->_packetStreamStats._lost)),
QString::number((int)(streamStats->_packetStreamWindowStats.getLostRate() * 100.0f)),
QString::number((int)(streamStats->_packetStreamWindowStats._lost)));
QString::number((int)(streamStats->_packetStreamStats._lost)),
QString::number((int)(streamStats->_packetStreamWindowStats.getLostRate() * 100.0f)),
QString::number((int)(streamStats->_packetStreamWindowStats._lost)));
audioStreamStats->push_back(stats);
if (isDownstreamStats) {
stats = "Ringbuffer frames | desired: %1, avg_available(10s): %2 + %3, available: %4 + %5";
stats = stats.arg(QString::number(streamStats->_desiredJitterBufferFrames),
QString::number(streamStats->_framesAvailableAverage),
QString::number((int)((float)_stats->getAudioInputMsecsReadStats().getWindowAverage() /
AudioConstants::NETWORK_FRAME_MSECS)),
QString::number(streamStats->_framesAvailable),
QString::number((int)(_stats->getAudioOutputMsecsUnplayedStats().getCurrentIntervalLastSample() /
AudioConstants::NETWORK_FRAME_MSECS)));
audioStreamStats->push_back(stats);
} else {
stats = "Ringbuffer frames | desired: %1, avg_available(10s): %2, available: %3";
stats = stats.arg(QString::number(streamStats->_desiredJitterBufferFrames),
QString::number(streamStats->_framesAvailableAverage),
QString::number(streamStats->_framesAvailable));
audioStreamStats->push_back(stats);
}
stats = "Ringbuffer stats | starves: %1, prev_starve_lasted: %2, frames_dropped: %3, overflows: %4";
stats = "Ringbuffer";
audioStreamStats->push_back(stats);
stats = "available frames (avg):\t%1\t(%2), desired:\t%3";
stats = stats.arg(QString::number(streamStats->_framesAvailable),
QString::number(streamStats->_framesAvailableAverage),
QString::number(streamStats->_desiredJitterBufferFrames));
audioStreamStats->push_back(stats);
stats = "starves:\t%1, last starve duration:\t%2, drops:\t%3, overflows:\t%4";
stats = stats.arg(QString::number(streamStats->_starveCount),
QString::number(streamStats->_consecutiveNotMixedCount),
QString::number(streamStats->_framesDropped),
QString::number(streamStats->_overflowCount));
QString::number(streamStats->_consecutiveNotMixedCount),
QString::number(streamStats->_framesDropped),
QString::number(streamStats->_overflowCount));
audioStreamStats->push_back(stats);
stats = "Inter-packet timegaps";
audioStreamStats->push_back(stats);
stats = "Inter-packet timegaps (overall) | min: %1, max: %2, avg: %3";
stats = "overall min:\t%1, max:\t%2, avg:\t%3";
stats = stats.arg(formatUsecTime(streamStats->_timeGapMin),
formatUsecTime(streamStats->_timeGapMax),
formatUsecTime(streamStats->_timeGapAverage));
formatUsecTime(streamStats->_timeGapMax),
formatUsecTime(streamStats->_timeGapAverage));
audioStreamStats->push_back(stats);
stats = "Inter-packet timegaps (last 30s) | min: %1, max: %2, avg: %3";
stats = "last window min:\t%1, max:\t%2, avg:\t%3";
stats = stats.arg(formatUsecTime(streamStats->_timeGapWindowMin),
formatUsecTime(streamStats->_timeGapWindowMax),
formatUsecTime(streamStats->_timeGapWindowAverage));
formatUsecTime(streamStats->_timeGapWindowMax),
formatUsecTime(streamStats->_timeGapWindowAverage));
audioStreamStats->push_back(stats);
}
void AudioStatsDialog::clearAllChannels() {
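The reworked renderStats() above turns ring-buffer occupancy (frames) into milliseconds via AudioConstants::NETWORK_FRAME_MSECS and sums the per-stage figures into one pipeline total, displaying the round-trip ping as two halves. A standalone sketch of that arithmetic, with a placeholder frame duration and a hypothetical helper name:

    #include <QString>
    #include <QStringList>

    // Placeholder for AudioConstants::NETWORK_FRAME_MSECS (duration of one network audio frame).
    static const double NETWORK_FRAME_MSECS = 10.0;

    // Sketch: convert per-stage latencies and frame counts into the strings the dialog renders.
    QStringList sketchPipelineStats(double inputReadMs, double inputRingMs, double pingMs,
                                    double mixerRingFrames, double outputRingFrames, double outputReadMs) {
        double mixerRingMs = mixerRingFrames * NETWORK_FRAME_MSECS;
        double outputRingMs = outputRingFrames * NETWORK_FRAME_MSECS;
        // The full ping is added once to the total, but shown as two network legs of pingMs / 2.
        double totalMs = inputReadMs + inputRingMs + pingMs + mixerRingMs + outputRingMs + outputReadMs;

        QStringList lines;
        lines << QString("Network (client->mixer):\t%1 ms").arg(QString::number(pingMs / 2, 'f', 0));
        lines << QString("Mixer Ring:\t%1 ms (%2 frames)").arg(QString::number(mixerRingMs, 'f', 0),
                                                               QString::number(mixerRingFrames, 'f', 0));
        lines << QString("TOTAL:\t%1 ms").arg(QString::number(totalMs, 'f', 0));
        return lines;
    }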

AudioStatsDialog.h

@@ -74,7 +74,7 @@ private:
void updateStats(QVector<QString>& stats, const int channelID);
void renderStats();
void clearAllChannels();
void renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamstats, bool isDownstreamStats);
void renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamstats);
const AudioIOStats* _stats;

AudioClient.cpp

@@ -47,6 +47,7 @@
#include "PositionalAudioStream.h"
#include "AudioClientLogging.h"
#include "AudioLogging.h"
#include "AudioClient.h"
@@ -1418,6 +1419,7 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
int bytesWritten;
if ((samplesPopped = _receivedAudioStream.popSamples((int)samplesRequested, false)) > 0) {
qCDebug(audiostream, "Read %d samples from buffer (%d available)", samplesPopped, _receivedAudioStream.getSamplesAvailable());
AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput();
lastPopOutput.readSamples((int16_t*)data, samplesPopped);
bytesWritten = samplesPopped * sizeof(int16_t);

AudioLogging.cpp

@@ -14,7 +14,7 @@
Q_LOGGING_CATEGORY(audio, "hifi.audio")
#if DEV_BUILD || PR_BUILD
Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtDebugMsg)
#else
Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtInfoMsg)
#else
Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtWarningMsg)
#endif
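Both variants above use the three-argument form of Q_LOGGING_CATEGORY, where the last argument sets the category's default severity: anything below it is dropped unless re-enabled by logging rules. A minimal illustration with an example category of its own (names here are illustrative, not this file's):

    #include <QLoggingCategory>

    // Illustrative category declared at QtInfoMsg: qCDebug() output is suppressed by default,
    // while qCInfo() and higher are emitted.
    Q_LOGGING_CATEGORY(exampleStream, "example.audio-stream", QtInfoMsg)

    void logExample(int frames) {
        qCDebug(exampleStream, "Dropped %d frames", frames); // below the threshold, filtered out
        qCInfo(exampleStream, "Dropped %d frames", frames);  // at the threshold, emitted
    }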

InboundAudioStream.cpp

@@ -163,6 +163,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
int framesAvailable = _ringBuffer.framesAvailable();
// if this stream was starved, check if we're still starved.
if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
qCInfo(audiostream, "Starve ended");
_isStarved = false;
}
// if the ringbuffer exceeds the desired size by more than the threshold specified,
@@ -176,8 +177,8 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
_oldFramesDropped += framesToDrop;
qCDebug(audiostream, "Dropped %d frames", framesToDrop);
qCDebug(audiostream, "Resetted current jitter frames");
qCInfo(audiostream, "Dropped %d frames", framesToDrop);
qCInfo(audiostream, "Reset current jitter frames");
}
framesAvailableChanged();
@@ -232,8 +233,8 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
_currentJitterBufferFrames -= numSilentFramesToDrop;
_silentFramesDropped += numSilentFramesToDrop;
qCDebug(audiostream, "Dropped %d silent frames", numSilentFramesToDrop);
qCDebug(audiostream, "Set current jitter frames to %d", _currentJitterBufferFrames);
qCInfo(audiostream, "Dropped %d silent frames", numSilentFramesToDrop);
qCInfo(audiostream, "Set current jitter frames to %d (dropped)", _currentJitterBufferFrames);
_framesAvailableStat.reset();
}
@@ -315,13 +316,17 @@ void InboundAudioStream::framesAvailableChanged() {
if (_framesAvailableStat.getElapsedUsecs() >= FRAMES_AVAILABLE_STAT_WINDOW_USECS) {
_currentJitterBufferFrames = (int)ceil(_framesAvailableStat.getAverage());
qCDebug(audiostream, "Set current jitter frames to %d", _currentJitterBufferFrames);
qCInfo(audiostream, "Set current jitter frames to %d (changed)", _currentJitterBufferFrames);
_framesAvailableStat.reset();
}
}
void InboundAudioStream::setToStarved() {
if (!_isStarved) {
qCInfo(audiostream, "Starved");
}
_consecutiveNotMixedCount = 0;
_starveCount++;
// if we have more than the desired frames when setToStarved() is called, then we'll immediately
@@ -364,7 +369,7 @@ void InboundAudioStream::setToStarved() {
// make sure _desiredJitterBufferFrames does not become lower here
if (calculatedJitterBufferFrames >= _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
qCDebug(audiostream, "Set desired jitter frames to %d", _desiredJitterBufferFrames);
qCInfo(audiostream, "Set desired jitter frames to %d (starved)", _desiredJitterBufferFrames);
}
}
}
@@ -454,7 +459,7 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
/ (float)AudioConstants::NETWORK_FRAME_USECS);
if (calculatedJitterBufferFrames < _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
qCDebug(audiostream, "Set desired jitter frames to %d", _desiredJitterBufferFrames);
qCInfo(audiostream, "Set desired jitter frames to %d (reduced)", _desiredJitterBufferFrames);
}
_timeGapStatsForDesiredReduction.clearNewStatsAvailableFlag();
}
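The desired-jitter-frames updates logged above (both the increase on repeated starves and the windowed reduction) follow the same sizing rule: the buffer should be deep enough, in whole network frames, to cover an observed inter-packet gap. A standalone sketch of that calculation, with a placeholder value standing in for AudioConstants::NETWORK_FRAME_USECS:

    #include <cmath>
    #include <QtGlobal>

    // Placeholder for AudioConstants::NETWORK_FRAME_USECS (one network audio frame, in microseconds).
    static const float NETWORK_FRAME_USECS = 10000.0f;

    // Sketch: how many whole frames of buffering are needed to ride out a gap of gapUsecs
    // between received packets.
    int framesToCoverGap(quint64 gapUsecs) {
        return (int)std::ceil((float)gapUsecs / NETWORK_FRAME_USECS);
    }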

InboundAudioStream.h

@@ -161,6 +161,7 @@ public:
int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); }
int getFramesAvailable() const { return _ringBuffer.framesAvailable(); }
double getFramesAvailableAverage() const { return _framesAvailableStat.getAverage(); }
int getSamplesAvailable() const { return _ringBuffer.samplesAvailable(); }
bool isStarved() const { return _isStarved; }
bool hasStarted() const { return _hasStarted; }

MixedProcessedAudioStream.cpp

@@ -10,6 +10,7 @@
//
#include "MixedProcessedAudioStream.h"
#include "AudioLogging.h"
static const int STEREO_FACTOR = 2;
@@ -56,6 +57,7 @@ int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray&
emit processSamples(decodedBuffer, outputBuffer);
_ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());
qCDebug(audiostream, "Wrote %d samples to buffer (%d available)", outputBuffer.size() / (int)sizeof(int16_t), getSamplesAvailable());
return packetAfterStreamProperties.size();
}
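The per-packet write log above (and its read-side counterpart in AudioClient) sits at debug level, so with the category defaults shown in AudioLogging.cpp it stays quiet outside dev/PR builds. If needed, it can be switched on at runtime through Qt's standard logging rules; a sketch (standard Qt mechanism, not something this commit adds):

    #include <QLoggingCategory>

    // Sketch: enable debug output for the "hifi.audio-stream" category at runtime.
    void enableAudioStreamDebug() {
        QLoggingCategory::setFilterRules(QStringLiteral("hifi.audio-stream.debug=true"));
    }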