all stats added, needs testing; created PacketStreamStats struct
This commit is contained in:
parent 81e168f657
commit 01f10024ae

9 changed files with 223 additions and 108 deletions

@@ -188,13 +188,13 @@ AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const Positio
    streamStats._ringBufferOverflowCount = ringBuffer->getOverflowCount();
    streamStats._ringBufferSilentFramesDropped = ringBuffer->getSilentFramesDropped();

    streamStats._packetsReceived = streamSequenceNumberStats->getNumReceived();
    streamStats._packetsUnreasonable = streamSequenceNumberStats->getNumUnreasonable();
    streamStats._packetsEarly = streamSequenceNumberStats->getNumEarly();
    streamStats._packetsLate = streamSequenceNumberStats->getNumLate();
    streamStats._packetsLost = streamSequenceNumberStats->getNumLost();
    streamStats._packetsRecovered = streamSequenceNumberStats->getNumRecovered();
    streamStats._packetsDuplicate = streamSequenceNumberStats->getNumDuplicate();
    streamStats._packetStreamStats._numReceived = streamSequenceNumberStats->getNumReceived();
    streamStats._packetStreamStats._numUnreasonable = streamSequenceNumberStats->getNumUnreasonable();
    streamStats._packetStreamStats._numEarly = streamSequenceNumberStats->getNumEarly();
    streamStats._packetStreamStats._numLate = streamSequenceNumberStats->getNumLate();
    streamStats._packetStreamStats._numLost = streamSequenceNumberStats->getNumLost();
    streamStats._packetStreamStats._numRecovered = streamSequenceNumberStats->getNumRecovered();
    streamStats._packetStreamStats._numDuplicate = streamSequenceNumberStats->getNumDuplicate();

    return streamStats;
}

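Note: since SequenceNumberStats now exposes these counters as a single PacketStreamStats (see getStats() in SequenceNumberStats.h later in this diff), the seven field-by-field copies above could likely collapse into one assignment. A possible simplification, not part of this commit:

    // equivalent to the per-field copies above, assuming getStats() returns
    // the same counters wrapped in a PacketStreamStats
    streamStats._packetStreamStats = streamSequenceNumberStats->getStats();
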
@@ -261,9 +261,9 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
            + " not mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
            + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
            + " silents dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
            + " early:" + QString::number(streamStats._packetsEarly)
            + " late:" + QString::number(streamStats._packetsLate)
            + " lost:" + QString::number(streamStats._packetsLost)
            + " early:" + QString::number(streamStats._packetStreamStats._numEarly)
            + " late:" + QString::number(streamStats._packetStreamStats._numLate)
            + " lost:" + QString::number(streamStats._packetStreamStats._numLost)
            + " min gap:" + QString::number(streamStats._timeGapMin)
            + " max gap:" + QString::number(streamStats._timeGapMax)
            + " avg gap:" + QString::number(streamStats._timeGapAverage, 'g', 2)

@@ -284,9 +284,9 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
            + " not mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
            + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
            + " silents dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
            + " early:" + QString::number(streamStats._packetsEarly)
            + " late:" + QString::number(streamStats._packetsLate)
            + " lost:" + QString::number(streamStats._packetsLost)
            + " early:" + QString::number(streamStats._packetStreamStats._numEarly)
            + " late:" + QString::number(streamStats._packetStreamStats._numLate)
            + " lost:" + QString::number(streamStats._packetStreamStats._numLost)
            + " min gap:" + QString::number(streamStats._timeGapMin)
            + " max gap:" + QString::number(streamStats._timeGapMax)
            + " avg gap:" + QString::number(streamStats._timeGapAverage, 'g', 2)

@@ -48,6 +48,11 @@ static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_

static const int NUMBER_OF_NOISE_SAMPLE_FRAMES = 300;

static const int AUDIO_STREAM_STATS_HISTORY_SIZE = 30;

const int TIME_GAPS_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
const int TIME_GAP_STATS_WINDOW_INTERVALS = 30;

// Mute icon configration
static const int MUTE_ICON_SIZE = 24;

@@ -103,8 +108,13 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
    _scopeInput(0),
    _scopeOutputLeft(0),
    _scopeOutputRight(0),
    _audioMixerAvatarStreamStats(),
    _outgoingAvatarAudioSequenceNumber(0)
    _audioMixerAvatarStreamAudioStats(),
    _audioMixerAvatarStreamPacketStatsHistory(AUDIO_STREAM_STATS_HISTORY_SIZE),
    _outgoingAvatarAudioSequenceNumber(0),
    _incomingStreamPacketStatsHistory(AUDIO_STREAM_STATS_HISTORY_SIZE),
    _interframeTimeGapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS),
    _starveCount(0),
    _consecutiveNotMixedCount(0)
{
    // clear the array of locally injected samples
    memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);

@@ -120,9 +130,20 @@ void Audio::init(QGLWidget *parent) {

void Audio::reset() {
    _ringBuffer.reset();

    _starveCount = 0;
    _consecutiveNotMixedCount = 0;

    _audioMixerAvatarStreamAudioStats = AudioStreamStats();
    _audioMixerInjectedStreamAudioStatsMap.clear();

    _audioMixerAvatarStreamPacketStatsHistory.clear();
    _audioMixerInjectedStreamPacketStatsHistoryMap.clear();

    _outgoingAvatarAudioSequenceNumber = 0;
    _audioMixerInjectedStreamStatsMap.clear();
    _incomingMixedAudioSequenceNumberStats.reset();

    _incomingStreamPacketStatsHistory.clear();
}

QAudioDeviceInfo getNamedAudioDeviceForMode(QAudio::Mode mode, const QString& deviceName) {

@@ -672,7 +693,7 @@ void Audio::handleAudioInput() {
        // memcpy our orientation
        memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
        currentPacketPtr += sizeof(headOrientation);

        if (randFloat() < 0.95f)
            nodeList->writeDatagram(audioDataPacket, numAudioBytes + leadingBytes, audioMixer);
        _outgoingAvatarAudioSequenceNumber++;

@@ -689,7 +710,9 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {

    _totalPacketsReceived++;

    double timeDiff = (double)_timeSinceLastReceived.nsecsElapsed() / 1000000.0; // ns to ms
    double timeDiff = (double)_timeSinceLastReceived.nsecsElapsed() / 1000.0; // ns to us
    _interframeTimeGapStats.update((quint64)timeDiff);
    timeDiff /= USECS_PER_MSEC; // us to ms
    _timeSinceLastReceived.start();

    // Discard first few received packets for computing jitter (often they pile up on start)

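Note: the gap is now accumulated in microseconds for the interframe statistics and only then converted to milliseconds for the existing jitter estimate. A rough usage sketch of the gap tracking, assuming only the MovingMinMaxAvg calls and constants that appear elsewhere in this diff plus Qt's QElapsedTimer:

    // sketch only: mirrors the pattern above, outside of the Audio class
    MovingMinMaxAvg<quint64> gapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS);
    QElapsedTimer sinceLastPacket;
    sinceLastPacket.start();

    // ... for each received audio packet:
    quint64 gapUsecs = (quint64)(sinceLastPacket.nsecsElapsed() / 1000); // ns -> us
    gapStats.update(gapUsecs);
    sinceLastPacket.restart();

    // lifetime stats vs. stats over the most recent ~30s window
    qDebug() << gapStats.getMin() << gapStats.getMax() << gapStats.getAverage();
    qDebug() << gapStats.getWindowMin() << gapStats.getWindowMax() << gapStats.getWindowAverage();
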
@@ -726,7 +749,8 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
    quint8 appendFlag = *(reinterpret_cast<const quint16*>(dataAt));
    dataAt += sizeof(quint8);
    if (!appendFlag) {
        _audioMixerInjectedStreamStatsMap.clear();
        _audioMixerInjectedStreamAudioStatsMap.clear();
        _audioMixerInjectedStreamPacketStatsHistoryMap.clear();
    }

    // parse the number of stream stats structs to follow

@@ -740,11 +764,21 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
        dataAt += sizeof(AudioStreamStats);

        if (streamStats._streamType == PositionalAudioRingBuffer::Microphone) {
            _audioMixerAvatarStreamStats = streamStats;
            _audioMixerAvatarStreamAudioStats = streamStats;
            _audioMixerAvatarStreamPacketStatsHistory.insert(streamStats._packetStreamStats);
        } else {
            _audioMixerInjectedStreamStatsMap[streamStats._streamIdentifier] = streamStats;
            if (!_audioMixerInjectedStreamAudioStatsMap.contains(streamStats._streamIdentifier)) {
                _audioMixerInjectedStreamPacketStatsHistoryMap.insert(streamStats._streamIdentifier,
                    RingBufferHistory<PacketStreamStats>(AUDIO_STREAM_STATS_HISTORY_SIZE));
            }
            _audioMixerInjectedStreamAudioStatsMap[streamStats._streamIdentifier] = streamStats;
            _audioMixerInjectedStreamPacketStatsHistoryMap[streamStats._streamIdentifier].insert(streamStats._packetStreamStats);
        }
    }

    // when an audio stream stats packet is received, also record the current packets received and lost
    // in the packet loss stats history
    _incomingStreamPacketStatsHistory.insert(_incomingMixedAudioSequenceNumberStats.getStats());
}

// NOTE: numSamples is the total number of single channel samples, since callers will always call this with stereo

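The payload layout implied by this parser appears to be [append flag][stream count][raw AudioStreamStats structs]. A sketch of the read loop under that assumption; the function name parseStreamStatsPayload and the width of the count field are hypothetical, not taken from the commit:

    // hypothetical helper, sketching the payload walk used above
    void parseStreamStatsPayload(const char* dataAt, QHash<QUuid, AudioStreamStats>& statsMap) {
        quint8 appendFlag = *reinterpret_cast<const quint8*>(dataAt);
        dataAt += sizeof(quint8);
        if (!appendFlag) {
            statsMap.clear();                       // fresh snapshot rather than an append
        }

        quint16 numStreamStats = *reinterpret_cast<const quint16*>(dataAt); // count field; width assumed
        dataAt += sizeof(quint16);

        for (quint16 i = 0; i < numStreamStats; i++) {
            AudioStreamStats streamStats;
            memcpy(&streamStats, dataAt, sizeof(AudioStreamStats));
            dataAt += sizeof(AudioStreamStats);
            statsMap[streamStats._streamIdentifier] = streamStats; // route by _streamType as in the hunk above
        }
    }
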
@@ -867,6 +901,9 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
            //qDebug() << "Audio output just starved.";
            _ringBuffer.setIsStarved(true);
            _numFramesDisplayStarve = 10;

            _starveCount++;
            _consecutiveNotMixedCount = 0;
        }

        int numNetworkOutputSamples;

@@ -886,6 +923,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
        if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) {
            // We are still waiting for enough samples to begin playback
            // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback;
            _consecutiveNotMixedCount++;
        } else {
            int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;

@@ -1515,3 +1553,25 @@ int Audio::calculateNumberOfFrameSamples(int numBytes) {
    int frameSamples = (int)(numBytes * CALLBACK_ACCELERATOR_RATIO + 0.5f) / sizeof(int16_t);
    return frameSamples;
}

void Audio::calculatePacketLossRate(const RingBufferHistory<PacketStreamStats>& statsHistory,
    float& overallLossRate, float& windowLossRate) const {

    int numHistoryEntries = statsHistory.getNumEntries();
    if (numHistoryEntries == 0) {
        overallLossRate = 0.0f;
        windowLossRate = 0.0f;
    } else {
        const PacketStreamStats& newestStats = *statsHistory.getNewestEntry();
        overallLossRate = (float)newestStats._numLost / newestStats._numReceived;

        if (numHistoryEntries == 1) {
            windowLossRate = overallLossRate;
        } else {
            int age = std::min(numHistoryEntries-1, AUDIO_STREAM_STATS_HISTORY_SIZE-1);
            const PacketStreamStats& oldestStats = *statsHistory.get(age);
            windowLossRate = (float)(newestStats._numLost - oldestStats._numLost)
                / (newestStats._numReceived - oldestStats._numReceived);
        }
    }
}

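A worked example of the two rates computed above, assuming one stats snapshot per second and the 30-entry history: with (numReceived, numLost) = (1000, 20) thirty seconds ago and (1300, 50) now, the overall rate is 50/1300 ≈ 3.8% while the windowed rate is (50 - 20)/(1300 - 1000) = 10%. As a sketch:

    PacketStreamStats oldest;            // snapshot ~30s back in the history
    oldest._numReceived = 1000;
    oldest._numLost = 20;

    PacketStreamStats newest;            // most recent snapshot
    newest._numReceived = 1300;
    newest._numLost = 50;

    float overallLossRate = (float)newest._numLost / newest._numReceived;   // ~0.038
    float windowLossRate = (float)(newest._numLost - oldest._numLost)
        / (newest._numReceived - oldest._numReceived);                      // 0.10
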
@@ -17,6 +17,8 @@

#include "InterfaceConfig.h"
#include "AudioStreamStats.h"
#include "RingBufferHistory.h"
#include "MovingMinMaxAvg.h"

#include <QAudio>
#include <QAudioInput>

@@ -107,8 +109,22 @@ public slots:
    float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
    void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }

    const AudioStreamStats& getAudioMixerAvatarStreamStats() const { return _audioMixerAvatarStreamStats; }
    const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamStatsMap() const { return _audioMixerInjectedStreamStatsMap; }
    const AudioRingBuffer& getDownstreamRingBuffer() const { return _ringBuffer; }

    int getDesiredJitterBufferFrames() const { return _jitterBufferSamples / _ringBuffer.getNumFrameSamples(); }

    int getStarveCount() const { return _starveCount; }
    int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; }

    const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
    const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }
    const RingBufferHistory<PacketStreamStats>& getAudioMixerAvatarStreamPacketStatsHistory() const { return _audioMixerAvatarStreamPacketStatsHistory; }
    const QHash<QUuid, RingBufferHistory<PacketStreamStats> >& getAudioMixerInjectedStreamPacketStatsHistoryMap() const { return _audioMixerInjectedStreamPacketStatsHistoryMap; }
    const RingBufferHistory<PacketStreamStats>& getIncomingStreamPacketStatsHistory() const { return _incomingStreamPacketStatsHistory; }
    const MovingMinMaxAvg<quint64>& getInterframeTimeGapStats() const { return _interframeTimeGapStats; }

    void calculatePacketLossRate(const RingBufferHistory<PacketStreamStats>& statsHistory,
        float& overallLossRate, float& windowLossRate) const;

signals:
    bool muteToggled();

@@ -241,11 +257,21 @@ private:
    QByteArray* _scopeOutputLeft;
    QByteArray* _scopeOutputRight;

    AudioStreamStats _audioMixerAvatarStreamStats;
    QHash<QUuid, AudioStreamStats> _audioMixerInjectedStreamStatsMap;
    int _starveCount;
    int _consecutiveNotMixedCount;

    AudioStreamStats _audioMixerAvatarStreamAudioStats;
    QHash<QUuid, AudioStreamStats> _audioMixerInjectedStreamAudioStatsMap;

    RingBufferHistory<PacketStreamStats> _audioMixerAvatarStreamPacketStatsHistory;
    QHash<QUuid, RingBufferHistory<PacketStreamStats> > _audioMixerInjectedStreamPacketStatsHistoryMap;

    quint16 _outgoingAvatarAudioSequenceNumber;
    SequenceNumberStats _incomingMixedAudioSequenceNumberStats;

    RingBufferHistory<PacketStreamStats> _incomingStreamPacketStatsHistory;

    MovingMinMaxAvg<quint64> _interframeTimeGapStats;
};

@@ -288,15 +288,12 @@ void Stats::display(

    Audio* audio = Application::getInstance()->getAudio();
    const AudioStreamStats& audioMixerAvatarStreamStats = audio->getAudioMixerAvatarStreamStats();
    const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamStatsMap = audio->getAudioMixerInjectedStreamStatsMap();
    const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamAudioStatsMap = audio->getAudioMixerInjectedStreamAudioStatsMap();

    lines = _expanded ? 12 + (audioMixerInjectedStreamStatsMap.size() + 1) * 3: 3;
    lines = _expanded ? 11 + (audioMixerInjectedStreamAudioStatsMap.size() + 2) * 3 : 3;
    drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
    horizontalOffset += 5;

    char audioJitter[30];
    sprintf(audioJitter,
        "Buffer msecs %.1f",

@@ -328,7 +325,7 @@ void Stats::display(
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelMaxPing, color);

    char audioMixerStatsLabelString[] = "AudioMixer stats:";
    char streamStatsFormatLabelString[] = "early/late/lost";
    char streamStatsFormatLabelString[] = "lost%/30s_lost%";
    char streamStatsFormatLabelString2[] = "avail/currJ/desiredJ";
    char streamStatsFormatLabelString3[] = "gaps: min/max/avg, starv/ovfl";
    char streamStatsFormatLabelString4[] = "30s gaps: (same), notmix/sdrop";

@@ -349,61 +346,98 @@ void Stats::display(
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);

    const SequenceNumberStats& downstreamAudioSequenceNumberStats = audio->getIncomingMixedAudioSequenceNumberStats();
    char downstreamAudioStatsString[30];
    /* const SequenceNumberStats& downstreamAudioSequenceNumberStats = audio->getIncomingMixedAudioSequenceNumberStats();

    sprintf(downstreamAudioStatsString, " mix: %d/%d/%d, %d", downstreamAudioSequenceNumberStats.getNumEarly(),
        downstreamAudioSequenceNumberStats.getNumLate(), downstreamAudioSequenceNumberStats.getNumLost(),
        audio->getJitterBufferSamples() / NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);*/

    float packetLossRate, packetLossRate30s;

    char downstreamAudioStatsString[30];

    audio->calculatePacketLossRate(audio->getIncomingStreamPacketStatsHistory(), packetLossRate, packetLossRate30s);

    sprintf(downstreamAudioStatsString, " mix: %.1f%%/%.1f%%, %d/?/%d", packetLossRate*100.0f, packetLossRate30s*100.0f,
        audio->getDownstreamRingBuffer().framesAvailable(), audio->getDesiredJitterBufferFrames());

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

    const MovingMinMaxAvg<quint64>& timeGapStats = audio->getInterframeTimeGapStats();

    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %d/%d", timeGapStats.getMin(), timeGapStats.getMax(),
        timeGapStats.getAverage(), audio->getStarveCount(), audio->getDownstreamRingBuffer().getOverflowCount());

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %d/?", timeGapStats.getWindowMin(), timeGapStats.getWindowMax(),
        timeGapStats.getWindowAverage(), audio->getConsecutiveNotMixedCount());

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);


    char upstreamLabelString[] = " Upstream:";
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamLabelString, color);

    char upstreamAudioStatsString[30];
    sprintf(upstreamAudioStatsString, " mic: %d/%d/%d, %d/%d/%d", audioMixerAvatarStreamStats._packetsEarly,
        audioMixerAvatarStreamStats._packetsLate, audioMixerAvatarStreamStats._packetsLost,
        audioMixerAvatarStreamStats._ringBufferFramesAvailable, audioMixerAvatarStreamStats._ringBufferCurrentJitterBufferFrames,
        audioMixerAvatarStreamStats._ringBufferDesiredJitterBufferFrames);

    const AudioStreamStats& audioMixerAvatarAudioStreamStats = audio->getAudioMixerAvatarStreamAudioStats();

    audio->calculatePacketLossRate(audio->getAudioMixerAvatarStreamPacketStatsHistory(), packetLossRate, packetLossRate30s);

    sprintf(upstreamAudioStatsString, " mic: %.1f%%/%.1f%%, %u/%u/%u", packetLossRate*100.0f, packetLossRate30s*100.0f,
        audioMixerAvatarAudioStreamStats._ringBufferFramesAvailable, audioMixerAvatarAudioStreamStats._ringBufferCurrentJitterBufferFrames,
        audioMixerAvatarAudioStreamStats._ringBufferDesiredJitterBufferFrames);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

    sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarStreamStats._timeGapMin,
        audioMixerAvatarStreamStats._timeGapMax, audioMixerAvatarStreamStats._timeGapAverage,
        audioMixerAvatarStreamStats._ringBufferStarveCount, audioMixerAvatarStreamStats._ringBufferOverflowCount);
    sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapMin,
        audioMixerAvatarAudioStreamStats._timeGapMax, audioMixerAvatarAudioStreamStats._timeGapAverage,
        audioMixerAvatarAudioStreamStats._ringBufferStarveCount, audioMixerAvatarAudioStreamStats._ringBufferOverflowCount);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

    sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarStreamStats._timeGapMovingMin,
        audioMixerAvatarStreamStats._timeGapMovingMax, audioMixerAvatarStreamStats._timeGapMovingAverage,
        audioMixerAvatarStreamStats._ringBufferConsecutiveNotMixedCount, audioMixerAvatarStreamStats._ringBufferSilentFramesDropped);
    sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapMovingMin,
        audioMixerAvatarAudioStreamStats._timeGapMovingMax, audioMixerAvatarAudioStreamStats._timeGapMovingAverage,
        audioMixerAvatarAudioStreamStats._ringBufferConsecutiveNotMixedCount, audioMixerAvatarAudioStreamStats._ringBufferSilentFramesDropped);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

    foreach(AudioStreamStats injectedStreamStats, audioMixerInjectedStreamStatsMap) {
        sprintf(upstreamAudioStatsString, " inj: %d/%d/%d, %d/%d/%d", injectedStreamStats._packetsEarly,
            injectedStreamStats._packetsLate, injectedStreamStats._packetsLost,
            injectedStreamStats._ringBufferFramesAvailable, injectedStreamStats._ringBufferCurrentJitterBufferFrames,
            injectedStreamStats._ringBufferDesiredJitterBufferFrames);
    QHash<QUuid, RingBufferHistory<PacketStreamStats> > audioMixerInjectedStreamPacketStatsHistoryMap
        = audio->getAudioMixerInjectedStreamPacketStatsHistoryMap();

    foreach(const AudioStreamStats& injectedStreamAudioStats, audioMixerInjectedStreamAudioStatsMap) {

        audio->calculatePacketLossRate(audioMixerInjectedStreamPacketStatsHistoryMap[injectedStreamAudioStats._streamIdentifier],
            packetLossRate, packetLossRate30s);

        sprintf(upstreamAudioStatsString, " inj: %.1f%%/%.1f%%, %u/%u/%u", packetLossRate*100.0f, packetLossRate30s*100.0f,
            injectedStreamAudioStats._ringBufferFramesAvailable, injectedStreamAudioStats._ringBufferCurrentJitterBufferFrames,
            injectedStreamAudioStats._ringBufferDesiredJitterBufferFrames);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamStats._timeGapMin,
            injectedStreamStats._timeGapMax, injectedStreamStats._timeGapAverage,
            injectedStreamStats._ringBufferStarveCount, injectedStreamStats._ringBufferOverflowCount);
        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapMin,
            injectedStreamAudioStats._timeGapMax, injectedStreamAudioStats._timeGapAverage,
            injectedStreamAudioStats._ringBufferStarveCount, injectedStreamAudioStats._ringBufferOverflowCount);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamStats._timeGapMovingMin,
            injectedStreamStats._timeGapMovingMax, injectedStreamStats._timeGapMovingAverage,
            injectedStreamStats._ringBufferConsecutiveNotMixedCount, injectedStreamStats._ringBufferSilentFramesDropped);
        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapMovingMin,
            injectedStreamAudioStats._timeGapMovingMax, injectedStreamAudioStats._timeGapMovingAverage,
            injectedStreamAudioStats._ringBufferConsecutiveNotMixedCount, injectedStreamAudioStats._ringBufferSilentFramesDropped);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

@@ -67,6 +67,8 @@ public:

    int samplesAvailable() const;
    int framesAvailable() const { return samplesAvailable() / _numFrameSamples; }

    int getNumFrameSamples() const { return _numFrameSamples; }

    bool isNotStarvedOrHasMinimumSamples(int numRequiredSamples) const;

@@ -13,6 +13,7 @@
#define hifi_AudioStreamStats_h

#include "PositionalAudioRingBuffer.h"
#include "SequenceNumberStats.h"

class AudioStreamStats {
public:

@@ -32,13 +33,7 @@ public:
        _ringBufferConsecutiveNotMixedCount(0),
        _ringBufferOverflowCount(0),
        _ringBufferSilentFramesDropped(0),
        _packetsReceived(0),
        _packetsUnreasonable(0),
        _packetsEarly(0),
        _packetsLate(0),
        _packetsLost(0),
        _packetsRecovered(0),
        _packetsDuplicate(0)
        _packetStreamStats()
    {}

    PositionalAudioRingBuffer::Type _streamType;

@@ -59,13 +54,7 @@ public:
    quint32 _ringBufferOverflowCount;
    quint32 _ringBufferSilentFramesDropped;

    quint32 _packetsReceived;
    quint32 _packetsUnreasonable;
    quint32 _packetsEarly;
    quint32 _packetsLate;
    quint32 _packetsLost;
    quint32 _packetsRecovered;
    quint32 _packetsDuplicate;
    PacketStreamStats _packetStreamStats;
};

#endif // hifi_AudioStreamStats_h

@@ -16,26 +16,14 @@
SequenceNumberStats::SequenceNumberStats()
    : _lastReceived(std::numeric_limits<quint16>::max()),
    _missingSet(),
    _numReceived(0),
    _numUnreasonable(0),
    _numEarly(0),
    _numLate(0),
    _numLost(0),
    _numRecovered(0),
    _numDuplicate(0),
    _stats(),
    _lastSenderUUID()
{
}

void SequenceNumberStats::reset() {
    _missingSet.clear();
    _numReceived = 0;
    _numUnreasonable = 0;
    _numEarly = 0;
    _numLate = 0;
    _numLost = 0;
    _numRecovered = 0;
    _numDuplicate = 0;
    _stats = PacketStreamStats();
}

static const int UINT16_RANGE = std::numeric_limits<uint16_t>::max() + 1;

@@ -51,9 +39,9 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
    }

    // determine our expected sequence number... handle rollover appropriately
    quint16 expected = _numReceived > 0 ? _lastReceived + (quint16)1 : incoming;
    quint16 expected = _stats._numReceived > 0 ? _lastReceived + (quint16)1 : incoming;

    _numReceived++;
    _stats._numReceived++;

    if (incoming == expected) { // on time
        _lastReceived = incoming;

@@ -80,7 +68,7 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
            // ignore packet if gap is unreasonable
            qDebug() << "ignoring unreasonable sequence number:" << incoming
                << "previous:" << _lastReceived;
            _numUnreasonable++;
            _stats._numUnreasonable++;
            return;
        }

@@ -92,8 +80,8 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
                qDebug() << ">>>>>>>> missing gap=" << (incomingInt - expectedInt);
            }

            _numEarly++;
            _numLost += (incomingInt - expectedInt);
            _stats._numEarly++;
            _stats._numLost += (incomingInt - expectedInt);
            _lastReceived = incoming;

            // add all sequence numbers that were skipped to the missing sequence numbers list

@@ -110,7 +98,7 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
            if (wantExtraDebugging) {
                qDebug() << "this packet is later than expected...";
            }
            _numLate++;
            _stats._numLate++;

            // do not update _lastReceived; it shouldn't become smaller

@@ -119,13 +107,13 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
            if (wantExtraDebugging) {
                qDebug() << "found it in _missingSet";
            }
            _numLost--;
            _numRecovered++;
            _stats._numLost--;
            _stats._numRecovered++;
        } else {
            if (wantExtraDebugging) {
                qDebug() << "sequence:" << incoming << "was NOT found in _missingSet and is probably a duplicate";
            }
            _numDuplicate++;
            _stats._numDuplicate++;
        }
    }
}

@@ -17,6 +17,26 @@

const int MAX_REASONABLE_SEQUENCE_GAP = 1000;

class PacketStreamStats {
public:
    PacketStreamStats()
        : _numReceived(0),
        _numUnreasonable(0),
        _numEarly(0),
        _numLate(0),
        _numLost(0),
        _numRecovered(0),
        _numDuplicate(0)
    {}
    quint32 _numReceived;
    quint32 _numUnreasonable;
    quint32 _numEarly;
    quint32 _numLate;
    quint32 _numLost;
    quint32 _numRecovered;
    quint32 _numDuplicate;
};

class SequenceNumberStats {
public:
    SequenceNumberStats();

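A short sketch of how the counters now roll up into the embedded PacketStreamStats, with behavior inferred from the sequenceNumberReceived() hunks earlier in this diff:

    SequenceNumberStats seqStats;
    seqStats.sequenceNumberReceived(0);   // on time
    seqStats.sequenceNumberReceived(1);   // on time
    seqStats.sequenceNumberReceived(4);   // early: 2 and 3 are provisionally counted as lost
    seqStats.sequenceNumberReceived(2);   // late, and recovers one of the two provisional losses

    const PacketStreamStats& stats = seqStats.getStats();
    // stats._numReceived == 4, stats._numEarly == 1, stats._numLate == 1,
    // stats._numRecovered == 1, stats._numLost == 1 (sequence number 3 is still missing)
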
@@ -25,27 +45,22 @@ public:
    void sequenceNumberReceived(quint16 incoming, QUuid senderUUID = QUuid(), const bool wantExtraDebugging = false);
    void pruneMissingSet(const bool wantExtraDebugging = false);

    quint32 getNumReceived() const { return _numReceived; }
    quint32 getNumUnreasonable() const { return _numUnreasonable; }
    quint32 getNumOutOfOrder() const { return _numEarly + _numLate; }
    quint32 getNumEarly() const { return _numEarly; }
    quint32 getNumLate() const { return _numLate; }
    quint32 getNumLost() const { return _numLost; }
    quint32 getNumRecovered() const { return _numRecovered; }
    quint32 getNumDuplicate() const { return _numDuplicate; }
    quint32 getNumReceived() const { return _stats._numReceived; }
    quint32 getNumUnreasonable() const { return _stats._numUnreasonable; }
    quint32 getNumOutOfOrder() const { return _stats._numEarly + _stats._numLate; }
    quint32 getNumEarly() const { return _stats._numEarly; }
    quint32 getNumLate() const { return _stats._numLate; }
    quint32 getNumLost() const { return _stats._numLost; }
    quint32 getNumRecovered() const { return _stats._numRecovered; }
    quint32 getNumDuplicate() const { return _stats._numDuplicate; }
    const PacketStreamStats& getStats() const { return _stats; }
    const QSet<quint16>& getMissingSet() const { return _missingSet; }

private:
    quint16 _lastReceived;
    QSet<quint16> _missingSet;

    quint32 _numReceived;
    quint32 _numUnreasonable;
    quint32 _numEarly;
    quint32 _numLate;
    quint32 _numLost;
    quint32 _numRecovered;
    quint32 _numDuplicate;
    PacketStreamStats _stats;

    QUuid _lastSenderUUID;
};

@@ -20,7 +20,7 @@ class RingBufferHistory {

public:

    RingBufferHistory(int capacity)
    RingBufferHistory(int capacity = 10)
        : _size(capacity + 1),
        _capacity(capacity),
        _newestEntryAt(0),

@@ -47,6 +47,7 @@
        delete[] _buffer;
        _buffer = new T[_size];
        memcpy(_buffer, rhs._buffer, _size*sizeof(T));
        return *this;
    }

    ~RingBufferHistory() {

@@ -120,7 +121,7 @@ public:
        return *this;
    }

    Iterator& operator++(int) {
    Iterator operator++(int) {
        Iterator tmp(*this);
        ++(*this);
        return tmp;

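The post-increment operator now correctly returns the iterator by value; with the previous Iterator& return type it would hand back a reference to the local tmp, which is destroyed when the function returns. The default capacity added to the constructor also appears to let RingBufferHistory be stored by value in QHash, whose operator[] default-constructs missing entries. The standard post-increment idiom, as a standalone sketch:

    struct Counter {
        int value = 0;

        Counter& operator++() {       // pre-increment: advance in place, return *this
            ++value;
            return *this;
        }

        Counter operator++(int) {     // post-increment: copy, advance, return the copy by value
            Counter tmp(*this);
            ++(*this);
            return tmp;
        }
    };
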