cleanup AudioMixer stats to be properly nested
commit 68998f4423
parent 4e6979cac8
5 changed files with 92 additions and 92 deletions
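The diff below replaces the flat, pre-formatted stat strings with nested QJsonObject groups, so individual values stay machine-readable instead of being baked into one display string. As a rough sketch of how a consumer might read one of the new groups (the top-level key name and the surrounding setup are assumptions; only the nested key names come from the diff):

    #include <QtCore/QJsonObject>
    #include <QtCore/QDebug>

    // Sketch only: statsObject is assumed to be the object built in
    // AudioMixer::sendStatsPacket(), and "readPendingDatagramStats" is an
    // assumed top-level key; the line that inserts the group into statsObject
    // is not part of the visible hunk.
    void printReadPendingDatagramStats(const QJsonObject& statsObject) {
        QJsonObject group = statsObject.value("readPendingDatagramStats").toObject();
        QJsonObject calls = group.value("calls").toObject();
        qDebug() << "calls/sec avg (30s):" << calls.value("calls_per_sec_avg_30s").toDouble();
        qDebug() << "calls last sec:" << calls.value("calls_last_sec").toDouble();
        QJsonObject perCall = group.value("packets_per_call").toObject();
        qDebug() << "packets/call avg (30s):" << perCall.value("avg_30s").toDouble();
    }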
@@ -586,12 +586,41 @@ void AudioMixer::sendStatsPacket() {
     _sumListeners = 0;
     _sumMixes = 0;
     _numStatFrames = 0;

-    statsObject["readPendingDatagram_calls_stats"] = getReadPendingDatagramsCallsPerSecondsStatsString();
-    statsObject["readPendingDatagram_packets_per_call_stats"] = getReadPendingDatagramsPacketsPerCallStatsString();
-    statsObject["readPendingDatagram_packets_time_per_call_stats"] = getReadPendingDatagramsTimeStatsString();
-    statsObject["readPendingDatagram_hashmatch_time_per_call_stats"] = getReadPendingDatagramsHashMatchTimeStatsString();
+    QJsonObject readPendingDatagramStats;
+
+    QJsonObject rpdCallsStats;
+    rpdCallsStats["calls_per_sec_avg_30s"] = _readPendingCallsPerSecondStats.getWindowAverage();
+    rpdCallsStats["calls_last_sec"] = _readPendingCallsPerSecondStats.getLastCompleteIntervalStats().getSum() + 0.5;
+
+    readPendingDatagramStats["calls"] = rpdCallsStats;
+
+    QJsonObject packetsPerCallStats;
+    packetsPerCallStats["avg_30s"] = _datagramsReadPerCallStats.getWindowAverage();
+    packetsPerCallStats["avg_1s"] = _datagramsReadPerCallStats.getLastCompleteIntervalStats().getAverage();
+
+    readPendingDatagramStats["packets_per_call"] = packetsPerCallStats;
+
+    QJsonObject packetsTimePerCallStats;
+    packetsTimePerCallStats["usecs_per_call_avg_30s"] = _timeSpentPerCallStats.getWindowAverage();
+    packetsTimePerCallStats["usecs_per_call_avg_1s"] = _timeSpentPerCallStats.getLastCompleteIntervalStats().getAverage();
+    packetsTimePerCallStats["prct_time_in_call_30s"] =
+        _timeSpentPerCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND) * 100.0;
+    packetsTimePerCallStats["prct_time_in_call_1s"] =
+        _timeSpentPerCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;
+
+    readPendingDatagramStats["packets_time_per_call"] = packetsTimePerCallStats;
+
+    QJsonObject hashMatchTimePerCallStats;
+    hashMatchTimePerCallStats["usecs_per_hashmatch_avg_30s"] = _timeSpentPerHashMatchCallStats.getWindowAverage();
+    hashMatchTimePerCallStats["usecs_per_hashmatch_avg_1s"]
+        = _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getAverage();
+    hashMatchTimePerCallStats["prct_time_in_hashmatch_30s"]
+        = _timeSpentPerHashMatchCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0;
+    hashMatchTimePerCallStats["prct_time_in_hashmatch_1s"]
+        = _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;
+    readPendingDatagramStats["hashmatch_time_per_call"] = hashMatchTimePerCallStats;

     auto nodeList = DependencyManager::get<NodeList>();
     int clientNumber = 0;
@@ -600,7 +629,7 @@ void AudioMixer::sendStatsPacket() {
         clientNumber++;
         AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
         if (clientData) {
-            statsObject["jitterStats." + node->getUUID().toString()] = clientData->getAudioStreamStatsString();
+            statsObject["jitterStats." + node->getUUID().toString()] = clientData->getAudioStreamStats();
         }
     });
@@ -883,34 +912,6 @@ void AudioMixer::perSecondActions() {
     _timeSpentPerHashMatchCallStats.currentIntervalComplete();
 }

-QString AudioMixer::getReadPendingDatagramsCallsPerSecondsStatsString() const {
-    QString result = "calls_per_sec_avg_30s: " + QString::number(_readPendingCallsPerSecondStats.getWindowAverage(), 'f', 2)
-        + " calls_last_sec: " + QString::number(_readPendingCallsPerSecondStats.getLastCompleteIntervalStats().getSum() + 0.5, 'f', 0);
-    return result;
-}
-
-QString AudioMixer::getReadPendingDatagramsPacketsPerCallStatsString() const {
-    QString result = "pkts_per_call_avg_30s: " + QString::number(_datagramsReadPerCallStats.getWindowAverage(), 'f', 2)
-        + " pkts_per_call_avg_1s: " + QString::number(_datagramsReadPerCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2);
-    return result;
-}
-
-QString AudioMixer::getReadPendingDatagramsTimeStatsString() const {
-    QString result = "usecs_per_call_avg_30s: " + QString::number(_timeSpentPerCallStats.getWindowAverage(), 'f', 2)
-        + " usecs_per_call_avg_1s: " + QString::number(_timeSpentPerCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2)
-        + " prct_time_in_call_30s: " + QString::number(_timeSpentPerCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0, 'f', 6) + "%"
-        + " prct_time_in_call_1s: " + QString::number(_timeSpentPerCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0, 'f', 6) + "%";
-    return result;
-}
-
-QString AudioMixer::getReadPendingDatagramsHashMatchTimeStatsString() const {
-    QString result = "usecs_per_hashmatch_avg_30s: " + QString::number(_timeSpentPerHashMatchCallStats.getWindowAverage(), 'f', 2)
-        + " usecs_per_hashmatch_avg_1s: " + QString::number(_timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2)
-        + " prct_time_in_hashmatch_30s: " + QString::number(_timeSpentPerHashMatchCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0, 'f', 6) + "%"
-        + " prct_time_in_hashmatch_1s: " + QString::number(_timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0, 'f', 6) + "%";
-    return result;
-}
-
 void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
     if (settingsObject.contains(AUDIO_BUFFER_GROUP_KEY)) {
         QJsonObject audioBufferGroupObject = settingsObject[AUDIO_BUFFER_GROUP_KEY].toObject();
@@ -65,11 +65,6 @@ private:
     bool shouldMute(float quietestFrame);

-    QString getReadPendingDatagramsCallsPerSecondsStatsString() const;
-    QString getReadPendingDatagramsPacketsPerCallStatsString() const;
-    QString getReadPendingDatagramsTimeStatsString() const;
-    QString getReadPendingDatagramsHashMatchTimeStatsString() const;
-
     void parseSettingsObject(const QJsonObject& settingsObject);

     float _trailingSleepRatio;
@@ -196,68 +196,68 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
     }
 }

-QString AudioMixerClientData::getAudioStreamStatsString() const {
-    QString result;
+QJsonObject AudioMixerClientData::getAudioStreamStats() const {
+    QJsonObject result;
     AudioStreamStats streamStats = _downstreamAudioStreamStats;
-    result += "DOWNSTREAM.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
-        + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
-        + " available:" + QString::number(streamStats._framesAvailable)
-        + " starves:" + QString::number(streamStats._starveCount)
-        + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
-        + " overflows:" + QString::number(streamStats._overflowCount)
-        + " silents_dropped: ?"
-        + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
-        + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
-        + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
-        + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
-        + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
-        + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
-        + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
-        + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
+    result["DOWNSTREAM.desired"] = streamStats._desiredJitterBufferFrames;
+    result["DOWNSTREAM.available_avg_10s"] = streamStats._framesAvailableAverage;
+    result["DOWNSTREAM.available"] = (double) streamStats._framesAvailable;
+    result["DOWNSTREAM.starves"] = (double) streamStats._starveCount;
+    result["DOWNSTREAM.not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
+    result["DOWNSTREAM.overflows"] = (double) streamStats._overflowCount;
+    result["DOWNSTREAM.lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
+    result["DOWNSTREAM.lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
+    result["DOWNSTREAM.min_gap"] = formatUsecTime(streamStats._timeGapMin);
+    result["DOWNSTREAM.max_gap"] = formatUsecTime(streamStats._timeGapMax);
+    result["DOWNSTREAM.avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
+    result["DOWNSTREAM.min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
+    result["DOWNSTREAM.max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
+    result["DOWNSTREAM.avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);

     AvatarAudioStream* avatarAudioStream = getAvatarAudioStream();
     if (avatarAudioStream) {
         AudioStreamStats streamStats = avatarAudioStream->getAudioStreamStats();
-        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
-            + " desired_calc:" + QString::number(avatarAudioStream->getCalculatedJitterBufferFrames())
-            + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
-            + " available:" + QString::number(streamStats._framesAvailable)
-            + " starves:" + QString::number(streamStats._starveCount)
-            + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
-            + " overflows:" + QString::number(streamStats._overflowCount)
-            + " silents_dropped:" + QString::number(streamStats._framesDropped)
-            + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
-            + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
-            + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
-            + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
-            + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
-            + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
-            + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
-            + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
+        result["UPSTREAM.mic.desired"] = streamStats._desiredJitterBufferFrames;
+        result["UPSTREAM.desired_calc"] = avatarAudioStream->getCalculatedJitterBufferFrames();
+        result["UPSTREAM.available_avg_10s"] = streamStats._framesAvailableAverage;
+        result["UPSTREAM.available"] = (double) streamStats._framesAvailable;
+        result["UPSTREAM.starves"] = (double) streamStats._starveCount;
+        result["UPSTREAM.not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
+        result["UPSTREAM.overflows"] = (double) streamStats._overflowCount;
+        result["UPSTREAM.silents_dropped"] = (double) streamStats._framesDropped;
+        result["UPSTREAM.lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
+        result["UPSTREAM.lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
+        result["UPSTREAM.min_gap"] = formatUsecTime(streamStats._timeGapMin);
+        result["UPSTREAM.max_gap"] = formatUsecTime(streamStats._timeGapMax);
+        result["UPSTREAM.avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
+        result["UPSTREAM.min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
+        result["UPSTREAM.max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
+        result["UPSTREAM.avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);
     } else {
-        result = "mic unknown";
+        // TOOD: How should we handle this case?
+        // result = "mic unknown";
     }

     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
         if (i.value()->getType() == PositionalAudioStream::Injector) {
             AudioStreamStats streamStats = i.value()->getAudioStreamStats();
-            result += " UPSTREAM.inj.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
-                + " desired_calc:" + QString::number(i.value()->getCalculatedJitterBufferFrames())
-                + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
-                + " available:" + QString::number(streamStats._framesAvailable)
-                + " starves:" + QString::number(streamStats._starveCount)
-                + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
-                + " overflows:" + QString::number(streamStats._overflowCount)
-                + " silents_dropped:" + QString::number(streamStats._framesDropped)
-                + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
-                + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
-                + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
-                + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
-                + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
-                + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
-                + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
-                + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
+            result["UPSTREAM.inj.desired"] = streamStats._desiredJitterBufferFrames;
+            result["UPSTREAM.desired_calc"] = i.value()->getCalculatedJitterBufferFrames();
+            result["UPSTREAM.available_avg_10s"] = streamStats._framesAvailableAverage;
+            result["UPSTREAM.available"] = (double) streamStats._framesAvailable;
+            result["UPSTREAM.starves"] = (double) streamStats._starveCount;
+            result["UPSTREAM.not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
+            result["UPSTREAM.overflows"] = (double) streamStats._overflowCount;
+            result["UPSTREAM.silents_dropped"] = (double) streamStats._framesDropped;
+            result["UPSTREAM.lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
+            result["UPSTREAM.lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
+            result["UPSTREAM.min_gap"] = formatUsecTime(streamStats._timeGapMin);
+            result["UPSTREAM.max_gap"] = formatUsecTime(streamStats._timeGapMax);
+            result["UPSTREAM.avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
+            result["UPSTREAM.min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
+            result["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
+            result["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);
         }
     }
     return result;
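One detail worth noting in the new getAudioStreamStats() body above: counters are inserted as (double). QJsonValue has constructors for bool, int, qint64, double, and QString, but none for unsigned integer types, so assigning an unsigned counter directly would be ambiguous and fail to compile; casting to double first avoids that. A minimal illustration (the variable here is hypothetical, and the stat members are presumably unsigned types):

    #include <QtCore/QJsonObject>

    void castExample() {
        QJsonObject obj;
        quint32 starveCount = 3;
        // obj["starves"] = starveCount;       // ambiguous: no QJsonValue
        //                                     // constructor takes an unsigned type
        obj["starves"] = (double) starveCount; // unambiguous, as in the diff above
    }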
@@ -12,6 +12,8 @@
 #ifndef hifi_AudioMixerClientData_h
 #define hifi_AudioMixerClientData_h

+#include <QtCore/QJsonObject>
+
 #include <AABox.h>
 #include <AudioFormat.h> // For AudioFilterHSF1s and _penumbraFilter
 #include <AudioBuffer.h> // For AudioFilterHSF1s and _penumbraFilter
@@ -46,7 +48,7 @@ public:
     void removeDeadInjectedStreams();

-    QString getAudioStreamStatsString() const;
+    QJsonObject getAudioStreamStats() const;

     void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode);
@@ -73,8 +73,10 @@ qint64 NodeList::sendStats(const QJsonObject& statsObject, const HifiSockAddr& d
     // get a QStringList using JSONBreakableMarshal
     QStringList statsStringList = JSONBreakableMarshal::toStringList(statsObject, "");

-    qDebug() << "Stats string list is" << statsStringList;
+    foreach(const QString& statsItem, statsStringList) {
+        qDebug() << statsItem;
+    }

     // enumerate the resulting strings, breaking them into MTU sized packets
     return 0;
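For context on the NodeList change: JSONBreakableMarshal::toStringList() flattens the (now nested) stats object into a QStringList that, per the comment, is meant to be broken into MTU-sized packets; the actual packetizing is still a stub that returns 0. The exact output format of that class is not shown in this diff, so purely as an illustration of the idea, a flattener along these lines would turn the nested groups into dotted key/value strings:

    #include <QtCore/QJsonObject>
    #include <QtCore/QJsonValue>
    #include <QtCore/QStringList>
    #include <QtCore/QVariant>

    // Hypothetical sketch, not the real JSONBreakableMarshal implementation:
    // recursively flatten a nested stats object into "dotted.path=value" strings.
    static QStringList flattenStats(const QJsonObject& object, const QString& prefix = QString()) {
        QStringList result;
        for (auto it = object.constBegin(); it != object.constEnd(); ++it) {
            QString path = it.key();
            if (!prefix.isEmpty()) {
                path = prefix + "." + path;
            }
            if (it.value().isObject()) {
                result += flattenStats(it.value().toObject(), path);
            } else {
                result += path + "=" + it.value().toVariant().toString();
            }
        }
        return result;
    }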