mirror of https://github.com/overte-org/overte.git
cleanup audio-mixer stats, add username interpolation
commit d602c71346 (parent 5e2a22a006)
2 changed files with 83 additions and 55 deletions
assignment-client/src/audio/AudioMixer.cpp

@@ -622,19 +622,30 @@ void AudioMixer::sendStatsPacket() {
     readPendingDatagramStats["hashmatch_time_per_call"] = hashMatchTimePerCallStats;
 
     statsObject["read_pending_datagrams"] = readPendingDatagramStats;
 
-    auto nodeList = DependencyManager::get<NodeList>();
-    int clientNumber = 0;
-
     // add stats for each listerner
+    auto nodeList = DependencyManager::get<NodeList>();
+    QJsonObject listenerStats;
+
     nodeList->eachNode([&](const SharedNodePointer& node) {
-        clientNumber++;
         AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
         if (clientData) {
-            statsObject["jitterStats." + node->getUUID().toString()] = clientData->getAudioStreamStats();
+            QJsonObject nodeStats;
+            QString uuidString = uuidStringWithoutCurlyBraces(node->getUUID());
+
+            nodeStats["outbound_kbps"] = node->getOutboundBandwidth();
+            nodeStats[USERNAME_UUID_REPLACEMENT_STATS_KEY] = uuidString;
+
+            nodeStats["jitter"] = clientData->getAudioStreamStats();
+
+            listenerStats[uuidString] = nodeStats;
         }
     });
 
+    // add the listeners object to the root object
+    statsObject["listeners"] = listenerStats;
+
+    // send off the stats packets
     ThreadedAssignment::addPacketStatsAndSendStatsPacket(statsObject);
 }
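For orientation: the hunk above moves the per-listener stats from flat "jitterStats.<uuid>" keys on the root object into a nested "listeners" object keyed by UUID. QUuid::toString() wraps the UUID in curly braces, which is why uuidStringWithoutCurlyBraces() is used for the keys. A rough sketch of the resulting shape, with illustrative values only (the concrete string behind USERNAME_UUID_REPLACEMENT_STATS_KEY is defined elsewhere in the tree and is not shown in this diff):

    "listeners": {
        "513b8a62-1b12-4f0a-9a1b-0e2d3c4d5e6f": {
            "outbound_kbps": 96.5,
            <USERNAME_UUID_REPLACEMENT_STATS_KEY>: "513b8a62-1b12-4f0a-9a1b-0e2d3c4d5e6f",
            "jitter": {
                "downstream": { "desired": 1, "available": 2, ... },
                "upstream": { "mic.desired": 1, ... },
                "injectors": [ ... ]
            }
        }
    }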
assignment-client/src/audio/AudioMixerClientData.cpp

@@ -9,7 +9,8 @@
 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
 
-#include <QDebug>
+#include <QtCore/QDebug>
+#include <QtCore/QJsonArray>
 
 #include <PacketHeaders.h>
 #include <UUID.h>
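The switch to the module-qualified <QtCore/QDebug> form is a style cleanup; the new <QtCore/QJsonArray> include is what the injectorArray introduced in the next hunk needs. A related detail visible throughout this commit: QJsonValue has constructors for bool, int, qint64, double, QString, QJsonArray, and QJsonObject, but none for unsigned integer types, which is presumably why the stream counters are cast to (double) before insertion. A minimal sketch of both points (not code from the commit):

    #include <QtCore/QJsonArray>
    #include <QtCore/QJsonObject>

    QJsonObject exampleStats() {
        quint32 starveCount = 3;                 // stand-in for a stats counter
        QJsonObject stats;
        stats["starves"] = (double) starveCount; // unsigned needs an explicit cast

        QJsonArray injectors;                    // same pattern as injectorArray below
        injectors.push_back(stats);

        QJsonObject result;
        result["injectors"] = injectors;
        return result;
    }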
@@ -198,68 +199,84 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
 
 QJsonObject AudioMixerClientData::getAudioStreamStats() const {
     QJsonObject result;
 
+    QJsonObject downstreamStats;
     AudioStreamStats streamStats = _downstreamAudioStreamStats;
-    result["DOWNSTREAM.desired"] = streamStats._desiredJitterBufferFrames;
-    result["DOWNSTREAM.available_avg_10s"] = streamStats._framesAvailableAverage;
-    result["DOWNSTREAM.available"] = (double) streamStats._framesAvailable;
-    result["DOWNSTREAM.starves"] = (double) streamStats._starveCount;
-    result["DOWNSTREAM.not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
-    result["DOWNSTREAM.overflows"] = (double) streamStats._overflowCount;
-    result["DOWNSTREAM.lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
-    result["DOWNSTREAM.lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
-    result["DOWNSTREAM.min_gap"] = formatUsecTime(streamStats._timeGapMin);
-    result["DOWNSTREAM.max_gap"] = formatUsecTime(streamStats._timeGapMax);
-    result["DOWNSTREAM.avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
-    result["DOWNSTREAM.min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
-    result["DOWNSTREAM.max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
-    result["DOWNSTREAM.avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);
+    downstreamStats["desired"] = streamStats._desiredJitterBufferFrames;
+    downstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
+    downstreamStats["available"] = (double) streamStats._framesAvailable;
+    downstreamStats["starves"] = (double) streamStats._starveCount;
+    downstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
+    downstreamStats["overflows"] = (double) streamStats._overflowCount;
+    downstreamStats["lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
+    downstreamStats["lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
+    downstreamStats["min_gap"] = formatUsecTime(streamStats._timeGapMin);
+    downstreamStats["max_gap"] = formatUsecTime(streamStats._timeGapMax);
+    downstreamStats["avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
+    downstreamStats["min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
+    downstreamStats["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
+    downstreamStats["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);
+
+    result["downstream"] = downstreamStats;
 
     AvatarAudioStream* avatarAudioStream = getAvatarAudioStream();
 
     if (avatarAudioStream) {
+        QJsonObject upstreamStats;
+
         AudioStreamStats streamStats = avatarAudioStream->getAudioStreamStats();
-        result["UPSTREAM.mic.desired"] = streamStats._desiredJitterBufferFrames;
-        result["UPSTREAM.desired_calc"] = avatarAudioStream->getCalculatedJitterBufferFrames();
-        result["UPSTREAM.available_avg_10s"] = streamStats._framesAvailableAverage;
-        result["UPSTREAM.available"] = (double) streamStats._framesAvailable;
-        result["UPSTREAM.starves"] = (double) streamStats._starveCount;
-        result["UPSTREAM.not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
-        result["UPSTREAM.overflows"] = (double) streamStats._overflowCount;
-        result["UPSTREAM.silents_dropped"] = (double) streamStats._framesDropped;
-        result["UPSTREAM.lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
-        result["UPSTREAM.lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
-        result["UPSTREAM.min_gap"] = formatUsecTime(streamStats._timeGapMin);
-        result["UPSTREAM.max_gap"] = formatUsecTime(streamStats._timeGapMax);
-        result["UPSTREAM.avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
-        result["UPSTREAM.min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
-        result["UPSTREAM.max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
-        result["UPSTREAM.avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);
+        upstreamStats["mic.desired"] = streamStats._desiredJitterBufferFrames;
+        upstreamStats["desired_calc"] = avatarAudioStream->getCalculatedJitterBufferFrames();
+        upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
+        upstreamStats["available"] = (double) streamStats._framesAvailable;
+        upstreamStats["starves"] = (double) streamStats._starveCount;
+        upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
+        upstreamStats["overflows"] = (double) streamStats._overflowCount;
+        upstreamStats["silents_dropped"] = (double) streamStats._framesDropped;
+        upstreamStats["lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
+        upstreamStats["lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
+        upstreamStats["min_gap"] = formatUsecTime(streamStats._timeGapMin);
+        upstreamStats["max_gap"] = formatUsecTime(streamStats._timeGapMax);
+        upstreamStats["avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
+        upstreamStats["min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
+        upstreamStats["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
+        upstreamStats["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);
+
+        result["upstream"] = upstreamStats;
     } else {
-        // TOOD: How should we handle this case?
-        // result = "mic unknown";
+        result["upstream"] = "mic unknown";
     }
 
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
+    QJsonArray injectorArray;
     for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
         if (i.value()->getType() == PositionalAudioStream::Injector) {
+            QJsonObject upstreamStats;
+
             AudioStreamStats streamStats = i.value()->getAudioStreamStats();
-            result["UPSTREAM.inj.desired"] = streamStats._desiredJitterBufferFrames;
-            result["UPSTREAM.desired_calc"] = i.value()->getCalculatedJitterBufferFrames();
-            result["UPSTREAM.available_avg_10s"] = streamStats._framesAvailableAverage;
-            result["UPSTREAM.available"] = (double) streamStats._framesAvailable;
-            result["UPSTREAM.starves"] = (double) streamStats._starveCount;
-            result["UPSTREAM.not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
-            result["UPSTREAM.overflows"] = (double) streamStats._overflowCount;
-            result["UPSTREAM.silents_dropped"] = (double) streamStats._framesDropped;
-            result["UPSTREAM.lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
-            result["UPSTREAM.lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
-            result["UPSTREAM.min_gap"] = formatUsecTime(streamStats._timeGapMin);
-            result["UPSTREAM.max_gap"] = formatUsecTime(streamStats._timeGapMax);
-            result["UPSTREAM.avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
-            result["UPSTREAM.min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
-            result["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
-            result["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);
+            upstreamStats["inj.desired"] = streamStats._desiredJitterBufferFrames;
+            upstreamStats["desired_calc"] = i.value()->getCalculatedJitterBufferFrames();
+            upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
+            upstreamStats["available"] = (double) streamStats._framesAvailable;
+            upstreamStats["starves"] = (double) streamStats._starveCount;
+            upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
+            upstreamStats["overflows"] = (double) streamStats._overflowCount;
+            upstreamStats["silents_dropped"] = (double) streamStats._framesDropped;
+            upstreamStats["lost%"] = streamStats._packetStreamStats.getLostRate() * 100.0f;
+            upstreamStats["lost%_30s"] = streamStats._packetStreamWindowStats.getLostRate() * 100.0f;
+            upstreamStats["min_gap"] = formatUsecTime(streamStats._timeGapMin);
+            upstreamStats["max_gap"] = formatUsecTime(streamStats._timeGapMax);
+            upstreamStats["avg_gap"] = formatUsecTime(streamStats._timeGapAverage);
+            upstreamStats["min_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMin);
+            upstreamStats["max_gap_30s"] = formatUsecTime(streamStats._timeGapWindowMax);
+            upstreamStats["avg_gap_30s"] = formatUsecTime(streamStats._timeGapWindowAverage);
+
+            injectorArray.push_back(upstreamStats);
         }
     }
 
+    result["injectors"] = injectorArray;
+
     return result;
 }
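On the "username interpolation" half of the commit message: the mixer itself only stores the node's UUID string under USERNAME_UUID_REPLACEMENT_STATS_KEY; whatever actually swaps that UUID for a username lives on the stats consumer side and is not part of this diff. A hypothetical sketch of the idea, assuming some lookupUsername() helper (both function names below are invented for illustration):

    #include <QtCore/QJsonObject>
    #include <QtCore/QString>

    // Assumed to exist wherever the stats are rendered; not part of this commit.
    QString lookupUsername(const QString& uuidString);

    QJsonObject interpolateUsername(QJsonObject nodeStats, const QString& replacementKey) {
        // The mixer stored the node's UUID string under the replacement key;
        // a consumer that knows the UUID -> username mapping can swap it out.
        QString username = lookupUsername(nodeStats[replacementKey].toString());
        if (!username.isEmpty()) {
            nodeStats[replacementKey] = username;
        }
        return nodeStats;
    }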