expose AudioStats to qml/js
commit b9c4018b8e (parent 277eefafd7)
7 changed files with 201 additions and 71 deletions
@@ -49,7 +49,7 @@ AudioMixerClientData::~AudioMixerClientData() {
 
 AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() {
     QReadLocker readLocker { &_streamsLock };
 
     auto it = _audioStreams.find(QUuid());
     if (it != _audioStreams.end()) {
         return dynamic_cast<AvatarAudioStream*>(it->second.get());
@@ -75,7 +75,7 @@ void AudioMixerClientData::removeHRTFForStream(const QUuid& nodeID, const QUuid&
 
 int AudioMixerClientData::parseData(ReceivedMessage& message) {
     PacketType packetType = message.getType();
 
     if (packetType == PacketType::AudioStreamStats) {
 
         // skip over header, appendFlag, and num stats packed
@@ -219,9 +219,10 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
     auto nodeList = DependencyManager::get<NodeList>();
 
     // The append flag is a boolean value that will be packed right after the header. The first packet sent
-    // inside this method will have 0 for this flag, while every subsequent packet will have 1 for this flag.
-    // The sole purpose of this flag is so the client can clear its map of injected audio stream stats when
-    // it receives a packet with an appendFlag of 0. This prevents the buildup of dead audio stream stats in the client.
+    // inside this method will have 0 for this flag, every subsequent packet but the last will have 1 for this flag,
+    // and the last packet will have 2 for this flag.
+    // This flag allows the client to know when it has received all stats packets, so it can group any downstream effects,
+    // and clear its cache of injector stream stats; it helps to prevent buildup of dead audio stream stats in the client.
     quint8 appendFlag = 0;
 
     auto streamsCopy = getAudioStreams();
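The rewritten comment above describes a small three-state framing protocol for the stats packets. Below is a minimal sketch of how a sender could assign the flag across a batch; the packet count and printout are illustrative only, not code from this commit:

// Illustrative only: models how the tri-state append flag is assigned across a
// batch of stats packets (0 = first, 1 = middle, 2 = last), per the comment above.
#include <cstdint>
#include <iostream>

int main() {
    const int numPackets = 4; // pretend this batch of stats spans four packets
    for (int i = 0; i < numPackets; ++i) {
        uint8_t appendFlag = (i == 0) ? 0 : (i == numPackets - 1) ? 2 : 1;
        std::cout << "packet " << i << " appendFlag=" << int(appendFlag) << "\n";
    }
    return 0;
}

A receiver clears its cached injector stats when it sees 0 and knows the batch is complete when it sees 2 (see the AudioIOStats.cpp hunks further down).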
@@ -1593,6 +1593,7 @@ void Application::initializeUi() {
     // though I can't find it. Hence, "ApplicationInterface"
     rootContext->setContextProperty("ApplicationInterface", this);
     rootContext->setContextProperty("Audio", &AudioScriptingInterface::getInstance());
+    rootContext->setContextProperty("AudioStats", DependencyManager::get<AudioClient>()->getStats().data());
     rootContext->setContextProperty("Controller", DependencyManager::get<controller::ScriptingInterface>().data());
     rootContext->setContextProperty("Entities", DependencyManager::get<EntityScriptingInterface>().data());
     FileScriptingInterface* fileDownload = new FileScriptingInterface(engine);
@@ -4874,6 +4875,7 @@ void Application::registerScriptEngineWithApplicationServices(ScriptEngine* scri
     scriptEngine->registerGlobalObject("Stats", Stats::getInstance());
     scriptEngine->registerGlobalObject("Settings", SettingsScriptingInterface::getInstance());
     scriptEngine->registerGlobalObject("AudioDevice", AudioDeviceScriptingInterface::getInstance());
+    scriptEngine->registerGlobalObject("AudioStats", DependencyManager::get<AudioClient>()->getStats().data());
 
     // Caches
     scriptEngine->registerGlobalObject("AnimationCache", DependencyManager::get<AnimationCache>().data());
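Both registrations above rely on the standard Qt pattern: a QObject whose state is published through Q_PROPERTY with NOTIFY signals becomes readable and bindable once it is handed to the QML context or the script engine under a global name. A self-contained sketch with a made-up property, assuming Qt's QQmlApplicationEngine; this is not the AudioStatsInterface added by this commit:

// Minimal sketch of exposing a stats-style QObject to QML via a context property.
// The class and property names here are illustrative.
#include <QGuiApplication>
#include <QQmlApplicationEngine>
#include <QQmlContext>
#include <QObject>
#include <QTimer>

class ToyStats : public QObject {
    Q_OBJECT
    Q_PROPERTY(float pingMs READ pingMs NOTIFY pingMsChanged)
public:
    float pingMs() const { return _pingMs; }
    void setPingMs(float value) {
        if (_pingMs != value) { _pingMs = value; emit pingMsChanged(); }
    }
signals:
    void pingMsChanged();
private:
    float _pingMs{ 0.0f };
};

int main(int argc, char** argv) {
    QGuiApplication app(argc, argv);
    ToyStats stats;

    QQmlApplicationEngine engine;
    // Same idea as rootContext->setContextProperty("AudioStats", ...) above.
    engine.rootContext()->setContextProperty("AudioStats", &stats);
    engine.loadData(R"(
        import QtQuick 2.5
        import QtQuick.Window 2.2
        Window { visible: true; Text { text: "ping: " + AudioStats.pingMs + " ms" } }
    )");

    // Bump the value once a second; the NOTIFY signal re-evaluates the QML binding.
    QTimer timer;
    QObject::connect(&timer, &QTimer::timeout, [&] { stats.setPingMs(stats.pingMs() + 1.0f); });
    timer.start(1000);
    return app.exec();
}

#include "main.moc" // assumes this file is main.cpp, built with qmake or CMake AUTOMOC

From QML or JS the object is then referenced by the name it was registered under, exactly as scripts will reference the new AudioStats global.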
@@ -162,7 +162,7 @@ public slots:
     void handleSelectedAudioFormat(QSharedPointer<ReceivedMessage> message);
     void handleMismatchAudioFormat(SharedNodePointer node, const QString& currentCodec, const QString& recievedCodec);
 
-    void sendDownstreamAudioStatsPacket() { _stats.sendDownstreamAudioStatsPacket(); }
+    void sendDownstreamAudioStatsPacket() { _stats.publish(); }
     void handleAudioInput();
     void handleRecordedAudioInput(const QByteArray& audio);
     void reset();
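The slot keeps its old name but now only delegates to AudioIOStats::publish(). The comment added in AudioIOStats.cpp (next file) notes it is driven once per second; a hedged sketch of that kind of 1 Hz wiring with a plain QTimer, since the actual connection inside AudioClient is not part of this diff:

// Illustrative wiring only (not from this commit): invoke a stats publish once per second.
#include <QCoreApplication>
#include <QDebug>
#include <QTimer>

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);
    QTimer timer;
    QObject::connect(&timer, &QTimer::timeout, [] {
        // In the real client this is where the sendDownstreamAudioStatsPacket slot
        // would run and call _stats.publish().
        qDebug() << "publish audio stats";
    });
    timer.start(1000); // 1x/sec, matching the comment in AudioIOStats.cpp
    return app.exec();
}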
@@ -18,22 +18,24 @@
 
 #include "AudioIOStats.h"
 
-// This is called 5x/sec (see AudioStatsDialog), and we want it to log the last 5s
-static const int INPUT_READS_WINDOW = 25;
-static const int INPUT_UNPLAYED_WINDOW = 25;
-static const int OUTPUT_UNPLAYED_WINDOW = 25;
+// This is called 1x/sec (see AudioClient) and we want it to log the last 5s
+static const int INPUT_READS_WINDOW = 5;
+static const int INPUT_UNPLAYED_WINDOW = 5;
+static const int OUTPUT_UNPLAYED_WINDOW = 5;
 
 static const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.0f / AudioConstants::NETWORK_FRAME_MSECS);
 
 
 AudioIOStats::AudioIOStats(MixedProcessedAudioStream* receivedAudioStream) :
+    _interface(new AudioStatsInterface(this)),
     _receivedAudioStream(receivedAudioStream),
-    _inputMsRead(0, INPUT_READS_WINDOW),
-    _inputMsUnplayed(0, INPUT_UNPLAYED_WINDOW),
-    _outputMsUnplayed(0, OUTPUT_UNPLAYED_WINDOW),
+    _inputMsRead(1, INPUT_READS_WINDOW),
+    _inputMsUnplayed(1, INPUT_UNPLAYED_WINDOW),
+    _outputMsUnplayed(1, OUTPUT_UNPLAYED_WINDOW),
     _lastSentPacketTime(0),
-    _packetTimegaps(0, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
+    _packetTimegaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
 {
 
 }
 
 void AudioIOStats::reset() {
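The window constants drop from 25 to 5 because the update cadence changed from 5x/sec to 1x/sec while the intended history stays at five seconds, i.e. intervals = seconds of history x updates per second. A toy illustration of that arithmetic; it is not the real MovingMinMaxAvg class, whose constructor arguments follow its own convention:

// Toy illustration of "N one-second intervals => N seconds of history".
// Not the real MovingMinMaxAvg; just the arithmetic behind 25 -> 5.
#include <deque>
#include <iostream>

int main() {
    const double updatesPerSecond = 1.0;  // was 5.0 before this commit
    const double historySeconds = 5.0;    // the window we want to keep
    const int window = static_cast<int>(historySeconds * updatesPerSecond);
    std::cout << "window intervals: " << window << "\n"; // 5 (was 25 at 5 Hz)

    std::deque<float> samples;
    for (int tick = 0; tick < 12; ++tick) {
        samples.push_back(float(tick));    // one sample per interval
        if (int(samples.size()) > window) {
            samples.pop_front();           // drop anything older than ~5 s
        }
    }
    std::cout << "samples retained: " << samples.size() << "\n"; // 5
    return 0;
}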
@@ -44,11 +46,13 @@ void AudioIOStats::reset() {
     _outputMsUnplayed.reset();
     _packetTimegaps.reset();
 
-    _mixerAvatarStreamStats = AudioStreamStats();
-    _mixerInjectedStreamStatsMap.clear();
+    _interface->updateLocalBuffers(_inputMsRead, _inputMsUnplayed, _outputMsUnplayed, _packetTimegaps);
+    _interface->updateMixerStream(AudioStreamStats());
+    _interface->updateClientStream(AudioStreamStats());
+    _interface->updateInjectorStreams(QHash<QUuid, AudioStreamStats>());
 }
 
-void AudioIOStats::sentPacket() {
+void AudioIOStats::sentPacket() const {
     // first time this is 0
     if (_lastSentPacketTime == 0) {
         _lastSentPacketTime = usecTimestampNow();
@@ -60,37 +64,13 @@ void AudioIOStats::sentPacket() {
     }
 }
 
-const MovingMinMaxAvg<float>& AudioIOStats::getInputMsRead() const {
-    _inputMsRead.currentIntervalComplete();
-    return _inputMsRead;
-}
-
-const MovingMinMaxAvg<float>& AudioIOStats::getInputMsUnplayed() const {
-    _inputMsUnplayed.currentIntervalComplete();
-    return _inputMsUnplayed;
-}
-
-const MovingMinMaxAvg<float>& AudioIOStats::getOutputMsUnplayed() const {
-    _outputMsUnplayed.currentIntervalComplete();
-    return _outputMsUnplayed;
-}
-
-const MovingMinMaxAvg<quint64>& AudioIOStats::getPacketTimegaps() const {
-    _packetTimegaps.currentIntervalComplete();
-    return _packetTimegaps;
-}
-
-const AudioStreamStats AudioIOStats::getMixerDownstreamStats() const {
-    return _receivedAudioStream->getAudioStreamStats();
-}
-
 void AudioIOStats::processStreamStatsPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
     // parse the appendFlag, clear injected audio stream stats if 0
     quint8 appendFlag;
     message->readPrimitive(&appendFlag);
 
-    if (!appendFlag) {
-        _mixerInjectedStreamStatsMap.clear();
+    if (appendFlag == 0) {
+        _injectorStreams.clear();
     }
 
     // parse the number of stream stats structs to follow
@@ -103,14 +83,18 @@ void AudioIOStats::processStreamStatsPacket(QSharedPointer<ReceivedMessage> mess
         message->readPrimitive(&streamStats);
 
         if (streamStats._streamType == PositionalAudioStream::Microphone) {
-            _mixerAvatarStreamStats = streamStats;
+            _interface->updateMixerStream(streamStats);
         } else {
-            _mixerInjectedStreamStatsMap[streamStats._streamIdentifier] = streamStats;
+            _injectorStreams[streamStats._streamIdentifier] = streamStats;
         }
     }
 
+    if (appendFlag == 2) {
+        _interface->updateInjectorStreams(_injectorStreams);
+    }
 }
 
-void AudioIOStats::sendDownstreamAudioStatsPacket() {
+void AudioIOStats::publish() {
     auto audioIO = DependencyManager::get<AudioClient>();
 
     // call _receivedAudioStream's per-second callback
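Per-packet stats are still folded into _injectorStreams as they arrive, but the QML-facing injector map is only republished when the terminating flag (2) shows up, so listeners get one consistent update per batch instead of one per packet. A self-contained sketch of that accumulate-then-publish pattern with stand-in types; the real logic is the hunk above:

// Illustrative accumulate-then-publish: cache per-stream stats as packets arrive,
// emit a single "changed" notification when the batch terminator (2) is seen.
// Types and names are stand-ins, not the ones from this commit.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct ToyStreamStats { int lost; };

class ToyReceiver {
public:
    void onStatsPacket(uint8_t appendFlag, const std::string& streamId, ToyStreamStats stats) {
        if (appendFlag == 0) {
            _streams.clear();          // first packet of a new batch
        }
        _streams[streamId] = stats;    // accumulate on 0, 1 and 2
        if (appendFlag == 2) {
            publish();                 // one notification per batch
        }
    }
private:
    void publish() { std::cout << "injectorStreamsChanged: " << _streams.size() << " streams\n"; }
    std::map<std::string, ToyStreamStats> _streams;
};

int main() {
    ToyReceiver receiver;
    receiver.onStatsPacket(0, "mic",      ToyStreamStats{1});
    receiver.onStatsPacket(1, "injector", ToyStreamStats{0});
    receiver.onStatsPacket(2, "ambience", ToyStreamStats{2}); // triggers the single publish
    return 0;
}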
@@ -126,6 +110,11 @@ void AudioIOStats::sendDownstreamAudioStatsPacket() {
     quint16 numStreamStatsToPack = 1;
     AudioStreamStats stats = _receivedAudioStream->getAudioStreamStats();
 
+    // update the interface
+    _interface->updateLocalBuffers(_inputMsRead, _inputMsUnplayed, _outputMsUnplayed, _packetTimegaps);
+    _interface->updateClientStream(stats);
+
+    // prepare a packet to the mixer
     int statsPacketSize = sizeof(appendFlag) + sizeof(numStreamStatsToPack) + sizeof(stats);
     auto statsPacket = NLPacket::create(PacketType::AudioStreamStats, statsPacketSize);
@@ -137,7 +126,63 @@ void AudioIOStats::sendDownstreamAudioStatsPacket() {
 
     // pack downstream audio stream stats
     statsPacket->writePrimitive(stats);
 
     // send packet
     nodeList->sendPacket(std::move(statsPacket), *audioMixer);
 }
+
+AudioStreamStatsInterface::AudioStreamStatsInterface(QObject* parent) :
+    QObject(parent) {}
+
+void AudioStreamStatsInterface::updateStream(const AudioStreamStats& stats) {
+    lossRate(stats._packetStreamStats.getLostRate());
+    lossCount(stats._packetStreamStats._lost);
+    lossRateWindow(stats._packetStreamWindowStats.getLostRate());
+    lossCountWindow(stats._packetStreamWindowStats._lost);
+
+    framesDesired(stats._desiredJitterBufferFrames);
+    framesAvailable(stats._framesAvailable);
+    framesAvailableAvg(stats._framesAvailableAverage);
+
+    unplayedMsMax(stats._unplayedMs);
+
+    starveCount(stats._starveCount);
+    lastStarveDurationCount(stats._consecutiveNotMixedCount);
+    dropCount(stats._framesDropped);
+    overflowCount(stats._overflowCount);
+
+    timegapMsMax(stats._timeGapMax / USECS_PER_MSEC);
+    timegapMsAvg(stats._timeGapAverage / USECS_PER_MSEC);
+    timegapMsMaxWindow(stats._timeGapWindowMax / USECS_PER_MSEC);
+    timegapMsAvgWindow(stats._timeGapWindowAverage / USECS_PER_MSEC);
+}
+
+AudioStatsInterface::AudioStatsInterface(QObject* parent) :
+    QObject(parent),
+    _client(new AudioStreamStatsInterface(this)),
+    _mixer(new AudioStreamStatsInterface(this)),
+    _injectors(new QObject(this)) {}
+
+
+void AudioStatsInterface::updateLocalBuffers(const MovingMinMaxAvg<float>& inputMsRead,
+                                             const MovingMinMaxAvg<float>& inputMsUnplayed,
+                                             const MovingMinMaxAvg<float>& outputMsUnplayed,
+                                             const MovingMinMaxAvg<quint64>& timegaps) {
+    if (SharedNodePointer audioNode = DependencyManager::get<NodeList>()->soloNodeOfType(NodeType::AudioMixer)) {
+        pingMs(audioNode->getPingMs());
+    }
+
+    inputReadMsMax(inputMsRead.getWindowMax());
+    inputUnplayedMsMax(inputMsUnplayed.getWindowMax());
+    outputUnplayedMsMax(outputMsUnplayed.getWindowMax());
+
+    sentTimegapMsMax(timegaps.getMax() / USECS_PER_MSEC);
+    sentTimegapMsAvg(timegaps.getAverage() / USECS_PER_MSEC);
+    sentTimegapMsMaxWindow(timegaps.getWindowMax() / USECS_PER_MSEC);
+    sentTimegapMsAvgWindow(timegaps.getWindowAverage() / USECS_PER_MSEC);
+}
+
+void AudioStatsInterface::updateInjectorStreams(const QHash<QUuid, AudioStreamStats>& stats) {
+    // TODO
+    emit injectorStreamsChanged();
+}
@@ -22,44 +22,122 @@
 
 class MixedProcessedAudioStream;
 
+#define AUDIO_PROPERTY(TYPE, NAME) \
+    Q_PROPERTY(TYPE NAME READ NAME NOTIFY NAME##Changed) \
+    public: \
+        TYPE NAME() const { return _##NAME; } \
+        void NAME(TYPE value) { \
+            if (_##NAME != value) { \
+                _##NAME = value; \
+                emit NAME##Changed(value); \
+            } \
+        } \
+        Q_SIGNAL void NAME##Changed(TYPE value); \
+    private: \
+        TYPE _##NAME{ (TYPE)0 };
+
+class AudioStreamStatsInterface : public QObject {
+    Q_OBJECT
+    AUDIO_PROPERTY(float, lossRate)
+    AUDIO_PROPERTY(float, lossCount)
+    AUDIO_PROPERTY(float, lossRateWindow)
+    AUDIO_PROPERTY(float, lossCountWindow)
+
+    AUDIO_PROPERTY(int, framesDesired)
+    AUDIO_PROPERTY(int, framesAvailable)
+    AUDIO_PROPERTY(int, framesAvailableAvg)
+    AUDIO_PROPERTY(float, unplayedMsMax)
+
+    AUDIO_PROPERTY(int, starveCount)
+    AUDIO_PROPERTY(int, lastStarveDurationCount)
+    AUDIO_PROPERTY(int, dropCount)
+    AUDIO_PROPERTY(int, overflowCount)
+
+    AUDIO_PROPERTY(quint64, timegapMsMax)
+    AUDIO_PROPERTY(quint64, timegapMsAvg)
+    AUDIO_PROPERTY(quint64, timegapMsMaxWindow)
+    AUDIO_PROPERTY(quint64, timegapMsAvgWindow)
+
+public:
+    void updateStream(const AudioStreamStats& stats);
+
+private:
+    friend class AudioStatsInterface;
+    AudioStreamStatsInterface(QObject* parent);
+};
+
+class AudioStatsInterface : public QObject {
+    Q_OBJECT
+    AUDIO_PROPERTY(float, pingMs);
+
+    AUDIO_PROPERTY(float, inputReadMsMax);
+    AUDIO_PROPERTY(float, inputUnplayedMsMax);
+    AUDIO_PROPERTY(float, outputUnplayedMsMax);
+
+    AUDIO_PROPERTY(quint64, sentTimegapMsMax);
+    AUDIO_PROPERTY(quint64, sentTimegapMsAvg);
+    AUDIO_PROPERTY(quint64, sentTimegapMsMaxWindow);
+    AUDIO_PROPERTY(quint64, sentTimegapMsAvgWindow);
+
+    Q_PROPERTY(AudioStreamStatsInterface* mixerStream READ getMixerStream);
+    Q_PROPERTY(AudioStreamStatsInterface* clientStream READ getClientStream);
+    Q_PROPERTY(QObject* injectorStreams READ getInjectorStreams NOTIFY injectorStreamsChanged);
+
+public:
+    AudioStreamStatsInterface* getMixerStream() const { return _mixer; }
+    AudioStreamStatsInterface* getClientStream() const { return _client; }
+    QObject* getInjectorStreams() const { return _injectors; }
+
+    void updateLocalBuffers(const MovingMinMaxAvg<float>& inputMsRead,
+                            const MovingMinMaxAvg<float>& inputMsUnplayed,
+                            const MovingMinMaxAvg<float>& outputMsUnplayed,
+                            const MovingMinMaxAvg<quint64>& timegaps);
+    void updateClientStream(const AudioStreamStats& stats) { _client->updateStream(stats); }
+    void updateMixerStream(const AudioStreamStats& stats) { _mixer->updateStream(stats); }
+    void updateInjectorStreams(const QHash<QUuid, AudioStreamStats>& stats);
+
+signals:
+    void injectorStreamsChanged();
+
+private:
+    friend class AudioIOStats;
+    AudioStatsInterface(QObject* parent);
+    AudioStreamStatsInterface* _client;
+    AudioStreamStatsInterface* _mixer;
+    QObject* _injectors;
+};
+
 class AudioIOStats : public QObject {
     Q_OBJECT
 public:
     AudioIOStats(MixedProcessedAudioStream* receivedAudioStream);
 
-    void reset();
-
-    void updateInputMsRead(float ms) { _inputMsRead.update(ms); }
-    void updateInputMsUnplayed(float ms) { _inputMsUnplayed.update(ms); }
-    void updateOutputMsUnplayed(float ms) { _outputMsUnplayed.update(ms); }
-    void sentPacket();
-
-    const MovingMinMaxAvg<float>& getInputMsRead() const;
-    const MovingMinMaxAvg<float>& getInputMsUnplayed() const;
-    const MovingMinMaxAvg<float>& getOutputMsUnplayed() const;
-    const MovingMinMaxAvg<quint64>& getPacketTimegaps() const;
-
-    const AudioStreamStats getMixerDownstreamStats() const;
-    const AudioStreamStats& getMixerAvatarStreamStats() const { return _mixerAvatarStreamStats; }
-    const QHash<QUuid, AudioStreamStats>& getMixerInjectedStreamStatsMap() const { return _mixerInjectedStreamStatsMap; }
-
-    void sendDownstreamAudioStatsPacket();
+    void reset();
+
+    AudioStatsInterface* data() const { return _interface; }
+
+    void updateInputMsRead(float ms) const { _inputMsRead.update(ms); }
+    void updateInputMsUnplayed(float ms) const { _inputMsUnplayed.update(ms); }
+    void updateOutputMsUnplayed(float ms) const { _outputMsUnplayed.update(ms); }
+    void sentPacket() const;
+
+    void publish();
 
 public slots:
     void processStreamStatsPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode);
 
 private:
-    MixedProcessedAudioStream* _receivedAudioStream;
+    AudioStatsInterface* _interface;
 
     mutable MovingMinMaxAvg<float> _inputMsRead;
     mutable MovingMinMaxAvg<float> _inputMsUnplayed;
     mutable MovingMinMaxAvg<float> _outputMsUnplayed;
 
-    quint64 _lastSentPacketTime;
+    mutable quint64 _lastSentPacketTime;
     mutable MovingMinMaxAvg<quint64> _packetTimegaps;
 
-    AudioStreamStats _mixerAvatarStreamStats;
-    QHash<QUuid, AudioStreamStats> _mixerInjectedStreamStatsMap;
+    MixedProcessedAudioStream* _receivedAudioStream;
+    QHash<QUuid, AudioStreamStats> _injectorStreams;
 };
 
 #endif // hifi_AudioIOStats_h
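Each AUDIO_PROPERTY(TYPE, NAME) line in the header above stamps out a Q_PROPERTY, a getter, a change-detecting setter, a NOTIFY signal, and a backing member. Expanded by hand for one property it reads roughly as follows; this is my expansion of the macro text for illustration, the real header keeps the macro:

// Hand-expansion of AUDIO_PROPERTY(float, lossRate), for illustration only.
#include <QObject>

class ExpandedExample : public QObject {
    Q_OBJECT
    Q_PROPERTY(float lossRate READ lossRate NOTIFY lossRateChanged)
public:
    float lossRate() const { return _lossRate; }
    void lossRate(float value) {            // setter deliberately reuses the property name
        if (_lossRate != value) {
            _lossRate = value;
            emit lossRateChanged(value);    // QML/JS bindings refresh on this signal
        }
    }
Q_SIGNALS:                                   // the macro marks the single signal with Q_SIGNAL instead
    void lossRateChanged(float value);
private:
    float _lossRate{ 0.0f };
};

Because the macro ends in a private: section, each subsequent AUDIO_PROPERTY line re-opens public: itself, and the two classes add their own explicit public:/private: sections afterwards.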
@@ -77,7 +77,8 @@ PacketVersion versionForPacketType(PacketType packetType) {
         case PacketType::InjectAudio:
         case PacketType::MicrophoneAudioNoEcho:
         case PacketType::MicrophoneAudioWithEcho:
-            return static_cast<PacketVersion>(AudioVersion::Exactly10msAudioPackets);
+        case PacketType::AudioStreamStats:
+            return static_cast<PacketVersion>(AudioVersion::CurrentVersion);
 
         default:
             return 17;
@@ -223,7 +223,10 @@ enum class DomainListVersion : PacketVersion {
 enum class AudioVersion : PacketVersion {
     HasCompressedAudio = 17,
     CodecNameInAudioPackets,
-    Exactly10msAudioPackets
+    Exactly10msAudioPackets,
+    TerminatingStreamStats,
+    // add new versions above this line
+    CurrentVersion
 };
 
 #endif // hifi_PacketHeaders_h
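Since unvalued enumerators auto-increment from the previous one, adding TerminatingStreamStats above the marker comment also bumps CurrentVersion. A quick check of the resulting values, assuming PacketVersion is an 8-bit integer as elsewhere in this header:

// Mirrors the enum in the hunk above; the static_asserts spell out the
// auto-increment: each unvalued enumerator is the previous one plus one.
#include <cstdint>

using PacketVersion = uint8_t;

enum class AudioVersion : PacketVersion {
    HasCompressedAudio = 17,
    CodecNameInAudioPackets,    // 18
    Exactly10msAudioPackets,    // 19
    TerminatingStreamStats,     // 20
    // add new versions above this line
    CurrentVersion              // 21
};

static_assert(static_cast<int>(AudioVersion::TerminatingStreamStats) == 20, "auto-increment");
static_assert(static_cast<int>(AudioVersion::CurrentVersion) == 21, "one past the newest named version");

int main() { return 0; }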