Merge pull request #7074 from birarda/integrate-new-hrtf

use the existing counter in AudioMixer to send client stats, remove some now dead code
Clément Brisset 2016-02-10 17:55:53 -08:00
commit 618712538a
6 changed files with 7 additions and 190 deletions

@@ -67,8 +67,6 @@ const QString AUDIO_BUFFER_GROUP_KEY = "audio_buffer";
InboundAudioStream::Settings AudioMixer::_streamSettings;
bool AudioMixer::_printStreamStats = false;
bool AudioMixer::_enableFilter = true;
bool AudioMixer::shouldMute(float quietestFrame) {
@@ -81,13 +79,7 @@ AudioMixer::AudioMixer(ReceivedMessage& message) :
_minAudibilityThreshold(LOUDNESS_TO_DISTANCE_RATIO / 2.0f),
_performanceThrottlingRatio(0.0f),
_attenuationPerDoublingInDistance(DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE),
_noiseMutingThreshold(DEFAULT_NOISE_MUTING_THRESHOLD),
_lastPerSecondCallbackTime(usecTimestampNow()),
_sendAudioStreamStats(false),
_datagramsReadPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_timeSpentPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_timeSpentPerHashMatchCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_readPendingCallsPerSecondStats(1, READ_DATAGRAMS_STATS_WINDOW_SECONDS)
_noiseMutingThreshold(DEFAULT_NOISE_MUTING_THRESHOLD)
{
auto nodeList = DependencyManager::get<NodeList>();
auto& packetReceiver = nodeList->getPacketReceiver();
@@ -528,42 +520,6 @@ void AudioMixer::sendStatsPacket() {
_totalMixes = 0;
_numStatFrames = 0;
QJsonObject readPendingDatagramStats;
QJsonObject rpdCallsStats;
rpdCallsStats["calls_per_sec_avg_30s"] = _readPendingCallsPerSecondStats.getWindowAverage();
rpdCallsStats["calls_last_sec"] = _readPendingCallsPerSecondStats.getLastCompleteIntervalStats().getSum() + 0.5;
readPendingDatagramStats["calls"] = rpdCallsStats;
QJsonObject packetsPerCallStats;
packetsPerCallStats["avg_30s"] = _datagramsReadPerCallStats.getWindowAverage();
packetsPerCallStats["avg_1s"] = _datagramsReadPerCallStats.getLastCompleteIntervalStats().getAverage();
readPendingDatagramStats["packets_per_call"] = packetsPerCallStats;
QJsonObject packetsTimePerCallStats;
packetsTimePerCallStats["usecs_per_call_avg_30s"] = _timeSpentPerCallStats.getWindowAverage();
packetsTimePerCallStats["usecs_per_call_avg_1s"] = _timeSpentPerCallStats.getLastCompleteIntervalStats().getAverage();
packetsTimePerCallStats["prct_time_in_call_30s"] =
_timeSpentPerCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND) * 100.0;
packetsTimePerCallStats["prct_time_in_call_1s"] =
_timeSpentPerCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;
readPendingDatagramStats["packets_time_per_call"] = packetsTimePerCallStats;
QJsonObject hashMatchTimePerCallStats;
hashMatchTimePerCallStats["usecs_per_hashmatch_avg_30s"] = _timeSpentPerHashMatchCallStats.getWindowAverage();
hashMatchTimePerCallStats["usecs_per_hashmatch_avg_1s"]
= _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getAverage();
hashMatchTimePerCallStats["prct_time_in_hashmatch_30s"]
= _timeSpentPerHashMatchCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0;
hashMatchTimePerCallStats["prct_time_in_hashmatch_1s"]
= _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;
readPendingDatagramStats["hashmatch_time_per_call"] = hashMatchTimePerCallStats;
statsObject["read_pending_datagrams"] = readPendingDatagramStats;
// add stats for each listener
auto nodeList = DependencyManager::get<NodeList>();
QJsonObject listenerStats;
@@ -689,12 +645,6 @@ void AudioMixer::broadcastMixes() {
++framesSinceCutoffEvent;
}
quint64 now = usecTimestampNow();
if (now - _lastPerSecondCallbackTime > USECS_PER_SECOND) {
perSecondActions();
_lastPerSecondCallbackTime = now;
}
nodeList->eachNode([&](const SharedNodePointer& node) {
if (node->getLinkedData()) {
@@ -750,10 +700,11 @@ void AudioMixer::broadcastMixes() {
nodeList->sendPacket(std::move(mixPacket), *node);
nodeData->incrementOutgoingMixedAudioSequenceNumber();
// send an audio stream stats packet if it's time
if (_sendAudioStreamStats) {
static const int FRAMES_PER_SECOND = int(ceilf(1.0f / AudioConstants::NETWORK_FRAME_SECS));
// send an audio stream stats packet to the client approximately every second
if (nextFrame % FRAMES_PER_SECOND == 0) {
nodeData->sendAudioStreamStatsPackets(node);
_sendAudioStreamStats = false;
}
++_sumListeners;
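The check above is the heart of the change: instead of a wall-clock callback flipping _sendAudioStreamStats once per second, the mixer now derives how many network frames make up a second and fires on every Nth pass through the mix loop. A minimal standalone sketch of that pattern, assuming a ~10.7 ms network frame; NETWORK_FRAME_SECS here and the sendStats() helper are stand-ins for illustration, not the mixer's real members:

#include <cmath>
#include <cstdio>

// Stand-in for AudioConstants::NETWORK_FRAME_SECS: 256 samples at an assumed 24 kHz network rate.
const float NETWORK_FRAME_SECS = 256.0f / 24000.0f;

// Hypothetical stand-in for nodeData->sendAudioStreamStatsPackets(node).
void sendStats(int frame) {
    std::printf("stats sent on frame %d\n", frame);
}

int main() {
    // Same expression the diff introduces: round up so the interval spans at least one second.
    static const int FRAMES_PER_SECOND = int(std::ceil(1.0f / NETWORK_FRAME_SECS));

    for (int nextFrame = 0; nextFrame < 3 * FRAMES_PER_SECOND; ++nextFrame) {
        // ... mix and send this frame's audio to each listener ...
        if (nextFrame % FRAMES_PER_SECOND == 0) {
            sendStats(nextFrame); // fires roughly once per second
        }
    }
    return 0;
}

Tying the cadence to the existing frame counter keeps the stats send in lock-step with the mix loop, which is what lets _lastPerSecondCallbackTime, _sendAudioStreamStats, and perSecondActions() below go away.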
@@ -781,64 +732,6 @@ void AudioMixer::broadcastMixes() {
}
}
void AudioMixer::perSecondActions() {
_sendAudioStreamStats = true;
int callsLastSecond = _datagramsReadPerCallStats.getCurrentIntervalSamples();
_readPendingCallsPerSecondStats.update(callsLastSecond);
if (_printStreamStats) {
printf("\n================================================================================\n\n");
printf(" readPendingDatagram() calls per second | avg: %.2f, avg_30s: %.2f, last_second: %d\n",
_readPendingCallsPerSecondStats.getAverage(),
_readPendingCallsPerSecondStats.getWindowAverage(),
callsLastSecond);
printf(" Datagrams read per call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
_datagramsReadPerCallStats.getAverage(),
_datagramsReadPerCallStats.getWindowAverage(),
_datagramsReadPerCallStats.getCurrentIntervalAverage());
printf(" Usecs spent per readPendingDatagram() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
_timeSpentPerCallStats.getAverage(),
_timeSpentPerCallStats.getWindowAverage(),
_timeSpentPerCallStats.getCurrentIntervalAverage());
printf(" Usecs spent per packetVersionAndHashMatch() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
_timeSpentPerHashMatchCallStats.getAverage(),
_timeSpentPerHashMatchCallStats.getWindowAverage(),
_timeSpentPerHashMatchCallStats.getCurrentIntervalAverage());
double WINDOW_LENGTH_USECS = READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND;
printf(" %% time spent in readPendingDatagram() calls | avg_30s: %.6f%%, last_second: %.6f%%\n",
_timeSpentPerCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
_timeSpentPerCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);
printf("%% time spent in packetVersionAndHashMatch() calls: | avg_30s: %.6f%%, last_second: %.6f%%\n",
_timeSpentPerHashMatchCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
_timeSpentPerHashMatchCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);
DependencyManager::get<NodeList>()->eachNode([](const SharedNodePointer& node) {
if (node->getLinkedData()) {
AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
printf("\nStats for agent %s --------------------------------\n",
node->getUUID().toString().toLatin1().data());
nodeData->printUpstreamDownstreamStats();
}
}
});
}
_datagramsReadPerCallStats.currentIntervalComplete();
_timeSpentPerCallStats.currentIntervalComplete();
_timeSpentPerHashMatchCallStats.currentIntervalComplete();
}
void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
if (settingsObject.contains(AUDIO_BUFFER_GROUP_KEY)) {
QJsonObject audioBufferGroupObject = settingsObject[AUDIO_BUFFER_GROUP_KEY].toObject();
@@ -903,12 +796,6 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
} else {
qDebug() << "Repetition with fade disabled";
}
const QString PRINT_STREAM_STATS_JSON_KEY = "print_stream_stats";
_printStreamStats = audioBufferGroupObject[PRINT_STREAM_STATS_JSON_KEY].toBool();
if (_printStreamStats) {
qDebug() << "Stream stats will be printed to stdout";
}
}
if (settingsObject.contains(AUDIO_ENV_GROUP_KEY)) {

@@ -110,19 +110,7 @@ private:
static InboundAudioStream::Settings _streamSettings;
static bool _printStreamStats;
static bool _enableFilter;
quint64 _lastPerSecondCallbackTime;
bool _sendAudioStreamStats;
// stats
MovingMinMaxAvg<int> _datagramsReadPerCallStats; // update with # of datagrams read for each readPendingDatagrams call
MovingMinMaxAvg<quint64> _timeSpentPerCallStats; // update with usecs spent inside each readPendingDatagrams call
MovingMinMaxAvg<quint64> _timeSpentPerHashMatchCallStats; // update with usecs spent inside each packetVersionAndHashMatch call
MovingMinMaxAvg<int> _readPendingCallsPerSecondStats; // update with # of readPendingDatagrams calls in the last second
};
#endif // hifi_AudioMixer_h

@@ -311,48 +311,3 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() {
return result;
}
void AudioMixerClientData::printUpstreamDownstreamStats() {
auto streamsCopy = getAudioStreams();
// print the upstream (mic stream) stats if the mic stream exists
auto it = streamsCopy.find(QUuid());
if (it != streamsCopy.end()) {
printf("Upstream:\n");
printAudioStreamStats(it->second->getAudioStreamStats());
}
// print the downstream stats if they contain valid info
if (_downstreamAudioStreamStats._packetStreamStats._received > 0) {
printf("Downstream:\n");
printAudioStreamStats(_downstreamAudioStreamStats);
}
}
void AudioMixerClientData::printAudioStreamStats(const AudioStreamStats& streamStats) const {
printf(" Packet loss | overall: %5.2f%% (%d lost), last_30s: %5.2f%% (%d lost)\n",
(double)(streamStats._packetStreamStats.getLostRate() * 100.0f),
streamStats._packetStreamStats._lost,
(double)(streamStats._packetStreamWindowStats.getLostRate() * 100.0f),
streamStats._packetStreamWindowStats._lost);
printf(" Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u\n",
streamStats._desiredJitterBufferFrames,
streamStats._framesAvailableAverage,
streamStats._framesAvailable);
printf(" Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u\n",
streamStats._starveCount,
streamStats._consecutiveNotMixedCount,
streamStats._framesDropped,
streamStats._overflowCount);
printf(" Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s\n",
formatUsecTime(streamStats._timeGapMin).toLatin1().data(),
formatUsecTime(streamStats._timeGapMax).toLatin1().data(),
formatUsecTime(streamStats._timeGapAverage).toLatin1().data());
printf(" Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s\n",
formatUsecTime(streamStats._timeGapWindowMin).toLatin1().data(),
formatUsecTime(streamStats._timeGapWindowMax).toLatin1().data(),
formatUsecTime(streamStats._timeGapWindowAverage).toLatin1().data());
}

@@ -58,14 +58,9 @@ public:
void incrementOutgoingMixedAudioSequenceNumber() { _outgoingMixedAudioSequenceNumber++; }
quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }
void printUpstreamDownstreamStats();
signals:
void injectorStreamFinished(const QUuid& streamIdentifier);
private:
void printAudioStreamStats(const AudioStreamStats& streamStats) const;
private:
QReadWriteLock _streamsLock;
AudioStreamMap _audioStreams; // microphone stream from avatar is stored under key of null UUID

@@ -366,14 +366,6 @@
"help": "Dropped frames and mixing during starves repeat the last frame, eventually fading to silence",
"default": false,
"advanced": true
},
{
"name": "print_stream_stats",
"type": "checkbox",
"label": "Print Stream Stats",
"help": "Audio upstream and downstream stats of each agent printed to audio-mixer stdout",
"default": false,
"advanced": true
}
]
},

@@ -27,8 +27,8 @@ namespace AudioConstants {
const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / sizeof(AudioSample);
const int NETWORK_FRAME_BYTES_PER_CHANNEL = 512;
const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / sizeof(AudioSample);
const float NETWORK_FRAME_MSECS = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL
/ (float)AudioConstants::SAMPLE_RATE) * 1000.0f;
const float NETWORK_FRAME_SECS = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL / float(AudioConstants::SAMPLE_RATE));
const float NETWORK_FRAME_MSECS = NETWORK_FRAME_SECS * 1000.0f;
// be careful with overflows when using this constant
const int NETWORK_FRAME_USECS = static_cast<int>(NETWORK_FRAME_MSECS * 1000.0f);
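For reference, a small sketch of how the new constants fall out numerically, assuming 16-bit samples and the 24 kHz network sample rate defined elsewhere in AudioConstants (both values are assumptions here, not part of this hunk):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    using AudioSample = int16_t;                       // assumed 2-byte sample type
    const int SAMPLE_RATE = 24000;                     // assumed network sample rate, Hz
    const int NETWORK_FRAME_BYTES_PER_CHANNEL = 512;
    const int NETWORK_FRAME_SAMPLES_PER_CHANNEL =
        int(NETWORK_FRAME_BYTES_PER_CHANNEL / sizeof(AudioSample));  // 256 samples

    const float NETWORK_FRAME_SECS  = NETWORK_FRAME_SAMPLES_PER_CHANNEL / float(SAMPLE_RATE);
    const float NETWORK_FRAME_MSECS = NETWORK_FRAME_SECS * 1000.0f;  // ~10.667 ms
    const int   NETWORK_FRAME_USECS = static_cast<int>(NETWORK_FRAME_MSECS * 1000.0f);

    const int FRAMES_PER_SECOND = int(std::ceil(1.0f / NETWORK_FRAME_SECS)); // ceil(93.75) == 94

    std::printf("frame: %.3f ms (%d usecs), stats roughly every %d frames\n",
                NETWORK_FRAME_MSECS, NETWORK_FRAME_USECS, FRAMES_PER_SECOND);
    return 0;
}

If the real sample rate differs, the frame length and FRAMES_PER_SECOND shift with it; the point is only that the once-per-second stats cadence is now derived from the frame length rather than kept by a separate timer.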