added stats for readPendingDatagrams in audiomixer

Author: wangyix
Date:   2014-08-13 13:30:02 -07:00
Commit: b17c9102c9 (parent 014346094b)

3 changed files with 51 additions and 12 deletions


@@ -81,7 +81,12 @@ AudioMixer::AudioMixer(const QByteArray& packet) :
     _sumMixes(0),
     _sourceUnattenuatedZone(NULL),
     _listenerUnattenuatedZone(NULL),
-    _lastSendAudioStreamStatsTime(usecTimestampNow())
+    _lastPerSecondCallbackTime(usecTimestampNow()),
+    _sendAudioStreamStats(false),
+    _datagramsReadPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
+    _timeSpentPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
+    _timeSpentPerHashMatchCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
+    _readPendingCallsPerSecondStats(1, READ_DATAGRAMS_STATS_WINDOW_SECONDS)
 {
 }
 
@@ -328,12 +333,18 @@ int AudioMixer::prepareMixForListeningNode(Node* node) {
 }
 
 void AudioMixer::readPendingDatagrams() {
+    quint64 readPendingDatagramsStart = usecTimestampNow();
     QByteArray receivedPacket;
     HifiSockAddr senderSockAddr;
     NodeList* nodeList = NodeList::getInstance();
+    int datagramsRead = 0;
     while (readAvailableDatagram(receivedPacket, senderSockAddr)) {
-        if (nodeList->packetVersionAndHashMatch(receivedPacket)) {
+        quint64 packetVersionAndHashMatchStart = usecTimestampNow();
+        bool match = nodeList->packetVersionAndHashMatch(receivedPacket);
+        _timeSpentPerHashMatchCallStats.update(usecTimestampNow() - packetVersionAndHashMatchStart);
+        if (match) {
             // pull any new audio data from nodes off of the network stack
             PacketType mixerPacketType = packetTypeForPacket(receivedPacket);
             if (mixerPacketType == PacketTypeMicrophoneAudioNoEcho
@@ -352,13 +363,16 @@ void AudioMixer::readPendingDatagrams() {
                         nodeList->writeDatagram(packet, packet.size(), node);
                     }
                 }
             } else {
                 // let processNodeData handle it.
                 nodeList->processNodeData(senderSockAddr, receivedPacket);
             }
         }
+        datagramsRead++;
     }
+    _timeSpentPerCallStats.update(usecTimestampNow() - readPendingDatagramsStart);
+    _datagramsReadPerCallStats.update(datagramsRead);
 }
 
 void AudioMixer::sendStatsPacket() {
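
Note on the instrumentation pattern above: each readPendingDatagrams() call and each packetVersionAndHashMatch() call is timed by sampling usecTimestampNow() before and after the work and feeding the difference into a MovingMinMaxAvg accumulator. The standalone sketch below expresses the same measure-and-record idea with an RAII scope timer; ScopedUsecTimer and SimpleUsecStats are hypothetical helpers written against std::chrono purely for illustration and are not part of this commit or the codebase.

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for a stats sink such as MovingMinMaxAvg<quint64>.
    struct SimpleUsecStats {
        uint64_t totalUsecs = 0;
        int samples = 0;
        void update(uint64_t usecs) { totalUsecs += usecs; ++samples; }
        double average() const { return samples ? double(totalUsecs) / samples : 0.0; }
    };

    // Hypothetical RAII helper: times the enclosing scope and reports the elapsed
    // microseconds to a stats object when it is destroyed.
    class ScopedUsecTimer {
    public:
        explicit ScopedUsecTimer(SimpleUsecStats& stats)
            : _stats(stats), _start(std::chrono::steady_clock::now()) {}
        ~ScopedUsecTimer() {
            auto elapsed = std::chrono::steady_clock::now() - _start;
            _stats.update(std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count());
        }
    private:
        SimpleUsecStats& _stats;
        std::chrono::steady_clock::time_point _start;
    };

    static SimpleUsecStats perCallStats;

    // Stands in for the body of readPendingDatagrams().
    void doSomeWork() {
        volatile long sum = 0;
        for (int i = 0; i < 100000; ++i) { sum += i; }
    }

    int main() {
        for (int call = 0; call < 10; ++call) {
            ScopedUsecTimer timer(perCallStats);   // starts timing this call
            doSomeWork();
        }                                          // destructor records elapsed usecs
        std::printf("calls: %d, avg usecs per call: %.1f\n",
                    perCallStats.samples, perCallStats.average());
        return 0;
    }

The commit keeps the timing inline with explicit usecTimestampNow() calls rather than introducing a helper type; the RAII form above is only an alternative way to express the same measurement.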
@@ -609,12 +623,11 @@ void AudioMixer::run() {
         if (!hasRatioChanged) {
             ++framesSinceCutoffEvent;
         }
         
-        bool sendAudioStreamStats = false;
         quint64 now = usecTimestampNow();
-        if (now - _lastSendAudioStreamStatsTime > TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS) {
-            _lastSendAudioStreamStatsTime = now;
-            sendAudioStreamStats = true;
+        if (now - _lastPerSecondCallbackTime > USECS_PER_SECOND) {
+            perSecondActions();
+            _lastPerSecondCallbackTime = now;
         }
         
         bool streamStatsPrinted = false;
@@ -667,14 +680,14 @@ void AudioMixer::run() {
                    nodeData->incrementOutgoingMixedAudioSequenceNumber();
                    
                    // send an audio stream stats packet if it's time
-                   if (sendAudioStreamStats) {
+                   if (_sendAudioStreamStats) {
                        nodeData->sendAudioStreamStatsPackets(node);
                        if (_printStreamStats) {
                            printf("\nStats for agent %s:\n", node->getUUID().toString().toLatin1().data());
                            nodeData->printUpstreamDownstreamStats();
                            streamStatsPrinted = true;
                        }
+                       _sendAudioStreamStats = false;
                    }
                    
                    ++_sumListeners;
@@ -700,3 +713,14 @@ void AudioMixer::run() {
         }
     }
 }
+
+void AudioMixer::perSecondActions() {
+    _sendAudioStreamStats = true;
+
+    int callsLastSecond = _datagramsReadPerCallStats.getCurrentIntervalSamples();
+    _readPendingCallsPerSecondStats.update(callsLastSecond);
+
+    _datagramsReadPerCallStats.currentIntervalComplete();
+    _timeSpentPerCallStats.currentIntervalComplete();
+    _timeSpentPerHashMatchCallStats.currentIntervalComplete();
+}
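
Taken together, the readPendingDatagrams() and perSecondActions() changes form a two-level accounting scheme: every readPendingDatagrams() call adds one sample to _datagramsReadPerCallStats, and once per second perSecondActions() reads how many samples the still-open interval has accumulated (which is therefore the number of calls made in the last second), records that into _readPendingCallsPerSecondStats, and then closes the current interval on each accumulator. The sketch below models that flow with a deliberately simplified IntervalStats class; it is an assumption-level stand-in for MovingMinMaxAvg (it omits the sliding window over READ_DATAGRAMS_STATS_WINDOW_SECONDS intervals), not the real implementation.

    #include <algorithm>
    #include <cstdio>
    #include <limits>

    // Simplified stand-in for MovingMinMaxAvg: tracks overall min/max/avg and counts
    // how many samples were added since the last completed interval.
    class IntervalStats {
    public:
        void update(long sample) {
            _min = std::min(_min, sample);
            _max = std::max(_max, sample);
            _sum += sample;
            ++_intervalSamples;
            ++_totalSamples;
        }
        int getCurrentIntervalSamples() const { return _intervalSamples; }
        void currentIntervalComplete() { _intervalSamples = 0; }   // begin a fresh interval
        long getMin() const { return _min; }
        long getMax() const { return _max; }
        double getAverage() const { return _totalSamples ? double(_sum) / _totalSamples : 0.0; }
    private:
        long _min = std::numeric_limits<long>::max();
        long _max = std::numeric_limits<long>::min();
        long _sum = 0;
        int _intervalSamples = 0;
        int _totalSamples = 0;
    };

    IntervalStats datagramsReadPerCall;       // one sample per readPendingDatagrams() call
    IntervalStats readPendingCallsPerSecond;  // one sample per elapsed second

    // Stands in for readPendingDatagrams(): record how many datagrams this call drained.
    void readPendingDatagrams(int datagramsAvailable) {
        datagramsReadPerCall.update(datagramsAvailable);
    }

    // Stands in for AudioMixer::perSecondActions(): runs once a second.
    void perSecondActions() {
        // Each call contributed exactly one sample, so the sample count of the
        // still-open interval equals the number of calls made in the last second.
        int callsLastSecond = datagramsReadPerCall.getCurrentIntervalSamples();
        readPendingCallsPerSecond.update(callsLastSecond);

        // Close out the intervals so next second's counts start from zero.
        datagramsReadPerCall.currentIntervalComplete();
        readPendingCallsPerSecond.currentIntervalComplete();
    }

    int main() {
        // Simulate two seconds of activity: 3 calls in the first second, 5 in the next.
        for (int i = 0; i < 3; ++i) { readPendingDatagrams(10); }
        perSecondActions();
        for (int i = 0; i < 5; ++i) { readPendingDatagrams(2); }
        perSecondActions();

        std::printf("calls/sec min %ld max %ld avg %.1f\n",
                    readPendingCallsPerSecond.getMin(),
                    readPendingCallsPerSecond.getMax(),
                    readPendingCallsPerSecond.getAverage());
        return 0;
    }

Running the sketch prints "calls/sec min 3 max 5 avg 4.0", mirroring what _readPendingCallsPerSecondStats would report after two one-second intervals; the real accumulators additionally keep windowed min/max/avg via getWindowMin(), getWindowMax(), and getWindowAverage().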


@@ -21,7 +21,8 @@ class AvatarAudioStream;
 const int SAMPLE_PHASE_DELAY_AT_90 = 20;
 
-const quint64 TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;
+const int READ_DATAGRAMS_STATS_WINDOW_SECONDS = 30;
 
 /// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
 class AudioMixer : public ThreadedAssignment {
@@ -50,6 +51,9 @@ private:
     // client samples capacity is larger than what will be sent to optimize mixing
     // we are MMX adding 4 samples at a time so we need client samples to have an extra 4
     int16_t _clientSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)];
+
+    void perSecondActions();
 
     float _trailingSleepRatio;
     float _minAudibilityThreshold;
@@ -64,7 +68,16 @@ private:
     static bool _printStreamStats;
     
-    quint64 _lastSendAudioStreamStatsTime;
+    quint64 _lastPerSecondCallbackTime;
+    bool _sendAudioStreamStats;
+
+    // stats
+    MovingMinMaxAvg<int> _datagramsReadPerCallStats; // update with # of datagrams read for each readPendingDatagrams call
+    MovingMinMaxAvg<quint64> _timeSpentPerCallStats; // update with usecs spent inside each readPendingDatagrams call
+    MovingMinMaxAvg<quint64> _timeSpentPerHashMatchCallStats; // update with usecs spent inside each packetVersionAndHashMatch call
+    MovingMinMaxAvg<int> _readPendingCallsPerSecondStats; // update with # of readPendingDatagrams calls in the last second
 };
 
 #endif // hifi_AudioMixer_h


@@ -156,6 +156,8 @@ public:
     T getWindowMax() const { return _windowStats.getMax(); }
     double getWindowAverage() const { return _windowStats.getAverage(); }
     
+    int getCurrentIntervalSamples() const { return _windowStats._samples; }
+    
     const MinMaxAvg<T>& getOverallStats() const{ return _overallStats; }
     const MinMaxAvg<T>& getWindowStats() const{ return _windowStats; }