diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp
index 6cdcaef133..2ba3809729 100644
--- a/assignment-client/src/audio/AudioMixer.cpp
+++ b/assignment-client/src/audio/AudioMixer.cpp
@@ -405,7 +405,8 @@ void AudioMixer::readPendingDatagrams() {
             if (mixerPacketType == PacketTypeMicrophoneAudioNoEcho
                 || mixerPacketType == PacketTypeMicrophoneAudioWithEcho
                 || mixerPacketType == PacketTypeInjectAudio
-                || mixerPacketType == PacketTypeSilentAudioFrame) {
+                || mixerPacketType == PacketTypeSilentAudioFrame
+                || mixerPacketType == PacketTypeAudioStreamStats) {
 
                 nodeList->findNodeAndUpdateWithDataFromPacket(receivedPacket);
             } else if (mixerPacketType == PacketTypeMuteEnvironment) {
diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp
index ae4a0269cc..915199b443 100644
--- a/assignment-client/src/audio/AudioMixerClientData.cpp
+++ b/assignment-client/src/audio/AudioMixerClientData.cpp
@@ -83,7 +83,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
 
         // ask the AvatarAudioRingBuffer instance to parse the data
         avatarRingBuffer->parseData(packet);
-    } else {
+    } else if (packetType == PacketTypeInjectAudio) {
         // this is injected audio
 
         // grab the stream identifier for this injected audio
@@ -107,6 +107,17 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         }
 
         matchingInjectedRingBuffer->parseData(packet);
+    } else if (packetType == PacketTypeAudioStreamStats) {
+
+        const char* dataAt = packet.data();
+
+        // skip over header, appendFlag, and num stats packed
+        dataAt += (numBytesPacketHeader + sizeof(quint8) + sizeof(quint16));
+
+        // read the downstream audio stream stats, ignoring a truncated packet
+        if (packet.size() >= dataAt - packet.data() + (int)sizeof(AudioStreamStats)) {
+            memcpy(&_downstreamAudioStreamStats, dataAt, sizeof(AudioStreamStats));
+        }
     }
 
     return 0;
diff --git a/assignment-client/src/audio/AudioMixerClientData.h b/assignment-client/src/audio/AudioMixerClientData.h
index 65fd4b3da3..526071832e 100644
--- a/assignment-client/src/audio/AudioMixerClientData.h
+++ b/assignment-client/src/audio/AudioMixerClientData.h
@@ -46,6 +46,8 @@ private:
     quint16 _outgoingMixedAudioSequenceNumber;
     SequenceNumberStats _incomingAvatarAudioSequenceNumberStats;
     QHash<QUuid, SequenceNumberStats> _incomingInjectedAudioSequenceNumberStatsMap;
+
+    AudioStreamStats _downstreamAudioStreamStats;
 };
 
 #endif // hifi_AudioMixerClientData_h
diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp
index d7464f57a1..8edc788833 100644
--- a/interface/src/Application.cpp
+++ b/interface/src/Application.cpp
@@ -172,7 +172,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
     _runningScriptsWidget(NULL),
     _runningScriptsWidgetWasVisible(false),
     _trayIcon(new QSystemTrayIcon(_window)),
-    _lastNackTime(usecTimestampNow())
+    _lastNackTime(usecTimestampNow()),
+    _lastSendDownstreamAudioStats(usecTimestampNow())
 {
     // read the ApplicationInfo.ini file for Name/Version/Domain information
     QSettings applicationInfo(Application::resourcesPath() + "info/ApplicationInfo.ini", QSettings::IniFormat);
@@ -2125,10 +2126,11 @@ void Application::updateMyAvatar(float deltaTime) {
         loadViewFrustum(_myCamera, _viewFrustum);
     }
 
+    quint64 now = usecTimestampNow();
+
     // Update my voxel servers with my current voxel query...
     {
         PerformanceTimer perfTimer("queryOctree");
-        quint64 now = usecTimestampNow();
         quint64 sinceLastQuery = now - _lastQueriedTime;
         const quint64 TOO_LONG_SINCE_LAST_QUERY = 3 * USECS_PER_SECOND;
         bool queryIsDue = sinceLastQuery > TOO_LONG_SINCE_LAST_QUERY;
@@ -2146,7 +2148,6 @@ void Application::updateMyAvatar(float deltaTime) {
 
     // sent nack packets containing missing sequence numbers of received packets from nodes
     {
-        quint64 now = usecTimestampNow();
         quint64 sinceLastNack = now - _lastNackTime;
         const quint64 TOO_LONG_SINCE_LAST_NACK = 1 * USECS_PER_SECOND;
         if (sinceLastNack > TOO_LONG_SINCE_LAST_NACK) {
@@ -2154,6 +2155,16 @@ void Application::updateMyAvatar(float deltaTime) {
             sendNackPackets();
         }
     }
+
+    {
+        quint64 sinceLastSendDownstreamAudioStats = now - _lastSendDownstreamAudioStats;
+        const quint64 TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS = 1 * USECS_PER_SECOND;
+        if (sinceLastSendDownstreamAudioStats > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) {
+            _lastSendDownstreamAudioStats = now;
+
+            QMetaObject::invokeMethod(&_audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
+        }
+    }
 }
 
 int Application::sendNackPackets() {
diff --git a/interface/src/Application.h b/interface/src/Application.h
index 321a43d548..b55a830e3e 100644
--- a/interface/src/Application.h
+++ b/interface/src/Application.h
@@ -586,6 +586,7 @@ private:
     QSystemTrayIcon* _trayIcon;
 
     quint64 _lastNackTime;
+    quint64 _lastSendDownstreamAudioStats;
 };
 
 #endif // hifi_Application_h
diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index 17e9054568..7a445bf816 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -782,6 +782,62 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
     _incomingStreamPacketStatsHistory.insert(_incomingMixedAudioSequenceNumberStats.getStats());
 }
 
+AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
+
+    AudioStreamStats stats;
+    stats._streamType = PositionalAudioRingBuffer::Microphone;
+
+    stats._timeGapMin = _interframeTimeGapStats.getMin();
+    stats._timeGapMax = _interframeTimeGapStats.getMax();
+    stats._timeGapAverage = _interframeTimeGapStats.getAverage();
+    stats._timeGapWindowMin = _interframeTimeGapStats.getWindowMin();
+    stats._timeGapWindowMax = _interframeTimeGapStats.getWindowMax();
+    stats._timeGapWindowAverage = _interframeTimeGapStats.getWindowAverage();
+
+    stats._ringBufferFramesAvailable = _ringBuffer.framesAvailable();
+    stats._ringBufferCurrentJitterBufferFrames = 0;
+    stats._ringBufferDesiredJitterBufferFrames = getDesiredJitterBufferFrames();
+    stats._ringBufferStarveCount = _starveCount;
+    stats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount;
+    stats._ringBufferOverflowCount = _ringBuffer.getOverflowCount();
+    stats._ringBufferSilentFramesDropped = 0;
+
+    stats._packetStreamStats = _incomingMixedAudioSequenceNumberStats.getStats();
+
+    return stats;
+}
+
+void Audio::sendDownstreamAudioStatsPacket() {
+
+    char packet[MAX_PACKET_SIZE];
+
+    // pack header
+    int numBytesPacketHeader = populatePacketHeader(packet, PacketTypeAudioStreamStats);
+    char* dataAt = packet + numBytesPacketHeader;
+
+    // pack append flag
+    quint8 appendFlag = 0;
+    memcpy(dataAt, &appendFlag, sizeof(quint8));
+    dataAt += sizeof(quint8);
+
+    // pack number of stats packed
+    quint16 numStreamStatsToPack = 1;
+    memcpy(dataAt, &numStreamStatsToPack, sizeof(quint16));
+    dataAt += sizeof(quint16);
+
+    // pack downstream audio stream stats
+    AudioStreamStats stats = getDownstreamAudioStreamStats();
+    memcpy(dataAt, &stats, sizeof(AudioStreamStats));
+    dataAt += sizeof(AudioStreamStats);
+
+    // send packet, but only if an audio mixer is actually connected
+    NodeList* nodeList = NodeList::getInstance();
+    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
+    if (audioMixer) {
+        nodeList->writeDatagram(packet, dataAt - packet, audioMixer);
+    }
+}
+
 // NOTE: numSamples is the total number of single channel samples, since callers will always call this with stereo
 // data we know that we will have 2x samples for each stereo time sample at the format's sample rate
 void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {
diff --git a/interface/src/Audio.h b/interface/src/Audio.h
index e6e06838d3..e8e92db1a0 100644
--- a/interface/src/Audio.h
+++ b/interface/src/Audio.h
@@ -99,6 +99,9 @@ public slots:
     virtual void handleAudioByteArray(const QByteArray& audioByteArray);
 
+    AudioStreamStats getDownstreamAudioStreamStats() const;
+    void sendDownstreamAudioStatsPacket();
+
     bool switchInputToAudioDevice(const QString& inputDeviceName);
     bool switchOutputToAudioDevice(const QString& outputDeviceName);
     QString getDeviceName(QAudio::Mode mode) const { return (mode == QAudio::AudioInput) ?
         _inputAudioDeviceName : _outputAudioDeviceName; }
diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp
index 0cd9e3fa05..add0754fe1 100644
--- a/interface/src/ui/Stats.cpp
+++ b/interface/src/ui/Stats.cpp
@@ -349,24 +349,26 @@ void Stats::display(
 
-    char downstreamAudioStatsString[30];
+    char downstreamAudioStatsString[512];
 
+    AudioStreamStats downstreamAudioStreamStats = audio->getDownstreamAudioStreamStats();
+
     audio->calculatePacketLossRate(audio->getIncomingStreamPacketStatsHistory(), packetLossRate, packetLossRate30s);
 
-    sprintf(downstreamAudioStatsString, " mix: %.1f%%/%.1f%%, %d/?/%d", packetLossRate*100.0f, packetLossRate30s*100.0f,
-        audio->getDownstreamRingBuffer().framesAvailable(), audio->getDesiredJitterBufferFrames());
+    sprintf(downstreamAudioStatsString, " mix: %.1f%%/%.1f%%, %u/?/%u", packetLossRate*100.0f, packetLossRate30s*100.0f,
+        downstreamAudioStreamStats._ringBufferFramesAvailable, downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames);
 
     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
 
-    const MovingMinMaxAvg<quint64>& timeGapStats = audio->getInterframeTimeGapStats();
-
-    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %d/%d", timeGapStats.getMin(), timeGapStats.getMax(),
-        timeGapStats.getAverage(), audio->getStarveCount(), audio->getDownstreamRingBuffer().getOverflowCount());
+    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", downstreamAudioStreamStats._timeGapMin,
+        downstreamAudioStreamStats._timeGapMax, downstreamAudioStreamStats._timeGapAverage,
+        downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);
 
     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
 
-    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %d/?", timeGapStats.getWindowMin(), timeGapStats.getWindowMax(),
-        timeGapStats.getWindowAverage(), audio->getConsecutiveNotMixedCount());
+    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/?", downstreamAudioStreamStats._timeGapWindowMin,
+        downstreamAudioStreamStats._timeGapWindowMax, downstreamAudioStreamStats._timeGapWindowAverage,
+        downstreamAudioStreamStats._ringBufferConsecutiveNotMixedCount);
 
     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);