Audio now periodically sends downstream audio stats to audiomixer

Author: wangyix
Date: 2014-07-10 14:14:29 -07:00
Parent: 35419eb939
Commit: b4c9e51011

8 changed files with 96 additions and 13 deletions
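For context: the new PacketTypeAudioStreamStats payload packed by Audio::sendDownstreamAudioStatsPacket() (Audio.cpp below) is laid out as a packet header, a quint8 append flag, a quint16 count of packed stats, then that many AudioStreamStats structs copied raw. The mixer-side parse in AudioMixerClientData.cpp skips the flag and count and reads exactly one struct, so as a sketch only (not part of this commit), a reader that honors the count field might look like this, assuming numBytesForPacketHeader(packet) returns the header length as it does elsewhere in this codebase:

// Sketch: generic reader for a PacketTypeAudioStreamStats payload.
// Assumes <cstring> and QtCore are available and that AudioStreamStats
// is trivially copyable (it is memcpy'd on the sending side).
void readAudioStreamStatsPacket(const QByteArray& packet, QVector<AudioStreamStats>& statsOut) {
    const char* dataAt = packet.data() + numBytesForPacketHeader(packet);

    quint8 appendFlag;      // 0 = fresh stats list, nonzero = continuation
    memcpy(&appendFlag, dataAt, sizeof(quint8));
    dataAt += sizeof(quint8);

    quint16 numStats;       // how many AudioStreamStats structs follow
    memcpy(&numStats, dataAt, sizeof(quint16));
    dataAt += sizeof(quint16);

    for (quint16 i = 0; i < numStats; i++) {
        AudioStreamStats stats;
        memcpy(&stats, dataAt, sizeof(AudioStreamStats));
        dataAt += sizeof(AudioStreamStats);
        statsOut.append(stats);
    }
}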

AudioMixer.cpp

@@ -405,7 +405,8 @@ void AudioMixer::readPendingDatagrams() {
         if (mixerPacketType == PacketTypeMicrophoneAudioNoEcho
             || mixerPacketType == PacketTypeMicrophoneAudioWithEcho
             || mixerPacketType == PacketTypeInjectAudio
-            || mixerPacketType == PacketTypeSilentAudioFrame) {
+            || mixerPacketType == PacketTypeSilentAudioFrame
+            || mixerPacketType == PacketTypeAudioStreamStats) {

             nodeList->findNodeAndUpdateWithDataFromPacket(receivedPacket);
         } else if (mixerPacketType == PacketTypeMuteEnvironment) {

AudioMixerClientData.cpp

@@ -83,7 +83,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         // ask the AvatarAudioRingBuffer instance to parse the data
         avatarRingBuffer->parseData(packet);
-    } else {
+    } else if (packetType == PacketTypeInjectAudio) {
         // this is injected audio

         // grab the stream identifier for this injected audio
@@ -107,6 +107,15 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         }

         matchingInjectedRingBuffer->parseData(packet);
+    } else if (packetType == PacketTypeAudioStreamStats) {
+        const char* dataAt = packet.data();
+
+        // skip over header, appendFlag, and num stats packed
+        dataAt += (numBytesPacketHeader + sizeof(quint8) + sizeof(quint16));
+
+        // read the downstream audio stream stats
+        memcpy(&_downstreamAudioStreamStats, dataAt, sizeof(AudioStreamStats));
     }

     return 0;

AudioMixerClientData.h

@@ -46,6 +46,8 @@ private:
     quint16 _outgoingMixedAudioSequenceNumber;
     SequenceNumberStats _incomingAvatarAudioSequenceNumberStats;
     QHash<QUuid, SequenceNumberStats> _incomingInjectedAudioSequenceNumberStatsMap;
+
+    AudioStreamStats _downstreamAudioStreamStats;
 };

 #endif // hifi_AudioMixerClientData_h

Application.cpp

@@ -172,7 +172,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
     _runningScriptsWidget(NULL),
     _runningScriptsWidgetWasVisible(false),
     _trayIcon(new QSystemTrayIcon(_window)),
-    _lastNackTime(usecTimestampNow())
+    _lastNackTime(usecTimestampNow()),
+    _lastSendDownstreamAudioStats(usecTimestampNow())
 {
     // read the ApplicationInfo.ini file for Name/Version/Domain information
     QSettings applicationInfo(Application::resourcesPath() + "info/ApplicationInfo.ini", QSettings::IniFormat);
@@ -2125,10 +2126,11 @@ void Application::updateMyAvatar(float deltaTime) {
         loadViewFrustum(_myCamera, _viewFrustum);
     }

+    quint64 now = usecTimestampNow();
+
     // Update my voxel servers with my current voxel query...
     {
         PerformanceTimer perfTimer("queryOctree");
-        quint64 now = usecTimestampNow();
         quint64 sinceLastQuery = now - _lastQueriedTime;
         const quint64 TOO_LONG_SINCE_LAST_QUERY = 3 * USECS_PER_SECOND;
         bool queryIsDue = sinceLastQuery > TOO_LONG_SINCE_LAST_QUERY;
@@ -2146,7 +2148,6 @@ void Application::updateMyAvatar(float deltaTime) {

     // sent nack packets containing missing sequence numbers of received packets from nodes
     {
-        quint64 now = usecTimestampNow();
         quint64 sinceLastNack = now - _lastNackTime;
         const quint64 TOO_LONG_SINCE_LAST_NACK = 1 * USECS_PER_SECOND;
         if (sinceLastNack > TOO_LONG_SINCE_LAST_NACK) {
@@ -2154,6 +2155,16 @@ void Application::updateMyAvatar(float deltaTime) {
             sendNackPackets();
         }
     }
+
+    {
+        quint64 sinceLastNack = now - _lastSendDownstreamAudioStats;
+        const quint64 TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS = 1 * USECS_PER_SECOND;
+        if (sinceLastNack > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) {
+            _lastSendDownstreamAudioStats = now;
+
+            QMetaObject::invokeMethod(&_audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
+        }
+    }
 }

 int Application::sendNackPackets() {
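Note on the updateMyAvatar() changes: the three once-per-interval tasks (octree query, NACKs, audio stats) now share a single now timestamp taken at the top of the function, and the stats send is queued onto the audio thread via Qt::QueuedConnection rather than invoked directly from the GUI thread. The new block reuses the name sinceLastNack from the NACK block it was copied from; a hypothetical rename (behavior identical, not part of this commit) reads more clearly:

    {
        // time since we last sent downstream audio stats, not NACKs
        quint64 sinceLastStatsSend = now - _lastSendDownstreamAudioStats;
        const quint64 TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS = 1 * USECS_PER_SECOND;
        if (sinceLastStatsSend > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) {
            _lastSendDownstreamAudioStats = now;

            // queue the send on the audio thread so this update loop never blocks on it
            QMetaObject::invokeMethod(&_audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
        }
    }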

Application.h

@@ -586,6 +586,7 @@ private:
     QSystemTrayIcon* _trayIcon;

     quint64 _lastNackTime;
+    quint64 _lastSendDownstreamAudioStats;
 };

 #endif // hifi_Application_h

Audio.cpp

@@ -782,6 +782,60 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
     _incomingStreamPacketStatsHistory.insert(_incomingMixedAudioSequenceNumberStats.getStats());
 }

+AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
+    AudioStreamStats stats;
+    stats._streamType = PositionalAudioRingBuffer::Microphone;
+
+    stats._timeGapMin = _interframeTimeGapStats.getMin();
+    stats._timeGapMax = _interframeTimeGapStats.getMax();
+    stats._timeGapAverage = _interframeTimeGapStats.getAverage();
+    stats._timeGapWindowMin = _interframeTimeGapStats.getWindowMin();
+    stats._timeGapWindowMax = _interframeTimeGapStats.getWindowMax();
+    stats._timeGapWindowAverage = _interframeTimeGapStats.getWindowAverage();
+
+    stats._ringBufferFramesAvailable = _ringBuffer.framesAvailable();
+    stats._ringBufferCurrentJitterBufferFrames = 0;
+    stats._ringBufferDesiredJitterBufferFrames = getDesiredJitterBufferFrames();
+    stats._ringBufferStarveCount = _starveCount;
+    stats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount;
+    stats._ringBufferOverflowCount = _ringBuffer.getOverflowCount();
+    stats._ringBufferSilentFramesDropped = 0;
+
+    stats._packetStreamStats = _incomingMixedAudioSequenceNumberStats.getStats();
+
+    return stats;
+}
+
+void Audio::sendDownstreamAudioStatsPacket() {
+    char packet[MAX_PACKET_SIZE];
+
+    // pack header
+    int numBytesPacketHeader = populatePacketHeader(packet, PacketTypeAudioStreamStats);
+    char* dataAt = packet + numBytesPacketHeader;
+
+    // pack append flag
+    quint8 appendFlag = 0;
+    memcpy(dataAt, &appendFlag, sizeof(quint8));
+    dataAt += sizeof(quint8);
+
+    // pack number of stats packed
+    quint16 numStreamStatsToPack = 1;
+    memcpy(dataAt, &numStreamStatsToPack, sizeof(quint16));
+    dataAt += sizeof(quint16);
+
+    // pack downstream audio stream stats
+    AudioStreamStats stats = getDownstreamAudioStreamStats();
+    memcpy(dataAt, &stats, sizeof(AudioStreamStats));
+    dataAt += sizeof(AudioStreamStats);
+
+    // send packet
+    NodeList* nodeList = NodeList::getInstance();
+    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
+    nodeList->writeDatagram(packet, dataAt - packet, audioMixer);
+}
+
 // NOTE: numSamples is the total number of single channel samples, since callers will always call this with stereo
 // data we know that we will have 2x samples for each stereo time sample at the format's sample rate
 void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {

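One caveat on the serialization above: AudioStreamStats is copied into the datagram with memcpy, which assumes the interface and audio-mixer builds agree on the struct's layout, padding, and endianness. A compile-time guard documenting that assumption could look like this (sketch only, C++11, not part of this commit):

    #include <type_traits>

    // AudioStreamStats crosses the wire as raw bytes; keep it trivially copyable.
    static_assert(std::is_trivially_copyable<AudioStreamStats>::value,
                  "AudioStreamStats is memcpy'd into packets");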
Audio.h

@@ -99,6 +99,9 @@ public slots:
     virtual void handleAudioByteArray(const QByteArray& audioByteArray);

+    AudioStreamStats getDownstreamAudioStreamStats() const;
+    void sendDownstreamAudioStatsPacket();
+
     bool switchInputToAudioDevice(const QString& inputDeviceName);
     bool switchOutputToAudioDevice(const QString& outputDeviceName);

     QString getDeviceName(QAudio::Mode mode) const { return (mode == QAudio::AudioInput) ?

Stats.cpp

@@ -349,24 +349,26 @@ void Stats::display(
     char downstreamAudioStatsString[30];

+    AudioStreamStats downstreamAudioStreamStats = audio->getDownstreamAudioStreamStats();
+
     audio->calculatePacketLossRate(audio->getIncomingStreamPacketStatsHistory(), packetLossRate, packetLossRate30s);

-    sprintf(downstreamAudioStatsString, " mix: %.1f%%/%.1f%%, %d/?/%d", packetLossRate*100.0f, packetLossRate30s*100.0f,
-        audio->getDownstreamRingBuffer().framesAvailable(), audio->getDesiredJitterBufferFrames());
+    sprintf(downstreamAudioStatsString, " mix: %.1f%%/%.1f%%, %u/?/%u", packetLossRate*100.0f, packetLossRate30s*100.0f,
+        downstreamAudioStreamStats._ringBufferFramesAvailable, downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames);

     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

-    const MovingMinMaxAvg<quint64>& timeGapStats = audio->getInterframeTimeGapStats();
-
-    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %d/%d", timeGapStats.getMin(), timeGapStats.getMax(),
-        timeGapStats.getAverage(), audio->getStarveCount(), audio->getDownstreamRingBuffer().getOverflowCount());
+    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", downstreamAudioStreamStats._timeGapMin,
+        downstreamAudioStreamStats._timeGapMax, downstreamAudioStreamStats._timeGapAverage,
+        downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);

     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

-    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %d/?", timeGapStats.getWindowMin(), timeGapStats.getWindowMax(),
-        timeGapStats.getWindowAverage(), audio->getConsecutiveNotMixedCount());
+    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/?", downstreamAudioStreamStats._timeGapWindowMin,
+        downstreamAudioStreamStats._timeGapWindowMax, downstreamAudioStreamStats._timeGapWindowAverage,
+        downstreamAudioStreamStats._ringBufferConsecutiveNotMixedCount);

     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
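A caution on the display code: downstreamAudioStatsString is only 30 bytes, and with two %llu time-gap values these lines can plausibly exceed that. A safer variant (sketch only, not part of this commit) would size the buffer generously and use snprintf:

    char downstreamAudioStatsString[120];  // larger buffer; snprintf guards against overflow
    snprintf(downstreamAudioStatsString, sizeof(downstreamAudioStatsString),
        " %llu/%llu/%.2f, %u/%u",
        downstreamAudioStreamStats._timeGapMin, downstreamAudioStreamStats._timeGapMax,
        downstreamAudioStreamStats._timeGapAverage,
        downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);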