Added stats for AudioMixer jitter buffer lengths in Application

wangyix 2014-06-23 17:48:57 -07:00
parent fbdca59d37
commit a4f5ce2215
10 changed files with 96 additions and 10 deletions


@@ -472,7 +472,7 @@ void AudioMixer::run() {
     QElapsedTimer timer;
     timer.start();
-    char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO
+    char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO + sizeof(AudioMixerJitterBuffersStats)
         + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];
     int usecToSleep = BUFFER_SEND_INTERVAL_USECS;
@@ -546,10 +546,19 @@ void AudioMixer::run() {
                 && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
                 prepareMixForListeningNode(node.data());
 
+                // pack header
                 int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
+                char* dataAt = clientMixBuffer + numBytesPacketHeader;
 
-                memcpy(clientMixBuffer + numBytesPacketHeader, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
-                nodeList->writeDatagram(clientMixBuffer, NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader, node);
+                // calculate and pack the jitter buffer size stats for this node
+                AudioMixerJitterBuffersStats jitterBuffersStats;
+                ((AudioMixerClientData*)node->getLinkedData())->calculateJitterBuffersStats(jitterBuffersStats);
+                memcpy(dataAt, &jitterBuffersStats, sizeof(AudioMixerJitterBuffersStats));
+                dataAt += sizeof(AudioMixerJitterBuffersStats);
+
+                // pack mixed audio
+                memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
+                nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node);
 
                 ++_sumListeners;
             }
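With this change each mixed-audio packet is laid out as header, then the raw AudioMixerJitterBuffersStats struct, then the stereo mix. A minimal sketch of that packing and the matching read-back, using stand-in sizes and a plain byte buffer rather than the mixer's real buffers:

```cpp
#include <cassert>
#include <cstring>

// Stand-in for the struct declared in AudioMixerClientData.h (shown further down).
struct AudioMixerJitterBuffersStats {
    int avatarJitterBufferFrames;
    int maxJitterBufferFrames;
    float avgJitterBufferFrames;
};

int main() {
    const int HEADER_BYTES = 24;   // illustrative header size, not the real value
    const int MIX_BYTES = 1024;    // stand-in for NETWORK_BUFFER_LENGTH_BYTES_STEREO
    char packet[HEADER_BYTES + sizeof(AudioMixerJitterBuffersStats) + MIX_BYTES] = {};
    char mix[MIX_BYTES] = {};

    // Mixer side: header first, then the stats, then the mixed samples.
    char* dataAt = packet + HEADER_BYTES;
    AudioMixerJitterBuffersStats stats = { 3, 5, 3.5f };
    memcpy(dataAt, &stats, sizeof(stats));
    dataAt += sizeof(stats);
    memcpy(dataAt, mix, MIX_BYTES);
    int packetBytes = (int)(dataAt + MIX_BYTES - packet);

    // Client side: skip the header, copy the stats back out; the audio follows.
    AudioMixerJitterBuffersStats received;
    memcpy(&received, packet + HEADER_BYTES, sizeof(received));
    assert(received.maxJitterBufferFrames == 5);
    assert(packetBytes == (int)sizeof(packet));
    return 0;
}
```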


@@ -15,6 +15,7 @@
 #include <UUID.h>
 #include "InjectedAudioRingBuffer.h"
+#include "SharedUtil.h"
 #include "AudioMixerClientData.h"
@@ -138,3 +139,27 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
         i++;
     }
 }
+
+void AudioMixerClientData::calculateJitterBuffersStats(AudioMixerJitterBuffersStats& stats) const {
+    int avatarJitterBufferFrames = 0;
+    int maxJitterBufferFrames = 0;
+    int sumJitterBufferFrames = 0;
+    for (int i = 0; i < _ringBuffers.size(); i++) {
+        int bufferJitterFrames = _ringBuffers[i]->getCurrentJitterBufferFrames();
+        if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Microphone) {
+            avatarJitterBufferFrames = bufferJitterFrames;
+        }
+        if (bufferJitterFrames > maxJitterBufferFrames) {
+            maxJitterBufferFrames = bufferJitterFrames;
+        }
+        sumJitterBufferFrames += bufferJitterFrames;
+    }
+    stats.avatarJitterBufferFrames = avatarJitterBufferFrames;
+    stats.maxJitterBufferFrames = maxJitterBufferFrames;
+    stats.avgJitterBufferFrames = (float)sumJitterBufferFrames / (float)_ringBuffers.size();
+}
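The new method walks every ring buffer the mixer holds for this client (the avatar's microphone stream plus any injected streams): it remembers the microphone buffer's current frame count, tracks the maximum, and averages over all buffers. Dividing by _ringBuffers.size() is safe here because the mixer only asks for stats on nodes that have an avatar audio ring buffer. A standalone sketch of the same aggregation over plain integers (hypothetical names, C++11, independent of the ring-buffer classes):

```cpp
#include <cstdio>
#include <vector>

// micIndex marks which entry is the microphone buffer; pass -1 if there is none.
void aggregateJitterFrames(const std::vector<int>& framesPerBuffer, int micIndex,
                           int& micFrames, int& maxFrames, float& avgFrames) {
    micFrames = 0;
    maxFrames = 0;
    int sum = 0;
    for (int i = 0; i < (int)framesPerBuffer.size(); i++) {
        int frames = framesPerBuffer[i];
        if (i == micIndex) {
            micFrames = frames;
        }
        if (frames > maxFrames) {
            maxFrames = frames;
        }
        sum += frames;
    }
    // Guarded here; the real method relies on at least one buffer being present.
    avgFrames = framesPerBuffer.empty() ? 0.0f : (float)sum / (float)framesPerBuffer.size();
}

int main() {
    int mic = 0, max = 0;
    float avg = 0.0f;
    aggregateJitterFrames({ 3, 7, 2 }, 0, mic, max, avg);  // mic buffer first, two injectors
    printf("mic=%d max=%d avg=%.1f\n", mic, max, avg);     // prints: mic=3 max=7 avg=4.0
    return 0;
}
```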


@@ -14,10 +14,21 @@
 #include <AABox.h>
 #include <NodeData.h>
-#include <PositionalAudioRingBuffer.h>
+#include "PositionalAudioRingBuffer.h"
 
 #include "AvatarAudioRingBuffer.h"
 
+class AudioMixerJitterBuffersStats {
+public:
+    AudioMixerJitterBuffersStats()
+        : avatarJitterBufferFrames(0), maxJitterBufferFrames(0), avgJitterBufferFrames(0)
+    {}
+
+    int avatarJitterBufferFrames;
+    int maxJitterBufferFrames;
+    float avgJitterBufferFrames;
+};
+
 class AudioMixerClientData : public NodeData {
 public:
     AudioMixerClientData();
@@ -29,6 +40,8 @@ public:
     int parseData(const QByteArray& packet);
     void checkBuffersBeforeFrameSend(AABox* checkSourceZone = NULL, AABox* listenerZone = NULL);
     void pushBuffersAfterFrameSend();
+
+    void calculateJitterBuffersStats(AudioMixerJitterBuffersStats& stats) const;
 private:
     QList<PositionalAudioRingBuffer*> _ringBuffers;
 };
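The stats class is copied into and out of packets with memcpy, so the assignment client and the interface have to agree on its size and layout; as a plain aggregate of two ints and a float, that holds as long as both ends are built for the same architecture (same endianness and padding). A compile-time guard one could place next to the definition, not part of this commit and assuming a C++11-or-later toolchain:

```cpp
#include <type_traits>

// Stand-in mirroring the class above; in the real header the asserts would sit
// next to the actual definition.
class AudioMixerJitterBuffersStats {
public:
    AudioMixerJitterBuffersStats()
        : avatarJitterBufferFrames(0), maxJitterBufferFrames(0), avgJitterBufferFrames(0) {}
    int avatarJitterBufferFrames;
    int maxJitterBufferFrames;
    float avgJitterBufferFrames;
};

static_assert(std::is_trivially_copyable<AudioMixerJitterBuffersStats>::value,
              "stats struct must stay safe to memcpy over the wire");
static_assert(sizeof(AudioMixerJitterBuffersStats) == 2 * sizeof(int) + sizeof(float),
              "unexpected padding would desynchronize sender and receiver");

int main() { return 0; }
```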


@@ -102,7 +102,8 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _samplesPerScope(NETWORK_SAMPLES_PER_FRAME * _framesPerScope),
     _scopeInput(0),
     _scopeOutputLeft(0),
-    _scopeOutputRight(0)
+    _scopeOutputRight(0),
+    _audioMixerJitterBufferStats()
 {
     // clear the array of locally injected samples
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
@@ -800,7 +801,16 @@ void Audio::toggleStereoInput() {
     }
 }
 
+void Audio::parseAudioMixerJitterBuffersStats(const QByteArray& audioByteArray) {
+    int numBytesPacketHeader = numBytesForPacketHeader(audioByteArray);
+    const char* dataAt = reinterpret_cast<const char*>(audioByteArray.data() + numBytesPacketHeader);
+    memcpy(&_audioMixerJitterBufferStats, dataAt, sizeof(AudioMixerJitterBuffersStats));
+}
+
 void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
+    parseAudioMixerJitterBuffersStats(audioByteArray);
+
     _ringBuffer.parseData(audioByteArray);
 
     float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
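parseAudioMixerJitterBuffersStats trusts that every mixed-audio packet is at least header plus stats long and copies the struct straight out of the QByteArray. A short sketch of the same read with an explicit length check, using a plain buffer and a hypothetical helper name (the check is not in the commit):

```cpp
#include <cstring>

// Stand-in for the struct shared with the audio mixer.
struct AudioMixerJitterBuffersStats {
    int avatarJitterBufferFrames;
    int maxJitterBufferFrames;
    float avgJitterBufferFrames;
};

// Hypothetical defensive variant: only copy the stats when the packet is long enough,
// otherwise keep the previously received values.
bool tryParseJitterStats(const char* packet, int packetBytes, int headerBytes,
                         AudioMixerJitterBuffersStats& statsOut) {
    if (packetBytes < headerBytes + (int)sizeof(AudioMixerJitterBuffersStats)) {
        return false;   // truncated or malformed packet
    }
    memcpy(&statsOut, packet + headerBytes, sizeof(statsOut));
    return true;
}
```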


@@ -16,6 +16,7 @@
 #include <vector>
 
 #include "InterfaceConfig.h"
+#include "../../assignment-client/src/audio/AudioMixerClientData.h"
 
 #include <QAudio>
 #include <QAudioInput>
@@ -102,6 +103,8 @@ public slots:
     float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
     void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }
 
+    const AudioMixerJitterBuffersStats& getAudioMixerJitterBuffersStats() const { return _audioMixerJitterBufferStats; }
+
 signals:
     bool muteToggled();
     void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
@@ -216,6 +219,8 @@ private:
     void renderGrid(const float* color, int x, int y, int width, int height, int rows, int cols);
     void renderLineStrip(const float* color, int x, int y, int n, int offset, const QByteArray* byteArray);
 
+    void parseAudioMixerJitterBuffersStats(const QByteArray& audioByteArray);
+
     // Audio scope data
     static const unsigned int NETWORK_SAMPLES_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
     static const unsigned int DEFAULT_FRAMES_PER_SCOPE = 5;
@@ -233,6 +238,7 @@
     QByteArray* _scopeOutputLeft;
     QByteArray* _scopeOutputRight;
 
+    AudioMixerJitterBuffersStats _audioMixerJitterBufferStats;
 };


@@ -286,7 +286,7 @@ void Stats::display(
         pingVoxel = totalPingVoxel/voxelServerCount;
     }
 
-    lines = _expanded ? 4 : 3;
+    lines = _expanded ? 6 : 5;
     drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
     horizontalOffset += 5;
@@ -302,7 +302,6 @@ void Stats::display(
     char audioPing[30];
     sprintf(audioPing, "Audio ping: %d", pingAudio);
-
     char avatarPing[30];
     sprintf(avatarPing, "Avatar ping: %d", pingAvatar);
@@ -324,10 +323,27 @@ void Stats::display(
             drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelMaxPing, color);
         }
 
+        static const float MSECS_PER_FRAME = (float)NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * (float)MSECS_PER_SECOND / (float)SAMPLE_RATE;
+
+        const AudioMixerJitterBuffersStats& audioMixerJitterBufferStats =
+            Application::getInstance()->getAudio()->getAudioMixerJitterBuffersStats();
+
+        char* audioMixerJitterBuffersStatsLabel = "AudioMixer j-buffers msecs:";
+        char audioMixerJitterBuffersStats[30];
+        sprintf(audioMixerJitterBuffersStats, "mic/max/avg: %.1f / %.1f / %.1f", audioMixerJitterBufferStats.avatarJitterBufferFrames * MSECS_PER_FRAME,
+            audioMixerJitterBufferStats.maxJitterBufferFrames * MSECS_PER_FRAME, audioMixerJitterBufferStats.avgJitterBufferFrames * MSECS_PER_FRAME);
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, scale, rotation, font, audioMixerJitterBuffersStatsLabel, color);
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, scale, rotation, font, audioMixerJitterBuffersStats, color);
+
         verticalOffset = 0;
         horizontalOffset = _lastHorizontalOffset + _generalStatsWidth + _pingStatsWidth + 2;
     }
 
     MyAvatar* myAvatar = Application::getInstance()->getAvatar();
     glm::vec3 avatarPos = myAvatar->getPosition();
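The overlay converts the mixer's frame counts to milliseconds with MSECS_PER_FRAME = samples per channel per frame × 1000 ÷ sample rate; it also looks like the formatted mic/max/avg string can outgrow the 30-byte buffer above once all three values reach double-digit milliseconds. A worked example of the conversion and formatting with assumed constants (256 samples per channel at 24 kHz, roughly 10.7 ms per frame; the real values come from the audio library's constants):

```cpp
#include <cstdio>

int main() {
    // Assumed example values; the real constants come from the audio library.
    const float SAMPLES_PER_FRAME_PER_CHANNEL = 256.0f;
    const float SAMPLE_RATE = 24000.0f;
    const float MSECS_PER_FRAME = SAMPLES_PER_FRAME_PER_CHANNEL * 1000.0f / SAMPLE_RATE;  // ~10.7 ms

    // Frames reported by the mixer, converted to milliseconds as in the stats line above.
    int micFrames = 3, maxFrames = 5;
    float avgFrames = 3.5f;
    char line[64];
    snprintf(line, sizeof(line), "mic/max/avg: %.1f / %.1f / %.1f",
             micFrames * MSECS_PER_FRAME, maxFrames * MSECS_PER_FRAME, avgFrames * MSECS_PER_FRAME);
    printf("%s\n", line);  // mic/max/avg: 32.0 / 53.3 / 37.3
    return 0;
}
```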


@@ -16,6 +16,7 @@
 #include <QtCore/QDebug>
 
 #include "PacketHeaders.h"
+#include "../../../assignment-client/src/audio/AudioMixerClientData.h"
 
 #include "AudioRingBuffer.h"
@@ -63,8 +64,11 @@ void AudioRingBuffer::resizeForFrameSize(qint64 numFrameSamples) {
 }
 
 int AudioRingBuffer::parseData(const QByteArray& packet) {
-    int numBytesPacketHeader = numBytesForPacketHeader(packet);
-    return writeData(packet.data() + numBytesPacketHeader, packet.size() - numBytesPacketHeader);
+    int numBytesBeforeAudioData = numBytesForPacketHeader(packet);
+    if (packetTypeForPacket(packet) == PacketTypeMixedAudio) {
+        numBytesBeforeAudioData += sizeof(AudioMixerJitterBuffersStats);
+    }
+    return writeData(packet.data() + numBytesBeforeAudioData, packet.size() - numBytesBeforeAudioData);
 }
 
 qint64 AudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) {
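Only PacketTypeMixedAudio packets carry the stats block, so the offset from the start of a packet to its first audio sample now depends on the packet type, and both the mixer and this ring buffer derive it from sizeof(AudioMixerJitterBuffersStats) — which is why the header is included from the assignment-client tree. A small sketch of that offset arithmetic with stand-in types:

```cpp
#include <cassert>
#include <cstddef>

// Stand-ins for the real packet types and stats struct.
enum PacketType { PacketTypeMicrophoneAudioNoEcho, PacketTypeMixedAudio };

struct AudioMixerJitterBuffersStats {
    int avatarJitterBufferFrames;
    int maxJitterBufferFrames;
    float avgJitterBufferFrames;
};

// Offset to the first audio byte, mirroring the branch added to parseData()
// (headerBytes is assumed to be known from the packet header).
size_t audioPayloadOffset(PacketType type, size_t headerBytes) {
    size_t offset = headerBytes;
    if (type == PacketTypeMixedAudio) {
        offset += sizeof(AudioMixerJitterBuffersStats);  // skip the stats block
    }
    return offset;
}

int main() {
    assert(audioPayloadOffset(PacketTypeMicrophoneAudioNoEcho, 24) == 24);
    assert(audioPayloadOffset(PacketTypeMixedAudio, 24) == 24 + sizeof(AudioMixerJitterBuffersStats));
    return 0;
}
```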


@@ -232,7 +232,7 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
 
 void PositionalAudioRingBuffer::updateDesiredJitterBufferFrames() {
-    const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;
+    static const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;
 
     if (_interframeTimeGapStats.hasNewWindowMaxGapAvailable()) {
         _desiredJitterBufferFrames = ceilf((float)_interframeTimeGapStats.getWindowMaxGap() / USECS_PER_FRAME);


@@ -76,6 +76,7 @@ public:
     void setListenerUnattenuatedZone(AABox* listenerUnattenuatedZone) { _listenerUnattenuatedZone = listenerUnattenuatedZone; }
 
     int getSamplesPerFrame() const { return _isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }
+    int getCurrentJitterBufferFrames() const { return _currentJitterBufferFrames; }
 
 protected:
     // disallow copying of PositionalAudioRingBuffer objects


@@ -51,6 +51,8 @@ PacketVersion versionForPacketType(PacketType type) {
         case PacketTypeMicrophoneAudioWithEcho:
         case PacketTypeSilentAudioFrame:
             return 1;
+        case PacketTypeMixedAudio:
+            return 1;
         case PacketTypeAvatarData:
             return 3;
         case PacketTypeAvatarIdentity:
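Because the payload of a mixed-audio packet changed, its packet version is set to 1 so that a peer built before this commit can be told apart from one built after it; otherwise an old interface would read the stats bytes as audio samples. A tiny sketch of the kind of receive-side guard this enables, with illustrative names (the real version handling lives in the networking code):

```cpp
// Illustrative guard enabled by the version bump; names are hypothetical.
typedef char PacketVersion;

const PacketVersion EXPECTED_MIXED_AUDIO_VERSION = 1;

// An older peer still sending version 0 has no stats block in its payload, so its
// packets should not be parsed with the new layout.
bool mixedAudioPacketUsable(PacketVersion versionInPacket) {
    return versionInPacket == EXPECTED_MIXED_AUDIO_VERSION;
}
```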