move audio stats into member of Audio

Stephen Birarda 2014-12-16 14:47:24 -08:00
parent 60447e48cb
commit 1db12453d4
13 changed files with 527 additions and 396 deletions
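The change collapses the loose stats members on Audio (the mixer stream stats, the MovingMinMaxAvg windows, the packet-gap tracker) into one AudioIOStats member, and splits the GL stats overlay out into a new AudioIOStatsRenderer. A minimal sketch of the resulting shape, using stand-in types so it compiles on its own (the real classes carry far more state):

#include <QHash>
#include <QUuid>

class MixedProcessedAudioStream {};                     // stand-in for the real stream
struct AudioStreamStats { int _framesAvailable = 0; };  // stand-in for the real POD

class AudioIOStats {
public:
    AudioIOStats(MixedProcessedAudioStream* receivedAudioStream)
        : _receivedAudioStream(receivedAudioStream) {}
    void reset() { _mixerInjectedStreamStatsMap.clear(); }  // one place to clear everything
    void sentPacket() { /* update the inter-packet gap window */ }
private:
    MixedProcessedAudioStream* _receivedAudioStream;
    AudioStreamStats _mixerAvatarStreamStats;
    QHash<QUuid, AudioStreamStats> _mixerInjectedStreamStatsMap;
};

class Audio {
public:
    Audio() : _stats(&_receivedAudioStream) {}  // mirrors the new initializer in the diff
private:
    MixedProcessedAudioStream _receivedAudioStream;
    AudioIOStats _stats;
};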

assignment-client/src/audio/AudioMixer.cpp

@ -354,7 +354,8 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
for (int s = 0; s < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; s++) {
_preMixSamples[s] = glm::clamp(_preMixSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationAndFade),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE);
}
}
@ -423,7 +424,8 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
// Actually mix the _preMixSamples into the _mixSamples here.
for (int s = 0; s < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; s++) {
_mixSamples[s] = glm::clamp(_mixSamples[s] + _preMixSamples[s], MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_mixSamples[s] = glm::clamp(_mixSamples[s] + _preMixSamples[s], AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE);
}
return 1;
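Both hunks above are the same guard: samples are summed in a wider int and clamped back into the int16 range so a hot mix saturates instead of wrapping. A standalone illustration (std::clamp stands in for the diff's glm::clamp):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const int MIN_SAMPLE_VALUE = -32768;  // AudioConstants::MIN_SAMPLE_VALUE
    const int MAX_SAMPLE_VALUE = 32767;   // AudioConstants::MAX_SAMPLE_VALUE
    int16_t mixSample = 30000;
    int16_t preMixSample = 10000;
    // promote to int before adding so the sum (40000) can't wrap
    int16_t clamped = (int16_t)std::clamp((int)mixSample + preMixSample,
                                          MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
    int16_t wrapped = (int16_t)(mixSample + preMixSample);  // wraps to -25536 on typical targets
    printf("clamped=%d wrapped=%d\n", clamped, wrapped);    // clamped=32767
    return 0;
}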
@ -700,7 +702,7 @@ void AudioMixer::run() {
char clientMixBuffer[MAX_PACKET_SIZE];
int usecToSleep = BUFFER_SEND_INTERVAL_USECS;
int usecToSleep = AudioConstants::NETWORK_FRAME_USECS;
const int TRAILING_AVERAGE_FRAMES = 100;
int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;
@ -719,7 +721,7 @@ void AudioMixer::run() {
}
_trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
+ (usecToSleep * CURRENT_FRAME_RATIO / (float) BUFFER_SEND_INTERVAL_USECS);
+ (usecToSleep * CURRENT_FRAME_RATIO / (float) AudioConstants::NETWORK_FRAME_USECS);
float lastCutoffRatio = _performanceThrottlingRatio;
bool hasRatioChanged = false;
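_trailingSleepRatio is an exponential moving average of the fraction of each frame the mixer spends sleeping, now expressed against NETWORK_FRAME_USECS. The ratio constants live outside this hunk, so the values below are assumptions matching the usual 1/N weighting:

#include <cstdio>

int main() {
    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;  // assumed
    const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;    // assumed
    const float NETWORK_FRAME_USECS = 10666.0f;

    float trailingSleepRatio = 1.0f;  // start out fully idle
    for (int frame = 0; frame < 300; frame++) {
        float usecToSleep = 2000.0f;  // suppose mixing leaves only 2ms of slack per frame
        trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * trailingSleepRatio)
            + (usecToSleep * CURRENT_FRAME_RATIO / NETWORK_FRAME_USECS);
    }
    // converges toward 2000/10666 ~ 0.19, i.e. the mixer is ~81% busy
    printf("trailingSleepRatio = %.3f\n", trailingSleepRatio);
    return 0;
}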
@ -841,7 +843,7 @@ void AudioMixer::run() {
break;
}
usecToSleep = (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - timer.nsecsElapsed() / 1000; // ns to us
usecToSleep = (++nextFrame * AudioConstants::NETWORK_FRAME_USECS) - timer.nsecsElapsed() / 1000; // ns to us
if (usecToSleep > 0) {
usleep(usecToSleep);
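The sleep computation targets absolute frame deadlines (frame index times frame length, minus elapsed time) rather than sleeping a fixed interval, so scheduling error never accumulates across frames. The same pattern, sketched with std::chrono in place of the QElapsedTimer used here:

#include <chrono>
#include <cstdio>
#include <thread>

int main() {
    using namespace std::chrono;
    const microseconds NETWORK_FRAME_USECS(10666);  // one network audio frame
    auto start = steady_clock::now();
    for (int nextFrame = 0; nextFrame < 5;) {
        // ... mix and send one frame's worth of audio here ...
        auto elapsed = duration_cast<microseconds>(steady_clock::now() - start);
        auto usecToSleep = (++nextFrame * NETWORK_FRAME_USECS) - elapsed;
        if (usecToSleep.count() > 0) {
            std::this_thread::sleep_for(usecToSleep);  // wake at the next absolute deadline
        }
    }
    printf("5 frames in %lld us\n",
           (long long)duration_cast<microseconds>(steady_clock::now() - start).count());
    return 0;
}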

interface/src/Audio.cpp

@ -51,9 +51,6 @@
static const int NUMBER_OF_NOISE_SAMPLE_FRAMES = 300;
static const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10;
static const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.0f / AudioConstants::NETWORK_FRAME_MSECS);
// Mute icon configuration
static const int MUTE_ICON_SIZE = 24;
@ -105,15 +102,9 @@ Audio::Audio() :
_iconPulseTimeReference(usecTimestampNow()),
_noiseSourceEnabled(false),
_toneSourceEnabled(true),
_statsEnabled(false),
_statsShowInjectedStreams(false),
_outgoingAvatarAudioSequenceNumber(0),
_audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AudioConstants::NETWORK_FRAME_MSECS * CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_lastSentAudioPacket(0),
_packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS),
_audioOutputIODevice(_receivedAudioStream)
_audioOutputIODevice(_receivedAudioStream),
_stats(&_receivedAudioStream)
{
// clear the array of locally injected samples
memset(_localProceduralSamples, 0, AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL);
@ -134,29 +125,16 @@ void Audio::init(QGLWidget *parent) {
void Audio::reset() {
_receivedAudioStream.reset();
resetStats();
_stats.reset();
_noiseSource.reset();
_toneSource.reset();
_sourceGain.reset();
_inputGain.reset();
}
void Audio::resetStats() {
_receivedAudioStream.resetStats();
_audioMixerAvatarStreamAudioStats = AudioStreamStats();
_audioMixerInjectedStreamAudioStatsMap.clear();
_audioInputMsecsReadStats.reset();
_inputRingBufferMsecsAvailableStats.reset();
_audioOutputMsecsUnplayedStats.reset();
_packetSentTimeGaps.reset();
}
void Audio::audioMixerKilled() {
_outgoingAvatarAudioSequenceNumber = 0;
resetStats();
_stats.reset();
}
@ -573,12 +551,12 @@ void Audio::addReverb(ty_gverb* gverb, int16_t* samplesData, int numSamples, QAu
if (j == sample) {
// left channel
int lResult = glm::clamp((int)(samplesData[j] * dryFraction + lValue * wetFraction),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
AudioConstants::MIN_SAMPLE_VALUE, AudioConstants::MAX_SAMPLE_VALUE);
samplesData[j] = (int16_t)lResult;
} else if (j == (sample + 1)) {
// right channel
int rResult = glm::clamp((int)(samplesData[j] * dryFraction + rValue * wetFraction),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
AudioConstants::MIN_SAMPLE_VALUE, AudioConstants::MAX_SAMPLE_VALUE);
samplesData[j] = (int16_t)rResult;
} else {
// ignore channels above 2
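The reverb mix above is a standard wet/dry crossfade, clamped the same way as the mixer paths. Standalone sketch of one sample, assuming dry = 1 - wet (a relationship the shown code doesn't pin down):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const int MIN_SAMPLE_VALUE = -32768, MAX_SAMPLE_VALUE = 32767;
    float wetFraction = 0.25f;               // reverb level
    float dryFraction = 1.0f - wetFraction;  // assumed complement
    int16_t drySample = 12000;
    float wetValue = 20000.0f;               // reverberated value, as from ty_gverb
    int16_t out = (int16_t)std::clamp((int)(drySample * dryFraction + wetValue * wetFraction),
                                      MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
    printf("out=%d\n", out);                 // 12000*0.75 + 20000*0.25 = 14000
    return 0;
}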
@ -674,7 +652,7 @@ void Audio::handleAudioInput() {
_inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size());
float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
_audioInputMsecsReadStats.update(audioInputMsecsRead);
_stats.updateInputMsecsRead(audioInputMsecsRead);
while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {
@ -744,7 +722,7 @@ void Audio::handleAudioInput() {
measuredDcOffset += networkAudioSamples[i];
networkAudioSamples[i] -= (int16_t) _dcOffset;
thisSample = fabsf(networkAudioSamples[i]);
if (thisSample >= ((float)MAX_16_BIT_AUDIO_SAMPLE * CLIPPING_THRESHOLD)) {
if (thisSample >= ((float)AudioConstants::MAX_SAMPLE_VALUE * CLIPPING_THRESHOLD)) {
_timeSinceLastClip = 0.0f;
}
loudness += thisSample;
@ -895,16 +873,7 @@ void Audio::handleAudioInput() {
currentPacketPtr += numNetworkBytes;
}
// first time this is 0
if (_lastSentAudioPacket == 0) {
_lastSentAudioPacket = usecTimestampNow();
} else {
quint64 now = usecTimestampNow();
quint64 gap = now - _lastSentAudioPacket;
_packetSentTimeGaps.update(gap);
_lastSentAudioPacket = now;
}
_stats.sentPacket();
int packetBytes = currentPacketPtr - audioDataPacket;
nodeList->writeDatagram(audioDataPacket, packetBytes, audioMixer);
@ -972,36 +941,6 @@ void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
}
void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
int numBytesPacketHeader = numBytesForPacketHeader(packet);
const char* dataAt = packet.constData() + numBytesPacketHeader;
// parse the appendFlag, clear injected audio stream stats if 0
quint8 appendFlag = *(reinterpret_cast<const quint8*>(dataAt));
dataAt += sizeof(quint8);
if (!appendFlag) {
_audioMixerInjectedStreamAudioStatsMap.clear();
}
// parse the number of stream stats structs to follow
quint16 numStreamStats = *(reinterpret_cast<const quint16*>(dataAt));
dataAt += sizeof(quint16);
// parse the stream stats
AudioStreamStats streamStats;
for (quint16 i = 0; i < numStreamStats; i++) {
memcpy(&streamStats, dataAt, sizeof(AudioStreamStats));
dataAt += sizeof(AudioStreamStats);
if (streamStats._streamType == PositionalAudioStream::Microphone) {
_audioMixerAvatarStreamAudioStats = streamStats;
} else {
_audioMixerInjectedStreamAudioStatsMap[streamStats._streamIdentifier] = streamStats;
}
}
}
void Audio::parseAudioEnvironmentData(const QByteArray &packet) {
int numBytesPacketHeader = numBytesForPacketHeader(packet);
const char* dataAt = packet.constData() + numBytesPacketHeader;
@ -1023,44 +962,6 @@ void Audio::parseAudioEnvironmentData(const QByteArray &packet) {
}
}
void Audio::sendDownstreamAudioStatsPacket() {
// since this function is called every second, we'll sample for some of our stats here
_inputRingBufferMsecsAvailableStats.update(getInputRingBufferMsecsAvailable());
_audioOutputMsecsUnplayedStats.update(getAudioOutputMsecsUnplayed());
// also, call _receivedAudioStream's per-second callback
_receivedAudioStream.perSecondCallbackForUpdatingStats();
char packet[MAX_PACKET_SIZE];
// pack header
int numBytesPacketHeader = populatePacketHeader(packet, PacketTypeAudioStreamStats);
char* dataAt = packet + numBytesPacketHeader;
// pack append flag
quint8 appendFlag = 0;
memcpy(dataAt, &appendFlag, sizeof(quint8));
dataAt += sizeof(quint8);
// pack number of stats packed
quint16 numStreamStatsToPack = 1;
memcpy(dataAt, &numStreamStatsToPack, sizeof(quint16));
dataAt += sizeof(quint16);
// pack downstream audio stream stats
AudioStreamStats stats = _receivedAudioStream.getAudioStreamStats();
memcpy(dataAt, &stats, sizeof(AudioStreamStats));
dataAt += sizeof(AudioStreamStats);
// send packet
NodeList* nodeList = NodeList::getInstance();
SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
nodeList->writeDatagram(packet, dataAt - packet, audioMixer);
}
bool Audio::mousePressEvent(int x, int y) {
if (_iconBounds.contains(x, y)) {
toggleMute();
@ -1161,14 +1062,17 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
_lastInputLoudness = 0;
monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
monoInput[i] = glm::clamp(monoInput[i] + collisionSample,
AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE);
_lastInputLoudness += fabsf(monoInput[i]);
_lastInputLoudness /= numSamples;
_lastInputLoudness /= MAX_SAMPLE_VALUE;
_lastInputLoudness /= AudioConstants::MAX_SAMPLE_VALUE;
_localProceduralSamples[i] = glm::clamp(_localProceduralSamples[i] + collisionSample,
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE);
_collisionSoundMagnitude *= _collisionSoundDuration;
}
@ -1192,14 +1096,17 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
_lastInputLoudness = 0;
monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
monoInput[i] = glm::clamp(monoInput[i] + collisionSample,
AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE);
_lastInputLoudness += fabsf(monoInput[i]);
_lastInputLoudness /= numSamples;
_lastInputLoudness /= MAX_SAMPLE_VALUE;
_lastInputLoudness /= AudioConstants::MAX_SAMPLE_VALUE;
_localProceduralSamples[i] = glm::clamp(_localProceduralSamples[i] + collisionSample,
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE);
_drumSoundVolume *= (1.0f - _drumSoundDecay);
}
@ -1332,213 +1239,6 @@ void Audio::renderToolBox(int x, int y, bool boxed) {
glDisable(GL_TEXTURE_2D);
}
void Audio::toggleStats() {
_statsEnabled = !_statsEnabled;
}
void Audio::toggleStatsShowInjectedStreams() {
_statsShowInjectedStreams = !_statsShowInjectedStreams;
}
void Audio::renderStats(const float* color, int width, int height) {
if (!_statsEnabled) {
return;
}
const int linesWhenCentered = _statsShowInjectedStreams ? 34 : 27;
const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * linesWhenCentered;
int lines = _statsShowInjectedStreams ? _audioMixerInjectedStreamAudioStatsMap.size() * 7 + 27 : 27;
int statsHeight = STATS_HEIGHT_PER_LINE * lines;
static const float backgroundColor[4] = { 0.2f, 0.2f, 0.2f, 0.6f };
int x = std::max((width - (int)STATS_WIDTH) / 2, 0);
int y = std::max((height - CENTERED_BACKGROUND_HEIGHT) / 2, 0);
int backgroundHeight = statsHeight;
glColor4fv(backgroundColor);
glBegin(GL_QUADS);
glVertex2i(x, y);
glVertex2i(x + STATS_WIDTH, y);
glVertex2i(x + STATS_WIDTH, y + backgroundHeight);
glVertex2i(x , y + backgroundHeight);
glEnd();
glColor4f(1, 1, 1, 1);
int horizontalOffset = x + 5;
int verticalOffset = y;
float scale = 0.10f;
float rotation = 0.0f;
int font = 2;
char latencyStatString[512];
const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f;
AudioStreamStats downstreamAudioStreamStats = _receivedAudioStream.getAudioStreamStats();
SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer);
if (!audioMixerNodePointer.isNull()) {
audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage();
inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable();
networkRoundtripLatency = audioMixerNodePointer->getPingMs();
mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
outputRingBufferLatency = downstreamAudioStreamStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage();
}
float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency;
sprintf(latencyStatString, " Audio input buffer: %7.2fms - avg msecs of samples read to the input ring buffer in last 10s", audioInputBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Input ring buffer: %7.2fms - avg msecs of samples in input ring buffer in last 10s", inputRingBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Network to mixer: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " AudioMixer ring buffer: %7.2fms - avg msecs of samples in audio mixer's ring buffer in last 10s", mixerRingBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Network to client: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Output ring buffer: %7.2fms - avg msecs of samples in output ring buffer in last 10s", outputRingBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Audio output buffer: %7.2fms - avg msecs of samples in audio output buffer in last 10s", audioOutputBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " TOTAL: %7.2fms\n", totalLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
char clientUpstreamMicLabelString[] = "Upstream Mic Audio Packets Sent Gaps (by client):";
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, clientUpstreamMicLabelString, color);
char stringBuffer[512];
sprintf(stringBuffer, " Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s",
formatUsecTime(_packetSentTimeGaps.getMin()).toLatin1().data(),
formatUsecTime(_packetSentTimeGaps.getMax()).toLatin1().data(),
formatUsecTime(_packetSentTimeGaps.getAverage()).toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
sprintf(stringBuffer, " Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s",
formatUsecTime(_packetSentTimeGaps.getWindowMin()).toLatin1().data(),
formatUsecTime(_packetSentTimeGaps.getWindowMax()).toLatin1().data(),
formatUsecTime(_packetSentTimeGaps.getWindowAverage()).toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
char upstreamMicLabelString[] = "Upstream mic audio stats (received and reported by audio-mixer):";
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamMicLabelString, color);
renderAudioStreamStats(_audioMixerAvatarStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
char downstreamLabelString[] = "Downstream mixed audio stats:";
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
renderAudioStreamStats(downstreamAudioStreamStats, horizontalOffset, verticalOffset, scale, rotation, font, color, true);
if (_statsShowInjectedStreams) {
foreach(const AudioStreamStats& injectedStreamAudioStats, _audioMixerInjectedStreamAudioStatsMap) {
verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
char upstreamInjectedLabelString[512];
sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s",
injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color);
renderAudioStreamStats(injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
}
}
}
void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int horizontalOffset, int& verticalOffset,
float scale, float rotation, int font, const float* color, bool isDownstreamStats) {
char stringBuffer[512];
sprintf(stringBuffer, " Packet loss | overall: %5.2f%% (%d lost), last_30s: %5.2f%% (%d lost)",
streamStats._packetStreamStats.getLostRate() * 100.0f,
streamStats._packetStreamStats._lost,
streamStats._packetStreamWindowStats.getLostRate() * 100.0f,
streamStats._packetStreamWindowStats._lost);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
if (isDownstreamStats) {
const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d",
streamStats._desiredJitterBufferFrames,
streamStats._framesAvailableAverage,
(int)(getAudioOutputAverageMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS),
streamStats._framesAvailable,
(int)(getAudioOutputMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS));
} else {
sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u",
streamStats._desiredJitterBufferFrames,
streamStats._framesAvailableAverage,
streamStats._framesAvailable);
}
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
sprintf(stringBuffer, " Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u",
streamStats._starveCount,
streamStats._consecutiveNotMixedCount,
streamStats._framesDropped,
streamStats._overflowCount);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
sprintf(stringBuffer, " Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s",
formatUsecTime(streamStats._timeGapMin).toLatin1().data(),
formatUsecTime(streamStats._timeGapMax).toLatin1().data(),
formatUsecTime(streamStats._timeGapAverage).toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
sprintf(stringBuffer, " Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s",
formatUsecTime(streamStats._timeGapWindowMin).toLatin1().data(),
formatUsecTime(streamStats._timeGapWindowMax).toLatin1().data(),
formatUsecTime(streamStats._timeGapWindowAverage).toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
}
void Audio::outputFormatChanged() {
int outputFormatChannelCountTimesSampleRate = _outputFormat.channelCount() * _outputFormat.sampleRate();
_outputFrameSize = AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * outputFormatChannelCountTimesSampleRate / _desiredOutputFormat.sampleRate();
@ -1691,12 +1391,6 @@ float Audio::getAudioOutputMsecsUnplayed() const {
return msecsAudioOutputUnplayed;
}
float Audio::getInputRingBufferMsecsAvailable() const {
int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * sizeof(int16_t);
float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
return msecsInInputRingBuffer;
}
qint64 Audio::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
int samplesRequested = maxSize / sizeof(int16_t);
int samplesPopped;

interface/src/Audio.h

@ -31,10 +31,10 @@
#include <StDev.h>
#include "InterfaceConfig.h"
#include "audio/AudioIOStats.h"
#include "AudioStreamStats.h"
#include "Recorder.h"
#include "RingBufferHistory.h"
#include "MovingMinMaxAvg.h"
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
@ -60,9 +60,6 @@ extern "C" {
static const int NUM_AUDIO_CHANNELS = 2;
static const int MAX_16_BIT_AUDIO_SAMPLE = 32767;
class QAudioInput;
class QAudioOutput;
class QIODevice;
@ -108,36 +105,28 @@ public:
bool mousePressEvent(int x, int y);
void renderToolBox(int x, int y, bool boxed);
void renderStats(const float* color, int width, int height);
float getInputRingBufferMsecsAvailable() const;
float getInputRingBufferAverageMsecsAvailable() const { return (float)_inputRingBufferMsecsAvailableStats.getWindowAverage(); }
float getAudioOutputMsecsUnplayed() const;
float getAudioOutputAverageMsecsUnplayed() const { return (float)_audioOutputMsecsUnplayedStats.getWindowAverage(); }
void setRecorder(RecorderPointer recorder) { _recorder = recorder; }
static const float CALLBACK_ACCELERATOR_RATIO;
friend class DependencyManager;
public slots:
void start();
void stop();
void addReceivedAudioToStream(const QByteArray& audioByteArray);
void parseAudioStreamStatsPacket(const QByteArray& packet);
void parseAudioEnvironmentData(const QByteArray& packet);
void handleAudioInput();
void reset();
void resetStats();
void audioMixerKilled();
void toggleMute();
void toggleAudioNoiseReduction();
void toggleAudioSourceInject();
void selectAudioSourcePinkNoise();
void selectAudioSourceSine440();
void toggleStats();
void toggleStatsShowInjectedStreams();
void toggleStereoInput();
void processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
@ -145,8 +134,6 @@ public slots:
virtual bool outputLocalInjector(bool isStereo, qreal volume, AudioInjector* injector);
void sendDownstreamAudioStatsPacket();
bool switchInputToAudioDevice(const QString& inputDeviceName);
bool switchOutputToAudioDevice(const QString& outputDeviceName);
QString getDeviceName(QAudio::Mode mode) const { return (mode == QAudio::AudioInput) ?
@ -159,9 +146,6 @@ public slots:
void setReverb(bool reverb) { _reverb = reverb; }
void setReverbOptions(const AudioEffectOptions* options);
const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }
signals:
bool muteToggled();
void inputReceived(const QByteArray& inputSamples);
@ -263,15 +247,10 @@ private:
bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo);
// Callback acceleration dependent calculations
static const float CALLBACK_ACCELERATOR_RATIO;
int calculateNumberOfInputCallbackBytes(const QAudioFormat& format) const;
int calculateNumberOfFrameSamples(int numBytes) const;
float calculateDeviceToNetworkInputRatio(int numBytes) const;
// audio stats methods for rendering
void renderAudioStreamStats(const AudioStreamStats& streamStats, int horizontalOffset, int& verticalOffset,
float scale, float rotation, int font, const float* color, bool isDownstreamStats = false);
// Input framebuffer
AudioBufferFloat32 _inputFrameBuffer;
@ -288,32 +267,14 @@ private:
// Tone source
bool _toneSourceEnabled;
AudioSourceTone _toneSource;
#ifdef _WIN32
static const unsigned int STATS_WIDTH = 1500;
#else
static const unsigned int STATS_WIDTH = 650;
#endif
static const unsigned int STATS_HEIGHT_PER_LINE = 20;
bool _statsEnabled;
bool _statsShowInjectedStreams;
AudioStreamStats _audioMixerAvatarStreamAudioStats;
QHash<QUuid, AudioStreamStats> _audioMixerInjectedStreamAudioStatsMap;
quint16 _outgoingAvatarAudioSequenceNumber;
MovingMinMaxAvg<float> _audioInputMsecsReadStats;
MovingMinMaxAvg<float> _inputRingBufferMsecsAvailableStats;
MovingMinMaxAvg<float> _audioOutputMsecsUnplayedStats;
quint64 _lastSentAudioPacket;
MovingMinMaxAvg<quint64> _packetSentTimeGaps;
AudioOutputIODevice _audioOutputIODevice;
WeakRecorderPointer _recorder;
AudioIOStats _stats;
};

interface/src/audio/AudioIOStats.cpp

@ -0,0 +1,134 @@
//
// AudioIOStats.cpp
// interface/src/audio
//
// Created by Stephen Birarda on 2014-12-16.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "InterfaceConfig.h"
#include <AudioConstants.h>
#include <MixedProcessedAudioStream.h>
#include <NodeList.h>
#include <PositionalAudioStream.h>
#include "Audio.h"
#include "AudioIOStats.h"
const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10;
const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.0f / AudioConstants::NETWORK_FRAME_MSECS);
AudioIOStats::AudioIOStats(MixedProcessedAudioStream* receivedAudioStream) :
_receivedAudioStream(receivedAudioStream),
_audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AudioConstants::NETWORK_FRAME_MSECS * Audio::CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_lastSentAudioPacket(0),
_packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
{
}
AudioStreamStats AudioIOStats::getMixerDownstreamStats() const {
return _receivedAudioStream->getAudioStreamStats();
}
void AudioIOStats::reset() {
_receivedAudioStream->resetStats();
_mixerAvatarStreamStats = AudioStreamStats();
_mixerInjectedStreamStatsMap.clear();
_audioInputMsecsReadStats.reset();
_inputRingBufferMsecsAvailableStats.reset();
_audioOutputMsecsUnplayedStats.reset();
_packetSentTimeGaps.reset();
}
void AudioIOStats::sentPacket() {
// first time this is 0
if (_lastSentAudioPacket == 0) {
_lastSentAudioPacket = usecTimestampNow();
} else {
quint64 now = usecTimestampNow();
quint64 gap = now - _lastSentAudioPacket;
_packetSentTimeGaps.update(gap);
_lastSentAudioPacket = now;
}
}
void AudioIOStats::parseAudioStreamStatsPacket(const QByteArray& packet) {
int numBytesPacketHeader = numBytesForPacketHeader(packet);
const char* dataAt = packet.constData() + numBytesPacketHeader;
// parse the appendFlag, clear injected audio stream stats if 0
quint8 appendFlag = *(reinterpret_cast<const quint8*>(dataAt));
dataAt += sizeof(quint8);
if (!appendFlag) {
_mixerInjectedStreamStatsMap.clear();
}
// parse the number of stream stats structs to follow
quint16 numStreamStats = *(reinterpret_cast<const quint16*>(dataAt));
dataAt += sizeof(quint16);
// parse the stream stats
AudioStreamStats streamStats;
for (quint16 i = 0; i < numStreamStats; i++) {
memcpy(&streamStats, dataAt, sizeof(AudioStreamStats));
dataAt += sizeof(AudioStreamStats);
if (streamStats._streamType == PositionalAudioStream::Microphone) {
_mixerAvatarStreamStats = streamStats;
} else {
_mixerInjectedStreamStatsMap[streamStats._streamIdentifier] = streamStats;
}
}
}
void AudioIOStats::sendDownstreamAudioStatsPacket() {
Audio* audioIO = DependencyManager::get<Audio>();
// since this function is called every second, we'll sample for some of our stats here
_inputRingBufferMsecsAvailableStats.update(audioIO->getInputRingBufferMsecsAvailable());
_audioOutputMsecsUnplayedStats.update(audioIO->getAudioOutputMsecsUnplayed());
// also, call _receivedAudioStream's per-second callback
_receivedAudioStream->perSecondCallbackForUpdatingStats();
char packet[MAX_PACKET_SIZE];
// pack header
int numBytesPacketHeader = populatePacketHeader(packet, PacketTypeAudioStreamStats);
char* dataAt = packet + numBytesPacketHeader;
// pack append flag
quint8 appendFlag = 0;
memcpy(dataAt, &appendFlag, sizeof(quint8));
dataAt += sizeof(quint8);
// pack number of stats packed
quint16 numStreamStatsToPack = 1;
memcpy(dataAt, &numStreamStatsToPack, sizeof(quint16));
dataAt += sizeof(quint16);
// pack downstream audio stream stats
AudioStreamStats stats = _receivedAudioStream->getAudioStreamStats();
memcpy(dataAt, &stats, sizeof(AudioStreamStats));
dataAt += sizeof(AudioStreamStats);
// send packet
NodeList* nodeList = NodeList::getInstance();
SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
nodeList->writeDatagram(packet, dataAt - packet, audioMixer);
}
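Payload layout of the stats packets on both the parse and send sides above, after the packet header: one quint8 append flag (0 clears the injected-stream map), a quint16 count, then that many AudioStreamStats structs copied verbatim. A standalone round-trip of that framing with plain stdint types; memcpy'ing structs like this assumes both ends share struct layout and endianness, which holds for this client/mixer pair:

#include <cstdint>
#include <cstdio>
#include <cstring>

struct StreamStats { uint32_t lost; float lostRate; };  // stand-in for AudioStreamStats

int main() {
    char packet[64];
    char* dataAt = packet;

    uint8_t appendFlag = 0;                                   // 0 = replace, not append
    memcpy(dataAt, &appendFlag, sizeof(appendFlag));
    dataAt += sizeof(appendFlag);
    uint16_t numStreamStats = 1;
    memcpy(dataAt, &numStreamStats, sizeof(numStreamStats));
    dataAt += sizeof(numStreamStats);
    StreamStats stats = { 42, 0.05f };
    memcpy(dataAt, &stats, sizeof(stats));
    dataAt += sizeof(stats);

    // the reader walks the same layout in the same order
    const char* readAt = packet;
    uint8_t flag;
    memcpy(&flag, readAt, sizeof(flag));                      // read exactly one byte
    readAt += sizeof(flag);
    uint16_t count;
    memcpy(&count, readAt, sizeof(count));
    readAt += sizeof(count);
    StreamStats parsed;
    memcpy(&parsed, readAt, sizeof(parsed));
    printf("flag=%u count=%u lost=%u\n", flag, count, parsed.lost);
    return 0;
}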

interface/src/audio/AudioIOStats.h

@ -0,0 +1,60 @@
//
// AudioIOStats.h
// interface/src/audio
//
// Created by Stephen Birarda on 2014-12-16.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioIOStats_h
#define hifi_AudioIOStats_h
#include "MovingMinMaxAvg.h"
#include <QObject>
#include <AudioStreamStats.h>
class MixedProcessedAudioStream;
class AudioIOStats : public QObject {
Q_OBJECT
public:
AudioIOStats(MixedProcessedAudioStream* receivedAudioStream);
void reset();
void updateInputMsecsRead(float msecsRead) { _audioInputMsecsReadStats.update(msecsRead); }
void sentPacket();
AudioStreamStats getMixerDownstreamStats() const;
const AudioStreamStats& getMixerAvatarStreamStats() const { return _mixerAvatarStreamStats; }
const QHash<QUuid, AudioStreamStats>& getMixerInjectedStreamStatsMap() const { return _mixerInjectedStreamStatsMap; }
const MovingMinMaxAvg<float>& getAudioInputMsecsReadStats() const { return _audioInputMsecsReadStats; }
const MovingMinMaxAvg<float>& getInputRingBufferMsecsAvailableStats() const { return _inputRingBufferMsecsAvailableStats; }
const MovingMinMaxAvg<float>& getAudioOutputMsecsUnplayedStats() const { return _audioOutputMsecsUnplayedStats; }
const MovingMinMaxAvg<quint64>& getPacketSentTimeGaps() const { return _packetSentTimeGaps; }
void sendDownstreamAudioStatsPacket();
void parseAudioStreamStatsPacket(const QByteArray& packet);
private:
MixedProcessedAudioStream* _receivedAudioStream;
MovingMinMaxAvg<float> _audioInputMsecsReadStats;
MovingMinMaxAvg<float> _inputRingBufferMsecsAvailableStats;
MovingMinMaxAvg<float> _audioOutputMsecsUnplayedStats;
AudioStreamStats _mixerAvatarStreamStats;
QHash<QUuid, AudioStreamStats> _mixerInjectedStreamStatsMap;
quint64 _lastSentAudioPacket;
MovingMinMaxAvg<quint64> _packetSentTimeGaps;
};
#endif // hifi_AudioIOStats_h
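A hedged sketch of how Audio drives the new class after this commit, with a trimmed stand-in so it compiles alone; the call sites noted in the comments (handleAudioInput, the packet send path, audioMixerKilled) are the ones rewired in the diff:

#include <cstdio>

class StatsStandIn {  // trimmed stand-in for AudioIOStats
public:
    void updateInputMsecsRead(float msecsRead) { _msecsRead += msecsRead; }
    void sentPacket() { ++_packetsSent; }
    void reset() { _msecsRead = 0.0f; _packetsSent = 0; }
    void print() const { printf("read %.2f ms, sent %d packets\n", _msecsRead, _packetsSent); }
private:
    float _msecsRead = 0.0f;
    int _packetsSent = 0;
};

int main() {
    StatsStandIn stats;
    stats.updateInputMsecsRead(10.67f);  // Audio::handleAudioInput()
    stats.sentPacket();                  // after each audio datagram is written
    stats.print();
    stats.reset();                       // Audio::reset() / Audio::audioMixerKilled()
    stats.print();
    return 0;
}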

interface/src/audio/AudioIOStatsRenderer.cpp

@ -0,0 +1,236 @@
//
// AudioIOStatsRenderer.cpp
// interface/src/audio
//
// Created by Stephen Birarda on 2014-12-16.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "InterfaceConfig.h"
#include <AudioConstants.h>
#include <DependencyManager.h>
#include <NodeList.h>
#include <Util.h>
#include "AudioIOStats.h"
#include "AudioIOStatsRenderer.h"
AudioIOStatsRenderer::AudioIOStatsRenderer(const AudioIOStats* stats) :
_stats(stats),
_isEnabled(false),
_shouldShowInjectedStreams(false)
{
}
#ifdef _WIN32
const unsigned int STATS_WIDTH = 1500;
#else
const unsigned int STATS_WIDTH = 650;
#endif
const unsigned int STATS_HEIGHT_PER_LINE = 20;
void AudioIOStatsRenderer::render(const float* color, int width, int height) {
if (!_isEnabled) {
return;
}
const int linesWhenCentered = _shouldShowInjectedStreams ? 34 : 27;
const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * linesWhenCentered;
int lines = _shouldShowInjectedStreams ? _stats->getMixerInjectedStreamStatsMap().size() * 7 + 27 : 27;
int statsHeight = STATS_HEIGHT_PER_LINE * lines;
static const float backgroundColor[4] = { 0.2f, 0.2f, 0.2f, 0.6f };
int x = std::max((width - (int)STATS_WIDTH) / 2, 0);
int y = std::max((height - CENTERED_BACKGROUND_HEIGHT) / 2, 0);
int backgroundHeight = statsHeight;
glColor4fv(backgroundColor);
glBegin(GL_QUADS);
glVertex2i(x, y);
glVertex2i(x + STATS_WIDTH, y);
glVertex2i(x + STATS_WIDTH, y + backgroundHeight);
glVertex2i(x , y + backgroundHeight);
glEnd();
glColor4f(1, 1, 1, 1);
int horizontalOffset = x + 5;
int verticalOffset = y;
float scale = 0.10f;
float rotation = 0.0f;
int font = 2;
char latencyStatString[512];
float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f;
AudioStreamStats downstreamAudioStreamStats = _stats->getMixerDownstreamStats();
SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer);
if (!audioMixerNodePointer.isNull()) {
audioInputBufferLatency = _stats->getAudioInputMsecsReadStats().getWindowAverage();
inputRingBufferLatency = (float) _stats->getInputRingBufferMsecsAvailableStats().getWindowAverage();
networkRoundtripLatency = audioMixerNodePointer->getPingMs();
mixerRingBufferLatency = _stats->getMixerAvatarStreamStats()._framesAvailableAverage * AudioConstants::NETWORK_FRAME_MSECS;
outputRingBufferLatency = downstreamAudioStreamStats._framesAvailableAverage * AudioConstants::NETWORK_FRAME_MSECS;
audioOutputBufferLatency = _stats->getAudioOutputMsecsUnplayedStats().getWindowAverage();
}
float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency;
sprintf(latencyStatString, " Audio input buffer: %7.2fms - avg msecs of samples read to the input ring buffer in last 10s", audioInputBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Input ring buffer: %7.2fms - avg msecs of samples in input ring buffer in last 10s", inputRingBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Network to mixer: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " AudioMixer ring buffer: %7.2fms - avg msecs of samples in audio mixer's ring buffer in last 10s", mixerRingBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Network to client: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Output ring buffer: %7.2fms - avg msecs of samples in output ring buffer in last 10s", outputRingBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " Audio output buffer: %7.2fms - avg msecs of samples in audio output buffer in last 10s", audioOutputBufferLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
sprintf(latencyStatString, " TOTAL: %7.2fms\n", totalLatency);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
char clientUpstreamMicLabelString[] = "Upstream Mic Audio Packets Sent Gaps (by client):";
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, clientUpstreamMicLabelString, color);
const MovingMinMaxAvg<quint64>& packetSentTimeGaps = _stats->getPacketSentTimeGaps();
char stringBuffer[512];
sprintf(stringBuffer, " Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s",
formatUsecTime(packetSentTimeGaps.getMin()).toLatin1().data(),
formatUsecTime(packetSentTimeGaps.getMax()).toLatin1().data(),
formatUsecTime(packetSentTimeGaps.getAverage()).toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
sprintf(stringBuffer, " Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s",
formatUsecTime(packetSentTimeGaps.getWindowMin()).toLatin1().data(),
formatUsecTime(packetSentTimeGaps.getWindowMax()).toLatin1().data(),
formatUsecTime(packetSentTimeGaps.getWindowAverage()).toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
char upstreamMicLabelString[] = "Upstream mic audio stats (received and reported by audio-mixer):";
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamMicLabelString, color);
renderAudioStreamStats(&_stats->getMixerAvatarStreamStats(), horizontalOffset, verticalOffset,
scale, rotation, font, color);
verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
char downstreamLabelString[] = "Downstream mixed audio stats:";
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
AudioStreamStats downstreamStats = _stats->getMixerDownstreamStats();
renderAudioStreamStats(&downstreamStats, horizontalOffset, verticalOffset,
scale, rotation, font, color, true);
if (_shouldShowInjectedStreams) {
foreach(const AudioStreamStats& injectedStreamAudioStats, _stats->getMixerInjectedStreamStatsMap()) {
verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
char upstreamInjectedLabelString[512];
sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s",
injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color);
renderAudioStreamStats(&injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
}
}
}
void AudioIOStatsRenderer::renderAudioStreamStats(const AudioStreamStats* streamStats, int horizontalOffset, int& verticalOffset,
float scale, float rotation, int font, const float* color, bool isDownstreamStats) {
char stringBuffer[512];
sprintf(stringBuffer, " Packet loss | overall: %5.2f%% (%d lost), last_30s: %5.2f%% (%d lost)",
streamStats->_packetStreamStats.getLostRate() * 100.0f,
streamStats->_packetStreamStats._lost,
streamStats->_packetStreamWindowStats.getLostRate() * 100.0f,
streamStats->_packetStreamWindowStats._lost);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
if (isDownstreamStats) {
sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d",
streamStats->_desiredJitterBufferFrames,
streamStats->_framesAvailableAverage,
(int)(_stats->getAudioOutputMsecsUnplayedStats().getWindowAverage() / AudioConstants::NETWORK_FRAME_MSECS),
streamStats->_framesAvailable,
(int)(_stats->getAudioOutputMsecsUnplayedStats().getCurrentIntervalLastSample()
/ AudioConstants::NETWORK_FRAME_MSECS));
} else {
sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u",
streamStats->_desiredJitterBufferFrames,
streamStats->_framesAvailableAverage,
streamStats->_framesAvailable);
}
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
sprintf(stringBuffer, " Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u",
streamStats->_starveCount,
streamStats->_consecutiveNotMixedCount,
streamStats->_framesDropped,
streamStats->_overflowCount);
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
sprintf(stringBuffer, " Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s",
formatUsecTime(streamStats->_timeGapMin).toLatin1().data(),
formatUsecTime(streamStats->_timeGapMax).toLatin1().data(),
formatUsecTime(streamStats->_timeGapAverage).toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
sprintf(stringBuffer, " Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s",
formatUsecTime(streamStats->_timeGapWindowMin).toLatin1().data(),
formatUsecTime(streamStats->_timeGapWindowMax).toLatin1().data(),
formatUsecTime(streamStats->_timeGapWindowAverage).toLatin1().data());
verticalOffset += STATS_HEIGHT_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
}
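The renderer repeats the advance-then-draw pair for every stat line; a small lambda would remove that duplication. Hypothetical refactor sketch (drawText here is a printf stub, not the real Util.h function):

#include <cstdio>

void drawText(int x, int y, const char* text) { printf("(%3d,%3d) %s\n", x, y, text); }

int main() {
    const int STATS_HEIGHT_PER_LINE = 20;
    int horizontalOffset = 5;
    int verticalOffset = 0;
    auto drawStatLine = [&](const char* text) {
        verticalOffset += STATS_HEIGHT_PER_LINE;  // advance one line, then draw
        drawText(horizontalOffset, verticalOffset, text);
    };
    drawStatLine(" Audio input buffer:   12.34ms");
    drawStatLine(" Input ring buffer:     1.23ms");
    drawStatLine(" TOTAL:                13.57ms");
    return 0;
}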

interface/src/audio/AudioIOStatsRenderer.h

@ -0,0 +1,41 @@
//
// AudioIOStatsRenderer.h
// interface/src/audio
//
// Created by Stephen Birarda on 2014-12-16.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioIOStatsRenderer_h
#define hifi_AudioIOStatsRenderer_h
#include <QObject>
class AudioIOStats;
class AudioStreamStats;
class AudioIOStatsRenderer : public QObject {
Q_OBJECT
public:
AudioIOStatsRenderer(const AudioIOStats* audioIOStats);
void render(const float* color, int width, int height);
public slots:
void toggle() { _isEnabled = !_isEnabled; }
void toggleShowInjectedStreams() { _shouldShowInjectedStreams = !_shouldShowInjectedStreams; }
private:
// audio stats methods for rendering
void renderAudioStreamStats(const AudioStreamStats* streamStats, int horizontalOffset, int& verticalOffset,
float scale, float rotation, int font, const float* color, bool isDownstreamStats = false);
const AudioIOStats* _stats;
bool _isEnabled;
bool _shouldShowInjectedStreams;
};
#endif // hifi_AudioIOStatsRenderer_h

libraries/audio/src/AudioConstants.h

@ -13,6 +13,7 @@
#define hifi_AudioConstants_h
#include <limits>
#include <math.h>
#include <stdint.h>
namespace AudioConstants {
@ -26,6 +27,7 @@ namespace AudioConstants {
const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / sizeof(AudioSample);
const float NETWORK_FRAME_MSECS = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL
/ (float) AudioConstants::SAMPLE_RATE) * 1000.0;
const unsigned int NETWORK_FRAME_USECS = floorf(NETWORK_FRAME_MSECS * 1000.0);
const int MIN_SAMPLE_VALUE = std::numeric_limits<AudioSample>::min();
const int MAX_SAMPLE_VALUE = std::numeric_limits<AudioSample>::max();
}
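Concrete values behind the new constant, assuming the library's 24 kHz sample rate and 256-sample network frames (both defined elsewhere in this namespace, so they are assumptions here): 256 / 24000 * 1000 = 10.666... ms, floored to 10666 us.

#include <cmath>
#include <cstdio>

int main() {
    const int SAMPLE_RATE = 24000;                       // assumed, per AudioConstants
    const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = 256;   // assumed, per AudioConstants
    const float NETWORK_FRAME_MSECS =
        (NETWORK_FRAME_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE) * 1000.0f;
    const unsigned int NETWORK_FRAME_USECS = (unsigned int)floorf(NETWORK_FRAME_MSECS * 1000.0f);
    printf("one frame = %.4f ms = %u us\n", NETWORK_FRAME_MSECS, NETWORK_FRAME_USECS);
    // prints: one frame = 10.6667 ms = 10666 us
    return 0;
}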

libraries/audio/src/AudioInjector.cpp

@ -204,7 +204,7 @@ void AudioInjector::injectToMixer() {
_loudness = 0.0f;
for (int i = 0; i < bytesToCopy; i += sizeof(int16_t)) {
_loudness += abs(*reinterpret_cast<int16_t*>(_audioData.data() + _currentSendPosition + i)) /
(MAX_SAMPLE_VALUE / 2.0f);
(AudioConstants::MAX_SAMPLE_VALUE / 2.0f);
}
_loudness /= (float)(bytesToCopy / sizeof(int16_t));
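The injector's loudness is the mean absolute sample, normalized against half the full int16 range. Worked standalone version of the same arithmetic:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
    const float MAX_SAMPLE_VALUE = 32767.0f;  // AudioConstants::MAX_SAMPLE_VALUE
    int16_t samples[] = { 8000, -16000, 4000, -4000 };
    float loudness = 0.0f;
    for (int16_t sample : samples) {
        loudness += abs(sample) / (MAX_SAMPLE_VALUE / 2.0f);
    }
    loudness /= 4.0f;  // number of samples
    printf("loudness = %.3f\n", loudness);  // mean(|s|) / 16383.5 = 8000 / 16383.5 ~ 0.488
    return 0;
}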
@ -243,7 +243,7 @@ void AudioInjector::injectToMixer() {
if (_currentSendPosition != bytesToCopy && _currentSendPosition < _audioData.size()) {
// not the first packet and not done
// sleep for the appropriate time
int usecToSleep = (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - timer.nsecsElapsed() / 1000;
int usecToSleep = (++nextFrame * AudioConstants::NETWORK_FRAME_USECS) - timer.nsecsElapsed() / 1000;
if (usecToSleep > 0) {
usleep(usecToSleep);

libraries/audio/src/AudioRingBuffer.cpp

@ -212,7 +212,7 @@ float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
sampleAt = sampleAt == _bufferLastAt ? _buffer : sampleAt + 1;
}
loudness /= _numFrameSamples;
loudness /= MAX_SAMPLE_VALUE;
loudness /= AudioConstants::MAX_SAMPLE_VALUE;
return loudness;
}

libraries/audio/src/AudioRingBuffer.h

@ -12,9 +12,6 @@
#ifndef hifi_AudioRingBuffer_h
#define hifi_AudioRingBuffer_h
#include <limits>
#include <stdint.h>
#include "AudioConstants.h"
#include <QtCore/QIODevice>
@ -22,12 +19,6 @@
#include <SharedUtil.h>
#include <NodeData.h>
const unsigned int BUFFER_SEND_INTERVAL_USECS = floorf((AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL
/ (float)AudioConstants::SAMPLE_RATE) * USECS_PER_SECOND);
const int MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max();
const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
const int DEFAULT_RING_BUFFER_FRAME_CAPACITY = 10;
class AudioRingBuffer {

libraries/audio/src/InboundAudioStream.cpp

@ -324,7 +324,8 @@ void InboundAudioStream::setToStarved() {
// we don't know when the next packet will arrive, so it's possible the gap between the last packet and the
// next packet will exceed the max time gap in the window. If the time since the last packet has already exceeded
// the window max gap, then we should use that value to calculate desired frames.
int framesSinceLastPacket = ceilf((float)(now - _lastPacketReceivedTime) / (float)BUFFER_SEND_INTERVAL_USECS);
int framesSinceLastPacket = ceilf((float)(now - _lastPacketReceivedTime)
/ (float)AudioConstants::NETWORK_FRAME_USECS);
calculatedJitterBufferFrames = std::max(_calculatedJitterBufferFramesUsingMaxGap, framesSinceLastPacket);
}
// make sure _desiredJitterBufferFrames does not become lower here
@ -398,15 +399,16 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
if (_timeGapStatsForDesiredCalcOnTooManyStarves.getNewStatsAvailableFlag()) {
_calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_timeGapStatsForDesiredCalcOnTooManyStarves.getWindowMax()
/ (float)BUFFER_SEND_INTERVAL_USECS);
/ (float) AudioConstants::NETWORK_FRAME_USECS);
_timeGapStatsForDesiredCalcOnTooManyStarves.clearNewStatsAvailableFlag();
}
const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;
if (_stdevStatsForDesiredCalcOnTooManyStarves.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
const float NUM_STANDARD_DEVIATIONS = 3.0f;
_calculatedJitterBufferFramesUsingStDev = ceilf(NUM_STANDARD_DEVIATIONS * _stdevStatsForDesiredCalcOnTooManyStarves.getStDev()
/ (float)BUFFER_SEND_INTERVAL_USECS);
_calculatedJitterBufferFramesUsingStDev = ceilf(NUM_STANDARD_DEVIATIONS
* _stdevStatsForDesiredCalcOnTooManyStarves.getStDev()
/ (float) AudioConstants::NETWORK_FRAME_USECS);
_stdevStatsForDesiredCalcOnTooManyStarves.reset();
}
@ -414,7 +416,8 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
// if the max gap in window B (_timeGapStatsForDesiredReduction) corresponds to a smaller number of frames than _desiredJitterBufferFrames,
// then reduce _desiredJitterBufferFrames to that number of frames.
if (_timeGapStatsForDesiredReduction.getNewStatsAvailableFlag() && _timeGapStatsForDesiredReduction.isWindowFilled()) {
int calculatedJitterBufferFrames = ceilf((float)_timeGapStatsForDesiredReduction.getWindowMax() / (float)BUFFER_SEND_INTERVAL_USECS);
int calculatedJitterBufferFrames = ceilf((float)_timeGapStatsForDesiredReduction.getWindowMax()
/ (float)AudioConstants::NETWORK_FRAME_USECS);
if (calculatedJitterBufferFrames < _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
}
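The hunks above all apply the same sizing rule: the desired jitter-buffer depth is an observed inter-packet gap expressed in network frames, rounded up. Worked standalone example:

#include <cmath>
#include <cstdio>

int main() {
    const float NETWORK_FRAME_USECS = 10666.0f;  // from AudioConstants
    float windowMaxGapUsecs = 30000.0f;          // worst gap in the window: 30 ms
    int calculatedJitterBufferFrames = (int)ceilf(windowMaxGapUsecs / NETWORK_FRAME_USECS);
    printf("desired frames for a %.0f us gap: %d\n",
           windowMaxGapUsecs, calculatedJitterBufferFrames);  // prints 3
    return 0;
}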
@ -483,8 +486,8 @@ float calculateRepeatedFrameFadeFactor(int indexOfRepeat) {
const float INITIAL_MSECS_NO_FADE = 20.0f;
const float MSECS_FADE_TO_ZERO = 320.0f;
const float INITIAL_FRAMES_NO_FADE = INITIAL_MSECS_NO_FADE * (float)USECS_PER_MSEC / (float)BUFFER_SEND_INTERVAL_USECS;
const float FRAMES_FADE_TO_ZERO = MSECS_FADE_TO_ZERO * (float)USECS_PER_MSEC / (float)BUFFER_SEND_INTERVAL_USECS;
const float INITIAL_FRAMES_NO_FADE = INITIAL_MSECS_NO_FADE / AudioConstants::NETWORK_FRAME_MSECS;
const float FRAMES_FADE_TO_ZERO = MSECS_FADE_TO_ZERO / AudioConstants::NETWORK_FRAME_MSECS;
const float SAMPLE_RANGE = std::numeric_limits<int16_t>::max();

libraries/shared/src/MovingMinMaxAvg.h

@ -24,7 +24,8 @@ public:
: _min(std::numeric_limits<T>::max()),
_max(std::numeric_limits<T>::min()),
_average(0.0),
_samples(0)
_samples(0),
_last(0)
{}
void reset() {
@ -32,6 +33,7 @@ public:
_max = std::numeric_limits<T>::min();
_average = 0.0;
_samples = 0;
_last = 0;
}
void update(T sample) {
@ -45,6 +47,8 @@ public:
_average = _average * ((double)_samples / totalSamples)
+ (double)sample / totalSamples;
_samples++;
_last = sample;
}
void update(const MinMaxAvg<T>& other) {
@ -65,12 +69,14 @@ public:
double getAverage() const { return _average; }
int getSamples() const { return _samples; }
double getSum() const { return _samples * _average; }
T getLast() const { return _last; }
private:
T _min;
T _max;
double _average;
int _samples;
T _last;
};
template <typename T>
@ -167,6 +173,7 @@ public:
double getCurrentIntervalAverage() const { return _currentIntervalStats.getAverage(); }
int getCurrentIntervalSamples() const { return _currentIntervalStats.getSamples(); }
double getCurrentIntervalSum() const { return _currentIntervalStats.getSum(); }
T getCurrentIntervalLastSample() const { return _currentIntervalStats.getLast(); }
const MinMaxAvg<T>& getOverallStats() const{ return _overallStats; }
const MinMaxAvg<T>& getWindowStats() const{ return _windowStats; }
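The new _last field gives callers the most recent raw sample next to the windowed min/max/avg; the renderer uses getCurrentIntervalLastSample() above as a stand-in for the instantaneous unplayed-output reading Audio used to take directly. A trimmed re-implementation of just this change:

#include <algorithm>
#include <cstdio>
#include <limits>

template <typename T>
class MinMaxAvgSketch {  // trimmed version of the patched MinMaxAvg
public:
    void update(T sample) {
        _min = std::min(_min, sample);
        _max = std::max(_max, sample);
        double totalSamples = _samples + 1;
        _average = _average * ((double)_samples / totalSamples) + (double)sample / totalSamples;
        _samples++;
        _last = sample;  // the field this hunk adds
    }
    double getAverage() const { return _average; }
    T getLast() const { return _last; }
private:
    T _min = std::numeric_limits<T>::max();
    T _max = std::numeric_limits<T>::min();
    double _average = 0.0;
    int _samples = 0;
    T _last = 0;
};

int main() {
    MinMaxAvgSketch<float> stats;
    const float samples[] = { 1.0f, 3.0f, 2.0f };
    for (float sample : samples) {
        stats.update(sample);
    }
    printf("avg=%.2f last=%.2f\n", stats.getAverage(), stats.getLast());  // avg=2.00 last=2.00
    return 0;
}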