Merge pull request #3201 from wangyix/master
moved audio stats to own overlay; audio stats now reset when audiomixer is killed;
Commit a5d6b2282e

14 changed files with 257 additions and 167 deletions
@@ -237,14 +237,6 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
         _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayBufferSample[1];
     }
-
-    // The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
-    // too many conditionals in handling the delay samples at the beginning of _clientSamples.
-    // Basically we try to take the samples in batches of four, and then handle the remainder
-    // conditionally to get rid of the rest.
-
-    const int DOUBLE_STEREO_OFFSET = 4;
-    const int TRIPLE_STEREO_OFFSET = 6;

     if (numSamplesDelay > 0) {
         // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
         // to stick at the beginning
@@ -90,12 +90,12 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
     switch (packetArrivalInfo._status) {
         case SequenceNumberStats::Early: {
             int packetsLost = packetArrivalInfo._seqDiffFromExpected;
-            avatarRingBuffer->parseData(packet, packetsLost);
+            avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost);
             break;
         }
         case SequenceNumberStats::OnTime: {
             // ask the AvatarAudioRingBuffer instance to parse the data
-            avatarRingBuffer->parseData(packet);
+            avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, 0);
             break;
         }
         default: {
@@ -134,12 +134,12 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
     switch (packetArrivalInfo._status) {
         case SequenceNumberStats::Early: {
             int packetsLost = packetArrivalInfo._seqDiffFromExpected;
-            matchingInjectedRingBuffer->parseData(packet, packetsLost);
+            matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost);
             break;
         }
         case SequenceNumberStats::OnTime: {
             // ask the AvatarAudioRingBuffer instance to parse the data
-            matchingInjectedRingBuffer->parseData(packet);
+            matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, 0);
             break;
         }
         default: {
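Note: both switch blocks above key off SequenceNumberStats: an Early arrival means the expected sequence number was skipped, and `_seqDiffFromExpected` is treated as that many dropped packets for the ring buffer to conceal. A minimal sketch of that classification (illustrative only, not the repo's SequenceNumberStats API), handling 16-bit wraparound:

```cpp
#include <cstdint>

// Classify an incoming 16-bit sequence number against the expected one,
// treating a positive gap as that many dropped packets. Wraparound is
// handled by interpreting the difference modulo 2^16 as a signed value.
enum class Arrival { OnTime, Early, Late };

struct SeqCheck {
    Arrival status;
    int packetsLost;   // only meaningful when status == Early
};

SeqCheck checkSequenceNumber(uint16_t incoming, uint16_t& expected) {
    int16_t diff = (int16_t)(incoming - expected);   // signed gap, wrap-safe
    if (diff == 0) {
        expected = incoming + 1;
        return { Arrival::OnTime, 0 };
    } else if (diff > 0) {
        expected = incoming + 1;
        return { Arrival::Early, diff };             // 'diff' packets never arrived
    }
    return { Arrival::Late, 0 };                     // old or duplicate packet
}
```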
@@ -18,7 +18,7 @@ AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBu

 }

-int AvatarAudioRingBuffer::parseData(const QByteArray& packet, int packetsSkipped) {
+int AvatarAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) {
     frameReceivedUpdateTimingStats();

     _shouldLoopbackForNode = (packetTypeForPacket(packet) == PacketTypeMicrophoneAudioWithEcho);
@@ -20,7 +20,7 @@ class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
 public:
     AvatarAudioRingBuffer(bool isStereo = false, bool dynamicJitterBuffer = false);

-    int parseData(const QByteArray& packet, int packetsSkipped = 0);
+    int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped);
 private:
     // disallow copying of AvatarAudioRingBuffer objects
     AvatarAudioRingBuffer(const AvatarAudioRingBuffer&);
@@ -3355,7 +3355,7 @@ void Application::nodeKilled(SharedNodePointer node) {
     _modelEditSender.nodeKilled(node);

     if (node->getType() == NodeType::AudioMixer) {
-        QMetaObject::invokeMethod(&_audio, "resetIncomingMixedAudioSequenceNumberStats");
+        QMetaObject::invokeMethod(&_audio, "audioMixerKilled");
     }

     if (node->getType() == NodeType::VoxelServer) {
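Note: `QMetaObject::invokeMethod()` resolves the target by name at runtime, so `audioMixerKilled()` must be declared as a slot (it is, in the Audio.h hunk below); with the default Qt::AutoConnection the call is queued onto `_audio`'s thread if the caller is on a different one. A stripped-down sketch of the same pattern, using a hypothetical `AudioLike` class rather than the app's `Audio`:

```cpp
#include <QObject>

// Sketch of the cross-thread reset pattern used above. The method must be a
// slot or Q_INVOKABLE so the meta-object system can find it by name, and the
// class must be processed by moc (Q_OBJECT) as usual.
class AudioLike : public QObject {
    Q_OBJECT
public slots:
    void audioMixerKilled() { /* reset sequence numbers and stats here */ }
};

void onMixerNodeKilled(AudioLike* audio) {
    // queued onto audio's thread when called from another thread
    QMetaObject::invokeMethod(audio, "audioMixerKilled");
}
```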
@@ -43,6 +43,7 @@
 #include "Audio.h"
 #include "Menu.h"
 #include "Util.h"
+#include "AudioRingBuffer.h"

 static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;

@@ -125,14 +126,16 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _scopeInput(0),
     _scopeOutputLeft(0),
     _scopeOutputRight(0),
+    _statsEnabled(false),
     _starveCount(0),
     _consecutiveNotMixedCount(0),
     _outgoingAvatarAudioSequenceNumber(0),
     _incomingMixedAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH),
     _interframeTimeGapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS),
-    _inputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
+    _audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AUDIO_CALLBACK_MSECS * CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
+    _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _outputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
-    _audioOutputBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS)
+    _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS)
 {
     // clear the array of locally injected samples
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
@@ -148,15 +151,34 @@ void Audio::init(QGLWidget *parent) {

 void Audio::reset() {
     _ringBuffer.reset();

     // we don't want to reset seq numbers when space-bar reset occurs.
     //_outgoingAvatarAudioSequenceNumber = 0;

+    resetStats();
+}
+
+void Audio::resetStats() {
+    _starveCount = 0;
+    _consecutiveNotMixedCount = 0;
+
+    _audioMixerAvatarStreamAudioStats = AudioStreamStats();
+    _audioMixerInjectedStreamAudioStatsMap.clear();
+
+    //_outgoingAvatarAudioSequenceNumber = 0;
+    _incomingMixedAudioSequenceNumberStats.reset();
+
+    _interframeTimeGapStats.reset();
+
+    _audioInputMsecsReadStats.reset();
+    _inputRingBufferMsecsAvailableStats.reset();
+
+    _outputRingBufferFramesAvailableStats.reset();
+    _audioOutputMsecsUnplayedStats.reset();
 }
+
+void Audio::audioMixerKilled() {
+    _outgoingAvatarAudioSequenceNumber = 0;
+    resetStats();
+}

 QAudioDeviceInfo getNamedAudioDeviceForMode(QAudio::Mode mode, const QString& deviceName) {
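Note: `reset()` (the space-bar reset) deliberately leaves `_outgoingAvatarAudioSequenceNumber` alone: the mixer it is talking to is still alive, and a jump back to 0 would be misread as massive loss. Only `audioMixerKilled()` zeroes it, because a replacement mixer starts with no expectations. The MovingMinMaxAvg fields cleared here behave roughly like the toy windowed accumulator below (illustrative only; the real class also tracks windowed min and max):

```cpp
#include <deque>
#include <numeric>

// Toy stand-in for the MovingMinMaxAvg<T> fields reset above: keeps the last
// N samples and reports their average, which is why reset() is just a clear.
template <typename T>
class WindowedAvg {
    std::deque<T> _samples;
    size_t _capacity;
public:
    explicit WindowedAvg(size_t capacity) : _capacity(capacity) {}
    void update(T sample) {
        _samples.push_back(sample);
        if (_samples.size() > _capacity) { _samples.pop_front(); }
    }
    double getWindowAverage() const {
        if (_samples.empty()) { return 0.0; }
        return std::accumulate(_samples.begin(), _samples.end(), 0.0) / _samples.size();
    }
    void reset() { _samples.clear(); }
};
```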
@@ -499,8 +521,11 @@ void Audio::handleAudioInput() {
     }

     _inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size());

+    float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
+    _audioInputMsecsReadStats.update(audioInputMsecsRead);
+
-    while (_inputRingBuffer.samplesAvailable() > inputSamplesRequired) {
+    while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {

         int16_t* inputAudioSamples = new int16_t[inputSamplesRequired];
         _inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
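Note: two things change here. The loop condition becomes `>=`, so a frame is processed as soon as exactly `inputSamplesRequired` samples are buffered (with `>` the last full frame would sit in the ring buffer until more input arrived), and each callback's read size is folded into `_audioInputMsecsReadStats`. The byte-to-millisecond conversion relies on `QAudioFormat::bytesForDuration()`, which takes a duration in microseconds:

```cpp
#include <QAudioFormat>

// Sketch of the bytes-to-milliseconds conversion used above.
// bytesForDuration() takes microseconds, so passing 1000 (USECS_PER_MSEC)
// yields bytes-per-millisecond for the given device format.
float msecsOfAudio(const QAudioFormat& format, int numBytes) {
    const qint64 USECS_PER_MSEC = 1000;
    return numBytes / (float)format.bytesForDuration(USECS_PER_MSEC);
}
```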
@@ -811,11 +836,12 @@ AudioStreamStats Audio::getDownstreamAudioStreamStats() const {

 void Audio::sendDownstreamAudioStatsPacket() {

-    _inputRingBufferFramesAvailableStats.update(getInputRingBufferFramesAvailable());
+    // since this function is called every second, we'll sample some of our stats here
+    _inputRingBufferMsecsAvailableStats.update(getInputRingBufferMsecsAvailable());

-    // since this function is called every second, we'll sample the number of audio frames available here.
     _outputRingBufferFramesAvailableStats.update(_ringBuffer.framesAvailable());
-    _audioOutputBufferFramesAvailableStats.update(getOutputRingBufferFramesAvailable());
+    _audioOutputMsecsUnplayedStats.update(getAudioOutputMsecsUnplayed());

     // push the current seq number stats into history, which moves the history window forward 1s
     // (since that's how often pushStatsToHistory() is called)
@@ -1286,6 +1312,10 @@ void Audio::toggleScopePause() {
     _scopeEnabledPause = !_scopeEnabledPause;
 }

+void Audio::toggleStats() {
+    _statsEnabled = !_statsEnabled;
+}
+
 void Audio::selectAudioScopeFiveFrames() {
     if (Menu::getInstance()->isOptionChecked(MenuOption::AudioScopeFiveFrames)) {
         reallocateScope(5);
@@ -1365,6 +1395,174 @@ void Audio::addBufferToScope(
     }
 }

+void Audio::renderStats(const float* color, int width, int height) {
+    if (!_statsEnabled) {
+        return;
+    }
+
+    const int LINES_WHEN_CENTERED = 30;
+    const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * LINES_WHEN_CENTERED;
+
+    int lines = _audioMixerInjectedStreamAudioStatsMap.size() * 7 + 23;
+    int statsHeight = STATS_HEIGHT_PER_LINE * lines;
+
+    static const float backgroundColor[4] = { 0.2f, 0.2f, 0.2f, 0.6f };
+
+    int x = std::max((width - (int)STATS_WIDTH) / 2, 0);
+    int y = std::max((height - CENTERED_BACKGROUND_HEIGHT) / 2, 0);
+    int w = STATS_WIDTH;
+    int h = statsHeight;
+    renderBackground(backgroundColor, x, y, w, h);
+
+    int horizontalOffset = x + 5;
+    int verticalOffset = y;
+
+    float scale = 0.10f;
+    float rotation = 0.0f;
+    int font = 2;
+
+    char latencyStatString[512];
+
+    const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
+
+    float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f;
+
+    SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer);
+    if (!audioMixerNodePointer.isNull()) {
+        audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage();
+        inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable();
+        networkRoundtripLatency = audioMixerNodePointer->getPingMs();
+        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
+        outputRingBufferLatency = _outputRingBufferFramesAvailableStats.getWindowAverage() * BUFFER_SEND_INTERVAL_MSECS;
+        audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage();
+    }
+    float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency;
+
+    sprintf(latencyStatString, " Audio input buffer: %7.2fms - avg msecs of samples read to the input ring buffer in last 10s", audioInputBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Input ring buffer: %7.2fms - avg msecs of samples in input ring buffer in last 10s", inputRingBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Network to mixer: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " AudioMixer ring buffer: %7.2fms - avg msecs of samples in audio mixer's ring buffer in last 10s", mixerRingBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Network to client: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Output ring buffer: %7.2fms - avg msecs of samples in output ring buffer in last 10s", outputRingBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Audio output buffer: %7.2fms - avg msecs of samples in audio output buffer in last 10s", audioOutputBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " TOTAL: %7.2fms\n", totalLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
+
+    char downstreamLabelString[] = "Downstream mixed audio stats:";
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
+
+    renderAudioStreamStats(getDownstreamAudioStreamStats(), horizontalOffset, verticalOffset, scale, rotation, font, color, true);
+
+    verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
+
+    char upstreamMicLabelString[] = "Upstream mic audio stats:";
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamMicLabelString, color);
+
+    renderAudioStreamStats(_audioMixerAvatarStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
+
+    foreach(const AudioStreamStats& injectedStreamAudioStats, _audioMixerInjectedStreamAudioStatsMap) {
+
+        verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
+
+        char upstreamInjectedLabelString[512];
+        sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s",
+            injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data());
+        verticalOffset += STATS_HEIGHT_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color);
+
+        renderAudioStreamStats(injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
+    }
+}
+
+void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int horizontalOffset, int& verticalOffset,
+    float scale, float rotation, int font, const float* color, bool isDownstreamStats) {
+
+    char stringBuffer[512];
+
+    sprintf(stringBuffer, " Packet loss | overall: %5.2f%% (%d lost), last_30s: %5.2f%% (%d lost)",
+        streamStats._packetStreamStats.getLostRate() * 100.0f,
+        streamStats._packetStreamStats._numLost,
+        streamStats._packetStreamWindowStats.getLostRate() * 100.0f,
+        streamStats._packetStreamWindowStats._numLost);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    if (isDownstreamStats) {
+        const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
+        sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d",
+            streamStats._ringBufferDesiredJitterBufferFrames,
+            streamStats._ringBufferFramesAvailableAverage,
+            (int)(getAudioOutputAverageMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS),
+            streamStats._ringBufferFramesAvailable,
+            (int)(getAudioOutputMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS));
+    } else {
+        sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u",
+            streamStats._ringBufferDesiredJitterBufferFrames,
+            streamStats._ringBufferFramesAvailableAverage,
+            streamStats._ringBufferFramesAvailable);
+    }
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    sprintf(stringBuffer, " Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u",
+        streamStats._ringBufferStarveCount,
+        streamStats._ringBufferConsecutiveNotMixedCount,
+        streamStats._ringBufferSilentFramesDropped,
+        streamStats._ringBufferOverflowCount);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    sprintf(stringBuffer, " Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s",
+        formatUsecTime(streamStats._timeGapMin).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapMax).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapAverage).toLatin1().data());
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    sprintf(stringBuffer, " Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s",
+        formatUsecTime(streamStats._timeGapWindowMin).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapWindowMax).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapWindowAverage).toLatin1().data());
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+}
+
 void Audio::renderScope(int width, int height) {

     if (!_scopeEnabled)
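Note: the TOTAL line is just the sum of the six pipeline segments, and the two ring-buffer segments are converted from frame counts to milliseconds at the mixer's send interval. A sketch of that conversion (`BUFFER_SEND_INTERVAL_USECS` is the repo's constant; the 10,000us value below is only an example):

```cpp
// One buffered network frame is worth a fixed number of milliseconds at the
// mixer's send interval, so frames-available averages convert directly.
float framesToMsecs(float framesAvailable, float bufferSendIntervalUsecs) {
    const float USECS_PER_MSEC = 1000.0f;
    return framesAvailable * (bufferSendIntervalUsecs / USECS_PER_MSEC);
}
// e.g. 3 frames at a 10,000us send interval -> 3 * 10ms = 30ms of latency
```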
@@ -1622,15 +1820,14 @@ int Audio::calculateNumberOfFrameSamples(int numBytes) const {
     return frameSamples;
 }

-int Audio::getOutputRingBufferFramesAvailable() const {
-    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
-        * (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount());
-
-    return (_audioOutput->bufferSize() - _audioOutput->bytesFree()) * networkOutputToOutputRatio
-        / (sizeof(int16_t) * _ringBuffer.getNumFrameSamples());
+float Audio::getAudioOutputMsecsUnplayed() const {
+    int bytesAudioOutputUnplayed = _audioOutput->bufferSize() - _audioOutput->bytesFree();
+    float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_outputFormat.bytesForDuration(USECS_PER_MSEC);
+    return msecsAudioOutputUnplayed;
 }

-int Audio::getInputRingBufferFramesAvailable() const {
-    float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
-    return _inputRingBuffer.samplesAvailable() / inputToNetworkInputRatio / _inputRingBuffer.getNumFrameSamples();
+float Audio::getInputRingBufferMsecsAvailable() const {
+    int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * sizeof(int16_t);
+    float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
+    return msecsInInputRingBuffer;
 }
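Note: both replacements swap frame counts for milliseconds, which sidesteps the old resampling ratios entirely: bytes queued in the device are measured against the device's own format, so differing sample rates or channel counts between the network and device formats no longer need correcting. A minimal sketch under those assumptions:

```cpp
#include <QAudioOutput>
#include <QAudioFormat>

// Sketch of the "unplayed audio" measurement above: bytes still queued inside
// QAudioOutput are its buffer size minus the bytes currently free, and the
// device's own QAudioFormat converts that to milliseconds regardless of how
// its sample rate or channel count differs from the network format.
float audioOutputMsecsUnplayed(const QAudioOutput& output, const QAudioFormat& format) {
    int bytesUnplayed = output.bufferSize() - output.bytesFree();
    return bytesUnplayed / (float)format.bytesForDuration(1000);  // 1000us = 1ms
}
```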
@@ -71,6 +71,7 @@ public:

     void renderToolBox(int x, int y, bool boxed);
     void renderScope(int width, int height);
+    void renderStats(const float* color, int width, int height);

     int getNetworkSampleRate() { return SAMPLE_RATE; }
     int getNetworkBufferLengthSamplesPerChannel() { return NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }
@@ -78,12 +79,12 @@ public:
     bool getProcessSpatialAudio() const { return _processSpatialAudio; }

     const SequenceNumberStats& getIncomingMixedAudioSequenceNumberStats() const { return _incomingMixedAudioSequenceNumberStats; }

-    int getInputRingBufferFramesAvailable() const;
-    int getInputRingBufferAverageFramesAvailable() const { return (int)_inputRingBufferFramesAvailableStats.getWindowAverage(); }
-
-    int getOutputRingBufferFramesAvailable() const;
-    int getOutputRingBufferAverageFramesAvailable() const { return (int)_audioOutputBufferFramesAvailableStats.getWindowAverage(); }
+    float getInputRingBufferMsecsAvailable() const;
+    float getInputRingBufferAverageMsecsAvailable() const { return (float)_inputRingBufferMsecsAvailableStats.getWindowAverage(); }
+
+    float getAudioOutputMsecsUnplayed() const;
+    float getAudioOutputAverageMsecsUnplayed() const { return (float)_audioOutputMsecsUnplayedStats.getWindowAverage(); }

 public slots:
     void start();
@@ -93,12 +94,14 @@ public slots:
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
     void handleAudioInput();
     void reset();
-    void resetIncomingMixedAudioSequenceNumberStats() { _incomingMixedAudioSequenceNumberStats.reset(); }
+    void resetStats();
+    void audioMixerKilled();
     void toggleMute();
     void toggleAudioNoiseReduction();
     void toggleToneInjection();
     void toggleScope();
     void toggleScopePause();
+    void toggleStats();
     void toggleAudioSpatialProcessing();
     void toggleStereoInput();
     void selectAudioScopeFiveFrames();
@@ -245,6 +248,10 @@ private:
     void renderGrid(const float* color, int x, int y, int width, int height, int rows, int cols);
     void renderLineStrip(const float* color, int x, int y, int n, int offset, const QByteArray* byteArray);

+    // audio stats methods for rendering
+    void renderAudioStreamStats(const AudioStreamStats& streamStats, int horizontalOffset, int& verticalOffset,
+        float scale, float rotation, int font, const float* color, bool isDownstreamStats = false);
+
     // Audio scope data
     static const unsigned int NETWORK_SAMPLES_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
     static const unsigned int DEFAULT_FRAMES_PER_SCOPE = 5;
@@ -261,6 +268,13 @@ private:
     QByteArray* _scopeInput;
     QByteArray* _scopeOutputLeft;
     QByteArray* _scopeOutputRight;
+#ifdef _WIN32
+    static const unsigned int STATS_WIDTH = 1500;
+#else
+    static const unsigned int STATS_WIDTH = 650;
+#endif
+    static const unsigned int STATS_HEIGHT_PER_LINE = 20;
+    bool _statsEnabled;

     int _starveCount;
     int _consecutiveNotMixedCount;
@@ -273,10 +287,11 @@ private:

     MovingMinMaxAvg<quint64> _interframeTimeGapStats;

-    MovingMinMaxAvg<int> _inputRingBufferFramesAvailableStats;
+    MovingMinMaxAvg<float> _audioInputMsecsReadStats;
+    MovingMinMaxAvg<float> _inputRingBufferMsecsAvailableStats;

     MovingMinMaxAvg<int> _outputRingBufferFramesAvailableStats;
-    MovingMinMaxAvg<int> _audioOutputBufferFramesAvailableStats;
+    MovingMinMaxAvg<float> _audioOutputMsecsUnplayedStats;
 };
@@ -593,6 +593,12 @@ Menu::Menu() :
                                            Qt::CTRL | Qt::SHIFT | Qt::Key_U,
                                            false);

+    addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioStats,
+                                           0,
+                                           false,
+                                           appInstance->getAudio(),
+                                           SLOT(toggleStats()));
+
     addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, true);

     addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
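Note: `addCheckableActionToQMenuAndActionHash()` is the app's helper for a checkable menu item wired to a slot. In plain Qt the equivalent wiring looks roughly like the sketch below (the function name here is illustrative, not the repo's helper):

```cpp
#include <QMenu>
#include <QAction>

// Create a checkable "Audio Stats" item that flips the overlay flag each time
// it is triggered; moc's cloned signal entries let SIGNAL(triggered()) match.
QAction* addStatsToggle(QMenu* menu, QObject* audio) {
    QAction* action = menu->addAction("Audio Stats");
    action->setCheckable(true);
    action->setChecked(false);
    QObject::connect(action, SIGNAL(triggered()), audio, SLOT(toggleStats()));
    return action;
}
```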
@@ -315,6 +315,7 @@ namespace MenuOption {
     const QString AudioScopeFrames = "Display Frames";
     const QString AudioScopePause = "Pause Audio Scope";
     const QString AudioScopeTwentyFrames = "Twenty";
+    const QString AudioStats = "Audio Stats";
     const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation";
     const QString AudioSpatialProcessing = "Audio Spatial Processing";
     const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation";
@@ -1022,6 +1022,8 @@ void ApplicationOverlay::renderAudioMeter() {

     audio->renderScope(glWidget->width(), glWidget->height());

+    audio->renderStats(WHITE_TEXT, glWidget->width(), glWidget->height());
+
     glBegin(GL_QUADS);
     if (isClipping) {
         glColor3f(1, 0, 0);
@@ -278,9 +278,8 @@ void Stats::display(


     Audio* audio = Application::getInstance()->getAudio();
-    const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamAudioStatsMap = audio->getAudioMixerInjectedStreamAudioStatsMap();

-    lines = _expanded ? 13 + (audioMixerInjectedStreamAudioStatsMap.size() + 2) * 3 : 3;
+    lines = _expanded ? 4 : 3;
     drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
     horizontalOffset += 5;
@@ -313,128 +312,6 @@ void Stats::display(

     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelMaxPing, color);
-
-    char inputAudioLabelString[] = "Input: avail_avg_10s/avail";
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, inputAudioLabelString, color);
-
-    char inputAudioStatsString[512];
-    sprintf(inputAudioStatsString, " %d/%d", audio->getInputRingBufferAverageFramesAvailable(),
-        audio->getInputRingBufferFramesAvailable());
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, inputAudioStatsString, color);
-
-    char audioMixerStatsLabelString[] = "AudioMixer stats:";
-    char streamStatsFormatLabelString[] = "lost%/lost_30s%";
-    char streamStatsFormatLabelString2[] = "desired/avail_avg_10s/avail";
-    char streamStatsFormatLabelString3[] = "gaps: min/max/avg, starv/ovfl";
-    char streamStatsFormatLabelString4[] = "gaps_30s: (same), notmix/sdrop";
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, audioMixerStatsLabelString, color);
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString, color);
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString2, color);
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString3, color);
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString4, color);
-
-    char downstreamLabelString[] = " Downstream:";
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
-
-    char downstreamAudioStatsString[512];
-
-    AudioStreamStats downstreamAudioStreamStats = audio->getDownstreamAudioStreamStats();
-
-    sprintf(downstreamAudioStatsString, " mix: %.2f%%/%.2f%%, %u/%u+%d/%u+%d", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
-        downstreamAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
-        downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames, downstreamAudioStreamStats._ringBufferFramesAvailableAverage,
-        audio->getOutputRingBufferAverageFramesAvailable(),
-        downstreamAudioStreamStats._ringBufferFramesAvailable, audio->getOutputRingBufferFramesAvailable());
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
-
-    sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(downstreamAudioStreamStats._timeGapMin).toLatin1().data(),
-        formatUsecTime(downstreamAudioStreamStats._timeGapMax).toLatin1().data(),
-        formatUsecTime(downstreamAudioStreamStats._timeGapAverage).toLatin1().data(),
-        downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
-
-    sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/?", formatUsecTime(downstreamAudioStreamStats._timeGapWindowMin).toLatin1().data(),
-        formatUsecTime(downstreamAudioStreamStats._timeGapWindowMax).toLatin1().data(),
-        formatUsecTime(downstreamAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
-        downstreamAudioStreamStats._ringBufferConsecutiveNotMixedCount);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
-
-    char upstreamLabelString[] = " Upstream:";
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamLabelString, color);
-
-    char upstreamAudioStatsString[512];
-
-    const AudioStreamStats& audioMixerAvatarAudioStreamStats = audio->getAudioMixerAvatarStreamAudioStats();
-
-    sprintf(upstreamAudioStatsString, " mic: %.2f%%/%.2f%%, %u/%u/%u", audioMixerAvatarAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
-        audioMixerAvatarAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
-        audioMixerAvatarAudioStreamStats._ringBufferDesiredJitterBufferFrames, audioMixerAvatarAudioStreamStats._ringBufferFramesAvailableAverage,
-        audioMixerAvatarAudioStreamStats._ringBufferFramesAvailable);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-    sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMin).toLatin1().data(),
-        formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMax).toLatin1().data(),
-        formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapAverage).toLatin1().data(),
-        audioMixerAvatarAudioStreamStats._ringBufferStarveCount, audioMixerAvatarAudioStreamStats._ringBufferOverflowCount);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-    sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMin).toLatin1().data(),
-        formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMax).toLatin1().data(),
-        formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
-        audioMixerAvatarAudioStreamStats._ringBufferConsecutiveNotMixedCount, audioMixerAvatarAudioStreamStats._ringBufferSilentFramesDropped);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-    foreach(const AudioStreamStats& injectedStreamAudioStats, audioMixerInjectedStreamAudioStatsMap) {
-
-        sprintf(upstreamAudioStatsString, " inj: %.2f%%/%.2f%%, %u/%u/%u", injectedStreamAudioStats._packetStreamStats.getLostRate()*100.0f,
-            injectedStreamAudioStats._packetStreamWindowStats.getLostRate() * 100.0f,
-            injectedStreamAudioStats._ringBufferDesiredJitterBufferFrames, injectedStreamAudioStats._ringBufferFramesAvailableAverage,
-            injectedStreamAudioStats._ringBufferFramesAvailable);
-
-        verticalOffset += STATS_PELS_PER_LINE;
-        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-        sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapMin).toLatin1().data(),
-            formatUsecTime(injectedStreamAudioStats._timeGapMax).toLatin1().data(),
-            formatUsecTime(injectedStreamAudioStats._timeGapAverage).toLatin1().data(),
-            injectedStreamAudioStats._ringBufferStarveCount, injectedStreamAudioStats._ringBufferOverflowCount);
-
-        verticalOffset += STATS_PELS_PER_LINE;
-        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-        sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapWindowMin).toLatin1().data(),
-            formatUsecTime(injectedStreamAudioStats._timeGapWindowMax).toLatin1().data(),
-            formatUsecTime(injectedStreamAudioStats._timeGapWindowAverage).toLatin1().data(),
-            injectedStreamAudioStats._ringBufferConsecutiveNotMixedCount, injectedStreamAudioStats._ringBufferSilentFramesDropped);
-
-        verticalOffset += STATS_PELS_PER_LINE;
-        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-    }
     }

     verticalOffset = 0;
@@ -30,7 +30,7 @@ InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier,

 const uchar MAX_INJECTOR_VOLUME = 255;

-int InjectedAudioRingBuffer::parseData(const QByteArray& packet, int packetsSkipped) {
+int InjectedAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) {
     frameReceivedUpdateTimingStats();

     // setup a data stream to read from this packet
@@ -20,7 +20,7 @@ class InjectedAudioRingBuffer : public PositionalAudioRingBuffer {
 public:
     InjectedAudioRingBuffer(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false);

-    int parseData(const QByteArray& packet, int packetsSkipped = 0);
+    int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped);

     const QUuid& getStreamIdentifier() const { return _streamIdentifier; }
     float getRadius() const { return _radius; }
@@ -45,7 +45,7 @@ public:

     PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);

-    virtual int parseData(const QByteArray& packet, int packetsSkipped = 0) = 0;
+    virtual int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) = 0;

     int parsePositionalData(const QByteArray& positionalByteArray);
     int parseListenModeData(const QByteArray& listenModeByteArray);
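Note: renaming the pure virtual and dropping the `= 0` default argument means every caller must now pass `packetsSkipped` explicitly, and every subclass must override the new name; the compiler flags any call site or override the rename missed. A minimal sketch of the pattern:

```cpp
#include <QByteArray>

// Base class forces subclasses to acknowledge dropped packets explicitly:
// there is no default argument to fall back on.
class RingBufferBase {
public:
    virtual ~RingBufferBase() {}
    virtual int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) = 0;
};

class AvatarBufferLike : public RingBufferBase {
public:
    int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) {
        // conceal packetsSkipped lost frames (e.g. with silence), then parse 'packet'
        return packet.size();
    }
};
```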