Merge pull request #3153 from wangyix/quick_audio_PR
Added more stats to AudioStreamStats; Added template classes RingBufferHistory and MovingMinMaxAvg
This commit is contained in: commit a94e077a75
25 changed files with 1009 additions and 263 deletions
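
Note: the hunks below use the two new template classes named in the commit message, but their headers (MovingMinMaxAvg.h and RingBufferHistory.h) are not part of this excerpt. As a rough, hypothetical sketch of the MovingMinMaxAvg interface implied by its call sites in this diff — a constructor taking an interval length and a window-interval count, update(), overall and windowed min/max/average accessors, and a new-stats-available flag — something like the following could be assumed; the member names and internals here are illustrative guesses, not the actual implementation:

// Hypothetical sketch of MovingMinMaxAvg as implied by its usage in this PR (not the real header).
// Samples are grouped into fixed-size intervals; "window" stats cover the last N completed intervals,
// the plain stats cover everything ever seen.
#include <deque>
#include <limits>

template <typename T>
class MovingMinMaxAvg {
public:
    MovingMinMaxAvg(int intervalLength, int windowIntervals)
        : _intervalLength(intervalLength), _windowIntervals(windowIntervals) {}

    void update(T sample) {
        // overall stats
        if (sample < _min) { _min = sample; }
        if (sample > _max) { _max = sample; }
        _sum += sample;
        _count++;

        // accumulate the current interval; roll it into the window when it fills up
        _currentInterval.push_back(sample);
        if ((int)_currentInterval.size() == _intervalLength) {
            _intervals.push_back(_currentInterval);
            if ((int)_intervals.size() > _windowIntervals) {
                _intervals.pop_front();
            }
            _currentInterval.clear();
            recalculateWindowStats();
            _newStatsAvailable = true;
        }
    }

    T getMin() const { return _min; }
    T getMax() const { return _max; }
    double getAverage() const { return _count ? _sum / _count : 0.0; }
    T getWindowMin() const { return _windowMin; }
    T getWindowMax() const { return _windowMax; }
    double getWindowAverage() const { return _windowAverage; }

    bool getNewStatsAvailableFlag() const { return _newStatsAvailable; }
    void clearNewStatsAvailableFlag() { _newStatsAvailable = false; }

private:
    void recalculateWindowStats() {
        _windowMin = std::numeric_limits<T>::max();
        _windowMax = std::numeric_limits<T>::min();
        double sum = 0.0;
        int n = 0;
        for (const std::deque<T>& interval : _intervals) {
            for (T s : interval) {
                if (s < _windowMin) { _windowMin = s; }
                if (s > _windowMax) { _windowMax = s; }
                sum += s;
                n++;
            }
        }
        _windowAverage = n ? sum / n : 0.0;
    }

    int _intervalLength;
    int _windowIntervals;

    T _min = std::numeric_limits<T>::max();
    T _max = std::numeric_limits<T>::min();
    double _sum = 0.0;
    int _count = 0;

    std::deque<T> _currentInterval;
    std::deque<std::deque<T>> _intervals;
    T _windowMin = std::numeric_limits<T>::max();
    T _windowMax = std::numeric_limits<T>::min();
    double _windowAverage = 0.0;
    bool _newStatsAvailable = false;
};

In this PR it is instantiated as MovingMinMaxAvg<quint64> over interframe time gaps in microseconds, with roughly 1-second intervals and a 30-interval (~30 s) window.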
@@ -405,7 +405,8 @@ void AudioMixer::readPendingDatagrams() {
        if (mixerPacketType == PacketTypeMicrophoneAudioNoEcho
            || mixerPacketType == PacketTypeMicrophoneAudioWithEcho
            || mixerPacketType == PacketTypeInjectAudio
            || mixerPacketType == PacketTypeSilentAudioFrame) {
            || mixerPacketType == PacketTypeSilentAudioFrame
            || mixerPacketType == PacketTypeAudioStreamStats) {

            nodeList->findNodeAndUpdateWithDataFromPacket(receivedPacket);
        } else if (mixerPacketType == PacketTypeMuteEnvironment) {

@@ -640,9 +641,6 @@ void AudioMixer::run() {
            ++framesSinceCutoffEvent;
        }

        const quint64 TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;

        bool sendAudioStreamStats = false;
        quint64 now = usecTimestampNow();
        if (now - _lastSendAudioStreamStatsTime > TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS) {
@@ -21,6 +21,8 @@ class AvatarAudioRingBuffer;

const int SAMPLE_PHASE_DELAY_AT_90 = 20;

const quint64 TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;

/// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
class AudioMixer : public ThreadedAssignment {
    Q_OBJECT
@@ -18,11 +18,15 @@
#include "AudioMixer.h"
#include "AudioMixerClientData.h"
#include "MovingMinMaxAvg.h"

const int INCOMING_SEQ_STATS_HISTORY_LENGTH = INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS /
    (TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS / USECS_PER_SECOND);

AudioMixerClientData::AudioMixerClientData() :
    _ringBuffers(),
    _outgoingMixedAudioSequenceNumber(0),
    _incomingAvatarAudioSequenceNumberStats()
    _incomingAvatarAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH)
{

}

@@ -82,12 +86,15 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
        // ask the AvatarAudioRingBuffer instance to parse the data
        avatarRingBuffer->parseData(packet);
    } else {
    } else if (packetType == PacketTypeInjectAudio) {
        // this is injected audio

        // grab the stream identifier for this injected audio
        QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(numBytesForPacketHeader(packet) + sizeof(quint16), NUM_BYTES_RFC4122_UUID));

        if (!_incomingInjectedAudioSequenceNumberStatsMap.contains(streamIdentifier)) {
            _incomingInjectedAudioSequenceNumberStatsMap.insert(streamIdentifier, SequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH));
        }
        _incomingInjectedAudioSequenceNumberStatsMap[streamIdentifier].sequenceNumberReceived(sequence);

        InjectedAudioRingBuffer* matchingInjectedRingBuffer = NULL;

@@ -106,6 +113,15 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
        }

        matchingInjectedRingBuffer->parseData(packet);
    } else if (packetType == PacketTypeAudioStreamStats) {

        const char* dataAt = packet.data();

        // skip over header, appendFlag, and num stats packed
        dataAt += (numBytesPacketHeader + sizeof(quint8) + sizeof(quint16));

        // read the downstream audio stream stats
        memcpy(&_downstreamAudioStreamStats, dataAt, sizeof(AudioStreamStats));
    }

    return 0;

@@ -159,31 +175,51 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
}

AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const {

    AudioStreamStats streamStats;
    SequenceNumberStats streamSequenceNumberStats;

    streamStats._streamType = ringBuffer->getType();
    if (streamStats._streamType == PositionalAudioRingBuffer::Injector) {
        streamStats._streamIdentifier = ((InjectedAudioRingBuffer*)ringBuffer)->getStreamIdentifier();
        streamSequenceNumberStats = _incomingInjectedAudioSequenceNumberStatsMap.value(streamStats._streamIdentifier);
        const SequenceNumberStats& sequenceNumberStats = _incomingInjectedAudioSequenceNumberStatsMap[streamStats._streamIdentifier];
        streamStats._packetStreamStats = sequenceNumberStats.getStats();
        streamStats._packetStreamWindowStats = sequenceNumberStats.getStatsForHistoryWindow();
    } else {
        streamSequenceNumberStats = _incomingAvatarAudioSequenceNumberStats;
        streamStats._packetStreamStats = _incomingAvatarAudioSequenceNumberStats.getStats();
        streamStats._packetStreamWindowStats = _incomingAvatarAudioSequenceNumberStats.getStatsForHistoryWindow();
    }
    streamStats._jitterBufferFrames = ringBuffer->getCurrentJitterBufferFrames();

    streamStats._packetsReceived = streamSequenceNumberStats.getNumReceived();
    streamStats._packetsUnreasonable = streamSequenceNumberStats.getNumUnreasonable();
    streamStats._packetsEarly = streamSequenceNumberStats.getNumEarly();
    streamStats._packetsLate = streamSequenceNumberStats.getNumLate();
    streamStats._packetsLost = streamSequenceNumberStats.getNumLost();
    streamStats._packetsRecovered = streamSequenceNumberStats.getNumRecovered();
    streamStats._packetsDuplicate = streamSequenceNumberStats.getNumDuplicate();

    const MovingMinMaxAvg<quint64>& timeGapStats = ringBuffer->getInterframeTimeGapStatsForStatsPacket();
    streamStats._timeGapMin = timeGapStats.getMin();
    streamStats._timeGapMax = timeGapStats.getMax();
    streamStats._timeGapAverage = timeGapStats.getAverage();
    streamStats._timeGapWindowMin = timeGapStats.getWindowMin();
    streamStats._timeGapWindowMax = timeGapStats.getWindowMax();
    streamStats._timeGapWindowAverage = timeGapStats.getWindowAverage();

    streamStats._ringBufferFramesAvailable = ringBuffer->framesAvailable();
    streamStats._ringBufferCurrentJitterBufferFrames = ringBuffer->getCurrentJitterBufferFrames();
    streamStats._ringBufferDesiredJitterBufferFrames = ringBuffer->getDesiredJitterBufferFrames();
    streamStats._ringBufferStarveCount = ringBuffer->getStarveCount();
    streamStats._ringBufferConsecutiveNotMixedCount = ringBuffer->getConsecutiveNotMixedCount();
    streamStats._ringBufferOverflowCount = ringBuffer->getOverflowCount();
    streamStats._ringBufferSilentFramesDropped = ringBuffer->getSilentFramesDropped();

    return streamStats;
}

void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) const {
void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) {

    // have all the seq number stats of each audio stream push their current stats into their history,
    // which moves that history window 1 second forward (since that's how long since the last stats were pushed into history)
    _incomingAvatarAudioSequenceNumberStats.pushStatsToHistory();
    QHash<QUuid, SequenceNumberStats>::Iterator i = _incomingInjectedAudioSequenceNumberStatsMap.begin();
    QHash<QUuid, SequenceNumberStats>::Iterator end = _incomingInjectedAudioSequenceNumberStatsMap.end();
    while (i != end) {
        i.value().pushStatsToHistory();
        i++;
    }

    char packet[MAX_PACKET_SIZE];
    NodeList* nodeList = NodeList::getInstance();

@@ -234,46 +270,63 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&

QString AudioMixerClientData::getAudioStreamStatsString() const {
    QString result;
    AudioStreamStats streamStats = _downstreamAudioStreamStats;
    result += "DOWNSTREAM.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
        + " current: ?"
        + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
        + " starves:" + QString::number(streamStats._ringBufferStarveCount)
        + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
        + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
        + " silents_dropped: ?"
        + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
        + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
        + " min_gap:" + QString::number(streamStats._timeGapMin)
        + " max_gap:" + QString::number(streamStats._timeGapMax)
        + " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
        + " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
        + " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
        + " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);

    AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
    if (avatarRingBuffer) {
        int desiredJitterBuffer = avatarRingBuffer->getDesiredJitterBufferFrames();
        int calculatedJitterBuffer = avatarRingBuffer->getCalculatedDesiredJitterBufferFrames();
        int currentJitterBuffer = avatarRingBuffer->getCurrentJitterBufferFrames();
        int overflowCount = avatarRingBuffer->getOverflowCount();
        int samplesAvailable = avatarRingBuffer->samplesAvailable();
        int framesAvailable = (samplesAvailable / avatarRingBuffer->getSamplesPerFrame());
        AudioStreamStats streamStats = getAudioStreamStatsOfStream(avatarRingBuffer);
        result += "mic.desired:" + QString::number(desiredJitterBuffer)
            + " calculated:" + QString::number(calculatedJitterBuffer)
            + " current:" + QString::number(currentJitterBuffer)
            + " available:" + QString::number(framesAvailable)
            + " samples:" + QString::number(samplesAvailable)
            + " overflows:" + QString::number(overflowCount)
            + " early:" + QString::number(streamStats._packetsEarly)
            + " late:" + QString::number(streamStats._packetsLate)
            + " lost:" + QString::number(streamStats._packetsLost);
        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
            + " current:" + QString::number(streamStats._ringBufferCurrentJitterBufferFrames)
            + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
            + " starves:" + QString::number(streamStats._ringBufferStarveCount)
            + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
            + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
            + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
            + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
            + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
            + " min_gap:" + QString::number(streamStats._timeGapMin)
            + " max_gap:" + QString::number(streamStats._timeGapMax)
            + " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
            + " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
            + " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
            + " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
    } else {
        result = "mic unknown";
    }

    for (int i = 0; i < _ringBuffers.size(); i++) {
        if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector) {
            int desiredJitterBuffer = _ringBuffers[i]->getDesiredJitterBufferFrames();
            int calculatedJitterBuffer = _ringBuffers[i]->getCalculatedDesiredJitterBufferFrames();
            int currentJitterBuffer = _ringBuffers[i]->getCurrentJitterBufferFrames();
            int overflowCount = _ringBuffers[i]->getOverflowCount();
            int samplesAvailable = _ringBuffers[i]->samplesAvailable();
            int framesAvailable = (samplesAvailable / _ringBuffers[i]->getSamplesPerFrame());
            AudioStreamStats streamStats = getAudioStreamStatsOfStream(_ringBuffers[i]);
            result += "| injected[" + QString::number(i) + "].desired:" + QString::number(desiredJitterBuffer)
                + " calculated:" + QString::number(calculatedJitterBuffer)
                + " current:" + QString::number(currentJitterBuffer)
                + " available:" + QString::number(framesAvailable)
                + " samples:" + QString::number(samplesAvailable)
                + " overflows:" + QString::number(overflowCount)
                + " early:" + QString::number(streamStats._packetsEarly)
                + " late:" + QString::number(streamStats._packetsLate)
                + " lost:" + QString::number(streamStats._packetsLost);
            result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
                + " current:" + QString::number(streamStats._ringBufferCurrentJitterBufferFrames)
                + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
                + " starves:" + QString::number(streamStats._ringBufferStarveCount)
                + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
                + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
                + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
                + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
                + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
                + " min_gap:" + QString::number(streamStats._timeGapMin)
                + " max_gap:" + QString::number(streamStats._timeGapMax)
                + " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
                + " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
                + " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
                + " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
        }
    }
    return result;
@@ -20,6 +20,9 @@
#include "AudioStreamStats.h"
#include "SequenceNumberStats.h"

const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;

class AudioMixerClientData : public NodeData {
public:
    AudioMixerClientData();

@@ -35,7 +38,7 @@ public:
    AudioStreamStats getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const;
    QString getAudioStreamStatsString() const;

    void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) const;
    void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode);

    void incrementOutgoingMixedAudioSequenceNumber() { _outgoingMixedAudioSequenceNumber++; }
    quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }

@@ -46,6 +49,8 @@ private:
    quint16 _outgoingMixedAudioSequenceNumber;
    SequenceNumberStats _incomingAvatarAudioSequenceNumberStats;
    QHash<QUuid, SequenceNumberStats> _incomingInjectedAudioSequenceNumberStatsMap;

    AudioStreamStats _downstreamAudioStreamStats;
};

#endif // hifi_AudioMixerClientData_h
@@ -19,7 +19,7 @@ AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBu
}

int AvatarAudioRingBuffer::parseData(const QByteArray& packet) {
    _interframeTimeGapStats.frameReceived();
    timeGapStatsFrameReceived();
    updateDesiredJitterBufferFrames();

    _shouldLoopbackForNode = (packetTypeForPacket(packet) == PacketTypeMicrophoneAudioWithEcho);
@@ -172,7 +172,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
    _runningScriptsWidget(NULL),
    _runningScriptsWidgetWasVisible(false),
    _trayIcon(new QSystemTrayIcon(_window)),
    _lastNackTime(usecTimestampNow())
    _lastNackTime(usecTimestampNow()),
    _lastSendDownstreamAudioStats(usecTimestampNow())
{
    // read the ApplicationInfo.ini file for Name/Version/Domain information
    QSettings applicationInfo(Application::resourcesPath() + "info/ApplicationInfo.ini", QSettings::IniFormat);

@@ -2124,10 +2125,11 @@ void Application::updateMyAvatar(float deltaTime) {
        loadViewFrustum(_myCamera, _viewFrustum);
    }

    quint64 now = usecTimestampNow();

    // Update my voxel servers with my current voxel query...
    {
        PerformanceTimer perfTimer("queryOctree");
        quint64 now = usecTimestampNow();
        quint64 sinceLastQuery = now - _lastQueriedTime;
        const quint64 TOO_LONG_SINCE_LAST_QUERY = 3 * USECS_PER_SECOND;
        bool queryIsDue = sinceLastQuery > TOO_LONG_SINCE_LAST_QUERY;

@@ -2145,7 +2147,6 @@ void Application::updateMyAvatar(float deltaTime) {

    // sent nack packets containing missing sequence numbers of received packets from nodes
    {
        quint64 now = usecTimestampNow();
        quint64 sinceLastNack = now - _lastNackTime;
        const quint64 TOO_LONG_SINCE_LAST_NACK = 1 * USECS_PER_SECOND;
        if (sinceLastNack > TOO_LONG_SINCE_LAST_NACK) {

@@ -2153,6 +2154,15 @@ void Application::updateMyAvatar(float deltaTime) {
            sendNackPackets();
        }
    }

    {
        quint64 sinceLastNack = now - _lastSendDownstreamAudioStats;
        if (sinceLastNack > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) {
            _lastSendDownstreamAudioStats = now;

            QMetaObject::invokeMethod(&_audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
        }
    }
}

int Application::sendNackPackets() {
@@ -125,6 +125,8 @@ static const float MIRROR_REARVIEW_DISTANCE = 0.65f;
static const float MIRROR_REARVIEW_BODY_DISTANCE = 2.3f;
static const float MIRROR_FIELD_OF_VIEW = 30.0f;

static const quint64 TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS = 1 * USECS_PER_SECOND;

class Application : public QApplication {
    Q_OBJECT

@@ -586,6 +588,7 @@ private:
    QSystemTrayIcon* _trayIcon;

    quint64 _lastNackTime;
    quint64 _lastSendDownstreamAudioStats;
};

#endif // hifi_Application_h
@@ -48,9 +48,18 @@ static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_

static const int NUMBER_OF_NOISE_SAMPLE_FRAMES = 300;

// audio frames time gap stats (min/max/avg) for last ~30 seconds are recalculated every ~1 second
static const int TIME_GAPS_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
static const int TIME_GAP_STATS_WINDOW_INTERVALS = 30;

// incoming sequence number stats history will cover last 30s
static const int INCOMING_SEQ_STATS_HISTORY_LENGTH = INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS /
    (TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS / USECS_PER_SECOND);

// Mute icon configration
static const int MUTE_ICON_SIZE = 24;

Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
    AbstractAudioInterface(parent),
    _audioInput(NULL),

@@ -103,8 +112,12 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
    _scopeInput(0),
    _scopeOutputLeft(0),
    _scopeOutputRight(0),
    _audioMixerAvatarStreamStats(),
    _outgoingAvatarAudioSequenceNumber(0)
    _audioMixerAvatarStreamAudioStats(),
    _outgoingAvatarAudioSequenceNumber(0),
    _incomingMixedAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH),
    _interframeTimeGapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS),
    _starveCount(0),
    _consecutiveNotMixedCount(0)
{
    // clear the array of locally injected samples
    memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);

@@ -120,8 +133,14 @@ void Audio::init(QGLWidget *parent) {

void Audio::reset() {
    _ringBuffer.reset();

    _starveCount = 0;
    _consecutiveNotMixedCount = 0;

    _audioMixerAvatarStreamAudioStats = AudioStreamStats();
    _audioMixerInjectedStreamAudioStatsMap.clear();

    _outgoingAvatarAudioSequenceNumber = 0;
    _audioMixerInjectedStreamStatsMap.clear();
    _incomingMixedAudioSequenceNumberStats.reset();
}

@@ -689,7 +708,9 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {

    _totalPacketsReceived++;

    double timeDiff = (double)_timeSinceLastReceived.nsecsElapsed() / 1000000.0; // ns to ms
    double timeDiff = (double)_timeSinceLastReceived.nsecsElapsed() / 1000.0; // ns to us
    _interframeTimeGapStats.update((quint64)timeDiff);
    timeDiff /= USECS_PER_MSEC; // us to ms
    _timeSinceLastReceived.start();

    // Discard first few received packets for computing jitter (often they pile up on start)

@@ -726,7 +747,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
    quint8 appendFlag = *(reinterpret_cast<const quint16*>(dataAt));
    dataAt += sizeof(quint8);
    if (!appendFlag) {
        _audioMixerInjectedStreamStatsMap.clear();
        _audioMixerInjectedStreamAudioStatsMap.clear();
    }

    // parse the number of stream stats structs to follow

@@ -740,13 +761,72 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
        dataAt += sizeof(AudioStreamStats);

        if (streamStats._streamType == PositionalAudioRingBuffer::Microphone) {
            _audioMixerAvatarStreamStats = streamStats;
            _audioMixerAvatarStreamAudioStats = streamStats;
        } else {
            _audioMixerInjectedStreamStatsMap[streamStats._streamIdentifier] = streamStats;
            _audioMixerInjectedStreamAudioStatsMap[streamStats._streamIdentifier] = streamStats;
        }
    }
}

AudioStreamStats Audio::getDownstreamAudioStreamStats() const {

    AudioStreamStats stats;
    stats._streamType = PositionalAudioRingBuffer::Microphone;

    stats._timeGapMin = _interframeTimeGapStats.getMin();
    stats._timeGapMax = _interframeTimeGapStats.getMax();
    stats._timeGapAverage = _interframeTimeGapStats.getAverage();
    stats._timeGapWindowMin = _interframeTimeGapStats.getWindowMin();
    stats._timeGapWindowMax = _interframeTimeGapStats.getWindowMax();
    stats._timeGapWindowAverage = _interframeTimeGapStats.getWindowAverage();

    stats._ringBufferFramesAvailable = _ringBuffer.framesAvailable();
    stats._ringBufferCurrentJitterBufferFrames = 0;
    stats._ringBufferDesiredJitterBufferFrames = getDesiredJitterBufferFrames();
    stats._ringBufferStarveCount = _starveCount;
    stats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount;
    stats._ringBufferOverflowCount = _ringBuffer.getOverflowCount();
    stats._ringBufferSilentFramesDropped = 0;

    stats._packetStreamStats = _incomingMixedAudioSequenceNumberStats.getStats();
    stats._packetStreamWindowStats = _incomingMixedAudioSequenceNumberStats.getStatsForHistoryWindow();

    return stats;
}

void Audio::sendDownstreamAudioStatsPacket() {

    // push the current seq number stats into history, which moves the history window forward 1s
    // (since that's how often pushStatsToHistory() is called)
    _incomingMixedAudioSequenceNumberStats.pushStatsToHistory();

    char packet[MAX_PACKET_SIZE];

    // pack header
    int numBytesPacketHeader = populatePacketHeader(packet, PacketTypeAudioStreamStats);
    char* dataAt = packet + numBytesPacketHeader;

    // pack append flag
    quint8 appendFlag = 0;
    memcpy(dataAt, &appendFlag, sizeof(quint8));
    dataAt += sizeof(quint8);

    // pack number of stats packed
    quint16 numStreamStatsToPack = 1;
    memcpy(dataAt, &numStreamStatsToPack, sizeof(quint16));
    dataAt += sizeof(quint16);

    // pack downstream audio stream stats
    AudioStreamStats stats = getDownstreamAudioStreamStats();
    memcpy(dataAt, &stats, sizeof(AudioStreamStats));
    dataAt += sizeof(AudioStreamStats);

    // send packet
    NodeList* nodeList = NodeList::getInstance();
    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
    nodeList->writeDatagram(packet, dataAt - packet, audioMixer);
}

// NOTE: numSamples is the total number of single channel samples, since callers will always call this with stereo
// data we know that we will have 2x samples for each stereo time sample at the format's sample rate
void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {

@@ -867,6 +947,9 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
            //qDebug() << "Audio output just starved.";
            _ringBuffer.setIsStarved(true);
            _numFramesDisplayStarve = 10;

            _starveCount++;
            _consecutiveNotMixedCount = 0;
        }

        int numNetworkOutputSamples;

@@ -886,6 +969,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
        if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) {
            // We are still waiting for enough samples to begin playback
            // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback;
            _consecutiveNotMixedCount++;
        } else {
            int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
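
From sendDownstreamAudioStatsPacket() and parseAudioStreamStatsPacket() above, a PacketTypeAudioStreamStats payload appears to be laid out as: packet header, a quint8 append flag, a quint16 count of stream-stats structs, then that many AudioStreamStats structs copied raw with memcpy. A minimal standalone sketch of a reader for that payload follows; the helper name and the use of std::vector are assumptions for illustration, not part of this PR:

// Hypothetical reader for the AudioStreamStats payload body (after header, append flag, and count
// have already been consumed), matching the memcpy-based packing used above.
#include <QtGlobal>
#include <cstring>
#include <vector>
#include "AudioStreamStats.h"

std::vector<AudioStreamStats> readStreamStatsPayload(const char* dataAt, quint16 numStreamStats) {
    std::vector<AudioStreamStats> stats(numStreamStats);
    for (quint16 i = 0; i < numStreamStats; i++) {
        // structs are packed back-to-back exactly as the sender wrote them
        memcpy(&stats[i], dataAt, sizeof(AudioStreamStats));
        dataAt += sizeof(AudioStreamStats);
    }
    return stats;
}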
@@ -17,6 +17,8 @@

#include "InterfaceConfig.h"
#include "AudioStreamStats.h"
#include "RingBufferHistory.h"
#include "MovingMinMaxAvg.h"

#include <QAudio>
#include <QAudioInput>

@@ -34,6 +36,8 @@

static const int NUM_AUDIO_CHANNELS = 2;

static const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;

class QAudioInput;
class QAudioOutput;
class QIODevice;

@@ -97,6 +101,9 @@ public slots:

    virtual void handleAudioByteArray(const QByteArray& audioByteArray);

    AudioStreamStats getDownstreamAudioStreamStats() const;
    void sendDownstreamAudioStatsPacket();

    bool switchInputToAudioDevice(const QString& inputDeviceName);
    bool switchOutputToAudioDevice(const QString& outputDeviceName);
    QString getDeviceName(QAudio::Mode mode) const { return (mode == QAudio::AudioInput) ?

@@ -107,8 +114,16 @@ public slots:
    float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
    void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }

    const AudioStreamStats& getAudioMixerAvatarStreamStats() const { return _audioMixerAvatarStreamStats; }
    const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamStatsMap() const { return _audioMixerInjectedStreamStatsMap; }
    const AudioRingBuffer& getDownstreamRingBuffer() const { return _ringBuffer; }

    int getDesiredJitterBufferFrames() const { return _jitterBufferSamples / _ringBuffer.getNumFrameSamples(); }

    int getStarveCount() const { return _starveCount; }
    int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; }

    const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
    const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }
    const MovingMinMaxAvg<quint64>& getInterframeTimeGapStats() const { return _interframeTimeGapStats; }

signals:
    bool muteToggled();

@@ -241,11 +256,16 @@ private:
    QByteArray* _scopeOutputLeft;
    QByteArray* _scopeOutputRight;

    AudioStreamStats _audioMixerAvatarStreamStats;
    QHash<QUuid, AudioStreamStats> _audioMixerInjectedStreamStatsMap;
    int _starveCount;
    int _consecutiveNotMixedCount;

    AudioStreamStats _audioMixerAvatarStreamAudioStats;
    QHash<QUuid, AudioStreamStats> _audioMixerInjectedStreamAudioStatsMap;

    quint16 _outgoingAvatarAudioSequenceNumber;
    SequenceNumberStats _incomingMixedAudioSequenceNumberStats;

    MovingMinMaxAvg<quint64> _interframeTimeGapStats;
};
@@ -24,6 +24,7 @@
#include "InterfaceConfig.h"
#include "Menu.h"
#include "Util.h"
#include "SequenceNumberStats.h"

using namespace std;

@@ -288,15 +289,12 @@ void Stats::display(

    Audio* audio = Application::getInstance()->getAudio();
    const AudioStreamStats& audioMixerAvatarStreamStats = audio->getAudioMixerAvatarStreamStats();
    const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamStatsMap = audio->getAudioMixerInjectedStreamStatsMap();
    const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamAudioStatsMap = audio->getAudioMixerInjectedStreamAudioStatsMap();

    lines = _expanded ? 10 + audioMixerInjectedStreamStatsMap.size(): 3;
    lines = _expanded ? 11 + (audioMixerInjectedStreamAudioStatsMap.size() + 2) * 3 : 3;
    drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
    horizontalOffset += 5;

    char audioJitter[30];
    sprintf(audioJitter,
        "Buffer msecs %.1f",

@@ -328,43 +326,103 @@ void Stats::display(
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelMaxPing, color);

    char audioMixerStatsLabelString[] = "AudioMixer stats:";
    char streamStatsFormatLabelString[] = "early/late/lost, jframes";
    char streamStatsFormatLabelString[] = "lost%/30s_lost%";
    char streamStatsFormatLabelString2[] = "avail/currJ/desiredJ";
    char streamStatsFormatLabelString3[] = "gaps: min/max/avg, starv/ovfl";
    char streamStatsFormatLabelString4[] = "30s gaps: (same), notmix/sdrop";

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, audioMixerStatsLabelString, color);
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString, color);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString2, color);
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString3, color);
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString4, color);

    char downstreamLabelString[] = " Downstream:";
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);

    const SequenceNumberStats& downstreamAudioSequenceNumberStats = audio->getIncomingMixedAudioSequenceNumberStats();
    char downstreamAudioStatsString[30];
    sprintf(downstreamAudioStatsString, " mix: %d/%d/%d, %d", downstreamAudioSequenceNumberStats.getNumEarly(),
        downstreamAudioSequenceNumberStats.getNumLate(), downstreamAudioSequenceNumberStats.getNumLost(),
        audio->getJitterBufferSamples() / NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);

    AudioStreamStats downstreamAudioStreamStats = audio->getDownstreamAudioStreamStats();

    sprintf(downstreamAudioStatsString, " mix: %.1f%%/%.1f%%, %u/?/%u", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
        downstreamAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
        downstreamAudioStreamStats._ringBufferFramesAvailable, downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", downstreamAudioStreamStats._timeGapMin,
        downstreamAudioStreamStats._timeGapMax, downstreamAudioStreamStats._timeGapAverage,
        downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/?", downstreamAudioStreamStats._timeGapWindowMin,
        downstreamAudioStreamStats._timeGapWindowMax, downstreamAudioStreamStats._timeGapWindowAverage,
        downstreamAudioStreamStats._ringBufferConsecutiveNotMixedCount);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

    char upstreamLabelString[] = " Upstream:";
    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamLabelString, color);

    char upstreamAudioStatsString[30];
    sprintf(upstreamAudioStatsString, " mic: %d/%d/%d, %d", audioMixerAvatarStreamStats._packetsEarly,
        audioMixerAvatarStreamStats._packetsLate, audioMixerAvatarStreamStats._packetsLost,
        audioMixerAvatarStreamStats._jitterBufferFrames);

    const AudioStreamStats& audioMixerAvatarAudioStreamStats = audio->getAudioMixerAvatarStreamAudioStats();

    sprintf(upstreamAudioStatsString, " mic: %.1f%%/%.1f%%, %u/%u/%u", audioMixerAvatarAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
        audioMixerAvatarAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
        audioMixerAvatarAudioStreamStats._ringBufferFramesAvailable, audioMixerAvatarAudioStreamStats._ringBufferCurrentJitterBufferFrames,
        audioMixerAvatarAudioStreamStats._ringBufferDesiredJitterBufferFrames);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

    foreach(AudioStreamStats injectedStreamStats, audioMixerInjectedStreamStatsMap) {
        sprintf(upstreamAudioStatsString, " inj: %d/%d/%d, %d", injectedStreamStats._packetsEarly,
            injectedStreamStats._packetsLate, injectedStreamStats._packetsLost, injectedStreamStats._jitterBufferFrames);

    sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapMin,
        audioMixerAvatarAudioStreamStats._timeGapMax, audioMixerAvatarAudioStreamStats._timeGapAverage,
        audioMixerAvatarAudioStreamStats._ringBufferStarveCount, audioMixerAvatarAudioStreamStats._ringBufferOverflowCount);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

    sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapWindowMin,
        audioMixerAvatarAudioStreamStats._timeGapWindowMax, audioMixerAvatarAudioStreamStats._timeGapWindowAverage,
        audioMixerAvatarAudioStreamStats._ringBufferConsecutiveNotMixedCount, audioMixerAvatarAudioStreamStats._ringBufferSilentFramesDropped);

    verticalOffset += STATS_PELS_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

    foreach(const AudioStreamStats& injectedStreamAudioStats, audioMixerInjectedStreamAudioStatsMap) {

        sprintf(upstreamAudioStatsString, " inj: %.1f%%/%.1f%%, %u/%u/%u", injectedStreamAudioStats._packetStreamStats.getLostRate()*100.0f,
            injectedStreamAudioStats._packetStreamWindowStats.getLostRate() * 100.0f,
            injectedStreamAudioStats._ringBufferFramesAvailable, injectedStreamAudioStats._ringBufferCurrentJitterBufferFrames,
            injectedStreamAudioStats._ringBufferDesiredJitterBufferFrames);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapMin,
            injectedStreamAudioStats._timeGapMax, injectedStreamAudioStats._timeGapAverage,
            injectedStreamAudioStats._ringBufferStarveCount, injectedStreamAudioStats._ringBufferOverflowCount);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapWindowMin,
            injectedStreamAudioStats._timeGapWindowMax, injectedStreamAudioStats._timeGapWindowAverage,
            injectedStreamAudioStats._ringBufferConsecutiveNotMixedCount, injectedStreamAudioStats._ringBufferSilentFramesDropped);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
    }
@@ -20,6 +20,7 @@
#include <QtCore/QIODevice>

#include "NodeData.h"
#include "SharedUtil.h"

const int SAMPLE_RATE = 24000;

@@ -29,7 +30,7 @@ const int NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL = 512;
const int NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL / sizeof(int16_t);

const unsigned int BUFFER_SEND_INTERVAL_USECS = floorf((NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
    / (float) SAMPLE_RATE) * 1000 * 1000);
    / (float) SAMPLE_RATE) * USECS_PER_SECOND);

const int MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max();
const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();

@@ -65,6 +66,9 @@ public:
    void shiftReadPosition(unsigned int numSamples);

    int samplesAvailable() const;
    int framesAvailable() const { return samplesAvailable() / _numFrameSamples; }

    int getNumFrameSamples() const { return _numFrameSamples; }

    bool isNotStarvedOrHasMinimumSamples(int numRequiredSamples) const;
@@ -13,34 +13,50 @@
#define hifi_AudioStreamStats_h

#include "PositionalAudioRingBuffer.h"
#include "SequenceNumberStats.h"

class AudioStreamStats {
public:
    AudioStreamStats()
        : _streamType(PositionalAudioRingBuffer::Microphone),
        _streamIdentifier(),
        _jitterBufferFrames(0),
        _packetsReceived(0),
        _packetsUnreasonable(0),
        _packetsEarly(0),
        _packetsLate(0),
        _packetsLost(0),
        _packetsRecovered(0),
        _packetsDuplicate(0)
        _timeGapMin(0),
        _timeGapMax(0),
        _timeGapAverage(0.0f),
        _timeGapWindowMin(0),
        _timeGapWindowMax(0),
        _timeGapWindowAverage(0.0f),
        _ringBufferFramesAvailable(0),
        _ringBufferCurrentJitterBufferFrames(0),
        _ringBufferDesiredJitterBufferFrames(0),
        _ringBufferStarveCount(0),
        _ringBufferConsecutiveNotMixedCount(0),
        _ringBufferOverflowCount(0),
        _ringBufferSilentFramesDropped(0),
        _packetStreamStats(),
        _packetStreamWindowStats()
    {}

    PositionalAudioRingBuffer::Type _streamType;
    QUuid _streamIdentifier;

    quint16 _jitterBufferFrames;
    quint64 _timeGapMin;
    quint64 _timeGapMax;
    float _timeGapAverage;
    quint64 _timeGapWindowMin;
    quint64 _timeGapWindowMax;
    float _timeGapWindowAverage;

    quint32 _packetsReceived;
    quint32 _packetsUnreasonable;
    quint32 _packetsEarly;
    quint32 _packetsLate;
    quint32 _packetsLost;
    quint32 _packetsRecovered;
    quint32 _packetsDuplicate;
    quint32 _ringBufferFramesAvailable;
    quint16 _ringBufferCurrentJitterBufferFrames;
    quint16 _ringBufferDesiredJitterBufferFrames;
    quint32 _ringBufferStarveCount;
    quint32 _ringBufferConsecutiveNotMixedCount;
    quint32 _ringBufferOverflowCount;
    quint32 _ringBufferSilentFramesDropped;

    PacketStreamStats _packetStreamStats;
    PacketStreamStats _packetStreamWindowStats;
};

#endif // hifi_AudioStreamStats_h
@@ -31,7 +31,7 @@ InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier,
const uchar MAX_INJECTOR_VOLUME = 255;

int InjectedAudioRingBuffer::parseData(const QByteArray& packet) {
    _interframeTimeGapStats.frameReceived();
    timeGapStatsFrameReceived();
    updateDesiredJitterBufferFrames();

    // setup a data stream to read from this packet
@@ -21,70 +21,6 @@
#include "PositionalAudioRingBuffer.h"
#include "SharedUtil.h"

InterframeTimeGapStats::InterframeTimeGapStats()
    : _lastFrameReceivedTime(0),
    _numSamplesInCurrentInterval(0),
    _currentIntervalMaxGap(0),
    _newestIntervalMaxGapAt(0),
    _windowMaxGap(0),
    _newWindowMaxGapAvailable(false)
{
    memset(_intervalMaxGaps, 0, TIME_GAP_NUM_INTERVALS_IN_WINDOW * sizeof(quint64));
}

void InterframeTimeGapStats::frameReceived() {
    quint64 now = usecTimestampNow();

    // make sure this isn't the first time frameReceived() is called so can actually calculate a gap.
    if (_lastFrameReceivedTime != 0) {
        quint64 gap = now - _lastFrameReceivedTime;

        // update the current interval max
        if (gap > _currentIntervalMaxGap) {
            _currentIntervalMaxGap = gap;

            // keep the window max gap at least as large as the current interval max
            // this allows the window max gap to respond immediately to a sudden spike in gap times
            // also, this prevents the window max gap from staying at 0 until the first interval of samples filled up
            if (_currentIntervalMaxGap > _windowMaxGap) {
                _windowMaxGap = _currentIntervalMaxGap;
                _newWindowMaxGapAvailable = true;
            }
        }
        _numSamplesInCurrentInterval++;

        // if the current interval of samples is now full, record it in our interval maxes
        if (_numSamplesInCurrentInterval == TIME_GAP_NUM_SAMPLES_IN_INTERVAL) {

            // find location to insert this interval's max (increment index cyclically)
            _newestIntervalMaxGapAt = _newestIntervalMaxGapAt == TIME_GAP_NUM_INTERVALS_IN_WINDOW - 1 ? 0 : _newestIntervalMaxGapAt + 1;

            // record the current interval's max gap as the newest
            _intervalMaxGaps[_newestIntervalMaxGapAt] = _currentIntervalMaxGap;

            // update the window max gap, which is the max out of all the past intervals' max gaps
            _windowMaxGap = 0;
            for (int i = 0; i < TIME_GAP_NUM_INTERVALS_IN_WINDOW; i++) {
                if (_intervalMaxGaps[i] > _windowMaxGap) {
                    _windowMaxGap = _intervalMaxGaps[i];
                }
            }
            _newWindowMaxGapAvailable = true;

            // reset the current interval
            _numSamplesInCurrentInterval = 0;
            _currentIntervalMaxGap = 0;
        }
    }
    _lastFrameReceivedTime = now;
}

quint64 InterframeTimeGapStats::getWindowMaxGap() {
    _newWindowMaxGapAvailable = false;
    return _windowMaxGap;
}

PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo, bool dynamicJitterBuffers) :

    AudioRingBuffer(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,

@@ -97,10 +33,15 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::
    _shouldOutputStarveDebug(true),
    _isStereo(isStereo),
    _listenerUnattenuatedZone(NULL),
    _lastFrameReceivedTime(0),
    _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS),
    _interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS),
    _desiredJitterBufferFrames(1),
    _currentJitterBufferFrames(-1),
    _dynamicJitterBuffers(dynamicJitterBuffers),
    _consecutiveNotMixedCount(0)
    _consecutiveNotMixedCount(0),
    _starveCount(0),
    _silentFramesDropped(0)
{
}

@@ -143,9 +84,12 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
            addSilentFrame(numSilentFramesToAdd * samplesPerFrame);
            _currentJitterBufferFrames = _desiredJitterBufferFrames;

            _silentFramesDropped += numFramesToDropDesired;
        } else {
            // we need to drop all frames to get the jitter buffer close as possible to its desired length
            _currentJitterBufferFrames -= numSilentFrames;

            _silentFramesDropped += numSilentFrames;
        }
    } else {
        addSilentFrame(numSilentSamples);

@@ -217,6 +161,7 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
    } else if (samplesAvailable() < samplesPerFrame) {
        // if the buffer doesn't have a full frame of samples to take for mixing, it is starved
        _isStarved = true;
        _starveCount++;

        // set to -1 to indicate the jitter buffer is starved
        _currentJitterBufferFrames = -1;

@@ -224,7 +169,7 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
        // reset our _shouldOutputStarveDebug to true so the next is printed
        _shouldOutputStarveDebug = true;

        _consecutiveNotMixedCount++;
        _consecutiveNotMixedCount = 1;
        return false;
    }

@@ -234,7 +179,6 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
        // minus one (since a frame will be read immediately after this) is the length of the jitter buffer
        _currentJitterBufferFrames = samplesAvailable() / samplesPerFrame - 1;
        _isStarved = false;
        _consecutiveNotMixedCount = 0;
    }

    // since we've read data from ring buffer at least once - we've started

@@ -247,21 +191,31 @@ int PositionalAudioRingBuffer::getCalculatedDesiredJitterBufferFrames() const {
    int calculatedDesiredJitterBufferFrames = 1;
    const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;

    calculatedDesiredJitterBufferFrames = ceilf((float)_interframeTimeGapStats.peekWindowMaxGap() / USECS_PER_FRAME);
    calculatedDesiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
    if (calculatedDesiredJitterBufferFrames < 1) {
        calculatedDesiredJitterBufferFrames = 1;
    }
    return calculatedDesiredJitterBufferFrames;
}

void PositionalAudioRingBuffer::timeGapStatsFrameReceived() {
    quint64 now = usecTimestampNow();
    if (_lastFrameReceivedTime != 0) {
        quint64 gap = now - _lastFrameReceivedTime;
        _interframeTimeGapStatsForJitterCalc.update(gap);
        _interframeTimeGapStatsForStatsPacket.update(gap);
    }
    _lastFrameReceivedTime = now;
}

void PositionalAudioRingBuffer::updateDesiredJitterBufferFrames() {
    if (_interframeTimeGapStats.hasNewWindowMaxGapAvailable()) {
    if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) {
        if (!_dynamicJitterBuffers) {
            _desiredJitterBufferFrames = 1; // HACK to see if this fixes the audio silence
        } else {
            const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;

            _desiredJitterBufferFrames = ceilf((float)_interframeTimeGapStats.getWindowMaxGap() / USECS_PER_FRAME);
            _desiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
            if (_desiredJitterBufferFrames < 1) {
                _desiredJitterBufferFrames = 1;
            }

@@ -270,5 +224,6 @@ void PositionalAudioRingBuffer::updateDesiredJitterBufferFrames() {
            _desiredJitterBufferFrames = maxDesired;
        }
    }
        _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag();
    }
}
@@ -17,31 +17,17 @@
#include <AABox.h>

#include "AudioRingBuffer.h"
#include "MovingMinMaxAvg.h"

// this means that every 500 samples, the max for the past 10*500 samples will be calculated
const int TIME_GAP_NUM_SAMPLES_IN_INTERVAL = 500;
const int TIME_GAP_NUM_INTERVALS_IN_WINDOW = 10;
// the time gaps stats for _desiredJitterBufferFrames calculation
// will recalculate the max for the past 5000 samples every 500 samples
const int TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES = 500;
const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10;

// class used to track time between incoming frames for the purpose of varying the jitter buffer length
class InterframeTimeGapStats {
public:
    InterframeTimeGapStats();

    void frameReceived();
    bool hasNewWindowMaxGapAvailable() const { return _newWindowMaxGapAvailable; }
    quint64 peekWindowMaxGap() const { return _windowMaxGap; }
    quint64 getWindowMaxGap();

private:
    quint64 _lastFrameReceivedTime;

    int _numSamplesInCurrentInterval;
    quint64 _currentIntervalMaxGap;
    quint64 _intervalMaxGaps[TIME_GAP_NUM_INTERVALS_IN_WINDOW];
    int _newestIntervalMaxGapAt;
    quint64 _windowMaxGap;
    bool _newWindowMaxGapAvailable;
};
// the time gap stats for constructing AudioStreamStats will
// recalculate min/max/avg every ~1 second for the past ~30 seconds of time gap data
const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30;

const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;

@@ -79,17 +65,22 @@ public:

    int getSamplesPerFrame() const { return _isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }

    const MovingMinMaxAvg<quint64>& getInterframeTimeGapStatsForStatsPacket() const { return _interframeTimeGapStatsForStatsPacket; }

    int getCalculatedDesiredJitterBufferFrames() const; /// returns what we would calculate our desired as if asked
    int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; }
    int getCurrentJitterBufferFrames() const { return _currentJitterBufferFrames; }

    int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; }
    int getStarveCount() const { return _starveCount; }
    int getSilentFramesDropped() const { return _silentFramesDropped; }

protected:
    // disallow copying of PositionalAudioRingBuffer objects
    PositionalAudioRingBuffer(const PositionalAudioRingBuffer&);
    PositionalAudioRingBuffer& operator= (const PositionalAudioRingBuffer&);

    void timeGapStatsFrameReceived();
    void updateDesiredJitterBufferFrames();

    PositionalAudioRingBuffer::Type _type;

@@ -103,13 +94,18 @@ protected:
    float _nextOutputTrailingLoudness;
    AABox* _listenerUnattenuatedZone;

    InterframeTimeGapStats _interframeTimeGapStats;
    quint64 _lastFrameReceivedTime;
    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForJitterCalc;
    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForStatsPacket;

    int _desiredJitterBufferFrames;
    int _currentJitterBufferFrames;
    bool _dynamicJitterBuffers;

    // extra stats
    int _consecutiveNotMixedCount;
    int _starveCount;
    int _silentFramesDropped;
};

#endif // hifi_PositionalAudioRingBuffer_h
@@ -78,6 +78,8 @@ PacketVersion versionForPacketType(PacketType type) {
            return 2;
        case PacketTypeModelErase:
            return 1;
        case PacketTypeAudioStreamStats:
            return 1;
        default:
            return 0;
    }
@@ -14,8 +14,6 @@

SentPacketHistory::SentPacketHistory(int size)
    : _sentPackets(size),
    _newestPacketAt(0),
    _numExistingPackets(0),
    _newestSequenceNumber(std::numeric_limits<uint16_t>::max())
{
}

@@ -29,16 +27,8 @@ void SentPacketHistory::packetSent(uint16_t sequenceNumber, const QByteArray& pa
        qDebug() << "Unexpected sequence number passed to SentPacketHistory::packetSent()!"
            << "Expected:" << expectedSequenceNumber << "Actual:" << sequenceNumber;
    }

    _newestSequenceNumber = sequenceNumber;

    // increment _newestPacketAt cyclically, insert new packet there.
    // this will overwrite the oldest packet in the buffer
    _newestPacketAt = (_newestPacketAt == _sentPackets.size() - 1) ? 0 : _newestPacketAt + 1;
    _sentPackets[_newestPacketAt] = packet;
    if (_numExistingPackets < _sentPackets.size()) {
        _numExistingPackets++;
    }
    _sentPackets.insert(packet);
}

const QByteArray* SentPacketHistory::getPacket(uint16_t sequenceNumber) const {

@@ -51,13 +41,6 @@ const QByteArray* SentPacketHistory::getPacket(uint16_t sequenceNumber) const {
    if (seqDiff < 0) {
        seqDiff += UINT16_RANGE;
    }
    // if desired sequence number is too old to be found in the history, return null
    if (seqDiff >= _numExistingPackets) {
        return NULL;
    }
    int packetAt = _newestPacketAt - seqDiff;
    if (packetAt < 0) {
        packetAt += _sentPackets.size();
    }
    return &_sentPackets.at(packetAt);

    return _sentPackets.get(seqDiff);
}
@@ -13,7 +13,7 @@

#include <stdint.h>
#include <qbytearray.h>
#include <qvector.h>
#include "RingBufferHistory.h"

#include "SequenceNumberStats.h"

@@ -26,9 +26,7 @@ public:
    const QByteArray* getPacket(uint16_t sequenceNumber) const;

private:
    QVector<QByteArray> _sentPackets; // circular buffer
    int _newestPacketAt;
    int _numExistingPackets;
    RingBufferHistory<QByteArray> _sentPackets; // circular buffer

    uint16_t _newestSequenceNumber;
};
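
SentPacketHistory now stores its packets in RingBufferHistory<QByteArray>, and SequenceNumberStats (below) keeps a RingBufferHistory of PacketStreamStats snapshots, but the template itself is not included in this excerpt. A hypothetical sketch of the interface implied by those call sites — insert() overwriting the oldest entry, get(n) returning the entry n steps behind the newest (or NULL once it has been overwritten), plus getNewestEntry(), getNumEntries(), and clear() — might look like this; the implementation details are assumptions, not the actual class:

// Hypothetical sketch of RingBufferHistory as implied by its usage in this PR (not the real header).
#include <cstddef>
#include <vector>

template <typename T>
class RingBufferHistory {
public:
    RingBufferHistory(int capacity = 10)
        : _entries(capacity), _newestAt(capacity - 1), _numEntries(0) {}

    void clear() { _numEntries = 0; }

    void insert(const T& entry) {
        // advance cyclically and overwrite the oldest slot
        _newestAt = (_newestAt + 1) % (int)_entries.size();
        _entries[_newestAt] = entry;
        if (_numEntries < (int)_entries.size()) {
            _numEntries++;
        }
    }

    // entriesAgo == 0 is the newest entry; returns NULL if that entry is no longer stored
    const T* get(int entriesAgo) const {
        if (entriesAgo < 0 || entriesAgo >= _numEntries) {
            return NULL;
        }
        int index = _newestAt - entriesAgo;
        if (index < 0) {
            index += (int)_entries.size();
        }
        return &_entries[index];
    }

    const T* getNewestEntry() const { return get(0); }
    int getNumEntries() const { return _numEntries; }

private:
    std::vector<T> _entries;
    int _newestAt;
    int _numEntries;
};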
@ -13,29 +13,19 @@
|
|||
|
||||
#include <limits>
|
||||
|
||||
SequenceNumberStats::SequenceNumberStats()
|
||||
SequenceNumberStats::SequenceNumberStats(int statsHistoryLength)
|
||||
: _lastReceived(std::numeric_limits<quint16>::max()),
|
||||
_missingSet(),
|
||||
_numReceived(0),
|
||||
_numUnreasonable(0),
|
||||
_numEarly(0),
|
||||
_numLate(0),
|
||||
_numLost(0),
|
||||
_numRecovered(0),
|
||||
_numDuplicate(0),
|
||||
_lastSenderUUID()
|
||||
_stats(),
|
||||
_lastSenderUUID(),
|
||||
_statsHistory(statsHistoryLength)
|
||||
{
|
||||
}
|
||||
|
||||
void SequenceNumberStats::reset() {
|
||||
_missingSet.clear();
|
||||
_numReceived = 0;
|
||||
_numUnreasonable = 0;
|
||||
_numEarly = 0;
|
||||
_numLate = 0;
|
||||
_numLost = 0;
|
||||
_numRecovered = 0;
|
||||
_numDuplicate = 0;
|
||||
_stats = PacketStreamStats();
|
||||
_statsHistory.clear();
|
||||
}
|
||||
|
||||
static const int UINT16_RANGE = std::numeric_limits<uint16_t>::max() + 1;
|
||||
|
@ -51,9 +41,9 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
|
|||
}
|
||||
|
||||
// determine our expected sequence number... handle rollover appropriately
|
||||
quint16 expected = _numReceived > 0 ? _lastReceived + (quint16)1 : incoming;
|
||||
quint16 expected = _stats._numReceived > 0 ? _lastReceived + (quint16)1 : incoming;
|
||||
|
||||
_numReceived++;
|
||||
_stats._numReceived++;
|
||||
|
||||
if (incoming == expected) { // on time
|
||||
_lastReceived = incoming;
|
||||
|
@ -80,7 +70,7 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
|
|||
// ignore packet if gap is unreasonable
|
||||
qDebug() << "ignoring unreasonable sequence number:" << incoming
|
||||
<< "previous:" << _lastReceived;
|
||||
_numUnreasonable++;
|
||||
_stats._numUnreasonable++;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -92,8 +82,8 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
|
|||
qDebug() << ">>>>>>>> missing gap=" << (incomingInt - expectedInt);
|
||||
}
|
||||
|
||||
_numEarly++;
|
||||
_numLost += (incomingInt - expectedInt);
|
||||
_stats._numEarly++;
|
||||
_stats._numLost += (incomingInt - expectedInt);
|
||||
_lastReceived = incoming;
|
||||
|
||||
// add all sequence numbers that were skipped to the missing sequence numbers list
|
||||
|
@ -110,7 +100,7 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
|
|||
if (wantExtraDebugging) {
|
||||
qDebug() << "this packet is later than expected...";
|
||||
}
|
||||
_numLate++;
|
||||
_stats._numLate++;
|
||||
|
||||
// do not update _lastReceived; it shouldn't become smaller
|
||||
|
||||
|
@ -119,13 +109,13 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
|
|||
if (wantExtraDebugging) {
|
||||
qDebug() << "found it in _missingSet";
|
||||
}
|
||||
_numLost--;
|
||||
_numRecovered++;
|
||||
_stats._numLost--;
|
||||
_stats._numRecovered++;
|
||||
} else {
|
||||
if (wantExtraDebugging) {
|
||||
qDebug() << "sequence:" << incoming << "was NOT found in _missingSet and is probably a duplicate";
|
||||
}
|
||||
_numDuplicate++;
|
||||
_stats._numDuplicate++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
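Only where the counters live changed in this hunk; the accounting itself is untouched. A tiny worked example of that accounting, with the resulting counter values taken directly from the logic above:

    SequenceNumberStats stats;
    stats.sequenceNumberReceived(10);   // first packet: on time, _lastReceived = 10
    stats.sequenceNumberReceived(13);   // expected 11: early; 11 and 12 enter _missingSet, _numLost += 2
    stats.sequenceNumberReceived(11);   // late, found in _missingSet: _numLost--, _numRecovered++

    // now: getNumEarly() == 1, getNumLate() == 1,
    //      getNumLost() == 1 (only 12 is still missing), getNumRecovered() == 1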
@ -180,3 +170,26 @@ void SequenceNumberStats::pruneMissingSet(const bool wantExtraDebugging) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
PacketStreamStats SequenceNumberStats::getStatsForHistoryWindow() const {
|
||||
|
||||
const PacketStreamStats* newestStats = _statsHistory.getNewestEntry();
|
||||
const PacketStreamStats* oldestStats = _statsHistory.get(_statsHistory.getNumEntries() - 1);
|
||||
|
||||
// this catches cases where history is length 1 or 0 (both are NULL in case of 0)
|
||||
if (newestStats == oldestStats) {
|
||||
return PacketStreamStats();
|
||||
}
|
||||
|
||||
// calculate difference between newest stats and oldest stats to get window stats
|
||||
PacketStreamStats windowStats;
|
||||
windowStats._numReceived = newestStats->_numReceived - oldestStats->_numReceived;
|
||||
windowStats._numUnreasonable = newestStats->_numUnreasonable - oldestStats->_numUnreasonable;
|
||||
windowStats._numEarly = newestStats->_numEarly - oldestStats->_numEarly;
|
||||
windowStats._numLate = newestStats->_numLate - oldestStats->_numLate;
|
||||
windowStats._numLost = newestStats->_numLost - oldestStats->_numLost;
|
||||
windowStats._numRecovered = newestStats->_numRecovered - oldestStats->_numRecovered;
|
||||
windowStats._numDuplicate = newestStats->_numDuplicate - oldestStats->_numDuplicate;
|
||||
|
||||
return windowStats;
|
||||
}
|
||||
|
|
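getStatsForHistoryWindow() works by differencing cumulative snapshots: each pushStatsToHistory() call stores the running totals, and the window is the newest snapshot minus the oldest one still in the history. A sketch of the intended call pattern; the once-per-interval cadence and the history length of 30 are assumptions for illustration:

    SequenceNumberStats stats(30);              // keep the last 30 snapshots

    quint16 sequence = 0;                       // would come from each packet header
    stats.sequenceNumberReceived(sequence);     // called for every incoming packet

    // once per stats interval:
    stats.pushStatsToHistory();
    PacketStreamStats window = stats.getStatsForHistoryWindow();
    // window._numReceived, window._numLost, ... now cover roughly the last 30 intervals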
|
@ -13,31 +13,29 @@
|
|||
#define hifi_SequenceNumberStats_h
|
||||
|
||||
#include "SharedUtil.h"
|
||||
#include "RingBufferHistory.h"
|
||||
#include <quuid.h>
|
||||
|
||||
const int MAX_REASONABLE_SEQUENCE_GAP = 1000;
|
||||
|
||||
class SequenceNumberStats {
|
||||
class PacketStreamStats {
|
||||
public:
|
||||
SequenceNumberStats();
|
||||
PacketStreamStats()
|
||||
: _numReceived(0),
|
||||
_numUnreasonable(0),
|
||||
_numEarly(0),
|
||||
_numLate(0),
|
||||
_numLost(0),
|
||||
_numRecovered(0),
|
||||
_numDuplicate(0)
|
||||
{}
|
||||
|
||||
void reset();
|
||||
void sequenceNumberReceived(quint16 incoming, QUuid senderUUID = QUuid(), const bool wantExtraDebugging = false);
|
||||
void pruneMissingSet(const bool wantExtraDebugging = false);
|
||||
|
||||
quint32 getNumReceived() const { return _numReceived; }
|
||||
quint32 getNumUnreasonable() const { return _numUnreasonable; }
|
||||
quint32 getNumOutOfOrder() const { return _numEarly + _numLate; }
|
||||
quint32 getNumEarly() const { return _numEarly; }
|
||||
quint32 getNumLate() const { return _numLate; }
|
||||
quint32 getNumLost() const { return _numLost; }
|
||||
quint32 getNumRecovered() const { return _numRecovered; }
|
||||
quint32 getNumDuplicate() const { return _numDuplicate; }
|
||||
const QSet<quint16>& getMissingSet() const { return _missingSet; }
|
||||
|
||||
private:
|
||||
quint16 _lastReceived;
|
||||
QSet<quint16> _missingSet;
|
||||
float getUnreasonableRate() const { return (float)_numUnreasonable / _numReceived; }
|
||||
float getNumEaryRate() const { return (float)_numEarly / _numReceived; }
|
||||
float getLateRate() const { return (float)_numLate / _numReceived; }
|
||||
float getLostRate() const { return (float)_numLost / _numReceived; }
|
||||
float getRecoveredRate() const { return (float)_numRecovered / _numReceived; }
|
||||
float getDuplicateRate() const { return (float)_numDuplicate / _numReceived; }
|
||||
|
||||
quint32 _numReceived;
|
||||
quint32 _numUnreasonable;
|
||||
|
@ -46,8 +44,38 @@ private:
|
|||
quint32 _numLost;
|
||||
quint32 _numRecovered;
|
||||
quint32 _numDuplicate;
|
||||
};
|
||||
|
||||
class SequenceNumberStats {
|
||||
public:
|
||||
SequenceNumberStats(int statsHistoryLength = 0);
|
||||
|
||||
void reset();
|
||||
void sequenceNumberReceived(quint16 incoming, QUuid senderUUID = QUuid(), const bool wantExtraDebugging = false);
|
||||
void pruneMissingSet(const bool wantExtraDebugging = false);
|
||||
void pushStatsToHistory() { _statsHistory.insert(_stats); }
|
||||
|
||||
quint32 getNumReceived() const { return _stats._numReceived; }
|
||||
quint32 getNumUnreasonable() const { return _stats._numUnreasonable; }
|
||||
quint32 getNumOutOfOrder() const { return _stats._numEarly + _stats._numLate; }
|
||||
quint32 getNumEarly() const { return _stats._numEarly; }
|
||||
quint32 getNumLate() const { return _stats._numLate; }
|
||||
quint32 getNumLost() const { return _stats._numLost; }
|
||||
quint32 getNumRecovered() const { return _stats._numRecovered; }
|
||||
quint32 getNumDuplicate() const { return _stats._numDuplicate; }
|
||||
const PacketStreamStats& getStats() const { return _stats; }
|
||||
PacketStreamStats getStatsForHistoryWindow() const;
|
||||
const QSet<quint16>& getMissingSet() const { return _missingSet; }
|
||||
|
||||
private:
|
||||
quint16 _lastReceived;
|
||||
QSet<quint16> _missingSet;
|
||||
|
||||
PacketStreamStats _stats;
|
||||
|
||||
QUuid _lastSenderUUID;
|
||||
|
||||
RingBufferHistory<PacketStreamStats> _statsHistory;
|
||||
};
|
||||
|
||||
#endif // hifi_SequenceNumberStats_h
|
||||
|
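A side benefit of the window stats is that rates can be computed over recent traffic instead of over the whole lifetime of the stream. Continuing the sketch above; the counts in the comment are just example numbers:

    PacketStreamStats window = stats.getStatsForHistoryWindow();
    float recentLossRate = 0.0f;
    if (window._numReceived > 0) {
        // e.g. 6 lost out of 300 received in the window gives 0.02
        recentLossRate = (float)window._numLost / window._numReceived;
    }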
|
150 libraries/shared/src/MovingMinMaxAvg.h Normal file
|
@ -0,0 +1,150 @@
|
|||
//
|
||||
// MovingMinMaxAvg.h
|
||||
// libraries/shared/src
|
||||
//
|
||||
// Created by Yixin Wang on 7/8/2014
|
||||
// Copyright 2013 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_MovingMinMaxAvg_h
|
||||
#define hifi_MovingMinMaxAvg_h
|
||||
|
||||
#include <limits>
|
||||
|
||||
#include "RingBufferHistory.h"
|
||||
|
||||
template <typename T>
|
||||
class MovingMinMaxAvg {
|
||||
|
||||
private:
|
||||
class Stats {
|
||||
public:
|
||||
Stats()
|
||||
: _min(std::numeric_limits<T>::max()),
|
||||
_max(std::numeric_limits<T>::min()),
|
||||
_average(0.0) {}
|
||||
|
||||
void updateWithSample(T sample, int& numSamplesInAverage) {
|
||||
if (sample < _min) {
|
||||
_min = sample;
|
||||
}
|
||||
if (sample > _max) {
|
||||
_max = sample;
|
||||
}
|
||||
_average = _average * ((double)numSamplesInAverage / (numSamplesInAverage + 1))
|
||||
+ (double)sample / (numSamplesInAverage + 1);
|
||||
numSamplesInAverage++;
|
||||
}
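// note: the average update above is the standard incremental mean,
//   newAverage = oldAverage * n / (n + 1) + sample / (n + 1),
// so no per-sample history needs to be kept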
|
||||
|
||||
void updateWithOtherStats(const Stats& other, int& numStatsInAverage) {
|
||||
if (other._min < _min) {
|
||||
_min = other._min;
|
||||
}
|
||||
if (other._max > _max) {
|
||||
_max = other._max;
|
||||
}
|
||||
_average = _average * ((double)numStatsInAverage / (numStatsInAverage + 1))
|
||||
+ other._average / (numStatsInAverage + 1);
|
||||
numStatsInAverage++;
|
||||
}
|
||||
|
||||
T _min;
|
||||
T _max;
|
||||
double _average;
|
||||
};
|
||||
|
||||
public:
|
||||
// This class collects 3 stats (min, max, avg) over a moving window of samples.
|
||||
// The moving window contains _windowIntervals * _intervalLength samples.
|
||||
// Those stats are updated every _intervalLength samples collected. When that happens, _newStatsAvailable is set
|
||||
// to true and it's up to the user to clear that flag.
|
||||
// For example, if you want a moving avg of the past 5000 samples updated every 100 samples, you would instantiate
|
||||
// this class with MovingMinMaxAvg(100, 50). If you want a moving min of the past 100 samples updated on every
|
||||
// new sample, instantiate this class with MovingMinMaxAvg(1, 100).
|
||||
|
||||
MovingMinMaxAvg(int intervalLength, int windowIntervals)
|
||||
: _intervalLength(intervalLength),
|
||||
_windowIntervals(windowIntervals),
|
||||
_overallStats(),
|
||||
_samplesCollected(0),
|
||||
_windowStats(),
|
||||
_existingSamplesInCurrentInterval(0),
|
||||
_currentIntervalStats(),
|
||||
_intervalStats(windowIntervals),
|
||||
_newStatsAvailable(false)
|
||||
{}
|
||||
|
||||
void reset() {
|
||||
_overallStats = Stats();
|
||||
_samplesCollected = 0;
|
||||
_windowStats = Stats();
|
||||
_existingSamplesInCurrentInterval = 0;
|
||||
_currentIntervalStats = Stats();
|
||||
_intervalStats.clear();
|
||||
_newStatsAvailable = false;
|
||||
}
|
||||
|
||||
void update(T newSample) {
|
||||
// update overall stats
|
||||
_overallStats.updateWithSample(newSample, _samplesCollected);
|
||||
|
||||
// update the current interval stats
|
||||
_currentIntervalStats.updateWithSample(newSample, _existingSamplesInCurrentInterval);
|
||||
|
||||
// if the current interval of samples is now full, record its stats into our past intervals' stats
|
||||
if (_existingSamplesInCurrentInterval == _intervalLength) {
|
||||
|
||||
// record current interval's stats, then reset them
|
||||
_intervalStats.insert(_currentIntervalStats);
|
||||
_currentIntervalStats = Stats();
|
||||
_existingSamplesInCurrentInterval = 0;
|
||||
|
||||
// update the window's stats by combining the intervals' stats
|
||||
typename RingBufferHistory<Stats>::Iterator i = _intervalStats.begin();
|
||||
typename RingBufferHistory<Stats>::Iterator end = _intervalStats.end();
|
||||
_windowStats = Stats();
|
||||
int intervalsIncludedInWindowStats = 0;
|
||||
while (i != end) {
|
||||
_windowStats.updateWithOtherStats(*i, intervalsIncludedInWindowStats);
|
||||
i++;
|
||||
}
|
||||
|
||||
_newStatsAvailable = true;
|
||||
}
|
||||
}
|
||||
|
||||
bool getNewStatsAvailableFlag() const { return _newStatsAvailable; }
|
||||
void clearNewStatsAvailableFlag() { _newStatsAvailable = false; }
|
||||
|
||||
T getMin() const { return _overallStats._min; }
|
||||
T getMax() const { return _overallStats._max; }
|
||||
double getAverage() const { return _overallStats._average; }
|
||||
T getWindowMin() const { return _windowStats._min; }
|
||||
T getWindowMax() const { return _windowStats._max; }
|
||||
double getWindowAverage() const { return _windowStats._average; }
|
||||
|
||||
private:
|
||||
int _intervalLength;
|
||||
int _windowIntervals;
|
||||
|
||||
// these are min/max/avg stats for all samples collected.
|
||||
Stats _overallStats;
|
||||
int _samplesCollected;
|
||||
|
||||
// these are the min/max/avg stats for the samples in the moving window
|
||||
Stats _windowStats;
|
||||
int _existingSamplesInCurrentInterval;
|
||||
|
||||
// these are the min/max/avg stats for the current interval
|
||||
Stats _currentIntervalStats;
|
||||
|
||||
// these are stored stats for the past intervals in the window
|
||||
RingBufferHistory<Stats> _intervalStats;
|
||||
|
||||
bool _newStatsAvailable;
|
||||
};
|
||||
|
||||
#endif // hifi_MovingMinMaxAvg_h
|
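A compact usage sketch of the class above, following the example given in its own comment (100-sample intervals, 50 intervals in the window); rand() stands in for a real sample source:

    #include <stdlib.h>
    #include "MovingMinMaxAvg.h"

    void exampleMovingStats() {
        // min/max/avg over the last 5000 samples, refreshed every 100 samples
        MovingMinMaxAvg<int> stats(100, 50);

        for (int i = 0; i < 100000; i++) {
            stats.update(rand() % 1000);

            if (stats.getNewStatsAvailableFlag()) {   // set once per completed interval
                int recentMin = stats.getWindowMin();
                int recentMax = stats.getWindowMax();
                double recentAvg = stats.getWindowAverage();
                // report recentMin / recentMax / recentAvg here
                stats.clearNewStatsAvailableFlag();
            }
        }
    }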
122 libraries/shared/src/RingBufferHistory.h Normal file
|
@ -0,0 +1,122 @@
|
|||
//
|
||||
// RingBufferHistory.h
|
||||
// libraries/shared/src
|
||||
//
|
||||
// Created by Yixin Wang on 7/9/2014
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_RingBufferHistory_h
|
||||
#define hifi_RingBufferHistory_h
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <iterator>
|
||||
|
||||
#include <qvector.h>
|
||||
|
||||
template <typename T>
|
||||
class RingBufferHistory {
|
||||
|
||||
public:
|
||||
|
||||
RingBufferHistory(int capacity = 10)
|
||||
: _size(capacity + 1),
|
||||
_capacity(capacity),
|
||||
_newestEntryAtIndex(0),
|
||||
_numEntries(0),
|
||||
_buffer(capacity + 1)
|
||||
{
|
||||
}
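// note: the buffer holds capacity + 1 slots; the one slot that is always left unused is
// what lets the Iterator's end() position stay distinct from the newest entry when the
// history is full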
|
||||
|
||||
void clear() {
|
||||
_numEntries = 0;
|
||||
}
|
||||
|
||||
void insert(const T& entry) {
|
||||
// increment newest entry index cyclically
|
||||
_newestEntryAtIndex = (_newestEntryAtIndex == _size - 1) ? 0 : _newestEntryAtIndex + 1;
|
||||
|
||||
// insert new entry
|
||||
_buffer[_newestEntryAtIndex] = entry;
|
||||
if (_numEntries < _capacity) {
|
||||
_numEntries++;
|
||||
}
|
||||
}
|
||||
|
||||
// 0 retrieves the most recent entry, _numEntries - 1 retrieves the oldest.
|
||||
// returns NULL if entryAge not within [0, _numEntries-1]
|
||||
const T* get(int entryAge) const {
|
||||
if (!(entryAge >= 0 && entryAge < _numEntries)) {
|
||||
return NULL;
|
||||
}
|
||||
int entryAt = _newestEntryAtIndex - entryAge;
|
||||
if (entryAt < 0) {
|
||||
entryAt += _size;
|
||||
}
|
||||
return &_buffer[entryAt];
|
||||
}
|
||||
|
||||
T* get(int entryAge) {
|
||||
return const_cast<T*>((static_cast<const RingBufferHistory*>(this))->get(entryAge));
|
||||
}
|
||||
|
||||
const T* getNewestEntry() const {
|
||||
return _numEntries == 0 ? NULL : &_buffer[_newestEntryAtIndex];
|
||||
}
|
||||
|
||||
T* getNewestEntry() {
|
||||
return _numEntries == 0 ? NULL : &_buffer[_newestEntryAtIndex];
|
||||
}
|
||||
|
||||
int getCapacity() const { return _capacity; }
|
||||
int getNumEntries() const { return _numEntries; }
|
||||
|
||||
private:
|
||||
int _size;
|
||||
int _capacity;
|
||||
int _newestEntryAtIndex;
|
||||
int _numEntries;
|
||||
QVector<T> _buffer;
|
||||
|
||||
public:
|
||||
class Iterator : public std::iterator < std::forward_iterator_tag, T > {
|
||||
public:
|
||||
Iterator(T* bufferFirst, T* bufferLast, T* at) : _bufferFirst(bufferFirst), _bufferLast(bufferLast), _at(at) {}
|
||||
|
||||
bool operator==(const Iterator& rhs) { return _at == rhs._at; }
|
||||
bool operator!=(const Iterator& rhs) { return _at != rhs._at; }
|
||||
T& operator*() { return *_at; }
|
||||
T* operator->() { return _at; }
|
||||
|
||||
Iterator& operator++() {
|
||||
_at = (_at == _bufferFirst) ? _bufferLast : _at - 1;
|
||||
return *this;
|
||||
}
|
||||
|
||||
Iterator operator++(int) {
|
||||
Iterator tmp(*this);
|
||||
++(*this);
|
||||
return tmp;
|
||||
}
|
||||
|
||||
private:
|
||||
T* const _bufferFirst;
|
||||
T* const _bufferLast;
|
||||
T* _at;
|
||||
};
|
||||
|
||||
Iterator begin() { return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[_newestEntryAtIndex]); }
|
||||
|
||||
Iterator end() {
|
||||
int endAtIndex = _newestEntryAtIndex - _numEntries;
|
||||
if (endAtIndex < 0) {
|
||||
endAtIndex += _size;
|
||||
}
|
||||
return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[endAtIndex]);
|
||||
}
|
||||
};
|
||||
|
||||
#endif // hifi_RingBufferHistory_h
|
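A small sketch of the container above: entries go in with insert(), get(0) is always the newest, and the iterator walks from newest to oldest. Purely illustrative:

    #include "RingBufferHistory.h"

    void exampleHistory() {
        RingBufferHistory<int> history(3);       // keeps the 3 most recent entries

        history.insert(1);
        history.insert(2);
        history.insert(3);
        history.insert(4);                       // 1 falls out; history now holds 4, 3, 2

        const int* newest = history.get(0);                              // -> 4
        const int* oldest = history.get(history.getNumEntries() - 1);    // -> 2
        const int* tooOld = history.get(5);                              // -> NULL

        // visits 4, 3, 2 in that order
        for (RingBufferHistory<int>::Iterator it = history.begin(); it != history.end(); ++it) {
            int value = *it;
        }
    }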
218 tests/shared/src/MovingMinMaxAvgTests.cpp Normal file
|
@ -0,0 +1,218 @@
|
|||
//
|
||||
// MovingMinMaxAvgTests.cpp
|
||||
// tests/shared/src
|
||||
//
|
||||
// Created by Yixin Wang on 7/8/2014
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "MovingMinMaxAvgTests.h"
|
||||
#include <qqueue.h>
|
||||
|
||||
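// assembles a pseudo-random 64-bit test value from successive 2-bit pieces of rand(),
// since a single rand() call does not cover the full quint64 range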
quint64 MovingMinMaxAvgTests::randQuint64() {
|
||||
quint64 ret = 0;
|
||||
for (int i = 0; i < 32; i++) {
|
||||
ret = (ret + rand() % 4);
|
||||
ret *= 4;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void MovingMinMaxAvgTests::runAllTests() {
|
||||
{
|
||||
// quint64 test
|
||||
|
||||
const int INTERVAL_LENGTH = 100;
|
||||
const int WINDOW_INTERVALS = 50;
|
||||
|
||||
MovingMinMaxAvg<quint64> stats(INTERVAL_LENGTH, WINDOW_INTERVALS);
|
||||
|
||||
quint64 min = std::numeric_limits<quint64>::max();
|
||||
quint64 max = 0;
|
||||
double average = 0.0;
|
||||
int totalSamples = 0;
|
||||
|
||||
quint64 windowMin;
|
||||
quint64 windowMax;
|
||||
double windowAverage;
|
||||
|
||||
QQueue<quint64> windowSamples;
|
||||
// fill window samples
|
||||
for (int i = 0; i < 100000; i++) {
|
||||
|
||||
quint64 sample = randQuint64();
|
||||
|
||||
windowSamples.enqueue(sample);
|
||||
if (windowSamples.size() > INTERVAL_LENGTH * WINDOW_INTERVALS) {
|
||||
windowSamples.dequeue();
|
||||
}
|
||||
|
||||
stats.update(sample);
|
||||
|
||||
min = std::min(min, sample);
|
||||
max = std::max(max, sample);
|
||||
average = (average * totalSamples + sample) / (totalSamples + 1);
|
||||
totalSamples++;
|
||||
|
||||
assert(stats.getMin() == min);
|
||||
assert(stats.getMax() == max);
|
||||
assert(abs(stats.getAverage() / average - 1.0) < 0.000001 || abs(stats.getAverage() - average) < 0.000001);
|
||||
|
||||
if ((i + 1) % INTERVAL_LENGTH == 0) {
|
||||
|
||||
assert(stats.getNewStatsAvailableFlag());
|
||||
stats.clearNewStatsAvailableFlag();
|
||||
|
||||
windowMin = std::numeric_limits<quint64>::max();
|
||||
windowMax = 0;
|
||||
windowAverage = 0.0;
|
||||
foreach(quint64 s, windowSamples) {
|
||||
windowMin = std::min(windowMin, s);
|
||||
windowMax = std::max(windowMax, s);
|
||||
windowAverage += (double)s;
|
||||
}
|
||||
windowAverage /= (double)windowSamples.size();
|
||||
|
||||
assert(stats.getWindowMin() == windowMin);
|
||||
assert(stats.getWindowMax() == windowMax);
|
||||
assert(abs(stats.getAverage() / average - 1.0) < 0.000001 || abs(stats.getAverage() - average) < 0.000001);
|
||||
|
||||
} else {
|
||||
assert(!stats.getNewStatsAvailableFlag());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
// int test
|
||||
|
||||
const int INTERVAL_LENGTH = 1;
|
||||
const int WINDOW_INTERVALS = 75;
|
||||
|
||||
MovingMinMaxAvg<int> stats(INTERVAL_LENGTH, WINDOW_INTERVALS);
|
||||
|
||||
int min = std::numeric_limits<int>::max();
|
||||
int max = 0;
|
||||
double average = 0.0;
|
||||
int totalSamples = 0;
|
||||
|
||||
int windowMin;
|
||||
int windowMax;
|
||||
double windowAverage;
|
||||
|
||||
QQueue<int> windowSamples;
|
||||
// fill window samples
|
||||
for (int i = 0; i < 100000; i++) {
|
||||
|
||||
int sample = rand();
|
||||
|
||||
windowSamples.enqueue(sample);
|
||||
if (windowSamples.size() > INTERVAL_LENGTH * WINDOW_INTERVALS) {
|
||||
windowSamples.dequeue();
|
||||
}
|
||||
|
||||
stats.update(sample);
|
||||
|
||||
min = std::min(min, sample);
|
||||
max = std::max(max, sample);
|
||||
average = (average * totalSamples + sample) / (totalSamples + 1);
|
||||
totalSamples++;
|
||||
|
||||
assert(stats.getMin() == min);
|
||||
assert(stats.getMax() == max);
|
||||
assert(abs(stats.getAverage() / average - 1.0) < 0.000001);
|
||||
|
||||
if ((i + 1) % INTERVAL_LENGTH == 0) {
|
||||
|
||||
assert(stats.getNewStatsAvailableFlag());
|
||||
stats.clearNewStatsAvailableFlag();
|
||||
|
||||
windowMin = std::numeric_limits<int>::max();
|
||||
windowMax = 0;
|
||||
windowAverage = 0.0;
|
||||
foreach(int s, windowSamples) {
|
||||
windowMin = std::min(windowMin, s);
|
||||
windowMax = std::max(windowMax, s);
|
||||
windowAverage += (double)s;
|
||||
}
|
||||
windowAverage /= (double)windowSamples.size();
|
||||
|
||||
assert(stats.getWindowMin() == windowMin);
|
||||
assert(stats.getWindowMax() == windowMax);
|
||||
assert(abs(stats.getAverage() / average - 1.0) < 0.000001);
|
||||
|
||||
} else {
|
||||
assert(!stats.getNewStatsAvailableFlag());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
// float test
|
||||
|
||||
const int INTERVAL_LENGTH = 57;
|
||||
const int WINDOW_INTERVALS = 1;
|
||||
|
||||
MovingMinMaxAvg<float> stats(INTERVAL_LENGTH, WINDOW_INTERVALS);
|
||||
|
||||
float min = std::numeric_limits<float>::max();
|
||||
float max = 0;
|
||||
double average = 0.0;
|
||||
int totalSamples = 0;
|
||||
|
||||
float windowMin;
|
||||
float windowMax;
|
||||
double windowAverage;
|
||||
|
||||
QQueue<float> windowSamples;
|
||||
// fill window samples
|
||||
for (int i = 0; i < 100000; i++) {
|
||||
|
||||
float sample = randFloat();
|
||||
|
||||
windowSamples.enqueue(sample);
|
||||
if (windowSamples.size() > INTERVAL_LENGTH * WINDOW_INTERVALS) {
|
||||
windowSamples.dequeue();
|
||||
}
|
||||
|
||||
stats.update(sample);
|
||||
|
||||
min = std::min(min, sample);
|
||||
max = std::max(max, sample);
|
||||
average = (average * totalSamples + sample) / (totalSamples + 1);
|
||||
totalSamples++;
|
||||
|
||||
assert(stats.getMin() == min);
|
||||
assert(stats.getMax() == max);
|
||||
assert(abs(stats.getAverage() / average - 1.0) < 0.000001);
|
||||
|
||||
if ((i + 1) % INTERVAL_LENGTH == 0) {
|
||||
|
||||
assert(stats.getNewStatsAvailableFlag());
|
||||
stats.clearNewStatsAvailableFlag();
|
||||
|
||||
windowMin = std::numeric_limits<float>::max();
|
||||
windowMax = 0;
|
||||
windowAverage = 0.0;
|
||||
foreach(float s, windowSamples) {
|
||||
windowMin = std::min(windowMin, s);
|
||||
windowMax = std::max(windowMax, s);
|
||||
windowAverage += (double)s;
|
||||
}
|
||||
windowAverage /= (double)windowSamples.size();
|
||||
|
||||
assert(stats.getWindowMin() == windowMin);
|
||||
assert(stats.getWindowMax() == windowMax);
|
||||
assert(abs(stats.getAverage() / average - 1.0) < 0.000001);
|
||||
|
||||
} else {
|
||||
assert(!stats.getNewStatsAvailableFlag());
|
||||
}
|
||||
}
|
||||
}
|
||||
printf("moving min/max/avg test passed!\n");
|
||||
}
|
||||
|
25 tests/shared/src/MovingMinMaxAvgTests.h Normal file
|
@ -0,0 +1,25 @@
|
|||
//
|
||||
// MovingMinMaxAvgTests.h
|
||||
// tests/shared/src
|
||||
//
|
||||
// Created by Yixin Wang on 7/8/2014
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_MovingMinMaxAvgTests_h
|
||||
#define hifi_MovingMinMaxAvgTests_h
|
||||
|
||||
#include "MovingMinMaxAvg.h"
|
||||
#include "SharedUtil.h"
|
||||
|
||||
namespace MovingMinMaxAvgTests {
|
||||
|
||||
quint64 randQuint64();
|
||||
|
||||
void runAllTests();
|
||||
}
|
||||
|
||||
#endif // hifi_MovingMinMaxAvgTests_h
|
|
@ -10,9 +10,12 @@
|
|||
|
||||
#include "AngularConstraintTests.h"
|
||||
#include "MovingPercentileTests.h"
|
||||
#include "MovingMinMaxAvgTests.h"
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
MovingMinMaxAvgTests::runAllTests();
|
||||
MovingPercentileTests::runAllTests();
|
||||
AngularConstraintTests::runAllTests();
|
||||
getchar();
|
||||
return 0;
|
||||
}
|
||||
|
|