Merge branch 'master' of https://github.com/highfidelity/hifi into metavoxels
commit 21a1f4772c

32 changed files with 1147 additions and 345 deletions
@@ -405,7 +405,8 @@ void AudioMixer::readPendingDatagrams() {
         if (mixerPacketType == PacketTypeMicrophoneAudioNoEcho
             || mixerPacketType == PacketTypeMicrophoneAudioWithEcho
             || mixerPacketType == PacketTypeInjectAudio
-            || mixerPacketType == PacketTypeSilentAudioFrame) {
+            || mixerPacketType == PacketTypeSilentAudioFrame
+            || mixerPacketType == PacketTypeAudioStreamStats) {

             nodeList->findNodeAndUpdateWithDataFromPacket(receivedPacket);
         } else if (mixerPacketType == PacketTypeMuteEnvironment) {
@@ -640,9 +641,6 @@ void AudioMixer::run() {
             ++framesSinceCutoffEvent;
         }

-        const quint64 TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;
-
         bool sendAudioStreamStats = false;
         quint64 now = usecTimestampNow();
         if (now - _lastSendAudioStreamStatsTime > TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS) {
@@ -21,6 +21,8 @@ class AvatarAudioRingBuffer;

 const int SAMPLE_PHASE_DELAY_AT_90 = 20;

+const quint64 TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;
+
 /// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
 class AudioMixer : public ThreadedAssignment {
     Q_OBJECT
@@ -18,11 +18,15 @@

 #include "AudioMixer.h"
 #include "AudioMixerClientData.h"
+#include "MovingMinMaxAvg.h"

+const int INCOMING_SEQ_STATS_HISTORY_LENGTH = INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS /
+    (TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS / USECS_PER_SECOND);
+
 AudioMixerClientData::AudioMixerClientData() :
     _ringBuffers(),
     _outgoingMixedAudioSequenceNumber(0),
-    _incomingAvatarAudioSequenceNumberStats()
+    _incomingAvatarAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH)
 {

 }
@@ -82,12 +86,15 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {

         // ask the AvatarAudioRingBuffer instance to parse the data
         avatarRingBuffer->parseData(packet);
-    } else {
+    } else if (packetType == PacketTypeInjectAudio) {
         // this is injected audio

         // grab the stream identifier for this injected audio
         QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(numBytesForPacketHeader(packet) + sizeof(quint16), NUM_BYTES_RFC4122_UUID));

+        if (!_incomingInjectedAudioSequenceNumberStatsMap.contains(streamIdentifier)) {
+            _incomingInjectedAudioSequenceNumberStatsMap.insert(streamIdentifier, SequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH));
+        }
         _incomingInjectedAudioSequenceNumberStatsMap[streamIdentifier].sequenceNumberReceived(sequence);

         InjectedAudioRingBuffer* matchingInjectedRingBuffer = NULL;
@@ -106,6 +113,15 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         }

         matchingInjectedRingBuffer->parseData(packet);
+    } else if (packetType == PacketTypeAudioStreamStats) {
+
+        const char* dataAt = packet.data();
+
+        // skip over header, appendFlag, and num stats packed
+        dataAt += (numBytesPacketHeader + sizeof(quint8) + sizeof(quint16));
+
+        // read the downstream audio stream stats
+        memcpy(&_downstreamAudioStreamStats, dataAt, sizeof(AudioStreamStats));
     }

     return 0;
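Note (reviewer sketch, not part of the commit): the INCOMING_SEQ_STATS_HISTORY_LENGTH expression above works out to one history entry per stats send. With the values visible in this diff, 30 seconds of coverage divided by a 1-second send interval gives 30 entries:

#include <cstdint>

// Values taken from this diff; the derived length is what the SequenceNumberStats
// history is sized to.
static const uint64_t USECS_PER_SECOND = 1000000;
static const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
static const uint64_t TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;

// 30 s of history / 1 s per pushStatsToHistory() call => 30 entries.
static const int INCOMING_SEQ_STATS_HISTORY_LENGTH =
    INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS /
    (TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS / USECS_PER_SECOND);

static_assert(INCOMING_SEQ_STATS_HISTORY_LENGTH == 30, "one entry per one-second send, kept for 30 seconds");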
@@ -159,30 +175,50 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
 }

 AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const {

     AudioStreamStats streamStats;
-    SequenceNumberStats streamSequenceNumberStats;

     streamStats._streamType = ringBuffer->getType();
     if (streamStats._streamType == PositionalAudioRingBuffer::Injector) {
         streamStats._streamIdentifier = ((InjectedAudioRingBuffer*)ringBuffer)->getStreamIdentifier();
-        streamSequenceNumberStats = _incomingInjectedAudioSequenceNumberStatsMap.value(streamStats._streamIdentifier);
+        const SequenceNumberStats& sequenceNumberStats = _incomingInjectedAudioSequenceNumberStatsMap[streamStats._streamIdentifier];
+        streamStats._packetStreamStats = sequenceNumberStats.getStats();
+        streamStats._packetStreamWindowStats = sequenceNumberStats.getStatsForHistoryWindow();
     } else {
-        streamSequenceNumberStats = _incomingAvatarAudioSequenceNumberStats;
+        streamStats._packetStreamStats = _incomingAvatarAudioSequenceNumberStats.getStats();
+        streamStats._packetStreamWindowStats = _incomingAvatarAudioSequenceNumberStats.getStatsForHistoryWindow();
     }
-    streamStats._jitterBufferFrames = ringBuffer->getCurrentJitterBufferFrames();

-    streamStats._packetsReceived = streamSequenceNumberStats.getNumReceived();
-    streamStats._packetsUnreasonable = streamSequenceNumberStats.getNumUnreasonable();
-    streamStats._packetsEarly = streamSequenceNumberStats.getNumEarly();
-    streamStats._packetsLate = streamSequenceNumberStats.getNumLate();
-    streamStats._packetsLost = streamSequenceNumberStats.getNumLost();
-    streamStats._packetsRecovered = streamSequenceNumberStats.getNumRecovered();
-    streamStats._packetsDuplicate = streamSequenceNumberStats.getNumDuplicate();
+    const MovingMinMaxAvg<quint64>& timeGapStats = ringBuffer->getInterframeTimeGapStatsForStatsPacket();
+    streamStats._timeGapMin = timeGapStats.getMin();
+    streamStats._timeGapMax = timeGapStats.getMax();
+    streamStats._timeGapAverage = timeGapStats.getAverage();
+    streamStats._timeGapWindowMin = timeGapStats.getWindowMin();
+    streamStats._timeGapWindowMax = timeGapStats.getWindowMax();
+    streamStats._timeGapWindowAverage = timeGapStats.getWindowAverage();

+    streamStats._ringBufferFramesAvailable = ringBuffer->framesAvailable();
+    streamStats._ringBufferCurrentJitterBufferFrames = ringBuffer->getCurrentJitterBufferFrames();
+    streamStats._ringBufferDesiredJitterBufferFrames = ringBuffer->getDesiredJitterBufferFrames();
+    streamStats._ringBufferStarveCount = ringBuffer->getStarveCount();
+    streamStats._ringBufferConsecutiveNotMixedCount = ringBuffer->getConsecutiveNotMixedCount();
+    streamStats._ringBufferOverflowCount = ringBuffer->getOverflowCount();
+    streamStats._ringBufferSilentFramesDropped = ringBuffer->getSilentFramesDropped();

     return streamStats;
 }

-void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) const {
+void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) {

+    // have all the seq number stats of each audio stream push their current stats into their history,
+    // which moves that history window 1 second forward (since that's how long since the last stats were pushed into history)
+    _incomingAvatarAudioSequenceNumberStats.pushStatsToHistory();
+    QHash<QUuid, SequenceNumberStats>::Iterator i = _incomingInjectedAudioSequenceNumberStatsMap.begin();
+    QHash<QUuid, SequenceNumberStats>::Iterator end = _incomingInjectedAudioSequenceNumberStatsMap.end();
+    while (i != end) {
+        i.value().pushStatsToHistory();
+        i++;
+    }
+
     char packet[MAX_PACKET_SIZE];
     NodeList* nodeList = NodeList::getInstance();
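Note (reviewer sketch): the getStats() / getStatsForHistoryWindow() pair above is what later becomes the "lost%" and "lost%_30s" readouts, lifetime counters versus counters accumulated over the last ~30 one-second pushStatsToHistory() calls. The stand-in type below only illustrates how a lost rate falls out of received/lost counts; the field names are an assumption, not taken from this commit.

#include <cstdint>

// Stand-in for the packet-counter struct behind getLostRate(); field names are assumed.
struct PacketCounters {
    uint32_t received = 0;
    uint32_t lost = 0;

    // lost / (received + lost): the fraction of expected packets that never arrived.
    float lostRate() const {
        uint32_t expected = received + lost;
        return expected ? (float)lost / (float)expected : 0.0f;
    }
};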
@@ -234,46 +270,63 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&

 QString AudioMixerClientData::getAudioStreamStatsString() const {
     QString result;
+    AudioStreamStats streamStats = _downstreamAudioStreamStats;
+    result += "DOWNSTREAM.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
+        + " current: ?"
+        + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
+        + " starves:" + QString::number(streamStats._ringBufferStarveCount)
+        + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
+        + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
+        + " silents_dropped: ?"
+        + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
+        + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
+        + " min_gap:" + QString::number(streamStats._timeGapMin)
+        + " max_gap:" + QString::number(streamStats._timeGapMax)
+        + " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
+        + " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
+        + " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
+        + " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);

     AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
     if (avatarRingBuffer) {
-        int desiredJitterBuffer = avatarRingBuffer->getDesiredJitterBufferFrames();
-        int calculatedJitterBuffer = avatarRingBuffer->getCalculatedDesiredJitterBufferFrames();
-        int currentJitterBuffer = avatarRingBuffer->getCurrentJitterBufferFrames();
-        int overflowCount = avatarRingBuffer->getOverflowCount();
-        int samplesAvailable = avatarRingBuffer->samplesAvailable();
-        int framesAvailable = (samplesAvailable / avatarRingBuffer->getSamplesPerFrame());
         AudioStreamStats streamStats = getAudioStreamStatsOfStream(avatarRingBuffer);
-        result += "mic.desired:" + QString::number(desiredJitterBuffer)
-            + " calculated:" + QString::number(calculatedJitterBuffer)
-            + " current:" + QString::number(currentJitterBuffer)
-            + " available:" + QString::number(framesAvailable)
-            + " samples:" + QString::number(samplesAvailable)
-            + " overflows:" + QString::number(overflowCount)
-            + " early:" + QString::number(streamStats._packetsEarly)
-            + " late:" + QString::number(streamStats._packetsLate)
-            + " lost:" + QString::number(streamStats._packetsLost);
+        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
+            + " current:" + QString::number(streamStats._ringBufferCurrentJitterBufferFrames)
+            + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
+            + " starves:" + QString::number(streamStats._ringBufferStarveCount)
+            + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
+            + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
+            + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
+            + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
+            + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
+            + " min_gap:" + QString::number(streamStats._timeGapMin)
+            + " max_gap:" + QString::number(streamStats._timeGapMax)
+            + " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
+            + " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
+            + " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
+            + " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
     } else {
         result = "mic unknown";
     }

     for (int i = 0; i < _ringBuffers.size(); i++) {
         if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector) {
-            int desiredJitterBuffer = _ringBuffers[i]->getDesiredJitterBufferFrames();
-            int calculatedJitterBuffer = _ringBuffers[i]->getCalculatedDesiredJitterBufferFrames();
-            int currentJitterBuffer = _ringBuffers[i]->getCurrentJitterBufferFrames();
-            int overflowCount = _ringBuffers[i]->getOverflowCount();
-            int samplesAvailable = _ringBuffers[i]->samplesAvailable();
-            int framesAvailable = (samplesAvailable / _ringBuffers[i]->getSamplesPerFrame());
             AudioStreamStats streamStats = getAudioStreamStatsOfStream(_ringBuffers[i]);
-            result += "| injected[" + QString::number(i) + "].desired:" + QString::number(desiredJitterBuffer)
-                + " calculated:" + QString::number(calculatedJitterBuffer)
-                + " current:" + QString::number(currentJitterBuffer)
-                + " available:" + QString::number(framesAvailable)
-                + " samples:" + QString::number(samplesAvailable)
-                + " overflows:" + QString::number(overflowCount)
-                + " early:" + QString::number(streamStats._packetsEarly)
-                + " late:" + QString::number(streamStats._packetsLate)
-                + " lost:" + QString::number(streamStats._packetsLost);
+            result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
+                + " current:" + QString::number(streamStats._ringBufferCurrentJitterBufferFrames)
+                + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
+                + " starves:" + QString::number(streamStats._ringBufferStarveCount)
+                + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
+                + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
+                + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
+                + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
+                + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
+                + " min_gap:" + QString::number(streamStats._timeGapMin)
+                + " max_gap:" + QString::number(streamStats._timeGapMax)
+                + " avg_gap:" + QString::number(streamStats._timeGapAverage, 'f', 2)
+                + " min_gap_30s:" + QString::number(streamStats._timeGapWindowMin)
+                + " max_gap_30s:" + QString::number(streamStats._timeGapWindowMax)
+                + " avg_gap_30s:" + QString::number(streamStats._timeGapWindowAverage, 'f', 2);
         }
     }
     return result;
@@ -20,6 +20,9 @@
 #include "AudioStreamStats.h"
 #include "SequenceNumberStats.h"

+const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
+
 class AudioMixerClientData : public NodeData {
 public:
     AudioMixerClientData();
@@ -35,7 +38,7 @@ public:
     AudioStreamStats getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const;
     QString getAudioStreamStatsString() const;

-    void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) const;
+    void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode);

     void incrementOutgoingMixedAudioSequenceNumber() { _outgoingMixedAudioSequenceNumber++; }
     quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }
@@ -46,6 +49,8 @@ private:
     quint16 _outgoingMixedAudioSequenceNumber;
     SequenceNumberStats _incomingAvatarAudioSequenceNumberStats;
     QHash<QUuid, SequenceNumberStats> _incomingInjectedAudioSequenceNumberStatsMap;

+    AudioStreamStats _downstreamAudioStreamStats;
+
 };

 #endif // hifi_AudioMixerClientData_h
@@ -19,7 +19,7 @@ AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBu
 }

 int AvatarAudioRingBuffer::parseData(const QByteArray& packet) {
-    _interframeTimeGapStats.frameReceived();
+    timeGapStatsFrameReceived();
     updateDesiredJitterBufferFrames();

     _shouldLoopbackForNode = (packetTypeForPacket(packet) == PacketTypeMicrophoneAudioWithEcho);
@@ -43,7 +43,10 @@ var animationLenght = 2.0;

 var avatarOldPosition = { x: 0, y: 0, z: 0 };

-var sitting = false;
+var sittingSettingsHandle = "SitJsSittingPosition";
+var sitting = Settings.getValue(sittingSettingsHandle, false) == "true";
+print("Original sitting status: " + sitting);
+var frame = 0;

 var seat = new Object();
 var hiddingSeats = false;
@@ -123,6 +126,8 @@ var goToSeatAnimation = function(deltaTime) {

 function sitDown() {
     sitting = true;
+    Settings.setValue(sittingSettingsHandle, sitting);
+    print("sitDown sitting status: " + Settings.getValue(sittingSettingsHandle, false));
     passedTime = 0.0;
     startPosition = MyAvatar.position;
     storeStartPoseAndTransition();
@@ -138,6 +143,8 @@ function sitDown() {

 function standUp() {
     sitting = false;
+    Settings.setValue(sittingSettingsHandle, sitting);
+    print("standUp sitting status: " + Settings.getValue(sittingSettingsHandle, false));
     passedTime = 0.0;
     startPosition = MyAvatar.position;
     try{
@@ -159,9 +166,11 @@ function SeatIndicator(modelProperties, seatIndex) {
                       modelProperties.sittingPoints[seatIndex].rotation);
     this.scale = MyAvatar.scale / 12;

-    this.sphere = Overlays.addOverlay("sphere", {
+    this.sphere = Overlays.addOverlay("billboard", {
+                  subImage: { x: 0, y: buttonHeight, width: buttonWidth, height: buttonHeight},
+                  url: buttonImageUrl,
                   position: this.position,
-                  size: this.scale,
+                  scale: this.scale * 4,
                   solid: true,
                   color: { red: 0, green: 0, blue: 255 },
                   alpha: 0.3,
@@ -218,33 +227,6 @@ Controller.mousePressEvent.connect(function(event) {
             try{ Script.update.disconnect(sittingDownAnimation); } catch(e){}
             Script.update.connect(goToSeatAnimation);
         }
-
-        return;
-        var intersection = Models.findRayIntersection(pickRay);
-
-        if (intersection.accurate && intersection.intersects && false) {
-            var properties = intersection.modelProperties;
-            print("Intersecting with model, let's check for seats.");
-
-            if (properties.sittingPoints.length > 0) {
-                print("Available seats, going to the first one: " + properties.sittingPoints[0].name);
-                seat.position = Vec3.sum(properties.position, Vec3.multiplyQbyV(properties.modelRotation, properties.sittingPoints[0].position));
-                Vec3.print("Seat position: ", seat.position);
-                seat.rotation = Quat.multiply(properties.modelRotation, properties.sittingPoints[0].rotation);
-                Quat.print("Seat rotation: ", seat.rotation);
-
-                passedTime = 0.0;
-                startPosition = MyAvatar.position;
-                startRotation = MyAvatar.orientation;
-                try{ Script.update.disconnect(standingUpAnimation); } catch(e){}
-                try{ Script.update.disconnect(sittingDownAnimation); } catch(e){}
-                Script.update.connect(goToSeatAnimation);
-            } else {
-                print ("Sorry, no seats here.");
-            }
-        }
     }
 })

@@ -258,12 +240,28 @@ function update(deltaTime){
         Overlays.editOverlay( sitDownButton, {x: newX, y: newY} );
     }

+    // For a weird reason avatar joint don't update till the 10th frame
+    // Set the update frame to 20 to be safe
+    var UPDATE_FRAME = 20;
+    if (frame <= UPDATE_FRAME) {
+        if (frame == UPDATE_FRAME) {
+            if (sitting == true) {
+                print("Was seated: " + sitting);
+                storeStartPoseAndTransition();
+                updateJoints(1.0);
+                Overlays.editOverlay(sitDownButton, { visible: false });
+                Overlays.editOverlay(standUpButton, { visible: true });
+            }
+        }
+        frame++;
+    }
+
     if (MyAvatar.position.x != avatarOldPosition.x &&
         MyAvatar.position.y != avatarOldPosition.y &&
         MyAvatar.position.z != avatarOldPosition.z) {
         avatarOldPosition = MyAvatar.position;

-        var SEARCH_RADIUS = 5;
+        var SEARCH_RADIUS = 10;
         var foundModels = Models.findModels(MyAvatar.position, SEARCH_RADIUS);
         // Let's remove indicator that got out of radius
         for (model in models) {
@@ -172,7 +172,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
     _runningScriptsWidget(NULL),
     _runningScriptsWidgetWasVisible(false),
     _trayIcon(new QSystemTrayIcon(_window)),
-    _lastNackTime(usecTimestampNow())
+    _lastNackTime(usecTimestampNow()),
+    _lastSendDownstreamAudioStats(usecTimestampNow())
 {
     // read the ApplicationInfo.ini file for Name/Version/Domain information
     QSettings applicationInfo(Application::resourcesPath() + "info/ApplicationInfo.ini", QSettings::IniFormat);
@@ -2124,10 +2125,11 @@ void Application::updateMyAvatar(float deltaTime) {
         loadViewFrustum(_myCamera, _viewFrustum);
     }

+    quint64 now = usecTimestampNow();
+
     // Update my voxel servers with my current voxel query...
     {
         PerformanceTimer perfTimer("queryOctree");
-        quint64 now = usecTimestampNow();
         quint64 sinceLastQuery = now - _lastQueriedTime;
         const quint64 TOO_LONG_SINCE_LAST_QUERY = 3 * USECS_PER_SECOND;
         bool queryIsDue = sinceLastQuery > TOO_LONG_SINCE_LAST_QUERY;
@@ -2145,7 +2147,6 @@ void Application::updateMyAvatar(float deltaTime) {

     // sent nack packets containing missing sequence numbers of received packets from nodes
     {
-        quint64 now = usecTimestampNow();
         quint64 sinceLastNack = now - _lastNackTime;
         const quint64 TOO_LONG_SINCE_LAST_NACK = 1 * USECS_PER_SECOND;
         if (sinceLastNack > TOO_LONG_SINCE_LAST_NACK) {
@@ -2153,6 +2154,15 @@ void Application::updateMyAvatar(float deltaTime) {
             sendNackPackets();
         }
     }

+    {
+        quint64 sinceLastNack = now - _lastSendDownstreamAudioStats;
+        if (sinceLastNack > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) {
+            _lastSendDownstreamAudioStats = now;
+
+            QMetaObject::invokeMethod(&_audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
+        }
+    }
 }

 int Application::sendNackPackets() {
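Note (reviewer sketch): the new block above gates the downstream stats send to roughly once per second and hands it to the Audio object with a queued invocation, so the packet is assembled on the receiver's own event loop rather than inline in updateMyAvatar(). A minimal restatement of that shape, with the names from this diff kept and everything else assumed:

#include <QMetaObject>
#include <QObject>
#include <cstdint>

static const uint64_t USECS_PER_SECOND = 1000000;
static const uint64_t TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS = 1 * USECS_PER_SECOND;

// audio is assumed to be a QObject (possibly living on another thread) exposing the
// sendDownstreamAudioStatsPacket() slot added elsewhere in this diff.
void maybeSendDownstreamAudioStats(QObject* audio, uint64_t now, uint64_t& lastSend) {
    if (now - lastSend > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) {
        lastSend = now;
        // Queued connection: the slot runs when the receiver's event loop gets to it.
        QMetaObject::invokeMethod(audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
    }
}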
@@ -125,6 +125,8 @@ static const float MIRROR_REARVIEW_DISTANCE = 0.65f;
 static const float MIRROR_REARVIEW_BODY_DISTANCE = 2.3f;
 static const float MIRROR_FIELD_OF_VIEW = 30.0f;

+static const quint64 TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS = 1 * USECS_PER_SECOND;
+
 class Application : public QApplication {
     Q_OBJECT

@@ -586,6 +588,7 @@ private:
     QSystemTrayIcon* _trayIcon;

     quint64 _lastNackTime;
+    quint64 _lastSendDownstreamAudioStats;
 };

 #endif // hifi_Application_h
@@ -48,9 +48,18 @@ static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_

 static const int NUMBER_OF_NOISE_SAMPLE_FRAMES = 300;

+// audio frames time gap stats (min/max/avg) for last ~30 seconds are recalculated every ~1 second
+static const int TIME_GAPS_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
+static const int TIME_GAP_STATS_WINDOW_INTERVALS = 30;
+
+// incoming sequence number stats history will cover last 30s
+static const int INCOMING_SEQ_STATS_HISTORY_LENGTH = INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS /
+    (TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS / USECS_PER_SECOND);
+
 // Mute icon configration
 static const int MUTE_ICON_SIZE = 24;


 Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     AbstractAudioInterface(parent),
     _audioInput(NULL),
@@ -103,8 +112,12 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _scopeInput(0),
     _scopeOutputLeft(0),
     _scopeOutputRight(0),
-    _audioMixerAvatarStreamStats(),
-    _outgoingAvatarAudioSequenceNumber(0)
+    _audioMixerAvatarStreamAudioStats(),
+    _outgoingAvatarAudioSequenceNumber(0),
+    _incomingMixedAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH),
+    _interframeTimeGapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS),
+    _starveCount(0),
+    _consecutiveNotMixedCount(0)
 {
     // clear the array of locally injected samples
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
@@ -120,8 +133,14 @@ void Audio::init(QGLWidget *parent) {

 void Audio::reset() {
     _ringBuffer.reset();
+
+    _starveCount = 0;
+    _consecutiveNotMixedCount = 0;
+
+    _audioMixerAvatarStreamAudioStats = AudioStreamStats();
+    _audioMixerInjectedStreamAudioStatsMap.clear();
+
     _outgoingAvatarAudioSequenceNumber = 0;
-    _audioMixerInjectedStreamStatsMap.clear();
     _incomingMixedAudioSequenceNumberStats.reset();
 }
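Note (reviewer sketch): the two constants added above size the MovingMinMaxAvg window: one interval holds roughly a second's worth of received audio frames, and 30 intervals are kept, so the windowed gap stats cover about the last 30 seconds. The frame interval value below is only an assumption to make the arithmetic concrete; the real BUFFER_SEND_INTERVAL_USECS is defined elsewhere in the audio code.

#include <cstdint>

static const uint64_t USECS_PER_SECOND = 1000000;
// Assumed per-frame send interval, used here only to make the numbers concrete.
static const uint64_t BUFFER_SEND_INTERVAL_USECS = 10667;

// Frames per ~1 s interval, and 30 intervals kept => roughly 30 s of inter-frame gap history.
static const int TIME_GAPS_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;  // ~93
static const int TIME_GAP_STATS_WINDOW_INTERVALS = 30;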
@@ -689,7 +708,9 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {

     _totalPacketsReceived++;

-    double timeDiff = (double)_timeSinceLastReceived.nsecsElapsed() / 1000000.0; // ns to ms
+    double timeDiff = (double)_timeSinceLastReceived.nsecsElapsed() / 1000.0; // ns to us
+    _interframeTimeGapStats.update((quint64)timeDiff);
+    timeDiff /= USECS_PER_MSEC; // us to ms
     _timeSinceLastReceived.start();

     // Discard first few received packets for computing jitter (often they pile up on start)
@@ -726,7 +747,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
     quint8 appendFlag = *(reinterpret_cast<const quint16*>(dataAt));
     dataAt += sizeof(quint8);
     if (!appendFlag) {
-        _audioMixerInjectedStreamStatsMap.clear();
+        _audioMixerInjectedStreamAudioStatsMap.clear();
     }

     // parse the number of stream stats structs to follow
@@ -740,13 +761,72 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
         dataAt += sizeof(AudioStreamStats);

         if (streamStats._streamType == PositionalAudioRingBuffer::Microphone) {
-            _audioMixerAvatarStreamStats = streamStats;
+            _audioMixerAvatarStreamAudioStats = streamStats;
         } else {
-            _audioMixerInjectedStreamStatsMap[streamStats._streamIdentifier] = streamStats;
+            _audioMixerInjectedStreamAudioStatsMap[streamStats._streamIdentifier] = streamStats;
         }
     }
 }

+AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
+
+    AudioStreamStats stats;
+    stats._streamType = PositionalAudioRingBuffer::Microphone;
+
+    stats._timeGapMin = _interframeTimeGapStats.getMin();
+    stats._timeGapMax = _interframeTimeGapStats.getMax();
+    stats._timeGapAverage = _interframeTimeGapStats.getAverage();
+    stats._timeGapWindowMin = _interframeTimeGapStats.getWindowMin();
+    stats._timeGapWindowMax = _interframeTimeGapStats.getWindowMax();
+    stats._timeGapWindowAverage = _interframeTimeGapStats.getWindowAverage();
+
+    stats._ringBufferFramesAvailable = _ringBuffer.framesAvailable();
+    stats._ringBufferCurrentJitterBufferFrames = 0;
+    stats._ringBufferDesiredJitterBufferFrames = getDesiredJitterBufferFrames();
+    stats._ringBufferStarveCount = _starveCount;
+    stats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount;
+    stats._ringBufferOverflowCount = _ringBuffer.getOverflowCount();
+    stats._ringBufferSilentFramesDropped = 0;
+
+    stats._packetStreamStats = _incomingMixedAudioSequenceNumberStats.getStats();
+    stats._packetStreamWindowStats = _incomingMixedAudioSequenceNumberStats.getStatsForHistoryWindow();
+
+    return stats;
+}
+
+void Audio::sendDownstreamAudioStatsPacket() {
+
+    // push the current seq number stats into history, which moves the history window forward 1s
+    // (since that's how often pushStatsToHistory() is called)
+    _incomingMixedAudioSequenceNumberStats.pushStatsToHistory();
+
+    char packet[MAX_PACKET_SIZE];
+
+    // pack header
+    int numBytesPacketHeader = populatePacketHeader(packet, PacketTypeAudioStreamStats);
+    char* dataAt = packet + numBytesPacketHeader;
+
+    // pack append flag
+    quint8 appendFlag = 0;
+    memcpy(dataAt, &appendFlag, sizeof(quint8));
+    dataAt += sizeof(quint8);
+
+    // pack number of stats packed
+    quint16 numStreamStatsToPack = 1;
+    memcpy(dataAt, &numStreamStatsToPack, sizeof(quint16));
+    dataAt += sizeof(quint16);
+
+    // pack downstream audio stream stats
+    AudioStreamStats stats = getDownstreamAudioStreamStats();
+    memcpy(dataAt, &stats, sizeof(AudioStreamStats));
+    dataAt += sizeof(AudioStreamStats);
+
+    // send packet
+    NodeList* nodeList = NodeList::getInstance();
+    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
+    nodeList->writeDatagram(packet, dataAt - packet, audioMixer);
+}
+
 // NOTE: numSamples is the total number of single channel samples, since callers will always call this with stereo
 // data we know that we will have 2x samples for each stereo time sample at the format's sample rate
 void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {
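Note (reviewer sketch): the packet written above is [packet header][quint8 appendFlag][quint16 numStats][numStats raw AudioStreamStats structs], and AudioMixerClientData::parseData() earlier in this diff skips exactly header + 1 + 2 bytes before the first struct. Below is a self-contained reader for that layout, with the header length and the stats type treated as parameters rather than the project's real helpers:

#include <cstdint>
#include <cstring>
#include <vector>

// Layout: [header (headerBytes)][uint8 appendFlag][uint16 numStats][numStats * StatsT].
// StatsT stands in for AudioStreamStats (assumed trivially copyable, as the memcpy writer
// above requires); headerBytes stands in for what populatePacketHeader() /
// numBytesForPacketHeader() report.
template <typename StatsT>
std::vector<StatsT> readStreamStatsPayload(const char* packet, int headerBytes) {
    const char* dataAt = packet + headerBytes;

    uint8_t appendFlag;
    memcpy(&appendFlag, dataAt, sizeof(appendFlag));
    dataAt += sizeof(appendFlag);          // 1 byte: append flag

    uint16_t numStats;
    memcpy(&numStats, dataAt, sizeof(numStats));
    dataAt += sizeof(numStats);            // 2 bytes: count of stats structs

    std::vector<StatsT> stats(numStats);
    for (uint16_t i = 0; i < numStats; ++i) {
        memcpy(&stats[i], dataAt, sizeof(StatsT));   // raw structs, matching the writer above
        dataAt += sizeof(StatsT);
    }
    return stats;
}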
@@ -867,6 +947,9 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
         //qDebug() << "Audio output just starved.";
         _ringBuffer.setIsStarved(true);
         _numFramesDisplayStarve = 10;
+
+        _starveCount++;
+        _consecutiveNotMixedCount = 0;
     }

     int numNetworkOutputSamples;
@@ -886,6 +969,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
     if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) {
         // We are still waiting for enough samples to begin playback
         // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback;
+        _consecutiveNotMixedCount++;
     } else {
         int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
@@ -17,6 +17,8 @@

 #include "InterfaceConfig.h"
 #include "AudioStreamStats.h"
+#include "RingBufferHistory.h"
+#include "MovingMinMaxAvg.h"

 #include <QAudio>
 #include <QAudioInput>
@@ -34,6 +36,8 @@

 static const int NUM_AUDIO_CHANNELS = 2;

+static const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
+
 class QAudioInput;
 class QAudioOutput;
 class QIODevice;
@@ -97,6 +101,9 @@ public slots:

     virtual void handleAudioByteArray(const QByteArray& audioByteArray);

+    AudioStreamStats getDownstreamAudioStreamStats() const;
+    void sendDownstreamAudioStatsPacket();
+
     bool switchInputToAudioDevice(const QString& inputDeviceName);
     bool switchOutputToAudioDevice(const QString& outputDeviceName);
     QString getDeviceName(QAudio::Mode mode) const { return (mode == QAudio::AudioInput) ?
@@ -107,8 +114,16 @@ public slots:
     float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
     void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }

-    const AudioStreamStats& getAudioMixerAvatarStreamStats() const { return _audioMixerAvatarStreamStats; }
-    const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamStatsMap() const { return _audioMixerInjectedStreamStatsMap; }
+    const AudioRingBuffer& getDownstreamRingBuffer() const { return _ringBuffer; }
+    int getDesiredJitterBufferFrames() const { return _jitterBufferSamples / _ringBuffer.getNumFrameSamples(); }
+
+    int getStarveCount() const { return _starveCount; }
+    int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; }
+
+    const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
+    const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }
+    const MovingMinMaxAvg<quint64>& getInterframeTimeGapStats() const { return _interframeTimeGapStats; }

 signals:
     bool muteToggled();
@@ -241,11 +256,16 @@ private:
     QByteArray* _scopeOutputLeft;
     QByteArray* _scopeOutputRight;

-    AudioStreamStats _audioMixerAvatarStreamStats;
-    QHash<QUuid, AudioStreamStats> _audioMixerInjectedStreamStatsMap;
+    int _starveCount;
+    int _consecutiveNotMixedCount;
+
+    AudioStreamStats _audioMixerAvatarStreamAudioStats;
+    QHash<QUuid, AudioStreamStats> _audioMixerInjectedStreamAudioStatsMap;

     quint16 _outgoingAvatarAudioSequenceNumber;
     SequenceNumberStats _incomingMixedAudioSequenceNumberStats;
+
+    MovingMinMaxAvg<quint64> _interframeTimeGapStats;
 };

@@ -24,6 +24,7 @@
 #include "InterfaceConfig.h"
 #include "Menu.h"
 #include "Util.h"
+#include "SequenceNumberStats.h"

 using namespace std;

@@ -288,15 +289,12 @@ void Stats::display(

     Audio* audio = Application::getInstance()->getAudio();
-    const AudioStreamStats& audioMixerAvatarStreamStats = audio->getAudioMixerAvatarStreamStats();
-    const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamStatsMap = audio->getAudioMixerInjectedStreamStatsMap();
+    const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamAudioStatsMap = audio->getAudioMixerInjectedStreamAudioStatsMap();

-    lines = _expanded ? 10 + audioMixerInjectedStreamStatsMap.size(): 3;
+    lines = _expanded ? 11 + (audioMixerInjectedStreamAudioStatsMap.size() + 2) * 3 : 3;
     drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
     horizontalOffset += 5;

     char audioJitter[30];
     sprintf(audioJitter,
             "Buffer msecs %.1f",
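Note (reviewer sketch): the new `lines` expression budgets the taller audio panel: 11 fixed lines of labels and format headers, plus 3 lines for each stream shown, where the "+ 2" accounts for the downstream mix and the microphone stream in addition to the injected streams.

// Expanded audio stats panel height, in lines, restating the expression in the hunk above.
int expandedAudioStatsLines(int numInjectedStreams) {
    const int fixedLines = 11;                       // labels, format headers, section titles
    const int linesPerStream = 3;                    // loss line, gap line, 30s-window line
    const int streamsShown = numInjectedStreams + 2; // injected streams + downstream mix + mic
    return fixedLines + streamsShown * linesPerStream;
}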
@@ -328,42 +326,102 @@ void Stats::display(
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelMaxPing, color);

     char audioMixerStatsLabelString[] = "AudioMixer stats:";
-    char streamStatsFormatLabelString[] = "early/late/lost, jframes";
+    char streamStatsFormatLabelString[] = "lost%/30s_lost%";
+    char streamStatsFormatLabelString2[] = "avail/currJ/desiredJ";
+    char streamStatsFormatLabelString3[] = "gaps: min/max/avg, starv/ovfl";
+    char streamStatsFormatLabelString4[] = "30s gaps: (same), notmix/sdrop";

     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, audioMixerStatsLabelString, color);
     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString, color);
+    verticalOffset += STATS_PELS_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString2, color);
+    verticalOffset += STATS_PELS_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString3, color);
+    verticalOffset += STATS_PELS_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString4, color);

     char downstreamLabelString[] = " Downstream:";
     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);

-    const SequenceNumberStats& downstreamAudioSequenceNumberStats = audio->getIncomingMixedAudioSequenceNumberStats();
     char downstreamAudioStatsString[30];
-    sprintf(downstreamAudioStatsString, " mix: %d/%d/%d, %d", downstreamAudioSequenceNumberStats.getNumEarly(),
-        downstreamAudioSequenceNumberStats.getNumLate(), downstreamAudioSequenceNumberStats.getNumLost(),
-        audio->getJitterBufferSamples() / NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);
+
+    AudioStreamStats downstreamAudioStreamStats = audio->getDownstreamAudioStreamStats();
+
+    sprintf(downstreamAudioStatsString, " mix: %.1f%%/%.1f%%, %u/?/%u", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
+        downstreamAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
+        downstreamAudioStreamStats._ringBufferFramesAvailable, downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames);

     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

+    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", downstreamAudioStreamStats._timeGapMin,
+        downstreamAudioStreamStats._timeGapMax, downstreamAudioStreamStats._timeGapAverage,
+        downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);
+
+    verticalOffset += STATS_PELS_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
+
+    sprintf(downstreamAudioStatsString, " %llu/%llu/%.2f, %u/?", downstreamAudioStreamStats._timeGapWindowMin,
+        downstreamAudioStreamStats._timeGapWindowMax, downstreamAudioStreamStats._timeGapWindowAverage,
+        downstreamAudioStreamStats._ringBufferConsecutiveNotMixedCount);
+
+    verticalOffset += STATS_PELS_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
+
     char upstreamLabelString[] = " Upstream:";
     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamLabelString, color);

     char upstreamAudioStatsString[30];
-    sprintf(upstreamAudioStatsString, " mic: %d/%d/%d, %d", audioMixerAvatarStreamStats._packetsEarly,
-        audioMixerAvatarStreamStats._packetsLate, audioMixerAvatarStreamStats._packetsLost,
-        audioMixerAvatarStreamStats._jitterBufferFrames);
+
+    const AudioStreamStats& audioMixerAvatarAudioStreamStats = audio->getAudioMixerAvatarStreamAudioStats();
+
+    sprintf(upstreamAudioStatsString, " mic: %.1f%%/%.1f%%, %u/%u/%u", audioMixerAvatarAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
+        audioMixerAvatarAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
+        audioMixerAvatarAudioStreamStats._ringBufferFramesAvailable, audioMixerAvatarAudioStreamStats._ringBufferCurrentJitterBufferFrames,
+        audioMixerAvatarAudioStreamStats._ringBufferDesiredJitterBufferFrames);

     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

-    foreach(AudioStreamStats injectedStreamStats, audioMixerInjectedStreamStatsMap) {
-        sprintf(upstreamAudioStatsString, " inj: %d/%d/%d, %d", injectedStreamStats._packetsEarly,
-            injectedStreamStats._packetsLate, injectedStreamStats._packetsLost, injectedStreamStats._jitterBufferFrames);
+    sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapMin,
+        audioMixerAvatarAudioStreamStats._timeGapMax, audioMixerAvatarAudioStreamStats._timeGapAverage,
+        audioMixerAvatarAudioStreamStats._ringBufferStarveCount, audioMixerAvatarAudioStreamStats._ringBufferOverflowCount);
+
+    verticalOffset += STATS_PELS_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
+
+    sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", audioMixerAvatarAudioStreamStats._timeGapWindowMin,
+        audioMixerAvatarAudioStreamStats._timeGapWindowMax, audioMixerAvatarAudioStreamStats._timeGapWindowAverage,
+        audioMixerAvatarAudioStreamStats._ringBufferConsecutiveNotMixedCount, audioMixerAvatarAudioStreamStats._ringBufferSilentFramesDropped);
+
+    verticalOffset += STATS_PELS_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
+
+    foreach(const AudioStreamStats& injectedStreamAudioStats, audioMixerInjectedStreamAudioStatsMap) {
+
+        sprintf(upstreamAudioStatsString, " inj: %.1f%%/%.1f%%, %u/%u/%u", injectedStreamAudioStats._packetStreamStats.getLostRate()*100.0f,
+            injectedStreamAudioStats._packetStreamWindowStats.getLostRate() * 100.0f,
+            injectedStreamAudioStats._ringBufferFramesAvailable, injectedStreamAudioStats._ringBufferCurrentJitterBufferFrames,
+            injectedStreamAudioStats._ringBufferDesiredJitterBufferFrames);
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
+
+        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapMin,
+            injectedStreamAudioStats._timeGapMax, injectedStreamAudioStats._timeGapAverage,
+            injectedStreamAudioStats._ringBufferStarveCount, injectedStreamAudioStats._ringBufferOverflowCount);
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
+
+        sprintf(upstreamAudioStatsString, " %llu/%llu/%.2f, %u/%u", injectedStreamAudioStats._timeGapWindowMin,
+            injectedStreamAudioStats._timeGapWindowMax, injectedStreamAudioStats._timeGapWindowAverage,
+            injectedStreamAudioStats._ringBufferConsecutiveNotMixedCount, injectedStreamAudioStats._ringBufferSilentFramesDropped);

         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
@@ -14,20 +14,26 @@
 #include "BillboardOverlay.h"

 BillboardOverlay::BillboardOverlay()
-    : _scale(1.0f),
+    : _fromImage(-1,-1,-1,-1),
+      _scale(1.0f),
       _isFacingAvatar(true) {
 }

 void BillboardOverlay::render() {
-    if (_billboard.isEmpty()) {
+    if (!_visible) {
         return;
     }

+    if (!_billboard.isEmpty()) {
     if (!_billboardTexture) {
         QImage image = QImage::fromData(_billboard);
         if (image.format() != QImage::Format_ARGB32) {
             image = image.convertToFormat(QImage::Format_ARGB32);
         }
         _size = image.size();
+        if (_fromImage.x() == -1) {
+            _fromImage.setRect(0, 0, _size.width(), _size.height());
+        }
         _billboardTexture.reset(new Texture());
         glBindTexture(GL_TEXTURE_2D, _billboardTexture->getID());
         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, _size.width(), _size.height(), 0,
@@ -37,6 +43,7 @@ void BillboardOverlay::render() {
     } else {
         glBindTexture(GL_TEXTURE_2D, _billboardTexture->getID());
     }
+    }

     glEnable(GL_ALPHA_TEST);
     glAlphaFunc(GL_GREATER, 0.5f);
@@ -58,21 +65,35 @@ void BillboardOverlay::render() {
     }
     glScalef(_scale, _scale, _scale);

-    float maxSize = glm::max(_size.width(), _size.height());
-    float x = _size.width() / (2.0f * maxSize);
-    float y = -_size.height() / (2.0f * maxSize);
+    if (_billboardTexture) {
+        float maxSize = glm::max(_fromImage.width(), _fromImage.height());
+        float x = _fromImage.width() / (2.0f * maxSize);
+        float y = -_fromImage.height() / (2.0f * maxSize);

         glColor3f(1.0f, 1.0f, 1.0f);
         glBegin(GL_QUADS); {
-            glTexCoord2f(0.0f, 0.0f);
+            glTexCoord2f((float)_fromImage.x() / (float)_size.width(),
+                         (float)_fromImage.y() / (float)_size.height());
             glVertex2f(-x, -y);
-            glTexCoord2f(1.0f, 0.0f);
+            glTexCoord2f(((float)_fromImage.x() + (float)_fromImage.width()) / (float)_size.width(),
+                         (float)_fromImage.y() / (float)_size.height());
             glVertex2f(x, -y);
-            glTexCoord2f(1.0f, 1.0f);
+            glTexCoord2f(((float)_fromImage.x() + (float)_fromImage.width()) / (float)_size.width(),
+                         ((float)_fromImage.y() + (float)_fromImage.height()) / _size.height());
             glVertex2f(x, y);
-            glTexCoord2f(0.0f, 1.0f);
+            glTexCoord2f((float)_fromImage.x() / (float)_size.width(),
+                         ((float)_fromImage.y() + (float)_fromImage.height()) / (float)_size.height());
             glVertex2f(-x, y);
         } glEnd();
+    } else {
+        glColor4f(0.5f, 0.5f, 0.5f, 1.0f);
+        glBegin(GL_QUADS); {
+            glVertex2f(-1.0f, -1.0f);
+            glVertex2f(1.0f, -1.0f);
+            glVertex2f(1.0f, 1.0f);
+            glVertex2f(-1.0f, 1.0f);
+        } glEnd();
+    }

 } glPopMatrix();
@@ -93,6 +114,33 @@ void BillboardOverlay::setProperties(const QScriptValue &properties) {
         setBillboardURL(_url);
     }
 
+    QScriptValue subImageBounds = properties.property("subImage");
+    if (subImageBounds.isValid()) {
+        QRect oldSubImageRect = _fromImage;
+        QRect subImageRect = _fromImage;
+        if (subImageBounds.property("x").isValid()) {
+            subImageRect.setX(subImageBounds.property("x").toVariant().toInt());
+        } else {
+            subImageRect.setX(oldSubImageRect.x());
+        }
+        if (subImageBounds.property("y").isValid()) {
+            subImageRect.setY(subImageBounds.property("y").toVariant().toInt());
+        } else {
+            subImageRect.setY(oldSubImageRect.y());
+        }
+        if (subImageBounds.property("width").isValid()) {
+            subImageRect.setWidth(subImageBounds.property("width").toVariant().toInt());
+        } else {
+            subImageRect.setWidth(oldSubImageRect.width());
+        }
+        if (subImageBounds.property("height").isValid()) {
+            subImageRect.setHeight(subImageBounds.property("height").toVariant().toInt());
+        } else {
+            subImageRect.setHeight(oldSubImageRect.height());
+        }
+        setClipFromSource(subImageRect);
+    }
+
     QScriptValue scaleValue = properties.property("scale");
     if (scaleValue.isValid()) {
         _scale = scaleValue.toVariant().toFloat();
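For reference, the subImage rect set above feeds the texture-coordinate math in render(): each corner is the rect's position or extent divided by the full image size. A minimal stand-alone sketch of that mapping; the 256x256 image and 128x128 rect are made-up values, not from this commit:

#include <QRect>
#include <QSize>
#include <cstdio>

int main() {
    QSize size(256, 256);             // full billboard image (assumed size)
    QRect fromImage(0, 0, 128, 128);  // subImage rect, as set via setClipFromSource()

    // same arithmetic as the render() hunk above
    float sMin = (float)fromImage.x() / (float)size.width();                                    // 0.0
    float tMin = (float)fromImage.y() / (float)size.height();                                   // 0.0
    float sMax = ((float)fromImage.x() + (float)fromImage.width()) / (float)size.width();       // 0.5
    float tMax = ((float)fromImage.y() + (float)fromImage.height()) / (float)size.height();     // 0.5

    printf("texcoords span (%.2f, %.2f) to (%.2f, %.2f)\n", sMin, tMin, sMax, tMax);
    return 0;
}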
@@ -25,6 +25,7 @@ public:
 
     virtual void render();
     virtual void setProperties(const QScriptValue& properties);
+    void setClipFromSource(const QRect& bounds) { _fromImage = bounds; }
 
 private slots:
     void replyFinished();
@@ -37,6 +38,8 @@ private:
     QSize _size;
     QScopedPointer<Texture> _billboardTexture;
 
+    QRect _fromImage; // where from in the image to sample
+
     glm::quat _rotation;
     float _scale;
     bool _isFacingAvatar;
@@ -35,6 +35,10 @@ void ModelOverlay::update(float deltatime) {
 }
 
 void ModelOverlay::render() {
+    if (!_visible) {
+        return;
+    }
+
     if (_model.isActive()) {
 
         if (_model.isRenderable()) {
@@ -20,6 +20,7 @@
 #include <QtCore/QIODevice>
 
 #include "NodeData.h"
+#include "SharedUtil.h"
 
 const int SAMPLE_RATE = 24000;
 
@@ -29,7 +30,7 @@ const int NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL = 512;
 const int NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL / sizeof(int16_t);
 
 const unsigned int BUFFER_SEND_INTERVAL_USECS = floorf((NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
-    / (float) SAMPLE_RATE) * 1000 * 1000);
+    / (float) SAMPLE_RATE) * USECS_PER_SECOND);
 
 const int MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max();
 const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
@@ -65,6 +66,9 @@ public:
     void shiftReadPosition(unsigned int numSamples);
 
     int samplesAvailable() const;
+    int framesAvailable() const { return samplesAvailable() / _numFrameSamples; }
+
+    int getNumFrameSamples() const { return _numFrameSamples; }
 
     bool isNotStarvedOrHasMinimumSamples(int numRequiredSamples) const;
 
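The switch to USECS_PER_SECOND is cosmetic; the computed interval is unchanged. A small sketch of the arithmetic, assuming USECS_PER_SECOND carries the usual 1,000,000 value from SharedUtil:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    const int SAMPLE_RATE = 24000;
    const int NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL = 512 / sizeof(int16_t);  // 256 samples
    const float USECS_PER_SECOND = 1000.0f * 1000.0f;                             // assumed value of the SharedUtil constant

    // 256 samples / 24000 samples-per-second = ~10.67 ms per network frame
    unsigned int intervalUsecs = floorf((NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
        / (float)SAMPLE_RATE) * USECS_PER_SECOND);
    printf("BUFFER_SEND_INTERVAL_USECS = %u\n", intervalUsecs);                   // 10666
    return 0;
}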
@@ -13,34 +13,50 @@
 #define hifi_AudioStreamStats_h
 
 #include "PositionalAudioRingBuffer.h"
+#include "SequenceNumberStats.h"
 
 class AudioStreamStats {
 public:
     AudioStreamStats()
         : _streamType(PositionalAudioRingBuffer::Microphone),
         _streamIdentifier(),
-        _jitterBufferFrames(0),
-        _packetsReceived(0),
-        _packetsUnreasonable(0),
-        _packetsEarly(0),
-        _packetsLate(0),
-        _packetsLost(0),
-        _packetsRecovered(0),
-        _packetsDuplicate(0)
+        _timeGapMin(0),
+        _timeGapMax(0),
+        _timeGapAverage(0.0f),
+        _timeGapWindowMin(0),
+        _timeGapWindowMax(0),
+        _timeGapWindowAverage(0.0f),
+        _ringBufferFramesAvailable(0),
+        _ringBufferCurrentJitterBufferFrames(0),
+        _ringBufferDesiredJitterBufferFrames(0),
+        _ringBufferStarveCount(0),
+        _ringBufferConsecutiveNotMixedCount(0),
+        _ringBufferOverflowCount(0),
+        _ringBufferSilentFramesDropped(0),
+        _packetStreamStats(),
+        _packetStreamWindowStats()
     {}
 
     PositionalAudioRingBuffer::Type _streamType;
     QUuid _streamIdentifier;
 
-    quint16 _jitterBufferFrames;
+    quint64 _timeGapMin;
+    quint64 _timeGapMax;
+    float _timeGapAverage;
+    quint64 _timeGapWindowMin;
+    quint64 _timeGapWindowMax;
+    float _timeGapWindowAverage;
 
-    quint32 _packetsReceived;
-    quint32 _packetsUnreasonable;
-    quint32 _packetsEarly;
-    quint32 _packetsLate;
-    quint32 _packetsLost;
-    quint32 _packetsRecovered;
-    quint32 _packetsDuplicate;
+    quint32 _ringBufferFramesAvailable;
+    quint16 _ringBufferCurrentJitterBufferFrames;
+    quint16 _ringBufferDesiredJitterBufferFrames;
+    quint32 _ringBufferStarveCount;
+    quint32 _ringBufferConsecutiveNotMixedCount;
+    quint32 _ringBufferOverflowCount;
+    quint32 _ringBufferSilentFramesDropped;
+
+    PacketStreamStats _packetStreamStats;
+    PacketStreamStats _packetStreamWindowStats;
 };
 
 #endif // hifi_AudioStreamStats_h
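A hedged sketch of how these fields could be filled from a PositionalAudioRingBuffer and its SequenceNumberStats using the getters added elsewhere in this commit; the helper name is illustrative and the current-jitter and overflow fields are omitted because their sources are not shown in these hunks:

#include "AudioStreamStats.h"
#include "PositionalAudioRingBuffer.h"
#include "SequenceNumberStats.h"

// illustrative helper, not the commit's actual packing code
AudioStreamStats makeStreamStats(const PositionalAudioRingBuffer* ringBuffer,
                                 const SequenceNumberStats& seqStats) {
    AudioStreamStats stats;

    // interframe time gaps, lifetime and trailing-window values
    const MovingMinMaxAvg<quint64>& gaps = ringBuffer->getInterframeTimeGapStatsForStatsPacket();
    stats._timeGapMin = gaps.getMin();
    stats._timeGapMax = gaps.getMax();
    stats._timeGapAverage = gaps.getAverage();
    stats._timeGapWindowMin = gaps.getWindowMin();
    stats._timeGapWindowMax = gaps.getWindowMax();
    stats._timeGapWindowAverage = gaps.getWindowAverage();

    // ring buffer health counters added in this commit
    stats._ringBufferFramesAvailable = ringBuffer->framesAvailable();
    stats._ringBufferDesiredJitterBufferFrames = ringBuffer->getDesiredJitterBufferFrames();
    stats._ringBufferStarveCount = ringBuffer->getStarveCount();
    stats._ringBufferConsecutiveNotMixedCount = ringBuffer->getConsecutiveNotMixedCount();
    stats._ringBufferSilentFramesDropped = ringBuffer->getSilentFramesDropped();

    // packet-stream counters, lifetime and trailing window
    stats._packetStreamStats = seqStats.getStats();
    stats._packetStreamWindowStats = seqStats.getStatsForHistoryWindow();
    return stats;
}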
@@ -31,7 +31,7 @@ InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier,
 const uchar MAX_INJECTOR_VOLUME = 255;
 
 int InjectedAudioRingBuffer::parseData(const QByteArray& packet) {
-    _interframeTimeGapStats.frameReceived();
+    timeGapStatsFrameReceived();
     updateDesiredJitterBufferFrames();
 
     // setup a data stream to read from this packet
@@ -21,70 +21,6 @@
 #include "PositionalAudioRingBuffer.h"
 #include "SharedUtil.h"
 
-InterframeTimeGapStats::InterframeTimeGapStats()
-    : _lastFrameReceivedTime(0),
-    _numSamplesInCurrentInterval(0),
-    _currentIntervalMaxGap(0),
-    _newestIntervalMaxGapAt(0),
-    _windowMaxGap(0),
-    _newWindowMaxGapAvailable(false)
-{
-    memset(_intervalMaxGaps, 0, TIME_GAP_NUM_INTERVALS_IN_WINDOW * sizeof(quint64));
-}
-
-void InterframeTimeGapStats::frameReceived() {
-    quint64 now = usecTimestampNow();
-
-    // make sure this isn't the first time frameReceived() is called so can actually calculate a gap.
-    if (_lastFrameReceivedTime != 0) {
-        quint64 gap = now - _lastFrameReceivedTime;
-
-        // update the current interval max
-        if (gap > _currentIntervalMaxGap) {
-            _currentIntervalMaxGap = gap;
-
-            // keep the window max gap at least as large as the current interval max
-            // this allows the window max gap to respond immediately to a sudden spike in gap times
-            // also, this prevents the window max gap from staying at 0 until the first interval of samples filled up
-            if (_currentIntervalMaxGap > _windowMaxGap) {
-                _windowMaxGap = _currentIntervalMaxGap;
-                _newWindowMaxGapAvailable = true;
-            }
-        }
-        _numSamplesInCurrentInterval++;
-
-        // if the current interval of samples is now full, record it in our interval maxes
-        if (_numSamplesInCurrentInterval == TIME_GAP_NUM_SAMPLES_IN_INTERVAL) {
-
-            // find location to insert this interval's max (increment index cyclically)
-            _newestIntervalMaxGapAt = _newestIntervalMaxGapAt == TIME_GAP_NUM_INTERVALS_IN_WINDOW - 1 ? 0 : _newestIntervalMaxGapAt + 1;
-
-            // record the current interval's max gap as the newest
-            _intervalMaxGaps[_newestIntervalMaxGapAt] = _currentIntervalMaxGap;
-
-            // update the window max gap, which is the max out of all the past intervals' max gaps
-            _windowMaxGap = 0;
-            for (int i = 0; i < TIME_GAP_NUM_INTERVALS_IN_WINDOW; i++) {
-                if (_intervalMaxGaps[i] > _windowMaxGap) {
-                    _windowMaxGap = _intervalMaxGaps[i];
-                }
-            }
-            _newWindowMaxGapAvailable = true;
-
-            // reset the current interval
-            _numSamplesInCurrentInterval = 0;
-            _currentIntervalMaxGap = 0;
-        }
-    }
-    _lastFrameReceivedTime = now;
-}
-
-quint64 InterframeTimeGapStats::getWindowMaxGap() {
-    _newWindowMaxGapAvailable = false;
-    return _windowMaxGap;
-}
-
 
 PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo, bool dynamicJitterBuffers) :
     AudioRingBuffer(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
@@ -97,10 +33,15 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::
     _shouldOutputStarveDebug(true),
     _isStereo(isStereo),
     _listenerUnattenuatedZone(NULL),
+    _lastFrameReceivedTime(0),
+    _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS),
+    _interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS),
     _desiredJitterBufferFrames(1),
     _currentJitterBufferFrames(-1),
     _dynamicJitterBuffers(dynamicJitterBuffers),
-    _consecutiveNotMixedCount(0)
+    _consecutiveNotMixedCount(0),
+    _starveCount(0),
+    _silentFramesDropped(0)
 {
 }
 
@@ -143,9 +84,12 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
             addSilentFrame(numSilentFramesToAdd * samplesPerFrame);
             _currentJitterBufferFrames = _desiredJitterBufferFrames;
 
+            _silentFramesDropped += numFramesToDropDesired;
         } else {
             // we need to drop all frames to get the jitter buffer close as possible to its desired length
             _currentJitterBufferFrames -= numSilentFrames;
 
+            _silentFramesDropped += numSilentFrames;
         }
     } else {
         addSilentFrame(numSilentSamples);
@@ -217,6 +161,7 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
     } else if (samplesAvailable() < samplesPerFrame) {
         // if the buffer doesn't have a full frame of samples to take for mixing, it is starved
         _isStarved = true;
+        _starveCount++;
 
         // set to -1 to indicate the jitter buffer is starved
        _currentJitterBufferFrames = -1;
@@ -224,7 +169,7 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
         // reset our _shouldOutputStarveDebug to true so the next is printed
         _shouldOutputStarveDebug = true;
 
-        _consecutiveNotMixedCount++;
+        _consecutiveNotMixedCount = 1;
         return false;
     }
 
@@ -234,7 +179,6 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
         // minus one (since a frame will be read immediately after this) is the length of the jitter buffer
         _currentJitterBufferFrames = samplesAvailable() / samplesPerFrame - 1;
         _isStarved = false;
-        _consecutiveNotMixedCount = 0;
     }
 
     // since we've read data from ring buffer at least once - we've started
@@ -247,21 +191,31 @@ int PositionalAudioRingBuffer::getCalculatedDesiredJitterBufferFrames() const {
     int calculatedDesiredJitterBufferFrames = 1;
     const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;
 
-    calculatedDesiredJitterBufferFrames = ceilf((float)_interframeTimeGapStats.peekWindowMaxGap() / USECS_PER_FRAME);
+    calculatedDesiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
     if (calculatedDesiredJitterBufferFrames < 1) {
         calculatedDesiredJitterBufferFrames = 1;
     }
     return calculatedDesiredJitterBufferFrames;
 }
 
+void PositionalAudioRingBuffer::timeGapStatsFrameReceived() {
+    quint64 now = usecTimestampNow();
+    if (_lastFrameReceivedTime != 0) {
+        quint64 gap = now - _lastFrameReceivedTime;
+        _interframeTimeGapStatsForJitterCalc.update(gap);
+        _interframeTimeGapStatsForStatsPacket.update(gap);
+    }
+    _lastFrameReceivedTime = now;
+}
+
 void PositionalAudioRingBuffer::updateDesiredJitterBufferFrames() {
-    if (_interframeTimeGapStats.hasNewWindowMaxGapAvailable()) {
+    if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) {
         if (!_dynamicJitterBuffers) {
             _desiredJitterBufferFrames = 1; // HACK to see if this fixes the audio silence
         } else {
             const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;
 
-            _desiredJitterBufferFrames = ceilf((float)_interframeTimeGapStats.getWindowMaxGap() / USECS_PER_FRAME);
+            _desiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
             if (_desiredJitterBufferFrames < 1) {
                 _desiredJitterBufferFrames = 1;
             }
@@ -270,5 +224,6 @@ void PositionalAudioRingBuffer::updateDesiredJitterBufferFrames() {
                 _desiredJitterBufferFrames = maxDesired;
             }
         }
+        _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag();
     }
 }
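timeGapStatsFrameReceived() above is just MovingMinMaxAvg::update() fed with interframe gaps; a minimal stand-alone sketch of that flow, with synthetic sample values:

#include "MovingMinMaxAvg.h"
#include <cstdio>

int main() {
    // stats over a moving window of 10 intervals x 500 samples, recalculated every 500 samples,
    // the same shape used for the jitter-buffer calculation
    MovingMinMaxAvg<quint64> gapStats(500, 10);

    for (quint64 gap = 1; gap <= 5000; gap++) {
        gapStats.update(gap);                        // feed one interframe gap (usecs)
        if (gapStats.getNewStatsAvailableFlag()) {   // set once per full interval
            printf("window max so far: %llu\n", (unsigned long long)gapStats.getWindowMax());
            gapStats.clearNewStatsAvailableFlag();
        }
    }
    printf("overall min/max/avg: %llu/%llu/%f\n",
           (unsigned long long)gapStats.getMin(),
           (unsigned long long)gapStats.getMax(),
           gapStats.getAverage());
    return 0;
}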
@@ -17,31 +17,17 @@
 #include <AABox.h>
 
 #include "AudioRingBuffer.h"
+#include "MovingMinMaxAvg.h"
 
-// this means that every 500 samples, the max for the past 10*500 samples will be calculated
-const int TIME_GAP_NUM_SAMPLES_IN_INTERVAL = 500;
-const int TIME_GAP_NUM_INTERVALS_IN_WINDOW = 10;
+// the time gaps stats for _desiredJitterBufferFrames calculation
+// will recalculate the max for the past 5000 samples every 500 samples
+const int TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES = 500;
+const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10;
 
-// class used to track time between incoming frames for the purpose of varying the jitter buffer length
-class InterframeTimeGapStats {
-public:
-    InterframeTimeGapStats();
-
-    void frameReceived();
-    bool hasNewWindowMaxGapAvailable() const { return _newWindowMaxGapAvailable; }
-    quint64 peekWindowMaxGap() const { return _windowMaxGap; }
-    quint64 getWindowMaxGap();
-
-private:
-    quint64 _lastFrameReceivedTime;
-
-    int _numSamplesInCurrentInterval;
-    quint64 _currentIntervalMaxGap;
-    quint64 _intervalMaxGaps[TIME_GAP_NUM_INTERVALS_IN_WINDOW];
-    int _newestIntervalMaxGapAt;
-    quint64 _windowMaxGap;
-    bool _newWindowMaxGapAvailable;
-};
+// the time gap stats for constructing AudioStreamStats will
+// recalculate min/max/avg every ~1 second for the past ~30 seconds of time gap data
+const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
+const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30;
 
 const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
 
@@ -79,17 +65,22 @@ public:
 
     int getSamplesPerFrame() const { return _isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }
 
+    const MovingMinMaxAvg<quint64>& getInterframeTimeGapStatsForStatsPacket() const { return _interframeTimeGapStatsForStatsPacket; }
+
     int getCalculatedDesiredJitterBufferFrames() const; /// returns what we would calculate our desired as if asked
     int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; }
     int getCurrentJitterBufferFrames() const { return _currentJitterBufferFrames; }
 
     int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; }
+    int getStarveCount() const { return _starveCount; }
+    int getSilentFramesDropped() const { return _silentFramesDropped; }
 
 protected:
     // disallow copying of PositionalAudioRingBuffer objects
     PositionalAudioRingBuffer(const PositionalAudioRingBuffer&);
     PositionalAudioRingBuffer& operator= (const PositionalAudioRingBuffer&);
 
+    void timeGapStatsFrameReceived();
     void updateDesiredJitterBufferFrames();
 
     PositionalAudioRingBuffer::Type _type;
@@ -103,13 +94,18 @@ protected:
     float _nextOutputTrailingLoudness;
     AABox* _listenerUnattenuatedZone;
 
-    InterframeTimeGapStats _interframeTimeGapStats;
+    quint64 _lastFrameReceivedTime;
+    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForJitterCalc;
+    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForStatsPacket;
 
     int _desiredJitterBufferFrames;
     int _currentJitterBufferFrames;
     bool _dynamicJitterBuffers;
 
     // extra stats
     int _consecutiveNotMixedCount;
+    int _starveCount;
+    int _silentFramesDropped;
 };
 
 #endif // hifi_PositionalAudioRingBuffer_h
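With the 24 kHz / 256-samples-per-frame constants from AudioRingBuffer.h, TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES works out to roughly 93 frames, about one second of audio, so 30 intervals cover about 30 seconds. A sketch of that arithmetic; the frame size and microsecond constant are restated here as assumptions:

#include <cstdio>

int main() {
    const int SAMPLE_RATE = 24000;
    const int FRAME_SAMPLES = 256;                 // NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL (assumed)
    const int USECS_PER_SECOND = 1000 * 1000;
    const int BUFFER_SEND_INTERVAL_USECS =
        (int)((FRAME_SAMPLES / (float)SAMPLE_RATE) * USECS_PER_SECOND);   // ~10666 usecs per frame

    int intervalSamples = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;  // ~93 frames, ~1 second
    int windowIntervals = 30;                                             // TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS
    printf("stats window covers ~%d frames (~%d seconds of audio)\n",
           intervalSamples * windowIntervals, windowIntervals);
    return 0;
}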
@@ -331,6 +331,7 @@ ModelItem ModelItem::fromEditPacket(const unsigned char* data, int length, int&
 
         newModelItem.setCreatorTokenID(creatorTokenID);
         newModelItem._newlyCreated = true;
+        valid = true;
 
     } else {
         // look up the existing modelItem
@@ -339,20 +340,19 @@ ModelItem ModelItem::fromEditPacket(const unsigned char* data, int length, int&
         // copy existing properties before over-writing with new properties
         if (existingModelItem) {
             newModelItem = *existingModelItem;
+            valid = true;
         } else {
             // the user attempted to edit a modelItem that doesn't exist
-            qDebug() << "user attempted to edit a modelItem that doesn't exist...";
+            qDebug() << "user attempted to edit a modelItem that doesn't exist... editID=" << editID;
+
+            // NOTE: even though this is a bad editID, we have to consume the edit details, so that
+            // the buffer doesn't get corrupted for further processing...
             valid = false;
-            return newModelItem;
         }
         newModelItem._id = editID;
         newModelItem._newlyCreated = false;
     }
 
-    // if we got this far, then our result will be valid
-    valid = true;
-
 
     // lastEdited
     memcpy(&newModelItem._lastEdited, dataAt, sizeof(newModelItem._lastEdited));
     dataAt += sizeof(newModelItem._lastEdited);
@@ -69,8 +69,9 @@ ModelItemProperties ModelsScriptingInterface::getModelProperties(ModelItemID mod
     }
     if (_modelTree) {
         _modelTree->lockForRead();
-        const ModelItem* model = _modelTree->findModelByID(identity.id, true);
+        ModelItem* model = const_cast<ModelItem*>(_modelTree->findModelByID(identity.id, true));
         if (model) {
+            model->setSittingPoints(_modelTree->getGeometryForModel(*model)->sittingPoints);
             results.copyFromModelItem(*model);
         } else {
             results.setIsUnknownID();
@@ -78,6 +78,8 @@ PacketVersion versionForPacketType(PacketType type) {
             return 2;
         case PacketTypeModelErase:
             return 1;
+        case PacketTypeAudioStreamStats:
+            return 1;
         default:
             return 0;
     }
@@ -14,8 +14,6 @@
 
 SentPacketHistory::SentPacketHistory(int size)
     : _sentPackets(size),
-    _newestPacketAt(0),
-    _numExistingPackets(0),
     _newestSequenceNumber(std::numeric_limits<uint16_t>::max())
 {
 }
@@ -29,16 +27,8 @@ void SentPacketHistory::packetSent(uint16_t sequenceNumber, const QByteArray& pa
         qDebug() << "Unexpected sequence number passed to SentPacketHistory::packetSent()!"
             << "Expected:" << expectedSequenceNumber << "Actual:" << sequenceNumber;
     }
 
     _newestSequenceNumber = sequenceNumber;
+    _sentPackets.insert(packet);
-
-    // increment _newestPacketAt cyclically, insert new packet there.
-    // this will overwrite the oldest packet in the buffer
-    _newestPacketAt = (_newestPacketAt == _sentPackets.size() - 1) ? 0 : _newestPacketAt + 1;
-    _sentPackets[_newestPacketAt] = packet;
-    if (_numExistingPackets < _sentPackets.size()) {
-        _numExistingPackets++;
-    }
 }
 
 const QByteArray* SentPacketHistory::getPacket(uint16_t sequenceNumber) const {
@@ -51,13 +41,6 @@ const QByteArray* SentPacketHistory::getPacket(uint16_t sequenceNumber) const {
     if (seqDiff < 0) {
         seqDiff += UINT16_RANGE;
     }
-
-    // if desired sequence number is too old to be found in the history, return null
-    if (seqDiff >= _numExistingPackets) {
-        return NULL;
-    }
-    int packetAt = _newestPacketAt - seqDiff;
-    if (packetAt < 0) {
-        packetAt += _sentPackets.size();
-    }
-    return &_sentPackets.at(packetAt);
+    return _sentPackets.get(seqDiff);
 }
@@ -13,7 +13,7 @@
 
 #include <stdint.h>
 #include <qbytearray.h>
-#include <qvector.h>
+#include "RingBufferHistory.h"
 
 #include "SequenceNumberStats.h"
 
@@ -26,9 +26,7 @@ public:
     const QByteArray* getPacket(uint16_t sequenceNumber) const;
 
 private:
-    QVector<QByteArray> _sentPackets; // circular buffer
-    int _newestPacketAt;
-    int _numExistingPackets;
+    RingBufferHistory<QByteArray> _sentPackets; // circular buffer
 
     uint16_t _newestSequenceNumber;
 };
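getPacket() now relies on RingBufferHistory aging entries out on its own: get(age) returns NULL once an entry is older than the retained capacity, which replaces the manual _numExistingPackets bookkeeping. A small stand-alone sketch; the packet contents and capacity are made up:

#include <QByteArray>
#include "RingBufferHistory.h"

int main() {
    RingBufferHistory<QByteArray> history(3);    // keeps only the 3 most recent packets
    history.insert(QByteArray("pkt0"));
    history.insert(QByteArray("pkt1"));
    history.insert(QByteArray("pkt2"));
    history.insert(QByteArray("pkt3"));          // pkt0 is now out of the window

    const QByteArray* newest = history.get(0);   // "pkt3"
    const QByteArray* oldest = history.get(2);   // "pkt1"
    const QByteArray* tooOld = history.get(3);   // NULL, aged out (same role as the old _numExistingPackets check)
    return (newest && oldest && !tooOld) ? 0 : 1;
}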
@@ -13,29 +13,19 @@
 
 #include <limits>
 
-SequenceNumberStats::SequenceNumberStats()
+SequenceNumberStats::SequenceNumberStats(int statsHistoryLength)
     : _lastReceived(std::numeric_limits<quint16>::max()),
     _missingSet(),
-    _numReceived(0),
-    _numUnreasonable(0),
-    _numEarly(0),
-    _numLate(0),
-    _numLost(0),
-    _numRecovered(0),
-    _numDuplicate(0),
-    _lastSenderUUID()
+    _stats(),
+    _lastSenderUUID(),
+    _statsHistory(statsHistoryLength)
 {
 }
 
 void SequenceNumberStats::reset() {
     _missingSet.clear();
-    _numReceived = 0;
-    _numUnreasonable = 0;
-    _numEarly = 0;
-    _numLate = 0;
-    _numLost = 0;
-    _numRecovered = 0;
-    _numDuplicate = 0;
+    _stats = PacketStreamStats();
+    _statsHistory.clear();
 }
 
 static const int UINT16_RANGE = std::numeric_limits<uint16_t>::max() + 1;
@@ -51,9 +41,9 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
     }
 
     // determine our expected sequence number... handle rollover appropriately
-    quint16 expected = _numReceived > 0 ? _lastReceived + (quint16)1 : incoming;
+    quint16 expected = _stats._numReceived > 0 ? _lastReceived + (quint16)1 : incoming;
 
-    _numReceived++;
+    _stats._numReceived++;
 
     if (incoming == expected) { // on time
         _lastReceived = incoming;
@@ -80,7 +70,7 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
             // ignore packet if gap is unreasonable
             qDebug() << "ignoring unreasonable sequence number:" << incoming
                 << "previous:" << _lastReceived;
-            _numUnreasonable++;
+            _stats._numUnreasonable++;
             return;
         }
 
@@ -92,8 +82,8 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
                 qDebug() << ">>>>>>>> missing gap=" << (incomingInt - expectedInt);
             }
 
-            _numEarly++;
-            _numLost += (incomingInt - expectedInt);
+            _stats._numEarly++;
+            _stats._numLost += (incomingInt - expectedInt);
             _lastReceived = incoming;
 
             // add all sequence numbers that were skipped to the missing sequence numbers list
@@ -110,7 +100,7 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
             if (wantExtraDebugging) {
                 qDebug() << "this packet is later than expected...";
             }
-            _numLate++;
+            _stats._numLate++;
 
             // do not update _lastReceived; it shouldn't become smaller
 
@@ -119,13 +109,13 @@ void SequenceNumberStats::sequenceNumberReceived(quint16 incoming, QUuid senderU
                 if (wantExtraDebugging) {
                     qDebug() << "found it in _missingSet";
                 }
-                _numLost--;
-                _numRecovered++;
+                _stats._numLost--;
+                _stats._numRecovered++;
             } else {
                 if (wantExtraDebugging) {
                     qDebug() << "sequence:" << incoming << "was NOT found in _missingSet and is probably a duplicate";
                 }
-                _numDuplicate++;
+                _stats._numDuplicate++;
             }
         }
     }
@@ -180,3 +170,26 @@ void SequenceNumberStats::pruneMissingSet(const bool wantExtraDebugging) {
         }
     }
 }
+
+PacketStreamStats SequenceNumberStats::getStatsForHistoryWindow() const {
+
+    const PacketStreamStats* newestStats = _statsHistory.getNewestEntry();
+    const PacketStreamStats* oldestStats = _statsHistory.get(_statsHistory.getNumEntries() - 1);
+
+    // this catches cases where history is length 1 or 0 (both are NULL in case of 0)
+    if (newestStats == oldestStats) {
+        return PacketStreamStats();
+    }
+
+    // calculate difference between newest stats and oldest stats to get window stats
+    PacketStreamStats windowStats;
+    windowStats._numReceived = newestStats->_numReceived - oldestStats->_numReceived;
+    windowStats._numUnreasonable = newestStats->_numUnreasonable - oldestStats->_numUnreasonable;
+    windowStats._numEarly = newestStats->_numEarly - oldestStats->_numEarly;
+    windowStats._numLate = newestStats->_numLate - oldestStats->_numLate;
+    windowStats._numLost = newestStats->_numLost - oldestStats->_numLost;
+    windowStats._numRecovered = newestStats->_numRecovered - oldestStats->_numRecovered;
+    windowStats._numDuplicate = newestStats->_numDuplicate - oldestStats->_numDuplicate;
+
+    return windowStats;
+}
@@ -13,31 +13,29 @@
 #define hifi_SequenceNumberStats_h
 
 #include "SharedUtil.h"
+#include "RingBufferHistory.h"
 #include <quuid.h>
 
 const int MAX_REASONABLE_SEQUENCE_GAP = 1000;
 
-class SequenceNumberStats {
+class PacketStreamStats {
 public:
-    SequenceNumberStats();
+    PacketStreamStats()
+        : _numReceived(0),
+        _numUnreasonable(0),
+        _numEarly(0),
+        _numLate(0),
+        _numLost(0),
+        _numRecovered(0),
+        _numDuplicate(0)
+    {}
 
-    void reset();
-    void sequenceNumberReceived(quint16 incoming, QUuid senderUUID = QUuid(), const bool wantExtraDebugging = false);
-    void pruneMissingSet(const bool wantExtraDebugging = false);
-
-    quint32 getNumReceived() const { return _numReceived; }
-    quint32 getNumUnreasonable() const { return _numUnreasonable; }
-    quint32 getNumOutOfOrder() const { return _numEarly + _numLate; }
-    quint32 getNumEarly() const { return _numEarly; }
-    quint32 getNumLate() const { return _numLate; }
-    quint32 getNumLost() const { return _numLost; }
-    quint32 getNumRecovered() const { return _numRecovered; }
-    quint32 getNumDuplicate() const { return _numDuplicate; }
-    const QSet<quint16>& getMissingSet() const { return _missingSet; }
-
-private:
-    quint16 _lastReceived;
-    QSet<quint16> _missingSet;
+    float getUnreasonableRate() const { return (float)_numUnreasonable / _numReceived; }
+    float getNumEaryRate() const { return (float)_numEarly / _numReceived; }
+    float getLateRate() const { return (float)_numLate / _numReceived; }
+    float getLostRate() const { return (float)_numLost / _numReceived; }
+    float getRecoveredRate() const { return (float)_numRecovered / _numReceived; }
+    float getDuplicateRate() const { return (float)_numDuplicate / _numReceived; }
 
     quint32 _numReceived;
     quint32 _numUnreasonable;
@@ -46,8 +44,38 @@ private:
     quint32 _numLost;
     quint32 _numRecovered;
     quint32 _numDuplicate;
+};
+
+class SequenceNumberStats {
+public:
+    SequenceNumberStats(int statsHistoryLength = 0);
+
+    void reset();
+    void sequenceNumberReceived(quint16 incoming, QUuid senderUUID = QUuid(), const bool wantExtraDebugging = false);
+    void pruneMissingSet(const bool wantExtraDebugging = false);
+    void pushStatsToHistory() { _statsHistory.insert(_stats); }
+
+    quint32 getNumReceived() const { return _stats._numReceived; }
+    quint32 getNumUnreasonable() const { return _stats._numUnreasonable; }
+    quint32 getNumOutOfOrder() const { return _stats._numEarly + _stats._numLate; }
+    quint32 getNumEarly() const { return _stats._numEarly; }
+    quint32 getNumLate() const { return _stats._numLate; }
+    quint32 getNumLost() const { return _stats._numLost; }
+    quint32 getNumRecovered() const { return _stats._numRecovered; }
+    quint32 getNumDuplicate() const { return _stats._numDuplicate; }
+    const PacketStreamStats& getStats() const { return _stats; }
+    PacketStreamStats getStatsForHistoryWindow() const;
+    const QSet<quint16>& getMissingSet() const { return _missingSet; }
+
+private:
+    quint16 _lastReceived;
+    QSet<quint16> _missingSet;
+
+    PacketStreamStats _stats;
 
     QUuid _lastSenderUUID;
+
+    RingBufferHistory<PacketStreamStats> _statsHistory;
 };
 
 #endif // hifi_SequenceNumberStats_h
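The window stats come from snapshotting the cumulative counters with pushStatsToHistory() and diffing newest against oldest in getStatsForHistoryWindow(); a sketch of the intended call pattern, where the once-per-second cadence mirrors the mixer's stats interval and is an assumption here:

#include "SequenceNumberStats.h"

void exampleWindowStats() {
    // keep ~30 snapshots, one per stats interval
    SequenceNumberStats seqStats(30);

    // on every incoming packet:
    //     seqStats.sequenceNumberReceived(sequenceNumber, senderUUID);

    // once per stats interval (about every second in the audio mixer):
    seqStats.pushStatsToHistory();

    // lifetime counters vs. trailing-window counters
    PacketStreamStats lifetime = seqStats.getStats();
    PacketStreamStats window = seqStats.getStatsForHistoryWindow();
    float windowLossRate = window._numReceived > 0
        ? (float)window._numLost / window._numReceived : 0.0f;
    (void)lifetime; (void)windowLossRate;
}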
@@ -385,6 +385,8 @@ Particle Particle::fromEditPacket(const unsigned char* data, int length, int& pr
         newParticle._newlyCreated = true;
         newParticle.setAge(0); // this guy is new!
 
+        valid = true;
+
     } else {
         // look up the existing particle
         const Particle* existingParticle = tree->findParticleByID(editID, true);
@@ -392,20 +394,20 @@ Particle Particle::fromEditPacket(const unsigned char* data, int length, int& pr
         // copy existing properties before over-writing with new properties
         if (existingParticle) {
             newParticle = *existingParticle;
+            valid = true;
+
         } else {
             // the user attempted to edit a particle that doesn't exist
-            qDebug() << "user attempted to edit a particle that doesn't exist...";
+            qDebug() << "user attempted to edit a particle that doesn't exist... editID=" << editID;
+
+            // NOTE: even though this is a bad particle ID, we have to consume the edit details, so that
+            // the buffer doesn't get corrupted for further processing...
             valid = false;
-            return newParticle;
         }
         newParticle._id = editID;
         newParticle._newlyCreated = false;
     }
 
-    // if we got this far, then our result will be valid
-    valid = true;
-
 
     // lastEdited
     memcpy(&newParticle._lastEdited, dataAt, sizeof(newParticle._lastEdited));
     dataAt += sizeof(newParticle._lastEdited);
150
libraries/shared/src/MovingMinMaxAvg.h
Normal file
150
libraries/shared/src/MovingMinMaxAvg.h
Normal file
|
@ -0,0 +1,150 @@
|
||||||
|
//
|
||||||
|
// MovingMinMaxAvg.h
|
||||||
|
// libraries/shared/src
|
||||||
|
//
|
||||||
|
// Created by Yixin Wang on 7/8/2014
|
||||||
|
// Copyright 2013 High Fidelity, Inc.
|
||||||
|
//
|
||||||
|
// Distributed under the Apache License, Version 2.0.
|
||||||
|
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||||
|
//
|
||||||
|
|
||||||
|
#ifndef hifi_MovingMinMaxAvg_h
|
||||||
|
#define hifi_MovingMinMaxAvg_h
|
||||||
|
|
||||||
|
#include <limits>
|
||||||
|
|
||||||
|
#include "RingBufferHistory.h"
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
class MovingMinMaxAvg {
|
||||||
|
|
||||||
|
private:
|
||||||
|
class Stats {
|
||||||
|
public:
|
||||||
|
Stats()
|
||||||
|
: _min(std::numeric_limits<T>::max()),
|
||||||
|
_max(std::numeric_limits<T>::min()),
|
||||||
|
_average(0.0) {}
|
||||||
|
|
||||||
|
void updateWithSample(T sample, int& numSamplesInAverage) {
|
||||||
|
if (sample < _min) {
|
||||||
|
_min = sample;
|
||||||
|
}
|
||||||
|
if (sample > _max) {
|
||||||
|
_max = sample;
|
||||||
|
}
|
||||||
|
_average = _average * ((double)numSamplesInAverage / (numSamplesInAverage + 1))
|
||||||
|
+ (double)sample / (numSamplesInAverage + 1);
|
||||||
|
numSamplesInAverage++;
|
||||||
|
}
|
||||||
|
|
||||||
|
void updateWithOtherStats(const Stats& other, int& numStatsInAverage) {
|
||||||
|
if (other._min < _min) {
|
||||||
|
_min = other._min;
|
||||||
|
}
|
||||||
|
if (other._max > _max) {
|
||||||
|
_max = other._max;
|
||||||
|
}
|
||||||
|
_average = _average * ((double)numStatsInAverage / (numStatsInAverage + 1))
|
||||||
|
+ other._average / (numStatsInAverage + 1);
|
||||||
|
numStatsInAverage++;
|
||||||
|
}
|
||||||
|
|
||||||
|
T _min;
|
||||||
|
T _max;
|
||||||
|
double _average;
|
||||||
|
};
|
||||||
|
|
||||||
|
public:
|
||||||
|
// This class collects 3 stats (min, max, avg) over a moving window of samples.
|
||||||
|
// The moving window contains _windowIntervals * _intervalLength samples.
|
||||||
|
// Those stats are updated every _intervalLength samples collected. When that happens, _newStatsAvaialble is set
|
||||||
|
// to true and it's up to the user to clear that flag.
|
||||||
|
// For example, if you want a moving avg of the past 5000 samples updated every 100 samples, you would instantiate
|
||||||
|
// this class with MovingMinMaxAvg(100, 50). If you want a moving min of the past 100 samples updated on every
|
||||||
|
// new sample, instantiate this class with MovingMinMaxAvg(1, 100).
|
||||||
|
|
||||||
|
MovingMinMaxAvg(int intervalLength, int windowIntervals)
|
||||||
|
: _intervalLength(intervalLength),
|
||||||
|
_windowIntervals(windowIntervals),
|
||||||
|
_overallStats(),
|
||||||
|
_samplesCollected(0),
|
||||||
|
_windowStats(),
|
||||||
|
_existingSamplesInCurrentInterval(0),
|
||||||
|
_currentIntervalStats(),
|
||||||
|
_intervalStats(windowIntervals),
|
||||||
|
_newStatsAvailable(false)
|
||||||
|
{}
|
||||||
|
|
||||||
|
void reset() {
|
||||||
|
_overallStats = Stats();
|
||||||
|
_samplesCollected = 0;
|
||||||
|
_windowStats = Stats();
|
||||||
|
_existingSamplesInCurrentInterval = 0;
|
||||||
|
_currentIntervalStats = Stats();
|
||||||
|
_intervalStats.clear();
|
||||||
|
_newStatsAvailable = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void update(T newSample) {
|
||||||
|
// update overall stats
|
||||||
|
_overallStats.updateWithSample(newSample, _samplesCollected);
|
||||||
|
|
||||||
|
// update the current interval stats
|
||||||
|
_currentIntervalStats.updateWithSample(newSample, _existingSamplesInCurrentInterval);
|
||||||
|
|
||||||
|
// if the current interval of samples is now full, record its stats into our past intervals' stats
|
||||||
|
if (_existingSamplesInCurrentInterval == _intervalLength) {
|
||||||
|
|
||||||
|
// record current interval's stats, then reset them
|
||||||
|
_intervalStats.insert(_currentIntervalStats);
|
||||||
|
_currentIntervalStats = Stats();
|
||||||
|
_existingSamplesInCurrentInterval = 0;
|
||||||
|
|
||||||
|
// update the window's stats by combining the intervals' stats
|
||||||
|
typename RingBufferHistory<Stats>::Iterator i = _intervalStats.begin();
|
||||||
|
typename RingBufferHistory<Stats>::Iterator end = _intervalStats.end();
|
||||||
|
_windowStats = Stats();
|
||||||
|
int intervalsIncludedInWindowStats = 0;
|
||||||
|
while (i != end) {
|
||||||
|
_windowStats.updateWithOtherStats(*i, intervalsIncludedInWindowStats);
|
||||||
|
i++;
|
||||||
|
}
|
||||||
|
|
||||||
|
_newStatsAvailable = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool getNewStatsAvailableFlag() const { return _newStatsAvailable; }
|
||||||
|
void clearNewStatsAvailableFlag() { _newStatsAvailable = false; }
|
||||||
|
|
||||||
|
T getMin() const { return _overallStats._min; }
|
||||||
|
T getMax() const { return _overallStats._max; }
|
||||||
|
double getAverage() const { return _overallStats._average; }
|
||||||
|
T getWindowMin() const { return _windowStats._min; }
|
||||||
|
T getWindowMax() const { return _windowStats._max; }
|
||||||
|
double getWindowAverage() const { return _windowStats._average; }
|
||||||
|
|
||||||
|
private:
|
||||||
|
int _intervalLength;
|
||||||
|
int _windowIntervals;
|
||||||
|
|
||||||
|
// these are min/max/avg stats for all samples collected.
|
||||||
|
Stats _overallStats;
|
||||||
|
int _samplesCollected;
|
||||||
|
|
||||||
|
// these are the min/max/avg stats for the samples in the moving window
|
||||||
|
Stats _windowStats;
|
||||||
|
int _existingSamplesInCurrentInterval;
|
||||||
|
|
||||||
|
// these are the min/max/avg stats for the current interval
|
||||||
|
Stats _currentIntervalStats;
|
||||||
|
|
||||||
|
// these are stored stats for the past intervals in the window
|
||||||
|
RingBufferHistory<Stats> _intervalStats;
|
||||||
|
|
||||||
|
bool _newStatsAvailable;
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif // hifi_MovingMinMaxAvg_h
|
122
libraries/shared/src/RingBufferHistory.h
Normal file
122
libraries/shared/src/RingBufferHistory.h
Normal file
|
@ -0,0 +1,122 @@
|
||||||
|
//
|
||||||
|
// RingBufferHistory.h
|
||||||
|
// libraries/shared/src
|
||||||
|
//
|
||||||
|
// Created by Yixin Wang on 7/9/2014
|
||||||
|
// Copyright 2014 High Fidelity, Inc.
|
||||||
|
//
|
||||||
|
// Distributed under the Apache License, Version 2.0.
|
||||||
|
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||||
|
//
|
||||||
|
|
||||||
|
#ifndef hifi_RingBufferHistory_h
|
||||||
|
#define hifi_RingBufferHistory_h
|
||||||
|
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <iterator>
|
||||||
|
|
||||||
|
#include <qvector.h>
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
class RingBufferHistory {
|
||||||
|
|
||||||
|
public:
|
||||||
|
|
||||||
|
RingBufferHistory(int capacity = 10)
|
||||||
|
: _size(capacity + 1),
|
||||||
|
_capacity(capacity),
|
||||||
|
_newestEntryAtIndex(0),
|
||||||
|
_numEntries(0),
|
||||||
|
_buffer(capacity + 1)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
void clear() {
|
||||||
|
_numEntries = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void insert(const T& entry) {
|
||||||
|
// increment newest entry index cyclically
|
||||||
|
_newestEntryAtIndex = (_newestEntryAtIndex == _size - 1) ? 0 : _newestEntryAtIndex + 1;
|
||||||
|
|
||||||
|
// insert new entry
|
||||||
|
_buffer[_newestEntryAtIndex] = entry;
|
||||||
|
if (_numEntries < _capacity) {
|
||||||
|
_numEntries++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 0 retrieves the most recent entry, _numEntries - 1 retrieves the oldest.
|
||||||
|
// returns NULL if entryAge not within [0, _numEntries-1]
|
||||||
|
const T* get(int entryAge) const {
|
||||||
|
if (!(entryAge >= 0 && entryAge < _numEntries)) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
int entryAt = _newestEntryAtIndex - entryAge;
|
||||||
|
if (entryAt < 0) {
|
||||||
|
entryAt += _size;
|
||||||
|
}
|
||||||
|
return &_buffer[entryAt];
|
||||||
|
}
|
||||||
|
|
||||||
|
T* get(int entryAge) {
|
||||||
|
return const_cast<T*>((static_cast<const RingBufferHistory*>(this))->get(entryAge));
|
||||||
|
}
|
||||||
|
|
||||||
|
const T* getNewestEntry() const {
|
||||||
|
return _numEntries == 0 ? NULL : &_buffer[_newestEntryAtIndex];
|
||||||
|
}
|
||||||
|
|
||||||
|
T* getNewestEntry() {
|
||||||
|
return _numEntries == 0 ? NULL : &_buffer[_newestEntryAtIndex];
|
||||||
|
}
|
||||||
|
|
||||||
|
int getCapacity() const { return _capacity; }
|
||||||
|
int getNumEntries() const { return _numEntries; }
|
||||||
|
|
||||||
|
private:
|
||||||
|
int _size;
|
||||||
|
int _capacity;
|
||||||
|
int _newestEntryAtIndex;
|
||||||
|
int _numEntries;
|
||||||
|
QVector<T> _buffer;
|
||||||
|
|
||||||
|
public:
|
||||||
|
class Iterator : public std::iterator < std::forward_iterator_tag, T > {
|
||||||
|
public:
|
||||||
|
Iterator(T* bufferFirst, T* bufferLast, T* at) : _bufferFirst(bufferFirst), _bufferLast(bufferLast), _at(at) {}
|
||||||
|
|
||||||
|
bool operator==(const Iterator& rhs) { return _at == rhs._at; }
|
||||||
|
bool operator!=(const Iterator& rhs) { return _at != rhs._at; }
|
||||||
|
T& operator*() { return *_at; }
|
||||||
|
T* operator->() { return _at; }
|
||||||
|
|
||||||
|
Iterator& operator++() {
|
||||||
|
_at = (_at == _bufferFirst) ? _bufferLast : _at - 1;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
Iterator operator++(int) {
|
||||||
|
Iterator tmp(*this);
|
||||||
|
++(*this);
|
||||||
|
return tmp;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
T* const _bufferFirst;
|
||||||
|
T* const _bufferLast;
|
||||||
|
T* _at;
|
||||||
|
};
|
||||||
|
|
||||||
|
Iterator begin() { return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[_newestEntryAtIndex]); }
|
||||||
|
|
||||||
|
Iterator end() {
|
||||||
|
int endAtIndex = _newestEntryAtIndex - _numEntries;
|
||||||
|
if (endAtIndex < 0) {
|
||||||
|
endAtIndex += _size;
|
||||||
|
}
|
||||||
|
return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[endAtIndex]);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif // hifi_RingBufferHistory_h
|
218
tests/shared/src/MovingMinMaxAvgTests.cpp
Normal file
218
tests/shared/src/MovingMinMaxAvgTests.cpp
Normal file
|
@ -0,0 +1,218 @@
//
//  MovingMinMaxAvgTests.cpp
//  tests/shared/src
//
//  Created by Yixin Wang on 7/8/2014
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "MovingMinMaxAvgTests.h"

#include <qqueue.h>

quint64 MovingMinMaxAvgTests::randQuint64() {
    quint64 ret = 0;
    for (int i = 0; i < 32; i++) {
        ret = (ret + rand() % 4);
        ret *= 4;
    }
    return ret;
}

void MovingMinMaxAvgTests::runAllTests() {
    {
        // quint64 test

        const int INTERVAL_LENGTH = 100;
        const int WINDOW_INTERVALS = 50;

        MovingMinMaxAvg<quint64> stats(INTERVAL_LENGTH, WINDOW_INTERVALS);

        quint64 min = std::numeric_limits<quint64>::max();
        quint64 max = 0;
        double average = 0.0;
        int totalSamples = 0;

        quint64 windowMin;
        quint64 windowMax;
        double windowAverage;

        QQueue<quint64> windowSamples;
        // fill window samples
        for (int i = 0; i < 100000; i++) {

            quint64 sample = randQuint64();

            windowSamples.enqueue(sample);
            if (windowSamples.size() > INTERVAL_LENGTH * WINDOW_INTERVALS) {
                windowSamples.dequeue();
            }

            stats.update(sample);

            min = std::min(min, sample);
            max = std::max(max, sample);
            average = (average * totalSamples + sample) / (totalSamples + 1);
            totalSamples++;

            assert(stats.getMin() == min);
            assert(stats.getMax() == max);
            assert(abs(stats.getAverage() / average - 1.0) < 0.000001 || abs(stats.getAverage() - average) < 0.000001);

            if ((i + 1) % INTERVAL_LENGTH == 0) {

                assert(stats.getNewStatsAvailableFlag());
                stats.clearNewStatsAvailableFlag();

                windowMin = std::numeric_limits<quint64>::max();
                windowMax = 0;
                windowAverage = 0.0;
                foreach(quint64 s, windowSamples) {
                    windowMin = std::min(windowMin, s);
                    windowMax = std::max(windowMax, s);
                    windowAverage += (double)s;
                }
                windowAverage /= (double)windowSamples.size();

                assert(stats.getWindowMin() == windowMin);
                assert(stats.getWindowMax() == windowMax);
                assert(abs(stats.getAverage() / average - 1.0) < 0.000001 || abs(stats.getAverage() - average) < 0.000001);

            } else {
                assert(!stats.getNewStatsAvailableFlag());
            }
        }
    }

    {
        // int test

        const int INTERVAL_LENGTH = 1;
        const int WINDOW_INTERVALS = 75;

        MovingMinMaxAvg<int> stats(INTERVAL_LENGTH, WINDOW_INTERVALS);

        int min = std::numeric_limits<int>::max();
        int max = 0;
        double average = 0.0;
        int totalSamples = 0;

        int windowMin;
        int windowMax;
        double windowAverage;

        QQueue<int> windowSamples;
        // fill window samples
        for (int i = 0; i < 100000; i++) {

            int sample = rand();

            windowSamples.enqueue(sample);
            if (windowSamples.size() > INTERVAL_LENGTH * WINDOW_INTERVALS) {
                windowSamples.dequeue();
            }

            stats.update(sample);

            min = std::min(min, sample);
            max = std::max(max, sample);
            average = (average * totalSamples + sample) / (totalSamples + 1);
            totalSamples++;

            assert(stats.getMin() == min);
            assert(stats.getMax() == max);
            assert(abs(stats.getAverage() / average - 1.0) < 0.000001);

            if ((i + 1) % INTERVAL_LENGTH == 0) {

                assert(stats.getNewStatsAvailableFlag());
                stats.clearNewStatsAvailableFlag();

                windowMin = std::numeric_limits<int>::max();
                windowMax = 0;
                windowAverage = 0.0;
                foreach(int s, windowSamples) {
                    windowMin = std::min(windowMin, s);
                    windowMax = std::max(windowMax, s);
                    windowAverage += (double)s;
                }
                windowAverage /= (double)windowSamples.size();

                assert(stats.getWindowMin() == windowMin);
                assert(stats.getWindowMax() == windowMax);
                assert(abs(stats.getAverage() / average - 1.0) < 0.000001);

            } else {
                assert(!stats.getNewStatsAvailableFlag());
            }
        }
    }

    {
        // float test

        const int INTERVAL_LENGTH = 57;
        const int WINDOW_INTERVALS = 1;

        MovingMinMaxAvg<float> stats(INTERVAL_LENGTH, WINDOW_INTERVALS);

        float min = std::numeric_limits<float>::max();
        float max = 0;
        double average = 0.0;
        int totalSamples = 0;

        float windowMin;
        float windowMax;
        double windowAverage;

        QQueue<float> windowSamples;
        // fill window samples
        for (int i = 0; i < 100000; i++) {

            float sample = randFloat();

            windowSamples.enqueue(sample);
            if (windowSamples.size() > INTERVAL_LENGTH * WINDOW_INTERVALS) {
                windowSamples.dequeue();
            }

            stats.update(sample);

            min = std::min(min, sample);
            max = std::max(max, sample);
            average = (average * totalSamples + sample) / (totalSamples + 1);
            totalSamples++;

            assert(stats.getMin() == min);
            assert(stats.getMax() == max);
            assert(abs(stats.getAverage() / average - 1.0) < 0.000001);

            if ((i + 1) % INTERVAL_LENGTH == 0) {

                assert(stats.getNewStatsAvailableFlag());
                stats.clearNewStatsAvailableFlag();

                windowMin = std::numeric_limits<float>::max();
                windowMax = 0;
                windowAverage = 0.0;
                foreach(float s, windowSamples) {
                    windowMin = std::min(windowMin, s);
                    windowMax = std::max(windowMax, s);
                    windowAverage += (double)s;
                }
                windowAverage /= (double)windowSamples.size();

                assert(stats.getWindowMin() == windowMin);
                assert(stats.getWindowMax() == windowMax);
                assert(abs(stats.getAverage() / average - 1.0) < 0.000001);

            } else {
                assert(!stats.getNewStatsAvailableFlag());
            }
        }
    }
    printf("moving min/max/avg test passed!\n");
}
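Before the header below, a minimal sketch of the call pattern these tests exercise: feed every sample to update(), read the all-time stats whenever convenient, and read the windowed stats once per completed interval when the new-stats flag is raised. Only methods that appear in the tests above are used; the sample source and surrounding setup are illustrative.

#include "MovingMinMaxAvg.h"

void sketchUsage() {
    // One interval = 100 samples; the window covers the most recent 50 intervals.
    MovingMinMaxAvg<int> stats(100, 50);

    for (int i = 0; i < 1000; i++) {
        int sample = i % 37;                       // illustrative sample source
        stats.update(sample);

        // All-time stats are valid after every update.
        double allTimeAverage = stats.getAverage();
        int allTimeMin = stats.getMin();
        int allTimeMax = stats.getMax();
        (void)allTimeAverage; (void)allTimeMin; (void)allTimeMax;

        // Windowed stats refresh once per completed interval of 100 samples.
        if (stats.getNewStatsAvailableFlag()) {
            int windowMin = stats.getWindowMin();
            int windowMax = stats.getWindowMax();
            (void)windowMin; (void)windowMax;
            stats.clearNewStatsAvailableFlag();
        }
    }
}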
25  tests/shared/src/MovingMinMaxAvgTests.h  Normal file

@ -0,0 +1,25 @@
//
//  MovingMinMaxAvgTests.h
//  tests/shared/src
//
//  Created by Yixin Wang on 7/8/2014
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_MovingMinMaxAvgTests_h
#define hifi_MovingMinMaxAvgTests_h

#include "MovingMinMaxAvg.h"
#include "SharedUtil.h"

namespace MovingMinMaxAvgTests {

    quint64 randQuint64();

    void runAllTests();
}

#endif // hifi_MovingMinMaxAvgTests_h
@ -10,9 +10,12 @@

 #include "AngularConstraintTests.h"
 #include "MovingPercentileTests.h"
+#include "MovingMinMaxAvgTests.h"

 int main(int argc, char** argv) {
+    MovingMinMaxAvgTests::runAllTests();
     MovingPercentileTests::runAllTests();
     AngularConstraintTests::runAllTests();
+    getchar();
     return 0;
 }
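Read as applied code, the portion of the test runner covered by this hunk becomes the following (only lines inside the hunk are shown; the rest of the file is not part of this diff). The added getchar() presumably just holds the console open so the assertion output can be read before the process exits.

#include "AngularConstraintTests.h"
#include "MovingPercentileTests.h"
#include "MovingMinMaxAvgTests.h"

int main(int argc, char** argv) {
    MovingMinMaxAvgTests::runAllTests();
    MovingPercentileTests::runAllTests();
    AngularConstraintTests::runAllTests();
    getchar();
    return 0;
}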