From e2f957d6dc834475fb5c3053a22caddd8adcee84 Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 23 Jul 2014 16:46:23 -0700 Subject: [PATCH 01/62] added InboundAudioStream class --- interface/src/Audio.cpp | 3 - libraries/audio/src/AudioRingBuffer.h | 1 + libraries/audio/src/InboundAudioStream.cpp | 232 +++++++++++++++++++++ libraries/audio/src/InboundAudioStream.h | 124 +++++++++++ 4 files changed, 357 insertions(+), 3 deletions(-) create mode 100644 libraries/audio/src/InboundAudioStream.cpp create mode 100644 libraries/audio/src/InboundAudioStream.h diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 5054537287..4787c0951c 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -152,9 +152,6 @@ void Audio::init(QGLWidget *parent) { void Audio::reset() { _ringBuffer.reset(); - // we don't want to reset seq numbers when space-bar reset occurs. - //_outgoingAvatarAudioSequenceNumber = 0; - resetStats(); } diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index 9f049fc5e8..0ec9213db2 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -47,6 +47,7 @@ public: void resizeForFrameSize(int numFrameSamples); int getSampleCapacity() const { return _sampleCapacity; } + int getFrameCapacity() const { return _frameCapacity; } int parseData(const QByteArray& packet); diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp new file mode 100644 index 0000000000..ad7830655d --- /dev/null +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -0,0 +1,232 @@ +// +// InboundAudioStream.cpp +// libraries/audio/src +// +// Created by Yixin Wang on 7/17/2014 +// Copyright 2013 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include "InboundAudioStream.h" +#include "PacketHeaders.h" + +InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers) : +_ringBuffer(numFrameSamples, false, numFramesCapacity), +_dynamicJitterBuffers(dynamicJitterBuffers), +_desiredJitterBufferFrames(1), +_isStarved(true), +_hasStarted(false), +_consecutiveNotMixedCount(0), +_starveCount(0), +_silentFramesDropped(0), +_incomingSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS), +_lastFrameReceivedTime(0), +_interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS), +_interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS), +_framesAvailableStats(FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES, FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS) +{ +} + +void InboundAudioStream::reset() { + _ringBuffer.reset(); + _desiredJitterBufferFrames = 1; + _isStarved = true; + _hasStarted = false; + _consecutiveNotMixedCount = 0; + _starveCount = 0; + _silentFramesDropped = 0; + _incomingSequenceNumberStats.reset(); + _lastFrameReceivedTime = 0; + _interframeTimeGapStatsForJitterCalc.reset(); + _interframeTimeGapStatsForStatsPacket.reset(); + _framesAvailableStats.reset(); +} + +int InboundAudioStream::parseData(const QByteArray& packet) { + frameReceivedUpdateTimingStats(); + + PacketType packetType = packetTypeForPacket(packet); + QUuid senderUUID = uuidFromPacketHeader(packet); + + // parse header + int numBytesHeader = numBytesForPacketHeader(packet); + const char* sequenceAt = packet.constData() + numBytesHeader; + int readBytes = numBytesHeader; + + // parse sequence number and track it + quint16 sequence = *(reinterpret_cast(sequenceAt)); + readBytes += sizeof(quint16); + SequenceNumberStats::ArrivalInfo arrivalInfo = 
_incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID); + + // TODO: handle generalized silent packet here????? + + + // parse the info after the seq number and before the audio data.(the stream properties) + int numAudioSamples; + readBytes += parseStreamProperties(packetType, packet.mid(readBytes), numAudioSamples); + + // handle this packet based on its arrival status. + // For now, late packets are ignored. It may be good in the future to insert the late audio frame + // into the ring buffer to fill in the missing frame if it hasn't been mixed yet. + switch (arrivalInfo._status) { + case SequenceNumberStats::Early: { + int packetsDropped = arrivalInfo._seqDiffFromExpected; + writeSamplesForDroppedPackets(packetsDropped * numAudioSamples); + // fall through to OnTime case + } + case SequenceNumberStats::OnTime: { + readBytes += parseAudioData(packetType, packet.mid(readBytes), numAudioSamples); + break; + } + default: { + break; + } + } + + if (_isStarved && _ringBuffer.samplesAvailable() >= _desiredJitterBufferFrames * _ringBuffer.getNumFrameSamples()) { + _isStarved = false; + } + + _framesAvailableStats.update(_ringBuffer.framesAvailable()); + + return readBytes; +} + +bool InboundAudioStream::popFrames(int16_t* dest, int numFrames, bool starveOnFail) { + if (_isStarved) { + _consecutiveNotMixedCount++; + return false; + } + + bool framesPopped = false; + + int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); + if (_ringBuffer.samplesAvailable >= numSamplesRequested) { + _ringBuffer.readSamples(dest, numSamplesRequested); + _hasStarted = true; + framesPopped = true; + } else { + if (starveOnFail) { + setToStarved(); + _consecutiveNotMixedCount++; + } + } + + _framesAvailableStats.update(_ringBuffer.framesAvailable()); + + return framesPopped; +} + +void InboundAudioStream::setToStarved() { + _isStarved = true; + _consecutiveNotMixedCount = 0; + _starveCount++; +} + + +int 
InboundAudioStream::getCalculatedDesiredJitterBufferFrames() const { + const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE; + + int calculatedDesiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME); + if (calculatedDesiredJitterBufferFrames < 1) { + calculatedDesiredJitterBufferFrames = 1; + } + return calculatedDesiredJitterBufferFrames; +} + + +void InboundAudioStream::frameReceivedUpdateTimingStats() { + // update the two time gap stats we're keeping + quint64 now = usecTimestampNow(); + if (_lastFrameReceivedTime != 0) { + quint64 gap = now - _lastFrameReceivedTime; + _interframeTimeGapStatsForJitterCalc.update(gap); + _interframeTimeGapStatsForStatsPacket.update(gap); + } + _lastFrameReceivedTime = now; + + // recalculate the _desiredJitterBufferFrames if _interframeTimeGapStatsForJitterCalc has updated stats for us + if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) { + if (!_dynamicJitterBuffers) { + _desiredJitterBufferFrames = 1; // HACK to see if this fixes the audio silence + } else { + _desiredJitterBufferFrames = getCalculatedDesiredJitterBufferFrames(); + + const int maxDesired = _ringBuffer.getFrameCapacity() - 1; + if (_desiredJitterBufferFrames > maxDesired) { + _desiredJitterBufferFrames = maxDesired; + } + } + _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag(); + } +} + +int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) { + + // This adds some number of frames to the desired jitter buffer frames target we use. + // The larger this value is, the less aggressive we are about reducing the jitter buffer length. + // Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames long, + // which could lead immediately to a starve. + const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1; + + // calculate how many silent frames we should drop. 
We only drop silent frames if + // the running avg num frames available has stabilized and it's more than + // our desired number of frames by the margin defined above. + int samplesPerFrame = _ringBuffer.getNumFrameSamples(); + int numSilentFramesToDrop = 0; + if (_framesAvailableStats.getNewStatsAvailableFlag() && _framesAvailableStats.isWindowFilled() + && numSilentSamples >= samplesPerFrame) { + _framesAvailableStats.clearNewStatsAvailableFlag(); + int averageJitterBufferFrames = (int)getFramesAvailableAverage(); + int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING; + + if (averageJitterBufferFrames > desiredJitterBufferFramesPlusPadding) { + // our avg jitter buffer size exceeds its desired value, so ignore some silent + // frames to get that size as close to desired as possible + int numSilentFramesToDropDesired = averageJitterBufferFrames - desiredJitterBufferFramesPlusPadding; + int numSilentFramesReceived = numSilentSamples / samplesPerFrame; + numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived); + + // since we now have a new jitter buffer length, reset the frames available stats. 
+ _framesAvailableStats.reset(); + + _silentFramesDropped += numSilentFramesToDrop; + } + } + return _ringBuffer.addSilentFrame(numSilentSamples - numSilentFramesToDrop * samplesPerFrame); +} + +int InboundAudioStream::writeSamplesForDroppedPackets(int numSamples) { + return writeDroppableSilentSamples(numSamples); +} + +AudioStreamStats InboundAudioStream::getAudioStreamStats() const { + AudioStreamStats streamStats; + + streamStats._timeGapMin = _interframeTimeGapStatsForStatsPacket.getMin(); + streamStats._timeGapMax = _interframeTimeGapStatsForStatsPacket.getMax(); + streamStats._timeGapAverage = _interframeTimeGapStatsForStatsPacket.getAverage(); + streamStats._timeGapWindowMin = _interframeTimeGapStatsForStatsPacket.getWindowMin(); + streamStats._timeGapWindowMax = _interframeTimeGapStatsForStatsPacket.getWindowMax(); + streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage(); + + streamStats._ringBufferFramesAvailable = _ringBuffer.framesAvailable(); + streamStats._ringBufferFramesAvailableAverage = _framesAvailableStats.getWindowAverage(); + streamStats._ringBufferDesiredJitterBufferFrames = _desiredJitterBufferFrames; + streamStats._ringBufferStarveCount = _starveCount; + streamStats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount; + streamStats._ringBufferOverflowCount = _ringBuffer.getOverflowCount(); + streamStats._ringBufferSilentFramesDropped = _silentFramesDropped; + + streamStats._packetStreamStats = _incomingSequenceNumberStats.getStats(); + streamStats._packetStreamWindowStats = _incomingSequenceNumberStats.getStatsForHistoryWindow(); + + return streamStats; +} + +AudioStreamStats InboundAudioStream::updateSeqHistoryAndGetAudioStreamStats() { + _incomingSequenceNumberStats.pushStatsToHistory(); + return getAudioStreamStats(); +} diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h new file mode 100644 index 0000000000..7b24f01077 --- /dev/null +++ 
b/libraries/audio/src/InboundAudioStream.h @@ -0,0 +1,124 @@ +// +// InboundAudioStream.h +// libraries/audio/src +// +// Created by Yixin Wang on 7/17/2014. +// Copyright 2013 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_InboundAudioStream_h +#define hifi_InboundAudioStream_h + +#include "NodeData.h" +#include "AudioRingBuffer.h" +#include "MovingMinMaxAvg.h" +#include "SequenceNumberStats.h" +#include "AudioStreamStats.h" +#include "PacketHeaders.h" + +// the time gaps stats for _desiredJitterBufferFrames calculation +// will recalculate the max for the past 5000 samples every 500 samples +const int TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES = 500; +const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10; + +// the time gap stats for constructing AudioStreamStats will +// recalculate min/max/avg every ~1 second for the past ~30 seconds of time gap data +const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; +const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30; + +// the stats for calculating the average frames available will recalculate every ~1 second +// and will include data for the past ~2 seconds +const int FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; +const int FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS = 2; + +// the internal history buffer of the incoming seq stats will cover 30s to calculate +// packet loss % over last 30s +const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30; + +const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100; + + +class InboundAudioStream : public NodeData { + Q_OBJECT +public: + InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers); + + void reset(); + void flushBuffer() { _ringBuffer.reset(); } + void resetSequenceNumberStats() { 
_incomingSequenceNumberStats.reset(); } + + + int parseData(const QByteArray& packet); + + bool popFrames(int16_t* dest, int numFrames, bool starveOnFail = true); + + + void setToStarved(); + + + + AudioStreamStats updateSeqHistoryAndGetAudioStreamStats(); + virtual AudioStreamStats getAudioStreamStats() const; + + int getCalculatedDesiredJitterBufferFrames() const; + + int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; } + int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); } + int getFramesAvailable() const { return _ringBuffer.framesAvailable(); } + double getFramesAvailableAverage() const { return _framesAvailableStats.getWindowAverage(); } + + bool isStarved() const { return _isStarved; } + bool hasStarted() const { return _hasStarted; } + + int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; } + int getStarveCount() const { return _starveCount; } + int getSilentFramesDropped() const { return _silentFramesDropped; } + int getOverflowCount() const { return _ringBuffer.getOverflowCount(); } + +protected: + // disallow copying of InboundAudioStream objects + InboundAudioStream(const InboundAudioStream&); + InboundAudioStream& operator= (const InboundAudioStream&); + + /// parses the info between the seq num and the audio data in the network packet and calculates + /// how many audio samples this packet contains + virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0; + + /// parses the audio data in the network packet + virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0; + + int writeDroppableSilentSamples(int numSilentSamples); + int writeSamplesForDroppedPackets(int numSamples); + void frameReceivedUpdateTimingStats(); + +protected: + + AudioRingBuffer _ringBuffer; + + bool _dynamicJitterBuffers; + int _desiredJitterBufferFrames; + + bool _isStarved; + bool 
_hasStarted; + + + // stats + + int _consecutiveNotMixedCount; + int _starveCount; + int _silentFramesDropped; + + SequenceNumberStats _incomingSequenceNumberStats; + + quint64 _lastFrameReceivedTime; + MovingMinMaxAvg _interframeTimeGapStatsForJitterCalc; + MovingMinMaxAvg _interframeTimeGapStatsForStatsPacket; + + // TODO: change this to time-weighted moving avg + MovingMinMaxAvg _framesAvailableStats; +}; + +#endif // hifi_InboundAudioStream_h From c9b6879ca82fef076fa0844bd6863b1ca8963f74 Mon Sep 17 00:00:00 2001 From: wangyix Date: Thu, 24 Jul 2014 10:48:27 -0700 Subject: [PATCH 02/62] audiomixer code complete; need to test --- assignment-client/src/audio/AudioMixer.cpp | 51 +++-- .../src/audio/AudioMixerClientData.cpp | 189 ++++++----------- .../src/audio/AudioMixerClientData.h | 20 +- .../src/audio/AvatarAudioRingBuffer.cpp | 71 +++---- .../src/audio/AvatarAudioRingBuffer.h | 6 +- libraries/audio/src/AudioRingBuffer.cpp | 15 ++ libraries/audio/src/AudioRingBuffer.h | 82 ++++++++ libraries/audio/src/InboundAudioStream.cpp | 89 ++++++-- libraries/audio/src/InboundAudioStream.h | 7 +- .../audio/src/InjectedAudioRingBuffer.cpp | 54 +++-- libraries/audio/src/InjectedAudioRingBuffer.h | 16 +- .../audio/src/PositionalAudioRingBuffer.cpp | 190 +++--------------- .../audio/src/PositionalAudioRingBuffer.h | 89 ++------ 13 files changed, 394 insertions(+), 485 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index ad4787b407..1a436fc9bf 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -95,6 +95,19 @@ const float ATTENUATION_EPSILON_DISTANCE = 0.1f; void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd, AvatarAudioRingBuffer* listeningNodeBuffer) { + // if the frame to be mixed is silent, don't mix it + if (bufferToAdd->getNextOutputTrailingLoudness() == 0.0f) { + bufferToAdd->popFrames(1); + return; + } + 
+ // get pointer to frame to be mixed. If the stream cannot provide a frame (is starved), bail + AudioRingBuffer::ConstIterator nextOutputStart; + if (!bufferToAdd->popFrames(&nextOutputStart, 1)) { + return; + } + + float bearingRelativeAngleToSource = 0.0f; float attenuationCoefficient = 1.0f; int numSamplesDelay = 0; @@ -203,7 +216,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf } } - const int16_t* nextOutputStart = bufferToAdd->getNextOutput(); + if (!bufferToAdd->isStereo() && shouldAttenuate) { // this is a mono buffer, which means it gets full attenuation and spatialization @@ -212,8 +225,8 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0; int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0; - const int16_t* bufferStart = bufferToAdd->getBuffer(); - int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity(); + //const int16_t* bufferStart = bufferToAdd->getBuffer(); + //int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity(); int16_t correctBufferSample[2], delayBufferSample[2]; int delayedChannelIndex = 0; @@ -241,14 +254,15 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput // to stick at the beginning float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio; - const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay; - if (delayNextOutputStart < bufferStart) { - delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay; - } + AudioRingBuffer::ConstIterator delayNextOutputStart = nextOutputStart - numSamplesDelay; + //if (delayNextOutputStart < bufferStart) { + //delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay; + //} for (int i = 0; i < numSamplesDelay; i++) { int 
parentIndex = i * 2; - _clientSamples[parentIndex + delayedChannelOffset] += delayNextOutputStart[i] * attenuationAndWeakChannelRatio; + _clientSamples[parentIndex + delayedChannelOffset] += *delayNextOutputStart * attenuationAndWeakChannelRatio; + ++delayNextOutputStart; } } } else { @@ -293,13 +307,13 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData(); // enumerate the ARBs attached to the otherNode and add all that should be added to mix - for (int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) { - PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i]; + + const QHash& otherNodeRingBuffers = otherNodeClientData->getRingBuffers(); + QHash::ConstIterator i, end = otherNodeRingBuffers.constEnd(); + for (i = otherNodeRingBuffers.begin(); i != end; i++) { + PositionalAudioRingBuffer* otherNodeBuffer = i.value(); - if ((*otherNode != *node - || otherNodeBuffer->shouldLoopbackForNode()) - && otherNodeBuffer->willBeAddedToMix() - && otherNodeBuffer->getNextOutputTrailingLoudness() > 0.0f) { + if (*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode()) { addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer); } } @@ -307,7 +321,6 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { } } - void AudioMixer::readPendingDatagrams() { QByteArray receivedPacket; HifiSockAddr senderSockAddr; @@ -500,12 +513,12 @@ void AudioMixer::run() { while (!_isFinished) { - foreach (const SharedNodePointer& node, nodeList->getNodeHash()) { + /*foreach (const SharedNodePointer& node, nodeList->getNodeHash()) { if (node->getLinkedData()) { ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(_sourceUnattenuatedZone, _listenerUnattenuatedZone); } - } + }*/ const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f; const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f; 
@@ -599,13 +612,13 @@ void AudioMixer::run() { ++_sumListeners; } } - + /* // push forward the next output pointers for any audio buffers we used foreach (const SharedNodePointer& node, nodeList->getNodeHash()) { if (node->getLinkedData()) { ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend(); } - } + }*/ ++_numStatFrames; diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index a4983e6a95..31d0612a98 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -18,34 +18,28 @@ #include "AudioMixer.h" #include "AudioMixerClientData.h" -#include "MovingMinMaxAvg.h" -const int INCOMING_SEQ_STATS_HISTORY_LENGTH = INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS / - (TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS / USECS_PER_SECOND); AudioMixerClientData::AudioMixerClientData() : _ringBuffers(), - _outgoingMixedAudioSequenceNumber(0), - _incomingAvatarAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH) + _outgoingMixedAudioSequenceNumber(0) { - } AudioMixerClientData::~AudioMixerClientData() { - for (int i = 0; i < _ringBuffers.size(); i++) { - // delete this attached PositionalAudioRingBuffer - delete _ringBuffers[i]; + QHash::ConstIterator i, end = _ringBuffers.constEnd(); + for (i = _ringBuffers.begin(); i != end; i++) { + // delete this attached InboundAudioStream + delete i.value(); } } AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const { - for (int i = 0; i < _ringBuffers.size(); i++) { - if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Microphone) { - return (AvatarAudioRingBuffer*) _ringBuffers[i]; - } + if (_ringBuffers.contains(QUuid())) { + return (AvatarAudioRingBuffer*)_ringBuffers.value(QUuid()); } - // no AvatarAudioRingBuffer found - return NULL + // no mic stream found - return NULL return NULL; } @@ -57,96 +51,7 @@ int AudioMixerClientData::parseData(const 
QByteArray& packet) { quint16 sequence = *(reinterpret_cast(sequenceAt)); PacketType packetType = packetTypeForPacket(packet); - if (packetType == PacketTypeMicrophoneAudioWithEcho - || packetType == PacketTypeMicrophoneAudioNoEcho - || packetType == PacketTypeSilentAudioFrame) { - - SequenceNumberStats::ArrivalInfo packetArrivalInfo = _incomingAvatarAudioSequenceNumberStats.sequenceNumberReceived(sequence); - - // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist) - AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer(); - - // read the first byte after the header to see if this is a stereo or mono buffer - quint8 channelFlag = packet.at(numBytesForPacketHeader(packet) + sizeof(quint16)); - bool isStereo = channelFlag == 1; - - if (avatarRingBuffer && avatarRingBuffer->isStereo() != isStereo) { - // there's a mismatch in the buffer channels for the incoming and current buffer - // so delete our current buffer and create a new one - _ringBuffers.removeOne(avatarRingBuffer); - avatarRingBuffer->deleteLater(); - avatarRingBuffer = NULL; - } - - if (!avatarRingBuffer) { - // we don't have an AvatarAudioRingBuffer yet, so add it - avatarRingBuffer = new AvatarAudioRingBuffer(isStereo, AudioMixer::getUseDynamicJitterBuffers()); - _ringBuffers.push_back(avatarRingBuffer); - } - - - // for now, late packets are simply discarded. 
In the future, it may be good to insert them into their correct place - // in the ring buffer (if that frame hasn't been mixed yet) - switch (packetArrivalInfo._status) { - case SequenceNumberStats::Early: { - int packetsLost = packetArrivalInfo._seqDiffFromExpected; - avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost); - break; - } - case SequenceNumberStats::OnTime: { - // ask the AvatarAudioRingBuffer instance to parse the data - avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, 0); - break; - } - default: { - break; - } - } - } else if (packetType == PacketTypeInjectAudio) { - // this is injected audio - - // grab the stream identifier for this injected audio - QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(numBytesForPacketHeader(packet) + sizeof(quint16), NUM_BYTES_RFC4122_UUID)); - - if (!_incomingInjectedAudioSequenceNumberStatsMap.contains(streamIdentifier)) { - _incomingInjectedAudioSequenceNumberStatsMap.insert(streamIdentifier, SequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH)); - } - SequenceNumberStats::ArrivalInfo packetArrivalInfo = - _incomingInjectedAudioSequenceNumberStatsMap[streamIdentifier].sequenceNumberReceived(sequence); - - InjectedAudioRingBuffer* matchingInjectedRingBuffer = NULL; - - for (int i = 0; i < _ringBuffers.size(); i++) { - if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector - && ((InjectedAudioRingBuffer*) _ringBuffers[i])->getStreamIdentifier() == streamIdentifier) { - matchingInjectedRingBuffer = (InjectedAudioRingBuffer*) _ringBuffers[i]; - } - } - - if (!matchingInjectedRingBuffer) { - // we don't have a matching injected audio ring buffer, so add it - matchingInjectedRingBuffer = new InjectedAudioRingBuffer(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers()); - _ringBuffers.push_back(matchingInjectedRingBuffer); - } - - // for now, late packets are simply discarded. 
In the future, it may be good to insert them into their correct place - // in the ring buffer (if that frame hasn't been mixed yet) - switch (packetArrivalInfo._status) { - case SequenceNumberStats::Early: { - int packetsLost = packetArrivalInfo._seqDiffFromExpected; - matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost); - break; - } - case SequenceNumberStats::OnTime: { - // ask the AvatarAudioRingBuffer instance to parse the data - matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, 0); - break; - } - default: { - break; - } - } - } else if (packetType == PacketTypeAudioStreamStats) { + if (packetType == PacketTypeAudioStreamStats) { const char* dataAt = packet.data(); @@ -155,12 +60,52 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { // read the downstream audio stream stats memcpy(&_downstreamAudioStreamStats, dataAt, sizeof(AudioStreamStats)); - } + dataAt += sizeof(AudioStreamStats); + return dataAt - packet.data(); + + } else { + PositionalAudioRingBuffer* matchingStream = NULL; + + if (packetType == PacketTypeMicrophoneAudioWithEcho + || packetType == PacketTypeMicrophoneAudioNoEcho + || packetType == PacketTypeSilentAudioFrame) { + + QUuid nullUUID = QUuid(); + if (!_ringBuffers.contains(nullUUID)) { + // we don't have a mic stream yet, so add it + + // read the channel flag to see if our stream is stereo or not + const char* channelFlagAt = packet.constData() + numBytesForPacketHeader(packet) + sizeof(quint16); + quint8 channelFlag = *(reinterpret_cast(channelFlagAt)); + bool isStereo = channelFlag == 1; + + _ringBuffers.insert(nullUUID, + matchingStream = new AvatarAudioRingBuffer(isStereo, AudioMixer::getUseDynamicJitterBuffers())); + } else { + matchingStream = _ringBuffers.value(nullUUID); + } + } else if (packetType == PacketTypeInjectAudio) { + // this is injected audio + + // grab the stream identifier for this injected audio + int bytesBeforeStreamIdentifier = 
numBytesForPacketHeader(packet) + sizeof(quint16); + QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(bytesBeforeStreamIdentifier, NUM_BYTES_RFC4122_UUID)); + + if (!_ringBuffers.contains(streamIdentifier)) { + _ringBuffers.insert(streamIdentifier, + matchingStream = new InjectedAudioRingBuffer(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers())); + } else { + matchingStream = _ringBuffers.value(streamIdentifier); + } + } + + return matchingStream->parseData(packet); + } return 0; } -void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone) { +/*void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone) { for (int i = 0; i < _ringBuffers.size(); i++) { if (_ringBuffers[i]->shouldBeAddedToMix()) { // this is a ring buffer that is ready to go @@ -205,9 +150,9 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() { } i++; } -} +}*/ -AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const { +/*AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const { AudioStreamStats streamStats; @@ -239,20 +184,9 @@ AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const Positio streamStats._ringBufferSilentFramesDropped = ringBuffer->getSilentFramesDropped(); return streamStats; -} +}*/ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) { - - // have all the seq number stats of each audio stream push their current stats into their history, - // which moves that history window 1 second forward (since that's how long since the last stats were pushed into history) - _incomingAvatarAudioSequenceNumberStats.pushStatsToHistory(); - QHash::Iterator i = _incomingInjectedAudioSequenceNumberStatsMap.begin(); - QHash::Iterator end = _incomingInjectedAudioSequenceNumberStatsMap.end(); - while (i != end) { - 
i.value().pushStatsToHistory(); - i++; - } - char packet[MAX_PACKET_SIZE]; NodeList* nodeList = NodeList::getInstance(); @@ -271,7 +205,7 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& // pack and send stream stats packets until all ring buffers' stats are sent int numStreamStatsRemaining = _ringBuffers.size(); - QList::ConstIterator ringBuffersIterator = _ringBuffers.constBegin(); + QHash::ConstIterator ringBuffersIterator = _ringBuffers.constBegin(); while (numStreamStatsRemaining > 0) { char* dataAt = headerEndAt; @@ -288,7 +222,7 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& // pack the calculated number of stream stats for (int i = 0; i < numStreamStatsToPack; i++) { - AudioStreamStats streamStats = getAudioStreamStatsOfStream(*ringBuffersIterator); + AudioStreamStats streamStats = ringBuffersIterator.value()->getAudioStreamStats(); memcpy(dataAt, &streamStats, sizeof(AudioStreamStats)); dataAt += sizeof(AudioStreamStats); @@ -322,7 +256,7 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer(); if (avatarRingBuffer) { - AudioStreamStats streamStats = getAudioStreamStatsOfStream(avatarRingBuffer); + AudioStreamStats streamStats = avatarRingBuffer->getAudioStreamStats(); result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) + " desired_calc:" + QString::number(avatarRingBuffer->getCalculatedDesiredJitterBufferFrames()) + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage) @@ -343,11 +277,12 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { result = "mic unknown"; } - for (int i = 0; i < _ringBuffers.size(); i++) { - if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector) { - AudioStreamStats streamStats = getAudioStreamStatsOfStream(_ringBuffers[i]); + QHash::ConstIterator i, end = 
_ringBuffers.end(); + for (i = _ringBuffers.begin(); i != end; i++) { + if (i.value()->getType() == PositionalAudioRingBuffer::Injector) { + AudioStreamStats streamStats = i.value()->getAudioStreamStats(); result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) - + " desired_calc:" + QString::number(_ringBuffers[i]->getCalculatedDesiredJitterBufferFrames()) + + " desired_calc:" + QString::number(i.value()->getCalculatedDesiredJitterBufferFrames()) + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage) + " available:" + QString::number(streamStats._ringBufferFramesAvailable) + " starves:" + QString::number(streamStats._ringBufferStarveCount) diff --git a/assignment-client/src/audio/AudioMixerClientData.h b/assignment-client/src/audio/AudioMixerClientData.h index 7475c0a60e..4baa7c2f3b 100644 --- a/assignment-client/src/audio/AudioMixerClientData.h +++ b/assignment-client/src/audio/AudioMixerClientData.h @@ -13,29 +13,23 @@ #define hifi_AudioMixerClientData_h #include -#include -#include +#include "PositionalAudioRingBuffer.h" #include "AvatarAudioRingBuffer.h" -#include "AudioStreamStats.h" -#include "SequenceNumberStats.h" - - -const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30; class AudioMixerClientData : public NodeData { public: AudioMixerClientData(); ~AudioMixerClientData(); - const QList getRingBuffers() const { return _ringBuffers; } + const QHash& getRingBuffers() const { return _ringBuffers; } AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const; int parseData(const QByteArray& packet); - void checkBuffersBeforeFrameSend(AABox* checkSourceZone = NULL, AABox* listenerZone = NULL); - void pushBuffersAfterFrameSend(); + //void checkBuffersBeforeFrameSend(AABox* checkSourceZone = NULL, AABox* listenerZone = NULL); + //void pushBuffersAfterFrameSend(); - AudioStreamStats getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const; + //AudioStreamStats 
getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const; QString getAudioStreamStatsString() const; void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode); @@ -44,11 +38,9 @@ public: quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; } private: - QList _ringBuffers; + QHash _ringBuffers; // mic stream stored under key of null UUID quint16 _outgoingMixedAudioSequenceNumber; - SequenceNumberStats _incomingAvatarAudioSequenceNumberStats; - QHash _incomingInjectedAudioSequenceNumberStatsMap; AudioStreamStats _downstreamAudioStreamStats; }; diff --git a/assignment-client/src/audio/AvatarAudioRingBuffer.cpp b/assignment-client/src/audio/AvatarAudioRingBuffer.cpp index 382e8de68b..94a95ef177 100644 --- a/assignment-client/src/audio/AvatarAudioRingBuffer.cpp +++ b/assignment-client/src/audio/AvatarAudioRingBuffer.cpp @@ -14,57 +14,50 @@ #include "AvatarAudioRingBuffer.h" AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBuffer) : - PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo, dynamicJitterBuffer) { - +PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo, dynamicJitterBuffer) { + } -int AvatarAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) { - frameReceivedUpdateTimingStats(); +int AvatarAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { - _shouldLoopbackForNode = (packetTypeForPacket(packet) == PacketTypeMicrophoneAudioWithEcho); + _shouldLoopbackForNode = (type == PacketTypeMicrophoneAudioWithEcho); - // skip the packet header (includes the source UUID) - int readBytes = numBytesForPacketHeader(packet); + int readBytes = 0; - // skip the sequence number - readBytes += sizeof(quint16); - - // hop over the channel flag that has already been read in AudioMixerClientData + // read the channel flag + quint8 
channelFlag = packetAfterSeqNum.at(readBytes); + bool isStereo = channelFlag == 1; readBytes += sizeof(quint8); + + // if isStereo value has changed, restart the ring buffer with new frame size + if (isStereo != _isStereo) { + _ringBuffer.resizeForFrameSize(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL); + _isStereo = isStereo; + } + // read the positional data - readBytes += parsePositionalData(packet.mid(readBytes)); + readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes)); - if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) { - // this source had no audio to send us, but this counts as a packet - // write silence equivalent to the number of silent samples they just sent us + if (type == PacketTypeSilentAudioFrame) { int16_t numSilentSamples; - - memcpy(&numSilentSamples, packet.data() + readBytes, sizeof(int16_t)); + memcpy(&numSilentSamples, packetAfterSeqNum.data() + readBytes, sizeof(int16_t)); readBytes += sizeof(int16_t); - // add silent samples for the dropped packets as well. - // ASSUME that each dropped packet had same number of silent samples as this one - numSilentSamples *= (packetsSkipped + 1); - - // NOTE: fixes a bug in old clients that would send garbage for their number of silentSamples - // CAN'T DO THIS because ScriptEngine.cpp sends frames of different size due to having a different sending interval - // (every 16.667ms) than Audio.cpp (every 10.667ms) - //numSilentSamples = getSamplesPerFrame(); - - addDroppableSilentSamples(numSilentSamples); - + numAudioSamples = numSilentSamples; } else { - int numAudioBytes = packet.size() - readBytes; - int numAudioSamples = numAudioBytes / sizeof(int16_t); - - // add silent samples for the dropped packets. 
- // ASSUME that each dropped packet had same number of samples as this one - if (packetsSkipped > 0) { - addDroppableSilentSamples(packetsSkipped * numAudioSamples); - } - - // there is audio data to read - readBytes += writeData(packet.data() + readBytes, numAudioBytes); + int numAudioBytes = packetAfterSeqNum.size() - readBytes; + numAudioSamples = numAudioBytes / sizeof(int16_t); + } + return readBytes; +} + +int AvatarAudioRingBuffer::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { + int readBytes = 0; + if (type == PacketTypeSilentAudioFrame) { + writeDroppableSilentSamples(numAudioSamples); + } else { + // there is audio data to read + readBytes += _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t)); } return readBytes; } diff --git a/assignment-client/src/audio/AvatarAudioRingBuffer.h b/assignment-client/src/audio/AvatarAudioRingBuffer.h index 96233220cd..d846748aff 100644 --- a/assignment-client/src/audio/AvatarAudioRingBuffer.h +++ b/assignment-client/src/audio/AvatarAudioRingBuffer.h @@ -19,12 +19,14 @@ class AvatarAudioRingBuffer : public PositionalAudioRingBuffer { public: AvatarAudioRingBuffer(bool isStereo = false, bool dynamicJitterBuffer = false); - - int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped); + private: // disallow copying of AvatarAudioRingBuffer objects AvatarAudioRingBuffer(const AvatarAudioRingBuffer&); AvatarAudioRingBuffer& operator= (const AvatarAudioRingBuffer&); + + int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples); + int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples); }; #endif // hifi_AvatarAudioRingBuffer_h diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index 0defa2ea33..8b289d7c52 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ 
b/libraries/audio/src/AudioRingBuffer.cpp @@ -231,3 +231,18 @@ int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int return position + numSamplesShift; } } + +float AudioRingBuffer::getNextOutputFrameLoudness() const { + float loudness = 0.0f; + int16_t* sampleAt = _nextOutput; + int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1; + if (samplesAvailable() >= _numFrameSamples) { + for (int i = 0; i < _numFrameSamples; ++i) { + loudness += fabsf(*sampleAt); + sampleAt = sampleAt == _bufferLastAt ? _buffer : sampleAt + 1; + } + loudness /= _numFrameSamples; + loudness /= MAX_SAMPLE_VALUE; + } + return loudness; +} diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index 0ec9213db2..97ffa7e6c8 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -65,6 +65,8 @@ public: const int16_t& operator[] (const int index) const; void shiftReadPosition(unsigned int numSamples); + + float getNextOutputFrameLoudness() const; int samplesAvailable() const; int framesAvailable() const { return samplesAvailable() / _numFrameSamples; } @@ -99,6 +101,86 @@ protected: bool _isStarved; bool _hasStarted; bool _randomAccessMode; /// will this ringbuffer be used for random access? 
if so, do some special processing + +public: + class ConstIterator { //public std::iterator < std::forward_iterator_tag, int16_t > { + public: + ConstIterator() + : _capacity(0), + _bufferFirst(NULL), + _bufferLast(NULL), + _at(NULL) {} + + ConstIterator(int16_t* bufferFirst, int capacity, int16_t* at) + : _capacity(capacity), + _bufferFirst(bufferFirst), + _bufferLast(bufferFirst + capacity - 1), + _at(at) {} + + bool operator==(const ConstIterator& rhs) { return _at == rhs._at; } + bool operator!=(const ConstIterator& rhs) { return _at != rhs._at; } + int16_t operator*() { return *_at; } + + ConstIterator& operator=(const ConstIterator& rhs) { + _capacity = rhs._capacity; + _bufferFirst = rhs._bufferFirst; + _bufferLast = rhs._bufferLast; + _at = rhs._at; + return *this; + } + + ConstIterator& operator++() { + _at = (_at == _bufferLast) ? _bufferFirst : _at + 1; + return *this; + } + + ConstIterator operator++(int) { + ConstIterator tmp(*this); + ++(*this); + return tmp; + } + + ConstIterator& operator--() { + _at = (_at == _bufferFirst) ? 
_bufferLast : _at - 1; + return *this; + } + + ConstIterator operator--(int) { + ConstIterator tmp(*this); + --(*this); + return tmp; + } + + int16_t operator[] (int i) { + return *atShiftedBy(i); + } + + ConstIterator operator+(int i) { + return ConstIterator(_bufferFirst, _capacity, atShiftedBy(i)); + } + + ConstIterator operator-(int i) { + return ConstIterator(_bufferFirst, _capacity, atShiftedBy(-i)); + } + + private: + int16_t* atShiftedBy(int i) { + i = (_at - _bufferFirst + i) % _capacity; + if (i < 0) { + i += _capacity; + } + return _bufferFirst + i; + } + + private: + int _capacity; + int16_t* _bufferFirst; + int16_t* _bufferLast; + int16_t* _at; + }; + + + ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); } }; #endif // hifi_AudioRingBuffer_h diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index ad7830655d..727cb5c554 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -13,19 +13,19 @@ #include "PacketHeaders.h" InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers) : -_ringBuffer(numFrameSamples, false, numFramesCapacity), -_dynamicJitterBuffers(dynamicJitterBuffers), -_desiredJitterBufferFrames(1), -_isStarved(true), -_hasStarted(false), -_consecutiveNotMixedCount(0), -_starveCount(0), -_silentFramesDropped(0), -_incomingSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS), -_lastFrameReceivedTime(0), -_interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS), -_interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS), -_framesAvailableStats(FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES, FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS) + _ringBuffer(numFrameSamples, false, numFramesCapacity), + 
_dynamicJitterBuffers(dynamicJitterBuffers), + _desiredJitterBufferFrames(1), + _isStarved(true), + _hasStarted(false), + _consecutiveNotMixedCount(0), + _starveCount(0), + _silentFramesDropped(0), + _incomingSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS), + _lastFrameReceivedTime(0), + _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS), + _interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS), + _framesAvailableStats(FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES, FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS) { } @@ -94,19 +94,19 @@ int InboundAudioStream::parseData(const QByteArray& packet) { return readBytes; } -bool InboundAudioStream::popFrames(int16_t* dest, int numFrames, bool starveOnFail) { +bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) { if (_isStarved) { _consecutiveNotMixedCount++; return false; } - bool framesPopped = false; + bool popped = false; int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); - if (_ringBuffer.samplesAvailable >= numSamplesRequested) { - _ringBuffer.readSamples(dest, numSamplesRequested); + if (_ringBuffer.samplesAvailable() >= numSamplesRequested) { + _ringBuffer.shiftReadPosition(numSamplesRequested); _hasStarted = true; - framesPopped = true; + popped = true; } else { if (starveOnFail) { setToStarved(); @@ -116,7 +116,58 @@ bool InboundAudioStream::popFrames(int16_t* dest, int numFrames, bool starveOnFa _framesAvailableStats.update(_ringBuffer.framesAvailable()); - return framesPopped; + return popped; +} + +bool InboundAudioStream::popFrames(int16_t* dest, int numFrames, bool starveOnFail) { + if (_isStarved) { + _consecutiveNotMixedCount++; + return false; + } + + bool popped = false; + + int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); + if (_ringBuffer.samplesAvailable() >= numSamplesRequested) { + 
_ringBuffer.readSamples(dest, numSamplesRequested); + _hasStarted = true; + popped = true; + } else { + if (starveOnFail) { + setToStarved(); + _consecutiveNotMixedCount++; + } + } + + _framesAvailableStats.update(_ringBuffer.framesAvailable()); + + return popped; +} + +bool InboundAudioStream::popFrames(AudioRingBuffer::ConstIterator* nextOutput, int numFrames, bool starveOnFail) { + if (_isStarved) { + _consecutiveNotMixedCount++; + return false; + } + + bool popped = false; + + int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); + if (_ringBuffer.samplesAvailable() >= numSamplesRequested) { + *nextOutput = _ringBuffer.nextOutput(); + _ringBuffer.shiftReadPosition(numSamplesRequested); + _hasStarted = true; + popped = true; + } else { + if (starveOnFail) { + setToStarved(); + _consecutiveNotMixedCount++; + } + } + + _framesAvailableStats.update(_ringBuffer.framesAvailable()); + + return popped; } void InboundAudioStream::setToStarved() { diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 7b24f01077..65a4f07918 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -52,14 +52,15 @@ public: int parseData(const QByteArray& packet); - - bool popFrames(int16_t* dest, int numFrames, bool starveOnFail = true); + bool popFrames(int numFrames, bool starveOnFail = true); + bool popFrames(int16_t* dest, int numFrames, bool starveOnFail = true); + bool popFrames(AudioRingBuffer::ConstIterator* nextOutput, int numFrames, bool starveOnFail = true); void setToStarved(); - + /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds AudioStreamStats updateSeqHistoryAndGetAudioStreamStats(); virtual AudioStreamStats getAudioStreamStats() const; diff --git a/libraries/audio/src/InjectedAudioRingBuffer.cpp b/libraries/audio/src/InjectedAudioRingBuffer.cpp index 4723bca906..e074d51bd9 100644 --- 
a/libraries/audio/src/InjectedAudioRingBuffer.cpp +++ b/libraries/audio/src/InjectedAudioRingBuffer.cpp @@ -20,52 +20,50 @@ #include "InjectedAudioRingBuffer.h" InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier, bool dynamicJitterBuffer) : - PositionalAudioRingBuffer(PositionalAudioRingBuffer::Injector, false, dynamicJitterBuffer), - _streamIdentifier(streamIdentifier), - _radius(0.0f), - _attenuationRatio(0) +PositionalAudioRingBuffer(PositionalAudioRingBuffer::Injector, false, dynamicJitterBuffer), +_streamIdentifier(streamIdentifier), +_radius(0.0f), +_attenuationRatio(0) { - + } const uchar MAX_INJECTOR_VOLUME = 255; -int InjectedAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) { - frameReceivedUpdateTimingStats(); - +int InjectedAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { // setup a data stream to read from this packet - QDataStream packetStream(packet); - packetStream.skipRawData(numBytesForPacketHeader(packet)); - - // push past the sequence number - packetStream.skipRawData(sizeof(quint16)); + QDataStream packetStream(packetAfterSeqNum); - // push past the stream identifier + // skip the stream identifier packetStream.skipRawData(NUM_BYTES_RFC4122_UUID); - + // pull the loopback flag and set our boolean uchar shouldLoopback; packetStream >> shouldLoopback; _shouldLoopbackForNode = (shouldLoopback == 1); - + // use parsePositionalData in parent PostionalAudioRingBuffer class to pull common positional data - packetStream.skipRawData(parsePositionalData(packet.mid(packetStream.device()->pos()))); - + packetStream.skipRawData(parsePositionalData(packetAfterSeqNum.mid(packetStream.device()->pos()))); + // pull out the radius for this injected source - if it's zero this is a point source packetStream >> _radius; - + quint8 attenuationByte = 0; packetStream >> attenuationByte; - _attenuationRatio = attenuationByte / 
(float) MAX_INJECTOR_VOLUME; - - int numAudioBytes = packet.size() - packetStream.device()->pos(); - int numAudioSamples = numAudioBytes / sizeof(int16_t); + _attenuationRatio = attenuationByte / (float)MAX_INJECTOR_VOLUME; - // add silent samples for the dropped packets. - // ASSUME that each dropped packet had same number of samples as this one - addDroppableSilentSamples(numAudioSamples * packetsSkipped); + int numAudioBytes = packetAfterSeqNum.size() - packetStream.device()->pos(); + numAudioSamples = numAudioBytes / sizeof(int16_t); - packetStream.skipRawData(writeData(packet.data() + packetStream.device()->pos(), numAudioBytes)); - return packetStream.device()->pos(); } + +int InjectedAudioRingBuffer::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { + return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t)); +} + +AudioStreamStats InjectedAudioRingBuffer::getAudioStreamStats() const { + AudioStreamStats streamStats = PositionalAudioRingBuffer::getAudioStreamStats(); + streamStats._streamIdentifier = _streamIdentifier; + return streamStats; +} diff --git a/libraries/audio/src/InjectedAudioRingBuffer.h b/libraries/audio/src/InjectedAudioRingBuffer.h index 4a1f8b5292..0f7c621baa 100644 --- a/libraries/audio/src/InjectedAudioRingBuffer.h +++ b/libraries/audio/src/InjectedAudioRingBuffer.h @@ -19,18 +19,22 @@ class InjectedAudioRingBuffer : public PositionalAudioRingBuffer { public: InjectedAudioRingBuffer(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false); - - int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped); - - const QUuid& getStreamIdentifier() const { return _streamIdentifier; } + float getRadius() const { return _radius; } float getAttenuationRatio() const { return _attenuationRatio; } + + QUuid getStreamIdentifier() const { return _streamIdentifier; } + private: // disallow copying of 
InjectedAudioRingBuffer objects InjectedAudioRingBuffer(const InjectedAudioRingBuffer&); InjectedAudioRingBuffer& operator= (const InjectedAudioRingBuffer&); - - QUuid _streamIdentifier; + + AudioStreamStats getAudioStreamStats() const; + int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples); + int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples); + + const QUuid _streamIdentifier; float _radius; float _attenuationRatio; }; diff --git a/libraries/audio/src/PositionalAudioRingBuffer.cpp b/libraries/audio/src/PositionalAudioRingBuffer.cpp index 14af8c6a4a..519190d70d 100644 --- a/libraries/audio/src/PositionalAudioRingBuffer.cpp +++ b/libraries/audio/src/PositionalAudioRingBuffer.cpp @@ -9,6 +9,9 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // +#include "PositionalAudioRingBuffer.h" +#include "SharedUtil.h" + #include #include @@ -18,66 +21,27 @@ #include #include -#include "PositionalAudioRingBuffer.h" -#include "SharedUtil.h" - PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo, bool dynamicJitterBuffers) : - - AudioRingBuffer(isStereo ? 
NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL, - false, AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY), - _type(type), - _position(0.0f, 0.0f, 0.0f), - _orientation(0.0f, 0.0f, 0.0f, 0.0f), - _willBeAddedToMix(false), - _shouldLoopbackForNode(false), - _shouldOutputStarveDebug(true), - _isStereo(isStereo), - _nextOutputTrailingLoudness(0.0f), - _listenerUnattenuatedZone(NULL), - _lastFrameReceivedTime(0), - _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS), - _interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS), - _framesAvailableStats(FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES, FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS), - _desiredJitterBufferFrames(1), - _dynamicJitterBuffers(dynamicJitterBuffers), - _consecutiveNotMixedCount(0), - _starveCount(0), - _silentFramesDropped(0) +InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL, +AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers), +_type(type), +_position(0.0f, 0.0f, 0.0f), +_orientation(0.0f, 0.0f, 0.0f, 0.0f), +_shouldLoopbackForNode(false), +_isStereo(isStereo), +_nextOutputTrailingLoudness(0.0f), +_listenerUnattenuatedZone(NULL) { } -int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalByteArray) { - QDataStream packetStream(positionalByteArray); - - packetStream.readRawData(reinterpret_cast(&_position), sizeof(_position)); - packetStream.readRawData(reinterpret_cast(&_orientation), sizeof(_orientation)); - - // if this node sent us a NaN for first float in orientation then don't consider this good audio and bail - if (glm::isnan(_orientation.x)) { - reset(); - return 0; - } - - return packetStream.device()->pos(); -} - void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() { - // ForBoundarySamples means that we expect 
the number of samples not to roll of the end of the ring buffer - float nextLoudness = 0; - - if (samplesAvailable() >= _numFrameSamples) { - for (int i = 0; i < _numFrameSamples; ++i) { - nextLoudness += fabsf(_nextOutput[i]); - } - nextLoudness /= _numFrameSamples; - nextLoudness /= MAX_SAMPLE_VALUE; - } - + float nextLoudness = _ringBuffer.getNextOutputFrameLoudness(); + const int TRAILING_AVERAGE_FRAMES = 100; const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES; const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO; const float LOUDNESS_EPSILON = 0.000001f; - + if (nextLoudness >= _nextOutputTrailingLoudness) { _nextOutputTrailingLoudness = nextLoudness; } else { @@ -89,120 +53,24 @@ void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() { } } -bool PositionalAudioRingBuffer::shouldBeAddedToMix() { - int desiredJitterBufferSamples = _desiredJitterBufferFrames * _numFrameSamples; - - if (!isNotStarvedOrHasMinimumSamples(_numFrameSamples + desiredJitterBufferSamples)) { - // if the buffer was starved, allow it to accrue at least the desired number of - // jitter buffer frames before we start taking frames from it for mixing +int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalByteArray) { + QDataStream packetStream(positionalByteArray); - if (_shouldOutputStarveDebug) { - _shouldOutputStarveDebug = false; - } + packetStream.readRawData(reinterpret_cast(&_position), sizeof(_position)); + packetStream.readRawData(reinterpret_cast(&_orientation), sizeof(_orientation)); - _consecutiveNotMixedCount++; - return false; - } else if (samplesAvailable() < _numFrameSamples) { - // if the buffer doesn't have a full frame of samples to take for mixing, it is starved - _isStarved = true; - _starveCount++; - - _framesAvailableStats.reset(); - - // reset our _shouldOutputStarveDebug to true so the next is printed - _shouldOutputStarveDebug = true; - - _consecutiveNotMixedCount = 1; - return false; + // if this node 
sent us a NaN for first float in orientation then don't consider this good audio and bail + if (glm::isnan(_orientation.x)) { + // NOTE: why would we reset the ring buffer here? + _ringBuffer.reset(); + return 0; } - - // good buffer, add this to the mix - // if we just finished refilling after a starve, we have a new jitter buffer length. - // reset the frames available stats. - - _isStarved = false; - - _framesAvailableStats.update(framesAvailable()); - - // since we've read data from ring buffer at least once - we've started - _hasStarted = true; - - return true; + return packetStream.device()->pos(); } -int PositionalAudioRingBuffer::getCalculatedDesiredJitterBufferFrames() const { - const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE; - - int calculatedDesiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME); - if (calculatedDesiredJitterBufferFrames < 1) { - calculatedDesiredJitterBufferFrames = 1; - } - return calculatedDesiredJitterBufferFrames; -} - - -void PositionalAudioRingBuffer::frameReceivedUpdateTimingStats() { - // update the two time gap stats we're keeping - quint64 now = usecTimestampNow(); - if (_lastFrameReceivedTime != 0) { - quint64 gap = now - _lastFrameReceivedTime; - _interframeTimeGapStatsForJitterCalc.update(gap); - _interframeTimeGapStatsForStatsPacket.update(gap); - } - _lastFrameReceivedTime = now; - - // recalculate the _desiredJitterBufferFrames if _interframeTimeGapStatsForJitterCalc has updated stats for us - if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) { - if (!_dynamicJitterBuffers) { - _desiredJitterBufferFrames = 1; // HACK to see if this fixes the audio silence - } else { - const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE; - - _desiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / 
USECS_PER_FRAME); - if (_desiredJitterBufferFrames < 1) { - _desiredJitterBufferFrames = 1; - } - const int maxDesired = _frameCapacity - 1; - if (_desiredJitterBufferFrames > maxDesired) { - _desiredJitterBufferFrames = maxDesired; - } - } - _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag(); - } -} - -void PositionalAudioRingBuffer::addDroppableSilentSamples(int numSilentSamples) { - - // This adds some number of frames to the desired jitter buffer frames target we use. - // The larger this value is, the less aggressive we are about reducing the jitter buffer length. - // Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames long, - // which could lead immediately to a starve. - const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1; - - // calculate how many silent frames we should drop. We only drop silent frames if - // the running avg num frames available has stabilized and it's more than - // our desired number of frames by the margin defined above. 
- int numSilentFramesToDrop = 0; - if (_framesAvailableStats.getNewStatsAvailableFlag() && _framesAvailableStats.isWindowFilled() - && numSilentSamples >= _numFrameSamples) { - _framesAvailableStats.clearNewStatsAvailableFlag(); - int averageJitterBufferFrames = (int)_framesAvailableStats.getWindowAverage(); - int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING; - - if (averageJitterBufferFrames > desiredJitterBufferFramesPlusPadding) { - // our avg jitter buffer size exceeds its desired value, so ignore some silent - // frames to get that size as close to desired as possible - int numSilentFramesToDropDesired = averageJitterBufferFrames - desiredJitterBufferFramesPlusPadding; - int numSilentFramesReceived = numSilentSamples / _numFrameSamples; - numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived); - - // since we now have a new jitter buffer length, reset the frames available stats. - _framesAvailableStats.reset(); - - _silentFramesDropped += numSilentFramesToDrop; - } - } - - addSilentFrame(numSilentSamples - numSilentFramesToDrop * _numFrameSamples); +AudioStreamStats PositionalAudioRingBuffer::getAudioStreamStats() const { + AudioStreamStats streamStats = InboundAudioStream::getAudioStreamStats(); + streamStats._streamType = _type; + return streamStats; } diff --git a/libraries/audio/src/PositionalAudioRingBuffer.h b/libraries/audio/src/PositionalAudioRingBuffer.h index 0b14a12858..edc6266613 100644 --- a/libraries/audio/src/PositionalAudioRingBuffer.h +++ b/libraries/audio/src/PositionalAudioRingBuffer.h @@ -13,105 +13,60 @@ #define hifi_PositionalAudioRingBuffer_h #include - #include -#include "AudioRingBuffer.h" -#include "MovingMinMaxAvg.h" - -// the time gaps stats for _desiredJitterBufferFrames calculation -// will recalculate the max for the past 5000 samples every 500 samples -const int TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES = 500; -const int 
TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10; - -// the time gap stats for constructing AudioStreamStats will -// recalculate min/max/avg every ~1 second for the past ~30 seconds of time gap data -const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; -const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30; - -// the stats for calculating the average frames available will recalculate every ~1 second -// and will include data for the past ~10 seconds -const int FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; -const int FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS = 10; +#include "InboundAudioStream.h" const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100; -class PositionalAudioRingBuffer : public AudioRingBuffer { +class PositionalAudioRingBuffer : public InboundAudioStream { + Q_OBJECT public: enum Type { Microphone, Injector }; - - PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false); - - virtual int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) = 0; - int parsePositionalData(const QByteArray& positionalByteArray); - int parseListenModeData(const QByteArray& listenModeByteArray); - + PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false); + + virtual AudioStreamStats getAudioStreamStats() const; + void updateNextOutputTrailingLoudness(); float getNextOutputTrailingLoudness() const { return _nextOutputTrailingLoudness; } - - bool shouldBeAddedToMix(); - - bool willBeAddedToMix() const { return _willBeAddedToMix; } - void setWillBeAddedToMix(bool willBeAddedToMix) { _willBeAddedToMix = willBeAddedToMix; } - + bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; } - bool isStereo() const { return _isStereo; } - PositionalAudioRingBuffer::Type getType() const { return _type; } const 
glm::vec3& getPosition() const { return _position; } const glm::quat& getOrientation() const { return _orientation; } - AABox* getListenerUnattenuatedZone() const { return _listenerUnattenuatedZone; } + void setListenerUnattenuatedZone(AABox* listenerUnattenuatedZone) { _listenerUnattenuatedZone = listenerUnattenuatedZone; } - - int getSamplesPerFrame() const { return _isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; } - - const MovingMinMaxAvg& getInterframeTimeGapStatsForStatsPacket() const { return _interframeTimeGapStatsForStatsPacket; } - - int getCalculatedDesiredJitterBufferFrames() const; /// returns what we would calculate our desired as if asked - int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; } - double getFramesAvailableAverage() const { return _framesAvailableStats.getWindowAverage(); } - - int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; } - int getStarveCount() const { return _starveCount; } - int getSilentFramesDropped() const { return _silentFramesDropped; } protected: // disallow copying of PositionalAudioRingBuffer objects PositionalAudioRingBuffer(const PositionalAudioRingBuffer&); PositionalAudioRingBuffer& operator= (const PositionalAudioRingBuffer&); - void frameReceivedUpdateTimingStats(); - void addDroppableSilentSamples(int numSilentSamples); - - PositionalAudioRingBuffer::Type _type; + /// parses the info between the seq num and the audio data in the network packet and calculates + /// how many audio samples this packet contains + virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0; + + /// parses the audio data in the network packet + virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0; + + int parsePositionalData(const QByteArray& positionalByteArray); + +protected: + Type _type; glm::vec3 _position; glm::quat 
_orientation; - bool _willBeAddedToMix; + bool _shouldLoopbackForNode; - bool _shouldOutputStarveDebug; bool _isStereo; - + float _nextOutputTrailingLoudness; AABox* _listenerUnattenuatedZone; - - quint64 _lastFrameReceivedTime; - MovingMinMaxAvg _interframeTimeGapStatsForJitterCalc; - MovingMinMaxAvg _interframeTimeGapStatsForStatsPacket; - MovingMinMaxAvg _framesAvailableStats; - - int _desiredJitterBufferFrames; - bool _dynamicJitterBuffers; - - // extra stats - int _consecutiveNotMixedCount; - int _starveCount; - int _silentFramesDropped; }; #endif // hifi_PositionalAudioRingBuffer_h From fe70c1f49a87e4d191e567cc63626caa8b25c5e8 Mon Sep 17 00:00:00 2001 From: wangyix Date: Thu, 24 Jul 2014 10:48:56 -0700 Subject: [PATCH 03/62] didn't commit for some reason --- libraries/audio/src/AudioStreamStats.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/libraries/audio/src/AudioStreamStats.h b/libraries/audio/src/AudioStreamStats.h index f73cbd3e54..4dd537afc0 100644 --- a/libraries/audio/src/AudioStreamStats.h +++ b/libraries/audio/src/AudioStreamStats.h @@ -12,13 +12,12 @@ #ifndef hifi_AudioStreamStats_h #define hifi_AudioStreamStats_h -#include "PositionalAudioRingBuffer.h" #include "SequenceNumberStats.h" class AudioStreamStats { public: AudioStreamStats() - : _streamType(PositionalAudioRingBuffer::Microphone), + : _streamType(-1), _streamIdentifier(), _timeGapMin(0), _timeGapMax(0), @@ -37,7 +36,7 @@ public: _packetStreamWindowStats() {} - PositionalAudioRingBuffer::Type _streamType; + qint32 _streamType; QUuid _streamIdentifier; quint64 _timeGapMin; From da3339fc39c22df1c4e1a316b1b52bc3db14a1e2 Mon Sep 17 00:00:00 2001 From: wangyix Date: Thu, 24 Jul 2014 11:57:00 -0700 Subject: [PATCH 04/62] minor const change to qhash iterator --- assignment-client/src/audio/AudioMixerClientData.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp 
b/assignment-client/src/audio/AudioMixerClientData.cpp index 31d0612a98..e23a5ffb95 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -28,7 +28,7 @@ AudioMixerClientData::AudioMixerClientData() : AudioMixerClientData::~AudioMixerClientData() { QHash::ConstIterator i, end = _ringBuffers.constEnd(); - for (i = _ringBuffers.begin(); i != end; i++) { + for (i = _ringBuffers.constBegin(); i != end; i++) { // delete this attached InboundAudioStream delete i.value(); } @@ -38,7 +38,6 @@ AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const { if (_ringBuffers.contains(QUuid())) { return (AvatarAudioRingBuffer*)_ringBuffers.value(QUuid()); } - // no mic stream found - return NULL return NULL; } @@ -277,8 +276,8 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { result = "mic unknown"; } - QHash::ConstIterator i, end = _ringBuffers.end(); - for (i = _ringBuffers.begin(); i != end; i++) { + QHash::ConstIterator i, end = _ringBuffers.constEnd(); + for (i = _ringBuffers.constBegin(); i != end; i++) { if (i.value()->getType() == PositionalAudioRingBuffer::Injector) { AudioStreamStats streamStats = i.value()->getAudioStreamStats(); result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) From 172cd91f2788c0d1c03ecd8159fa1e47f8964e28 Mon Sep 17 00:00:00 2001 From: wangyix Date: Thu, 24 Jul 2014 11:49:44 -0700 Subject: [PATCH 05/62] removed unused seq variable --- assignment-client/src/audio/AudioMixerClientData.cpp | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index e23a5ffb95..df16796fc3 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -43,19 +43,13 @@ AvatarAudioRingBuffer* 
AudioMixerClientData::getAvatarAudioRingBuffer() const { } int AudioMixerClientData::parseData(const QByteArray& packet) { - - // parse sequence number for this packet - int numBytesPacketHeader = numBytesForPacketHeader(packet); - const char* sequenceAt = packet.constData() + numBytesPacketHeader; - quint16 sequence = *(reinterpret_cast(sequenceAt)); - PacketType packetType = packetTypeForPacket(packet); if (packetType == PacketTypeAudioStreamStats) { const char* dataAt = packet.data(); // skip over header, appendFlag, and num stats packed - dataAt += (numBytesPacketHeader + sizeof(quint8) + sizeof(quint16)); + dataAt += (numBytesForPacketHeader(packet) + sizeof(quint8) + sizeof(quint16)); // read the downstream audio stream stats memcpy(&_downstreamAudioStreamStats, dataAt, sizeof(AudioStreamStats)); From 3d22a11e2855cfd8e35812411bbb4d0dc220cd9f Mon Sep 17 00:00:00 2001 From: wangyix Date: Thu, 24 Jul 2014 13:58:15 -0700 Subject: [PATCH 06/62] debugging new audio stream organization --- assignment-client/src/audio/AudioMixer.cpp | 5 +- libraries/audio/src/InboundAudioStream.cpp | 68 +++++++------------ libraries/audio/src/InboundAudioStream.h | 3 + .../audio/src/PositionalAudioRingBuffer.cpp | 18 ++--- 4 files changed, 41 insertions(+), 53 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index 1a436fc9bf..ce30a4b82c 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -98,15 +98,18 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf // if the frame to be mixed is silent, don't mix it if (bufferToAdd->getNextOutputTrailingLoudness() == 0.0f) { bufferToAdd->popFrames(1); + printf("trailing loudness too soft: not mixing!\n"); return; } // get pointer to frame to be mixed. 
If the stream cannot provide a frame (is starved), bail AudioRingBuffer::ConstIterator nextOutputStart; if (!bufferToAdd->popFrames(&nextOutputStart, 1)) { + printf("stream is starved! not mixing!\n"); return; } + printf("mixing stream\n"); float bearingRelativeAngleToSource = 0.0f; float attenuationCoefficient = 1.0f; @@ -312,7 +315,7 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { QHash::ConstIterator i, end = otherNodeRingBuffers.constEnd(); for (i = otherNodeRingBuffers.begin(); i != end; i++) { PositionalAudioRingBuffer* otherNodeBuffer = i.value(); - + if (*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode()) { addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer); } diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 727cb5c554..4923e3d5e7 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -86,6 +86,7 @@ int InboundAudioStream::parseData(const QByteArray& packet) { } if (_isStarved && _ringBuffer.samplesAvailable() >= _desiredJitterBufferFrames * _ringBuffer.getNumFrameSamples()) { + printf("\nstream refilled from starve!\n"); _isStarved = false; } @@ -95,79 +96,60 @@ int InboundAudioStream::parseData(const QByteArray& packet) { } bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) { - if (_isStarved) { - _consecutiveNotMixedCount++; - return false; - } - - bool popped = false; - + bool popped; int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); - if (_ringBuffer.samplesAvailable() >= numSamplesRequested) { + if (popped = shouldPop(numSamplesRequested, starveOnFail)) { _ringBuffer.shiftReadPosition(numSamplesRequested); - _hasStarted = true; - popped = true; - } else { - if (starveOnFail) { - setToStarved(); - _consecutiveNotMixedCount++; - } } - _framesAvailableStats.update(_ringBuffer.framesAvailable()); return popped; } bool InboundAudioStream::popFrames(int16_t* 
dest, int numFrames, bool starveOnFail) { - if (_isStarved) { - _consecutiveNotMixedCount++; - return false; - } - - bool popped = false; - + bool popped; int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); - if (_ringBuffer.samplesAvailable() >= numSamplesRequested) { + if (popped = shouldPop(numSamplesRequested, starveOnFail)) { _ringBuffer.readSamples(dest, numSamplesRequested); - _hasStarted = true; - popped = true; - } else { - if (starveOnFail) { - setToStarved(); - _consecutiveNotMixedCount++; - } } - _framesAvailableStats.update(_ringBuffer.framesAvailable()); return popped; } bool InboundAudioStream::popFrames(AudioRingBuffer::ConstIterator* nextOutput, int numFrames, bool starveOnFail) { + bool popped; + int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); + if (popped = shouldPop(numSamplesRequested, starveOnFail)) { + *nextOutput = _ringBuffer.nextOutput(); + _ringBuffer.shiftReadPosition(numSamplesRequested); + } + _framesAvailableStats.update(_ringBuffer.framesAvailable()); + + return popped; +} + +bool InboundAudioStream::shouldPop(int numSamples, bool starveOnFail) { + printf("\nshouldPop()\n"); + if (_isStarved) { + printf("\t we're starved, not popping\n"); _consecutiveNotMixedCount++; return false; } - bool popped = false; - - int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); - if (_ringBuffer.samplesAvailable() >= numSamplesRequested) { - *nextOutput = _ringBuffer.nextOutput(); - _ringBuffer.shiftReadPosition(numSamplesRequested); + if (_ringBuffer.samplesAvailable() >= numSamples) { + printf("have requested samples and not starved, popping\n"); _hasStarted = true; - popped = true; + return true; } else { if (starveOnFail) { + printf("don't have enough samples; starved!\n"); setToStarved(); _consecutiveNotMixedCount++; } + return false; } - - _framesAvailableStats.update(_ringBuffer.framesAvailable()); - - return popped; } void InboundAudioStream::setToStarved() { diff --git 
a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 65a4f07918..fc1ef07844 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -79,6 +79,9 @@ public: int getSilentFramesDropped() const { return _silentFramesDropped; } int getOverflowCount() const { return _ringBuffer.getOverflowCount(); } +private: + bool shouldPop(int numSamples, bool starveOnFail); + protected: // disallow copying of InboundAudioStream objects InboundAudioStream(const InboundAudioStream&); diff --git a/libraries/audio/src/PositionalAudioRingBuffer.cpp b/libraries/audio/src/PositionalAudioRingBuffer.cpp index 519190d70d..3d686beccc 100644 --- a/libraries/audio/src/PositionalAudioRingBuffer.cpp +++ b/libraries/audio/src/PositionalAudioRingBuffer.cpp @@ -22,15 +22,15 @@ #include PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo, bool dynamicJitterBuffers) : -InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL, -AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers), -_type(type), -_position(0.0f, 0.0f, 0.0f), -_orientation(0.0f, 0.0f, 0.0f, 0.0f), -_shouldLoopbackForNode(false), -_isStereo(isStereo), -_nextOutputTrailingLoudness(0.0f), -_listenerUnattenuatedZone(NULL) + InboundAudioStream(isStereo ? 
NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL, + AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers), + _type(type), + _position(0.0f, 0.0f, 0.0f), + _orientation(0.0f, 0.0f, 0.0f, 0.0f), + _shouldLoopbackForNode(false), + _isStereo(isStereo), + _nextOutputTrailingLoudness(0.0f), + _listenerUnattenuatedZone(NULL) { } From 83ba4b9a1c457be159e8d1bb10f7f15aa2871622 Mon Sep 17 00:00:00 2001 From: wangyix Date: Thu, 24 Jul 2014 14:43:29 -0700 Subject: [PATCH 07/62] Audiomixer now working (added call to updateNextOutputTrailingLoudness()) --- assignment-client/src/audio/AudioMixer.cpp | 25 +++++++++++++------ libraries/audio/src/InboundAudioStream.cpp | 22 ++++++++-------- libraries/audio/src/InboundAudioStream.h | 3 ++- .../audio/src/PositionalAudioRingBuffer.cpp | 6 +++++ .../audio/src/PositionalAudioRingBuffer.h | 4 ++- 5 files changed, 39 insertions(+), 21 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index ce30a4b82c..391914760a 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -98,19 +98,15 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf // if the frame to be mixed is silent, don't mix it if (bufferToAdd->getNextOutputTrailingLoudness() == 0.0f) { bufferToAdd->popFrames(1); - printf("trailing loudness too soft: not mixing!\n"); return; } // get pointer to frame to be mixed. If the stream cannot provide a frame (is starved), bail AudioRingBuffer::ConstIterator nextOutputStart; if (!bufferToAdd->popFrames(&nextOutputStart, 1)) { - printf("stream is starved! 
not mixing!\n"); return; } - printf("mixing stream\n"); - float bearingRelativeAngleToSource = 0.0f; float attenuationCoefficient = 1.0f; int numSamplesDelay = 0; @@ -221,7 +217,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf - if (!bufferToAdd->isStereo() && shouldAttenuate) { + if (!bufferToAdd->isStereo() && shouldAttenuate && false) { // this is a mono buffer, which means it gets full attenuation and spatialization // if the bearing relative angle to source is > 0 then the delayed channel is the right one @@ -269,7 +265,20 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf } } } else { - // this is a stereo buffer or an unattenuated buffer, don't perform spatialization + + int stereoDivider = bufferToAdd->isStereo() ? 1 : 2; + + if (!shouldAttenuate) { + attenuationCoefficient = 1.0f; + } + + for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) { + _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(nextOutputStart[s / stereoDivider] * attenuationCoefficient), + MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); + } + + + /*// this is a stereo buffer or an unattenuated buffer, don't perform spatialization for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) { int stereoDivider = bufferToAdd->isStereo() ? 
1 : 2; @@ -293,7 +302,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf + (int) (nextOutputStart[(s / stereoDivider) + (3 / stereoDivider)] * attenuationCoefficient), MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); - } + }*/ } } @@ -318,6 +327,8 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { if (*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode()) { addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer); + } else { + otherNodeBuffer->popFrames(1); } } } diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 4923e3d5e7..d8daefaceb 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -86,7 +86,6 @@ int InboundAudioStream::parseData(const QByteArray& packet) { } if (_isStarved && _ringBuffer.samplesAvailable() >= _desiredJitterBufferFrames * _ringBuffer.getNumFrameSamples()) { - printf("\nstream refilled from starve!\n"); _isStarved = false; } @@ -130,26 +129,25 @@ bool InboundAudioStream::popFrames(AudioRingBuffer::ConstIterator* nextOutput, i } bool InboundAudioStream::shouldPop(int numSamples, bool starveOnFail) { - printf("\nshouldPop()\n"); - if (_isStarved) { - printf("\t we're starved, not popping\n"); + // we're still refilling; don't mix _consecutiveNotMixedCount++; return false; } if (_ringBuffer.samplesAvailable() >= numSamples) { - printf("have requested samples and not starved, popping\n"); + // we have enough samples to pop, so we're good to mix _hasStarted = true; return true; - } else { - if (starveOnFail) { - printf("don't have enough samples; starved!\n"); - setToStarved(); - _consecutiveNotMixedCount++; - } - return false; } + + // we don't have enough samples, so set this stream to starve + // if starveOnFail is true + if (starveOnFail) { + setToStarved(); + _consecutiveNotMixedCount++; + } + return false; } void InboundAudioStream::setToStarved() { diff --git 
a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index fc1ef07844..f635013369 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -51,7 +51,7 @@ public: void resetSequenceNumberStats() { _incomingSequenceNumberStats.reset(); } - int parseData(const QByteArray& packet); + virtual int parseData(const QByteArray& packet); bool popFrames(int numFrames, bool starveOnFail = true); bool popFrames(int16_t* dest, int numFrames, bool starveOnFail = true); @@ -62,6 +62,7 @@ public: /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds AudioStreamStats updateSeqHistoryAndGetAudioStreamStats(); + virtual AudioStreamStats getAudioStreamStats() const; int getCalculatedDesiredJitterBufferFrames() const; diff --git a/libraries/audio/src/PositionalAudioRingBuffer.cpp b/libraries/audio/src/PositionalAudioRingBuffer.cpp index 3d686beccc..378fad92cc 100644 --- a/libraries/audio/src/PositionalAudioRingBuffer.cpp +++ b/libraries/audio/src/PositionalAudioRingBuffer.cpp @@ -34,6 +34,12 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer:: { } +int PositionalAudioRingBuffer::parseData(const QByteArray& packet) { + int bytesRead = InboundAudioStream::parseData(packet); + updateNextOutputTrailingLoudness(); + return bytesRead; +} + void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() { float nextLoudness = _ringBuffer.getNextOutputFrameLoudness(); diff --git a/libraries/audio/src/PositionalAudioRingBuffer.h b/libraries/audio/src/PositionalAudioRingBuffer.h index edc6266613..e0d6929ec9 100644 --- a/libraries/audio/src/PositionalAudioRingBuffer.h +++ b/libraries/audio/src/PositionalAudioRingBuffer.h @@ -28,7 +28,9 @@ public: }; PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false); - + + int parseData(const QByteArray& packet); + virtual 
AudioStreamStats getAudioStreamStats() const; void updateNextOutputTrailingLoudness(); From 21402e3ff1e539244cf877e093f01121e20bdbca Mon Sep 17 00:00:00 2001 From: wangyix Date: Thu, 24 Jul 2014 14:50:13 -0700 Subject: [PATCH 08/62] cleaned up some stuff --- assignment-client/src/audio/AudioMixer.cpp | 33 ++-------------------- 1 file changed, 3 insertions(+), 30 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index 391914760a..e407607388 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -254,9 +254,9 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf // to stick at the beginning float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio; AudioRingBuffer::ConstIterator delayNextOutputStart = nextOutputStart - numSamplesDelay; - //if (delayNextOutputStart < bufferStart) { - //delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay; - //} + + // TODO: delayNextOutputStart may be inside the last frame written if the ringbuffer is completely full + // maybe make AudioRingBuffer have 1 extra frame in its buffer for (int i = 0; i < numSamplesDelay; i++) { int parentIndex = i * 2; @@ -276,33 +276,6 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(nextOutputStart[s / stereoDivider] * attenuationCoefficient), MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); } - - - /*// this is a stereo buffer or an unattenuated buffer, don't perform spatialization - for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) { - - int stereoDivider = bufferToAdd->isStereo() ? 
1 : 2; - - if (!shouldAttenuate) { - attenuationCoefficient = 1.0f; - } - - _clientSamples[s] = glm::clamp(_clientSamples[s] - + (int) (nextOutputStart[(s / stereoDivider)] * attenuationCoefficient), - MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); - _clientSamples[s + 1] = glm::clamp(_clientSamples[s + 1] - + (int) (nextOutputStart[(s / stereoDivider) + (1 / stereoDivider)] - * attenuationCoefficient), - MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); - _clientSamples[s + 2] = glm::clamp(_clientSamples[s + 2] - + (int) (nextOutputStart[(s / stereoDivider) + (2 / stereoDivider)] - * attenuationCoefficient), - MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); - _clientSamples[s + 3] = glm::clamp(_clientSamples[s + 3] - + (int) (nextOutputStart[(s / stereoDivider) + (3 / stereoDivider)] - * attenuationCoefficient), - MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); - }*/ } } From 9b629a73266a94d0d457089a82411415f4e84ab6 Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Thu, 24 Jul 2014 16:55:23 -0700 Subject: [PATCH 09/62] Added slerp and squad to the Quat scripting interface --- libraries/script-engine/src/Quat.cpp | 10 ++++++++++ libraries/script-engine/src/Quat.h | 2 ++ 2 files changed, 12 insertions(+) diff --git a/libraries/script-engine/src/Quat.cpp b/libraries/script-engine/src/Quat.cpp index 4acc60e7b4..8308536f97 100644 --- a/libraries/script-engine/src/Quat.cpp +++ b/libraries/script-engine/src/Quat.cpp @@ -66,6 +66,16 @@ glm::quat Quat::mix(const glm::quat& q1, const glm::quat& q2, float alpha) { return safeMix(q1, q2, alpha); } +/// Spherical Linear Interpolation +glm::quat Quat::slerp(const glm::quat& q1, const glm::quat& q2, float alpha) { + return glm::slerp(q1, q2, alpha); +} + +// Spherical Quadratic Interpolation +glm::quat Quat::squad(const glm::quat& q1, const glm::quat& q2, const glm::quat& s1, const glm::quat& s2, float h) { + return glm::squad(q1, q2, s1, s2, h); +} + void Quat::print(const QString& lable, const glm::quat& q) { qDebug() << qPrintable(lable) << q.x << "," << q.y << "," << 
q.z << "," << q.w; } diff --git a/libraries/script-engine/src/Quat.h b/libraries/script-engine/src/Quat.h index c97ccf9a1e..190c823118 100644 --- a/libraries/script-engine/src/Quat.h +++ b/libraries/script-engine/src/Quat.h @@ -36,6 +36,8 @@ public slots: glm::vec3 safeEulerAngles(const glm::quat& orientation); // degrees glm::quat angleAxis(float angle, const glm::vec3& v); // degrees glm::quat mix(const glm::quat& q1, const glm::quat& q2, float alpha); + glm::quat slerp(const glm::quat& q1, const glm::quat& q2, float alpha); + glm::quat squad(const glm::quat& q1, const glm::quat& q2, const glm::quat& s1, const glm::quat& s2, float h); void print(const QString& lable, const glm::quat& q); }; From 473cbf2afe52e4b701579d6d46d06877d476e586 Mon Sep 17 00:00:00 2001 From: wangyix Date: Thu, 24 Jul 2014 17:15:46 -0700 Subject: [PATCH 10/62] client audio now updated with stream class; seems fine for now --- .../src/audio/AudioMixerClientData.cpp | 2 +- interface/src/Audio.cpp | 157 +++++------------- interface/src/Audio.h | 25 +-- libraries/audio/src/AudioRingBuffer.cpp | 13 +- libraries/audio/src/AudioRingBuffer.h | 24 +-- libraries/audio/src/InboundAudioStream.cpp | 16 +- libraries/audio/src/InboundAudioStream.h | 4 +- .../audio/src/InboundMixedAudioStream.cpp | 17 ++ libraries/audio/src/InboundMixedAudioStream.h | 11 ++ 9 files changed, 101 insertions(+), 168 deletions(-) create mode 100644 libraries/audio/src/InboundMixedAudioStream.cpp create mode 100644 libraries/audio/src/InboundMixedAudioStream.h diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index df16796fc3..f6d27c534e 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -215,7 +215,7 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& // pack the calculated number of stream stats for (int i = 0; i < numStreamStatsToPack; i++) { - 
AudioStreamStats streamStats = ringBuffersIterator.value()->getAudioStreamStats(); + AudioStreamStats streamStats = ringBuffersIterator.value()->updateSeqHistoryAndGetAudioStreamStats(); memcpy(dataAt, &streamStats, sizeof(AudioStreamStats)); dataAt += sizeof(AudioStreamStats); diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 4787c0951c..3a19a099d6 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -39,26 +39,15 @@ #include #include -#include "Application.h" #include "Audio.h" #include "Menu.h" #include "Util.h" -#include "AudioRingBuffer.h" +#include "PositionalAudioRingBuffer.h" static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0; static const int NUMBER_OF_NOISE_SAMPLE_FRAMES = 300; -// audio frames time gap stats (min/max/avg) for last ~30 seconds are recalculated every ~1 second -static const int TIME_GAPS_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; -static const int TIME_GAP_STATS_WINDOW_INTERVALS = 30; - -// incoming sequence number stats history will cover last 30s -static const int INCOMING_SEQ_STATS_HISTORY_LENGTH = INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS / - (TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS / USECS_PER_SECOND); - -// the stats for the total frames available in the ring buffer and the audio output buffer -// will sample every second, update every second, and have a moving window covering 10 seconds static const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10; // Mute icon configration @@ -87,9 +76,9 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) : // this delay will slowly add up and the longer someone runs, they more delayed their audio will be. 
_inputRingBuffer(0), #ifdef _WIN32 - _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, false, 100), + _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true), #else - _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!! + _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!! #endif _isStereoInput(false), _averagedLatency(0.0), @@ -104,14 +93,12 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) : _noiseGateEnabled(true), _toneInjectionEnabled(false), _noiseGateFramesToClose(0), - _totalPacketsReceived(0), _totalInputAudioSamples(0), _collisionSoundMagnitude(0.0f), _collisionSoundFrequency(0.0f), _collisionSoundNoise(0.0f), _collisionSoundDuration(0.0f), _proceduralEffectSample(0), - _numFramesDisplayStarve(0), _muted(false), _processSpatialAudio(false), _spatialAudioStart(0), @@ -127,14 +114,9 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) : _scopeOutputLeft(0), _scopeOutputRight(0), _statsEnabled(false), - _starveCount(0), - _consecutiveNotMixedCount(0), _outgoingAvatarAudioSequenceNumber(0), - _incomingMixedAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH), - _interframeTimeGapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS), _audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AUDIO_CALLBACK_MSECS * CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS), _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS), - _outputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS), _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS) { // clear the array of locally injected samples @@ -156,20 +138,14 @@ void Audio::reset() { } void Audio::resetStats() { - _starveCount = 0; - _consecutiveNotMixedCount = 0; + _ringBuffer.resetStats(); _audioMixerAvatarStreamAudioStats = 
AudioStreamStats(); _audioMixerInjectedStreamAudioStatsMap.clear(); - _incomingMixedAudioSequenceNumberStats.reset(); - - _interframeTimeGapStats.reset(); - _audioInputMsecsReadStats.reset(); _inputRingBufferMsecsAvailableStats.reset(); - _outputRingBufferFramesAvailableStats.reset(); _audioOutputMsecsUnplayedStats.reset(); } @@ -742,30 +718,6 @@ void Audio::handleAudioInput() { void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) { const int NUM_INITIAL_PACKETS_DISCARD = 3; const int STANDARD_DEVIATION_SAMPLE_COUNT = 500; - - _totalPacketsReceived++; - - double timeDiff = (double)_timeSinceLastReceived.nsecsElapsed() / 1000.0; // ns to us - _interframeTimeGapStats.update((quint64)timeDiff); - timeDiff /= USECS_PER_MSEC; // us to ms - _timeSinceLastReceived.start(); - - // Discard first few received packets for computing jitter (often they pile up on start) - if (_totalPacketsReceived > NUM_INITIAL_PACKETS_DISCARD) { - _stdev.addValue(timeDiff); - } - - if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) { - _measuredJitter = _stdev.getStDev(); - _stdev.reset(); - // Set jitter buffer to be a multiple of the measured standard deviation - const int MAX_JITTER_BUFFER_SAMPLES = _ringBuffer.getSampleCapacity() / 2; - const float NUM_STANDARD_DEVIATIONS = 3.0f; - if (Menu::getInstance()->getAudioJitterBufferSamples() == 0) { - float newJitterBufferSamples = (NUM_STANDARD_DEVIATIONS * _measuredJitter) / 1000.0f * SAMPLE_RATE; - setJitterBufferSamples(glm::clamp((int)newJitterBufferSamples, 0, MAX_JITTER_BUFFER_SAMPLES)); - } - } if (_audioOutput) { // Audio output must exist and be correctly set up if we're going to process received audio @@ -806,29 +758,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) { } AudioStreamStats Audio::getDownstreamAudioStreamStats() const { - - AudioStreamStats stats; - stats._streamType = PositionalAudioRingBuffer::Microphone; - - stats._timeGapMin = _interframeTimeGapStats.getMin(); - 
stats._timeGapMax = _interframeTimeGapStats.getMax(); - stats._timeGapAverage = _interframeTimeGapStats.getAverage(); - stats._timeGapWindowMin = _interframeTimeGapStats.getWindowMin(); - stats._timeGapWindowMax = _interframeTimeGapStats.getWindowMax(); - stats._timeGapWindowAverage = _interframeTimeGapStats.getWindowAverage(); - - stats._ringBufferFramesAvailable = _ringBuffer.framesAvailable(); - stats._ringBufferFramesAvailableAverage = _outputRingBufferFramesAvailableStats.getWindowAverage(); - stats._ringBufferDesiredJitterBufferFrames = getDesiredJitterBufferFrames(); - stats._ringBufferStarveCount = _starveCount; - stats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount; - stats._ringBufferOverflowCount = _ringBuffer.getOverflowCount(); - stats._ringBufferSilentFramesDropped = 0; - - stats._packetStreamStats = _incomingMixedAudioSequenceNumberStats.getStats(); - stats._packetStreamWindowStats = _incomingMixedAudioSequenceNumberStats.getStatsForHistoryWindow(); - - return stats; + return _ringBuffer.getAudioStreamStats(); } void Audio::sendDownstreamAudioStatsPacket() { @@ -837,13 +767,8 @@ void Audio::sendDownstreamAudioStatsPacket() { _inputRingBufferMsecsAvailableStats.update(getInputRingBufferMsecsAvailable()); - _outputRingBufferFramesAvailableStats.update(_ringBuffer.framesAvailable()); _audioOutputMsecsUnplayedStats.update(getAudioOutputMsecsUnplayed()); - // push the current seq number stats into history, which moves the history window forward 1s - // (since that's how often pushStatsToHistory() is called) - _incomingMixedAudioSequenceNumberStats.pushStatsToHistory(); - char packet[MAX_PACKET_SIZE]; // pack header @@ -861,7 +786,7 @@ void Audio::sendDownstreamAudioStatsPacket() { dataAt += sizeof(quint16); // pack downstream audio stream stats - AudioStreamStats stats = getDownstreamAudioStreamStats(); + AudioStreamStats stats = _ringBuffer.updateSeqHistoryAndGetAudioStreamStats(); memcpy(dataAt, &stats, sizeof(AudioStreamStats)); dataAt 
+= sizeof(AudioStreamStats); @@ -971,59 +896,50 @@ void Audio::toggleStereoInput() { void Audio::processReceivedAudio(const QByteArray& audioByteArray) { - QUuid senderUUID = uuidFromPacketHeader(audioByteArray); - - // parse sequence number for this packet - int numBytesPacketHeader = numBytesForPacketHeader(audioByteArray); - const char* sequenceAt = audioByteArray.constData() + numBytesPacketHeader; - quint16 sequence = *((quint16*)sequenceAt); - _incomingMixedAudioSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID); - // parse audio data _ringBuffer.parseData(audioByteArray); + + + if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) { + // the audio output has no samples to play. set the downstream audio to starved so that it + // refills to its desired size before pushing frames + _ringBuffer.setToStarved(); + } + + float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate()) * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount()); - - if (!_ringBuffer.isStarved() && _audioOutput && _audioOutput->bytesFree() == _audioOutput->bufferSize()) { - // we don't have any audio data left in the output buffer - // we just starved - //qDebug() << "Audio output just starved."; - _ringBuffer.setIsStarved(true); - _numFramesDisplayStarve = 10; - _starveCount++; - _consecutiveNotMixedCount = 0; - } - - int numNetworkOutputSamples; + int numFramesToPush; if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) { - numNetworkOutputSamples = _ringBuffer.samplesAvailable(); + numFramesToPush = _ringBuffer.getFramesAvailable(); } else { // make sure to push a whole number of frames to the audio output - int numFramesAudioOutputRoomFor = _audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio / _ringBuffer.getNumFrameSamples(); - numNetworkOutputSamples = std::min(_ringBuffer.samplesAvailable(), numFramesAudioOutputRoomFor * 
_ringBuffer.getNumFrameSamples()); + int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _ringBuffer.getNumFrameSamples(); + numFramesToPush = std::min(_ringBuffer.getFramesAvailable(), numFramesAudioOutputRoomFor); } - + // if there is data in the ring buffer and room in the audio output, decide what to do - if (numNetworkOutputSamples > 0) { - - int numSamplesNeededToStartPlayback = std::min(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2), + + AudioRingBuffer::ConstIterator ringBufferNextOutput; + if (numFramesToPush > 0 && _ringBuffer.popFrames(&ringBufferNextOutput, numFramesToPush, false)) { + + /*int numSamplesNeededToStartPlayback = std::min(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2), _ringBuffer.getSampleCapacity()); if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) { // We are still waiting for enough samples to begin playback // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback; _consecutiveNotMixedCount++; - } else { - int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio; + } else {*/ + + int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; + int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio; QByteArray outputBuffer; outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t)); - // We are either already playing back, or we have enough audio to start playing back. 
- //qDebug() << "pushing " << numNetworkOutputSamples; - _ringBuffer.setIsStarved(false); int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples]; if (_processSpatialAudio) { @@ -1031,7 +947,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { QByteArray buffer; buffer.resize(numNetworkOutputSamples * sizeof(int16_t)); - _ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); + ringBufferNextOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); // Accumulate direct transmission of audio from sender to receiver if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) { @@ -1051,7 +967,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { } else { // copy the samples we'll resample from the ring buffer - this also // pushes the read pointer of the ring buffer forwards - _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); + ringBufferNextOutput.readSamples(ringBufferSamples, numNetworkOutputSamples); } // copy the packet from the RB to the output @@ -1089,7 +1005,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { } delete[] ringBufferSamples; - } + //} } } @@ -1427,13 +1343,14 @@ void Audio::renderStats(const float* color, int width, int height) { float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f; + AudioStreamStats downstreamAudioStreamStats = _ringBuffer.getAudioStreamStats(); SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer); if (!audioMixerNodePointer.isNull()) { audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage(); inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable(); networkRoundtripLatency = audioMixerNodePointer->getPingMs(); mixerRingBufferLatency = 
_audioMixerAvatarStreamAudioStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS; - outputRingBufferLatency = _outputRingBufferFramesAvailableStats.getWindowAverage() * BUFFER_SEND_INTERVAL_MSECS; + outputRingBufferLatency = downstreamAudioStreamStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS; audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage(); } float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency; @@ -1478,7 +1395,7 @@ void Audio::renderStats(const float* color, int width, int height) { verticalOffset += STATS_HEIGHT_PER_LINE; drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color); - renderAudioStreamStats(getDownstreamAudioStreamStats(), horizontalOffset, verticalOffset, scale, rotation, font, color, true); + renderAudioStreamStats(downstreamAudioStreamStats, horizontalOffset, verticalOffset, scale, rotation, font, color, true); verticalOffset += STATS_HEIGHT_PER_LINE; // blank line @@ -1756,8 +1673,8 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo) // setup our general output device for audio-mixer audio _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this); - _audioOutput->setBufferSize(_ringBuffer.getSampleCapacity() * sizeof(int16_t)); - qDebug() << "Ring Buffer capacity in samples: " << _ringBuffer.getSampleCapacity(); + _audioOutput->setBufferSize(_ringBuffer.getFrameCapacity() * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS)); + qDebug() << "Ring Buffer capacity in frames: " << _ringBuffer.getFrameCapacity(); _outputDevice = _audioOutput->start(); // setup a loopback audio output device diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 67a951b8d9..3029594aea 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -31,12 +31,12 @@ #include #include 
-#include #include +#include "InboundMixedAudioStream.h" + static const int NUM_AUDIO_CHANNELS = 2; -static const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30; class QAudioInput; class QAudioOutput; @@ -77,8 +77,6 @@ public: int getNetworkBufferLengthSamplesPerChannel() { return NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; } bool getProcessSpatialAudio() const { return _processSpatialAudio; } - - const SequenceNumberStats& getIncomingMixedAudioSequenceNumberStats() const { return _incomingMixedAudioSequenceNumberStats; } float getInputRingBufferMsecsAvailable() const; float getInputRingBufferAverageMsecsAvailable() const { return (float)_inputRingBufferMsecsAvailableStats.getWindowAverage(); } @@ -122,17 +120,11 @@ public slots: float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; } void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); } - - const AudioRingBuffer& getDownstreamRingBuffer() const { return _ringBuffer; } int getDesiredJitterBufferFrames() const { return _jitterBufferSamples / _ringBuffer.getNumFrameSamples(); } - int getStarveCount() const { return _starveCount; } - int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; } - const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; } const QHash& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; } - const MovingMinMaxAvg& getInterframeTimeGapStats() const { return _interframeTimeGapStats; } signals: bool muteToggled(); @@ -159,7 +151,7 @@ private: QAudioOutput* _proceduralAudioOutput; QIODevice* _proceduralOutputDevice; AudioRingBuffer _inputRingBuffer; - AudioRingBuffer _ringBuffer; + InboundMixedAudioStream _ringBuffer; bool _isStereoInput; QString _inputAudioDeviceName; @@ -180,7 +172,6 @@ private: bool _noiseGateEnabled; bool _toneInjectionEnabled; int _noiseGateFramesToClose; - int _totalPacketsReceived; int 
_totalInputAudioSamples; float _collisionSoundMagnitude; @@ -197,7 +188,6 @@ private: int _drumSoundSample; int _proceduralEffectSample; - int _numFramesDisplayStarve; bool _muted; bool _localEcho; GLuint _micTextureId; @@ -276,21 +266,14 @@ private: static const unsigned int STATS_HEIGHT_PER_LINE = 20; bool _statsEnabled; - int _starveCount; - int _consecutiveNotMixedCount; - AudioStreamStats _audioMixerAvatarStreamAudioStats; QHash _audioMixerInjectedStreamAudioStatsMap; quint16 _outgoingAvatarAudioSequenceNumber; - SequenceNumberStats _incomingMixedAudioSequenceNumberStats; - - MovingMinMaxAvg _interframeTimeGapStats; - + MovingMinMaxAvg _audioInputMsecsReadStats; MovingMinMaxAvg _inputRingBufferMsecsAvailableStats; - MovingMinMaxAvg _outputRingBufferFramesAvailableStats; MovingMinMaxAvg _audioOutputMsecsUnplayedStats; }; diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index 8b289d7c52..696f130523 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ b/libraries/audio/src/AudioRingBuffer.cpp @@ -21,13 +21,11 @@ AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode, int numFramesCapacity) : NodeData(), - _overflowCount(0), _frameCapacity(numFramesCapacity), _sampleCapacity(numFrameSamples * numFramesCapacity), _isFull(false), _numFrameSamples(numFrameSamples), - _isStarved(true), - _hasStarted(false), + _overflowCount(0), _randomAccessMode(randomAccessMode) { if (numFrameSamples) { @@ -53,7 +51,6 @@ void AudioRingBuffer::reset() { _isFull = false; _endOfLastWrite = _buffer; _nextOutput = _buffer; - _isStarved = true; } void AudioRingBuffer::resizeForFrameSize(int numFrameSamples) { @@ -211,14 +208,6 @@ int AudioRingBuffer::addSilentFrame(int numSilentSamples) { return numSilentSamples * sizeof(int16_t); } -bool AudioRingBuffer::isNotStarvedOrHasMinimumSamples(int numRequiredSamples) const { - if (!_isStarved) { - return true; - } else { - return samplesAvailable() >= numRequiredSamples; - } 
-} - int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const { if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + _sampleCapacity) { diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index 97ffa7e6c8..cd968bb86e 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -73,13 +73,7 @@ public: int getNumFrameSamples() const { return _numFrameSamples; } - bool isNotStarvedOrHasMinimumSamples(int numRequiredSamples) const; - - bool isStarved() const { return _isStarved; } - void setIsStarved(bool isStarved) { _isStarved = isStarved; } - int getOverflowCount() const { return _overflowCount; } /// how many times has the ring buffer has overwritten old data - bool hasStarted() const { return _hasStarted; } int addSilentFrame(int numSilentSamples); protected: @@ -89,8 +83,6 @@ protected: int16_t* shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const; - int _overflowCount; /// how many times has the ring buffer has overwritten old data - int _frameCapacity; int _sampleCapacity; bool _isFull; @@ -98,10 +90,14 @@ protected: int16_t* _nextOutput; int16_t* _endOfLastWrite; int16_t* _buffer; - bool _isStarved; - bool _hasStarted; bool _randomAccessMode; /// will this ringbuffer be used for random access? 
if so, do some special processing + int _overflowCount; /// how many times has the ring buffer has overwritten old data + + //bool _isStarved; + //bool _hasStarted; + + public: class ConstIterator { //public std::iterator < std::forward_iterator_tag, int16_t > { public: @@ -162,6 +158,14 @@ public: ConstIterator operator-(int i) { return ConstIterator(_bufferFirst, _capacity, atShiftedBy(-i)); } + + void readSamples(int16_t* dest, int numSamples) { + for (int i = 0; i < numSamples; i++) { + *dest = *(*this); + ++dest; + ++(*this); + } + } private: int16_t* atShiftedBy(int i) { diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index d8daefaceb..ea45b761b1 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -31,9 +31,13 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit void InboundAudioStream::reset() { _ringBuffer.reset(); - _desiredJitterBufferFrames = 1; _isStarved = true; _hasStarted = false; + resetStats(); +} + +void InboundAudioStream::resetStats() { + _desiredJitterBufferFrames = 1; _consecutiveNotMixedCount = 0; _starveCount = 0; _silentFramesDropped = 0; @@ -85,7 +89,7 @@ int InboundAudioStream::parseData(const QByteArray& packet) { } } - if (_isStarved && _ringBuffer.samplesAvailable() >= _desiredJitterBufferFrames * _ringBuffer.getNumFrameSamples()) { + if (_isStarved && _ringBuffer.framesAvailable() >= _desiredJitterBufferFrames) { _isStarved = false; } @@ -144,13 +148,19 @@ bool InboundAudioStream::shouldPop(int numSamples, bool starveOnFail) { // we don't have enough samples, so set this stream to starve // if starveOnFail is true if (starveOnFail) { - setToStarved(); + starved(); _consecutiveNotMixedCount++; } return false; } void InboundAudioStream::setToStarved() { + if (!_isStarved && _ringBuffer.framesAvailable() < _desiredJitterBufferFrames) { + starved(); + } +} + +void InboundAudioStream::starved() { 
_isStarved = true; _consecutiveNotMixedCount = 0; _starveCount++; diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index f635013369..375fccae9e 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -48,7 +48,7 @@ public: void reset(); void flushBuffer() { _ringBuffer.reset(); } - void resetSequenceNumberStats() { _incomingSequenceNumberStats.reset(); } + void resetStats(); virtual int parseData(const QByteArray& packet); @@ -69,6 +69,7 @@ public: int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; } int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); } + int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); } int getFramesAvailable() const { return _ringBuffer.framesAvailable(); } double getFramesAvailableAverage() const { return _framesAvailableStats.getWindowAverage(); } @@ -82,6 +83,7 @@ public: private: bool shouldPop(int numSamples, bool starveOnFail); + void starved(); protected: // disallow copying of InboundAudioStream objects diff --git a/libraries/audio/src/InboundMixedAudioStream.cpp b/libraries/audio/src/InboundMixedAudioStream.cpp new file mode 100644 index 0000000000..a1a753a892 --- /dev/null +++ b/libraries/audio/src/InboundMixedAudioStream.cpp @@ -0,0 +1,17 @@ + +#include "InboundMixedAudioStream.h" + +InboundMixedAudioStream::InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers) + : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers) +{ +} + +int InboundMixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { + // mixed audio packets do not have any info between the seq num and the audio data. 
+ numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t); + return 0; +} + +int InboundMixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { + return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t)); +} diff --git a/libraries/audio/src/InboundMixedAudioStream.h b/libraries/audio/src/InboundMixedAudioStream.h new file mode 100644 index 0000000000..c23e6559e2 --- /dev/null +++ b/libraries/audio/src/InboundMixedAudioStream.h @@ -0,0 +1,11 @@ + +#include "InboundAudioStream.h" +#include "PacketHeaders.h" + +class InboundMixedAudioStream : public InboundAudioStream { +public: + InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers); +protected: + int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples); + int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples); +}; From 059007c99ccf7515f439e2b5f334b47dbaad59fe Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 09:43:08 -0700 Subject: [PATCH 11/62] cleaned up code (removed old code that was commented out) --- assignment-client/src/audio/AudioMixer.cpp | 15 -- .../src/audio/AudioMixerClientData.cpp | 81 ---------- interface/src/Audio.cpp | 140 +++++++++--------- interface/src/Audio.h | 3 + 4 files changed, 70 insertions(+), 169 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index e407607388..1ee0a80fad 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -499,14 +499,6 @@ void AudioMixer::run() { int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES; while (!_isFinished) { - - /*foreach (const SharedNodePointer& node, nodeList->getNodeHash()) { - if (node->getLinkedData()) { - ((AudioMixerClientData*) 
node->getLinkedData())->checkBuffersBeforeFrameSend(_sourceUnattenuatedZone, - _listenerUnattenuatedZone); - } - }*/ - const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f; const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f; @@ -599,13 +591,6 @@ void AudioMixer::run() { ++_sumListeners; } } - /* - // push forward the next output pointers for any audio buffers we used - foreach (const SharedNodePointer& node, nodeList->getNodeHash()) { - if (node->getLinkedData()) { - ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend(); - } - }*/ ++_numStatFrames; diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index f6d27c534e..9d5c1c6a74 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -98,87 +98,6 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { return 0; } -/*void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone) { - for (int i = 0; i < _ringBuffers.size(); i++) { - if (_ringBuffers[i]->shouldBeAddedToMix()) { - // this is a ring buffer that is ready to go - // set its flag so we know to push its buffer when all is said and done - _ringBuffers[i]->setWillBeAddedToMix(true); - - // calculate the average loudness for the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL - // that would be mixed in - _ringBuffers[i]->updateNextOutputTrailingLoudness(); - - if (checkSourceZone && checkSourceZone->contains(_ringBuffers[i]->getPosition())) { - _ringBuffers[i]->setListenerUnattenuatedZone(listenerZone); - } else { - _ringBuffers[i]->setListenerUnattenuatedZone(NULL); - } - } - } -} - -void AudioMixerClientData::pushBuffersAfterFrameSend() { - - QList::iterator i = _ringBuffers.begin(); - while (i != _ringBuffers.end()) { - // this was a used buffer, push the output pointer forwards - PositionalAudioRingBuffer* audioBuffer = *i; - 
- const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 100; - - if (audioBuffer->willBeAddedToMix()) { - audioBuffer->shiftReadPosition(audioBuffer->getSamplesPerFrame()); - audioBuffer->setWillBeAddedToMix(false); - } else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector - && audioBuffer->hasStarted() && audioBuffer->isStarved() - && audioBuffer->getConsecutiveNotMixedCount() > INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD) { - // this is an empty audio buffer that has starved, safe to delete - // also delete its sequence number stats - QUuid streamIdentifier = ((InjectedAudioRingBuffer*)audioBuffer)->getStreamIdentifier(); - _incomingInjectedAudioSequenceNumberStatsMap.remove(streamIdentifier); - delete audioBuffer; - i = _ringBuffers.erase(i); - continue; - } - i++; - } -}*/ - -/*AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const { - - AudioStreamStats streamStats; - - streamStats._streamType = ringBuffer->getType(); - if (streamStats._streamType == PositionalAudioRingBuffer::Injector) { - streamStats._streamIdentifier = ((InjectedAudioRingBuffer*)ringBuffer)->getStreamIdentifier(); - const SequenceNumberStats& sequenceNumberStats = _incomingInjectedAudioSequenceNumberStatsMap[streamStats._streamIdentifier]; - streamStats._packetStreamStats = sequenceNumberStats.getStats(); - streamStats._packetStreamWindowStats = sequenceNumberStats.getStatsForHistoryWindow(); - } else { - streamStats._packetStreamStats = _incomingAvatarAudioSequenceNumberStats.getStats(); - streamStats._packetStreamWindowStats = _incomingAvatarAudioSequenceNumberStats.getStatsForHistoryWindow(); - } - - const MovingMinMaxAvg& timeGapStats = ringBuffer->getInterframeTimeGapStatsForStatsPacket(); - streamStats._timeGapMin = timeGapStats.getMin(); - streamStats._timeGapMax = timeGapStats.getMax(); - streamStats._timeGapAverage = timeGapStats.getAverage(); - streamStats._timeGapWindowMin = timeGapStats.getWindowMin(); - 
streamStats._timeGapWindowMax = timeGapStats.getWindowMax(); - streamStats._timeGapWindowAverage = timeGapStats.getWindowAverage(); - - streamStats._ringBufferFramesAvailable = ringBuffer->framesAvailable(); - streamStats._ringBufferFramesAvailableAverage = ringBuffer->getFramesAvailableAverage(); - streamStats._ringBufferDesiredJitterBufferFrames = ringBuffer->getDesiredJitterBufferFrames(); - streamStats._ringBufferStarveCount = ringBuffer->getStarveCount(); - streamStats._ringBufferConsecutiveNotMixedCount = ringBuffer->getConsecutiveNotMixedCount(); - streamStats._ringBufferOverflowCount = ringBuffer->getOverflowCount(); - streamStats._ringBufferSilentFramesDropped = ringBuffer->getSilentFramesDropped(); - - return streamStats; -}*/ - void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) { char packet[MAX_PACKET_SIZE]; NodeList* nodeList = NodeList::getInstance(); diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 3a19a099d6..0f79c0c6a1 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -899,6 +899,11 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { // parse audio data _ringBuffer.parseData(audioByteArray); + pushAudioToOutput(); +} + + +void Audio::pushAudioToOutput() { if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) { // the audio output has no samples to play. 
set the downstream audio to starved so that it @@ -906,10 +911,8 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { _ringBuffer.setToStarved(); } - - - float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate()) - * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount()); + float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate()) + * (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount()); int numFramesToPush; if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) { @@ -925,90 +928,81 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { AudioRingBuffer::ConstIterator ringBufferNextOutput; if (numFramesToPush > 0 && _ringBuffer.popFrames(&ringBufferNextOutput, numFramesToPush, false)) { - /*int numSamplesNeededToStartPlayback = std::min(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2), - _ringBuffer.getSampleCapacity()); - - if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) { - // We are still waiting for enough samples to begin playback - // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback; - _consecutiveNotMixedCount++; - } else {*/ - int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio; - QByteArray outputBuffer; - outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t)); + QByteArray outputBuffer; + outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t)); - int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples]; - if (_processSpatialAudio) { - unsigned int sampleTime = _spatialAudioStart; - QByteArray buffer; - buffer.resize(numNetworkOutputSamples * sizeof(int16_t)); + int16_t* ringBufferSamples = new 
int16_t[numNetworkOutputSamples]; + if (_processSpatialAudio) { + unsigned int sampleTime = _spatialAudioStart; + QByteArray buffer; + buffer.resize(numNetworkOutputSamples * sizeof(int16_t)); - ringBufferNextOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); + ringBufferNextOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); - // Accumulate direct transmission of audio from sender to receiver - if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) { - emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat); - addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples); - } - - // Send audio off for spatial processing - emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat); - - // copy the samples we'll resample from the spatial audio ring buffer - this also - // pushes the read pointer of the spatial audio ring buffer forwards - _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); - - // Advance the start point for the next packet of audio to arrive - _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount(); - } else { - // copy the samples we'll resample from the ring buffer - this also - // pushes the read pointer of the ring buffer forwards - ringBufferNextOutput.readSamples(ringBufferSamples, numNetworkOutputSamples); + // Accumulate direct transmission of audio from sender to receiver + if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) { + emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat); + addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples); } - // copy the packet from the RB to the output - linearResampling(ringBufferSamples, - (int16_t*) outputBuffer.data(), - numNetworkOutputSamples, - numDeviceOutputSamples, - _desiredOutputFormat, _outputFormat); + // Send audio off for spatial processing + 
emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat); - if (_outputDevice) { - _outputDevice->write(outputBuffer); + // copy the samples we'll resample from the spatial audio ring buffer - this also + // pushes the read pointer of the spatial audio ring buffer forwards + _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); + + // Advance the start point for the next packet of audio to arrive + _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount(); + } else { + // copy the samples we'll resample from the ring buffer - this also + // pushes the read pointer of the ring buffer forwards + ringBufferNextOutput.readSamples(ringBufferSamples, numNetworkOutputSamples); + } + + // copy the packet from the RB to the output + linearResampling(ringBufferSamples, + (int16_t*)outputBuffer.data(), + numNetworkOutputSamples, + numDeviceOutputSamples, + _desiredOutputFormat, _outputFormat); + + if (_outputDevice) { + _outputDevice->write(outputBuffer); + } + + if (_scopeEnabled && !_scopeEnabledPause) { + unsigned int numAudioChannels = _desiredOutputFormat.channelCount(); + int16_t* samples = ringBufferSamples; + for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) { + + unsigned int audioChannel = 0; + addBufferToScope( + _scopeOutputLeft, + _scopeOutputOffset, + samples, audioChannel, numAudioChannels); + + audioChannel = 1; + addBufferToScope( + _scopeOutputRight, + _scopeOutputOffset, + samples, audioChannel, numAudioChannels); + + _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME; + _scopeOutputOffset %= _samplesPerScope; + samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels; } + } - if (_scopeEnabled && !_scopeEnabledPause) { - unsigned int numAudioChannels = _desiredOutputFormat.channelCount(); - int16_t* samples = ringBufferSamples; - for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= 
NETWORK_SAMPLES_PER_FRAME) { - - unsigned int audioChannel = 0; - addBufferToScope( - _scopeOutputLeft, - _scopeOutputOffset, - samples, audioChannel, numAudioChannels); - - audioChannel = 1; - addBufferToScope( - _scopeOutputRight, - _scopeOutputOffset, - samples, audioChannel, numAudioChannels); - - _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME; - _scopeOutputOffset %= _samplesPerScope; - samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels; - } - } - - delete[] ringBufferSamples; - //} + delete[] ringBufferSamples; } } + void Audio::processProceduralAudio(int16_t* monoInput, int numSamples) { // zero out the locally injected audio in preparation for audio procedural sounds diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 3029594aea..d2bdc748ea 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -215,6 +215,9 @@ private: // Process received audio void processReceivedAudio(const QByteArray& audioByteArray); + // Pushes frames from the output ringbuffer to the audio output device + void pushAudioToOutput(); + bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo); bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo); From 822ba4da48ac9069d7ca8870f804c32a95b9d822 Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 10:21:38 -0700 Subject: [PATCH 12/62] cleaned up code more, dancer.js audio not working --- assignment-client/src/audio/AudioMixer.cpp | 48 ++++++++++++++----- assignment-client/src/audio/AudioMixer.h | 3 +- .../src/audio/AudioMixerClientData.h | 3 -- libraries/audio/src/InboundAudioStream.cpp | 25 +++++----- 4 files changed, 49 insertions(+), 30 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index 1ee0a80fad..069ba7476c 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -94,14 +94,18 @@ const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f; 
const float ATTENUATION_EPSILON_DISTANCE = 0.1f; void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd, - AvatarAudioRingBuffer* listeningNodeBuffer) { - // if the frame to be mixed is silent, don't mix it - if (bufferToAdd->getNextOutputTrailingLoudness() == 0.0f) { + AvatarAudioRingBuffer* listeningNodeBuffer, + bool bufferToAddBelongsToListener) { + + // if the buffer to be added belongs to the listener and it should not be echoed or + // if the buffer frame to be added is too soft, pop a frame from the buffer without mixing it. + if ((bufferToAddBelongsToListener && !bufferToAdd->shouldLoopbackForNode()) + || bufferToAdd->getNextOutputTrailingLoudness() == 0.0f) { bufferToAdd->popFrames(1); return; } - // get pointer to frame to be mixed. If the stream cannot provide a frame (is starved), bail + // get pointer to the frame to be mixed. If the stream cannot provide a frame (is starved), bail AudioRingBuffer::ConstIterator nextOutputStart; if (!bufferToAdd->popFrames(&nextOutputStart, 1)) { return; @@ -217,16 +221,13 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf - if (!bufferToAdd->isStereo() && shouldAttenuate && false) { + if (!bufferToAdd->isStereo() && shouldAttenuate) { // this is a mono buffer, which means it gets full attenuation and spatialization // if the bearing relative angle to source is > 0 then the delayed channel is the right one int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0; int goodChannelOffset = delayedChannelOffset == 0 ? 
1 : 0; - //const int16_t* bufferStart = bufferToAdd->getBuffer(); - //int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity(); - int16_t correctBufferSample[2], delayBufferSample[2]; int delayedChannelIndex = 0; @@ -276,6 +277,31 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(nextOutputStart[s / stereoDivider] * attenuationCoefficient), MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); } + + /*for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) { + + int stereoDivider = bufferToAdd->isStereo() ? 1 : 2; + + if (!shouldAttenuate) { + attenuationCoefficient = 1.0f; + } + + _clientSamples[s] = glm::clamp(_clientSamples[s] + + (int)(nextOutputStart[(s / stereoDivider)] * attenuationCoefficient), + MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); + _clientSamples[s + 1] = glm::clamp(_clientSamples[s + 1] + + (int)(nextOutputStart[(s / stereoDivider) + (1 / stereoDivider)] + * attenuationCoefficient), + MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); + _clientSamples[s + 2] = glm::clamp(_clientSamples[s + 2] + + (int)(nextOutputStart[(s / stereoDivider) + (2 / stereoDivider)] + * attenuationCoefficient), + MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); + _clientSamples[s + 3] = glm::clamp(_clientSamples[s + 3] + + (int)(nextOutputStart[(s / stereoDivider) + (3 / stereoDivider)] + * attenuationCoefficient), + MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); + }*/ } } @@ -298,11 +324,7 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { for (i = otherNodeRingBuffers.begin(); i != end; i++) { PositionalAudioRingBuffer* otherNodeBuffer = i.value(); - if (*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode()) { - addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer); - } else { - otherNodeBuffer->popFrames(1); - } + addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer, *otherNode == *node); } } } diff --git a/assignment-client/src/audio/AudioMixer.h 
b/assignment-client/src/audio/AudioMixer.h index afab7d47dc..beb2539057 100644 --- a/assignment-client/src/audio/AudioMixer.h +++ b/assignment-client/src/audio/AudioMixer.h @@ -42,7 +42,8 @@ public slots: private: /// adds one buffer to the mix for a listening node void addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd, - AvatarAudioRingBuffer* listeningNodeBuffer); + AvatarAudioRingBuffer* listeningNodeBuffer, + bool bufferToAddBelongsToListener); /// prepares and sends a mix to one Node void prepareMixForListeningNode(Node* node); diff --git a/assignment-client/src/audio/AudioMixerClientData.h b/assignment-client/src/audio/AudioMixerClientData.h index 4baa7c2f3b..19592b1253 100644 --- a/assignment-client/src/audio/AudioMixerClientData.h +++ b/assignment-client/src/audio/AudioMixerClientData.h @@ -26,10 +26,7 @@ public: AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const; int parseData(const QByteArray& packet); - //void checkBuffersBeforeFrameSend(AABox* checkSourceZone = NULL, AABox* listenerZone = NULL); - //void pushBuffersAfterFrameSend(); - //AudioStreamStats getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const; QString getAudioStreamStatsString() const; void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode); diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index ea45b761b1..591dde772c 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -66,7 +66,6 @@ int InboundAudioStream::parseData(const QByteArray& packet) { // TODO: handle generalized silent packet here????? 
- // parse the info after the seq number and before the audio data.(the stream properties) int numAudioSamples; readBytes += parseStreamProperties(packetType, packet.mid(readBytes), numAudioSamples); @@ -75,18 +74,18 @@ int InboundAudioStream::parseData(const QByteArray& packet) { // For now, late packets are ignored. It may be good in the future to insert the late audio frame // into the ring buffer to fill in the missing frame if it hasn't been mixed yet. switch (arrivalInfo._status) { - case SequenceNumberStats::Early: { - int packetsDropped = arrivalInfo._seqDiffFromExpected; - writeSamplesForDroppedPackets(packetsDropped * numAudioSamples); - // fall through to OnTime case - } - case SequenceNumberStats::OnTime: { - readBytes += parseAudioData(packetType, packet.mid(readBytes), numAudioSamples); - break; - } - default: { - break; - } + case SequenceNumberStats::Early: { + int packetsDropped = arrivalInfo._seqDiffFromExpected; + writeSamplesForDroppedPackets(packetsDropped * numAudioSamples); + // fall through to OnTime case + } + case SequenceNumberStats::OnTime: { + readBytes += parseAudioData(packetType, packet.mid(readBytes), numAudioSamples); + break; + } + default: { + break; + } } if (_isStarved && _ringBuffer.framesAvailable() >= _desiredJitterBufferFrames) { From a9d26b3934f3e6e0e02ed352dc98c5298213a55b Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 12:58:10 -0700 Subject: [PATCH 13/62] fixed repeat-popping in audiomixer --- assignment-client/src/audio/AudioMixer.cpp | 121 +++++++----------- assignment-client/src/audio/AudioMixer.h | 3 +- .../src/audio/AudioMixerClientData.cpp | 7 + .../src/audio/AudioMixerClientData.h | 2 + interface/src/Audio.cpp | 8 +- libraries/audio/src/InboundAudioStream.cpp | 53 ++++---- libraries/audio/src/InboundAudioStream.h | 10 +- 7 files changed, 92 insertions(+), 112 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index 069ba7476c..5e28ca05e1 
100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -94,23 +94,7 @@ const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f; const float ATTENUATION_EPSILON_DISTANCE = 0.1f; void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd, - AvatarAudioRingBuffer* listeningNodeBuffer, - bool bufferToAddBelongsToListener) { - - // if the buffer to be added belongs to the listener and it should not be echoed or - // if the buffer frame to be added is too soft, pop a frame from the buffer without mixing it. - if ((bufferToAddBelongsToListener && !bufferToAdd->shouldLoopbackForNode()) - || bufferToAdd->getNextOutputTrailingLoudness() == 0.0f) { - bufferToAdd->popFrames(1); - return; - } - - // get pointer to the frame to be mixed. If the stream cannot provide a frame (is starved), bail - AudioRingBuffer::ConstIterator nextOutputStart; - if (!bufferToAdd->popFrames(&nextOutputStart, 1)) { - return; - } - + AvatarAudioRingBuffer* listeningNodeBuffer) { float bearingRelativeAngleToSource = 0.0f; float attenuationCoefficient = 1.0f; int numSamplesDelay = 0; @@ -219,7 +203,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf } } - + AudioRingBuffer::ConstIterator bufferPopOutput = bufferToAdd->getLastPopOutput(); if (!bufferToAdd->isStereo() && shouldAttenuate) { // this is a mono buffer, which means it gets full attenuation and spatialization @@ -236,8 +220,8 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) { // setup the int16_t variables for the two sample sets - correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient; - correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient; + correctBufferSample[0] = bufferPopOutput[s / 2] * attenuationCoefficient; + correctBufferSample[1] = bufferPopOutput[(s / 2) + 1] 
* attenuationCoefficient; delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset; @@ -254,15 +238,15 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput // to stick at the beginning float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio; - AudioRingBuffer::ConstIterator delayNextOutputStart = nextOutputStart - numSamplesDelay; + AudioRingBuffer::ConstIterator delayBufferPopOutput = bufferPopOutput - numSamplesDelay; - // TODO: delayNextOutputStart may be inside the last frame written if the ringbuffer is completely full + // TODO: delayBufferPopOutput may be inside the last frame written if the ringbuffer is completely full // maybe make AudioRingBuffer have 1 extra frame in its buffer for (int i = 0; i < numSamplesDelay; i++) { int parentIndex = i * 2; - _clientSamples[parentIndex + delayedChannelOffset] += *delayNextOutputStart * attenuationAndWeakChannelRatio; - ++delayNextOutputStart; + _clientSamples[parentIndex + delayedChannelOffset] += *delayBufferPopOutput * attenuationAndWeakChannelRatio; + ++delayBufferPopOutput; } } } else { @@ -274,34 +258,9 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf } for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) { - _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(nextOutputStart[s / stereoDivider] * attenuationCoefficient), + _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(bufferPopOutput[s / stereoDivider] * attenuationCoefficient), MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); } - - /*for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) { - - int stereoDivider = bufferToAdd->isStereo() ? 
1 : 2; - - if (!shouldAttenuate) { - attenuationCoefficient = 1.0f; - } - - _clientSamples[s] = glm::clamp(_clientSamples[s] - + (int)(nextOutputStart[(s / stereoDivider)] * attenuationCoefficient), - MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); - _clientSamples[s + 1] = glm::clamp(_clientSamples[s + 1] - + (int)(nextOutputStart[(s / stereoDivider) + (1 / stereoDivider)] - * attenuationCoefficient), - MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); - _clientSamples[s + 2] = glm::clamp(_clientSamples[s + 2] - + (int)(nextOutputStart[(s / stereoDivider) + (2 / stereoDivider)] - * attenuationCoefficient), - MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); - _clientSamples[s + 3] = glm::clamp(_clientSamples[s + 3] - + (int)(nextOutputStart[(s / stereoDivider) + (3 / stereoDivider)] - * attenuationCoefficient), - MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); - }*/ } } @@ -324,7 +283,12 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { for (i = otherNodeRingBuffers.begin(); i != end; i++) { PositionalAudioRingBuffer* otherNodeBuffer = i.value(); - addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer, *otherNode == *node); + if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode()) + && otherNodeBuffer->lastPopSucceeded() + && otherNodeBuffer->getNextOutputTrailingLoudness() > 0.0f) { + + addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer); + } } } } @@ -581,36 +545,43 @@ void AudioMixer::run() { } foreach (const SharedNodePointer& node, nodeList->getNodeHash()) { - if (node->getType() == NodeType::Agent && node->getActiveSocket() && node->getLinkedData() - && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) { - + if (node->getActiveSocket() && node->getLinkedData()) { + AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData(); - prepareMixForListeningNode(node.data()); - - // pack header - int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio); - char* dataAt = 
clientMixBuffer + numBytesPacketHeader; + // request a frame from each audio stream. a pointer to the popped data is stored as a member + // in InboundAudioStream. That's how the popped audio data will be read for mixing + nodeData->audioStreamsPopFrameForMixing(); - // pack sequence number - quint16 sequence = nodeData->getOutgoingSequenceNumber(); - memcpy(dataAt, &sequence, sizeof(quint16)); - dataAt += sizeof(quint16); + if (node->getType() == NodeType::Agent //&& node->getActiveSocket() && node->getLinkedData() + && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioRingBuffer()) { - // pack mixed audio samples - memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO); - dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO; + prepareMixForListeningNode(node.data()); - // send mixed audio packet - nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node); - nodeData->incrementOutgoingMixedAudioSequenceNumber(); - - // send an audio stream stats packet if it's time - if (sendAudioStreamStats) { - nodeData->sendAudioStreamStatsPackets(node); + // pack header + int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio); + char* dataAt = clientMixBuffer + numBytesPacketHeader; + + // pack sequence number + quint16 sequence = nodeData->getOutgoingSequenceNumber(); + memcpy(dataAt, &sequence, sizeof(quint16)); + dataAt += sizeof(quint16); + + // pack mixed audio samples + memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO); + dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO; + + // send mixed audio packet + nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node); + nodeData->incrementOutgoingMixedAudioSequenceNumber(); + + // send an audio stream stats packet if it's time + if (sendAudioStreamStats) { + nodeData->sendAudioStreamStatsPackets(node); + } + + ++_sumListeners; } - - ++_sumListeners; } } diff --git a/assignment-client/src/audio/AudioMixer.h 
b/assignment-client/src/audio/AudioMixer.h index beb2539057..afab7d47dc 100644 --- a/assignment-client/src/audio/AudioMixer.h +++ b/assignment-client/src/audio/AudioMixer.h @@ -42,8 +42,7 @@ public slots: private: /// adds one buffer to the mix for a listening node void addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd, - AvatarAudioRingBuffer* listeningNodeBuffer, - bool bufferToAddBelongsToListener); + AvatarAudioRingBuffer* listeningNodeBuffer); /// prepares and sends a mix to one Node void prepareMixForListeningNode(Node* node); diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index 9d5c1c6a74..c288a4f721 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -98,6 +98,13 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { return 0; } +void AudioMixerClientData::audioStreamsPopFrameForMixing() { + QHash::ConstIterator i, end = _ringBuffers.constEnd(); + for (i = _ringBuffers.constBegin(); i != end; i++) { + i.value()->popFrames(1); + } +} + void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) { char packet[MAX_PACKET_SIZE]; NodeList* nodeList = NodeList::getInstance(); diff --git a/assignment-client/src/audio/AudioMixerClientData.h b/assignment-client/src/audio/AudioMixerClientData.h index 19592b1253..92dddab7e4 100644 --- a/assignment-client/src/audio/AudioMixerClientData.h +++ b/assignment-client/src/audio/AudioMixerClientData.h @@ -27,6 +27,8 @@ public: int parseData(const QByteArray& packet); + void audioStreamsPopFrameForMixing(); + QString getAudioStreamStatsString() const; void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode); diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 0f79c0c6a1..808c076cb0 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -925,8 +925,7 @@ 
void Audio::pushAudioToOutput() { // if there is data in the ring buffer and room in the audio output, decide what to do - AudioRingBuffer::ConstIterator ringBufferNextOutput; - if (numFramesToPush > 0 && _ringBuffer.popFrames(&ringBufferNextOutput, numFramesToPush, false)) { + if (numFramesToPush > 0 && _ringBuffer.popFrames(numFramesToPush, false)) { int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio; @@ -934,6 +933,7 @@ void Audio::pushAudioToOutput() { QByteArray outputBuffer; outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t)); + AudioRingBuffer::ConstIterator ringBufferPopOutput = _ringBuffer.getLastPopOutput(); int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples]; if (_processSpatialAudio) { @@ -941,7 +941,7 @@ void Audio::pushAudioToOutput() { QByteArray buffer; buffer.resize(numNetworkOutputSamples * sizeof(int16_t)); - ringBufferNextOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); + ringBufferPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); // Accumulate direct transmission of audio from sender to receiver if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) { @@ -961,7 +961,7 @@ void Audio::pushAudioToOutput() { } else { // copy the samples we'll resample from the ring buffer - this also // pushes the read pointer of the ring buffer forwards - ringBufferNextOutput.readSamples(ringBufferSamples, numNetworkOutputSamples); + ringBufferPopOutput.readSamples(ringBufferSamples, numNetworkOutputSamples); } // copy the packet from the RB to the output diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 591dde772c..501f898654 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -14,6 +14,8 @@ InboundAudioStream::InboundAudioStream(int 
numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers) : _ringBuffer(numFrameSamples, false, numFramesCapacity), + _lastPopSucceeded(false), + _lastPopOutput(), _dynamicJitterBuffers(dynamicJitterBuffers), _desiredJitterBufferFrames(1), _isStarved(true), @@ -98,37 +100,30 @@ int InboundAudioStream::parseData(const QByteArray& packet) { } bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) { - bool popped; int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); - if (popped = shouldPop(numSamplesRequested, starveOnFail)) { - _ringBuffer.shiftReadPosition(numSamplesRequested); + if (_isStarved) { + // we're still refilling; don't pop + _consecutiveNotMixedCount++; + _lastPopSucceeded = false; + } else { + if (_ringBuffer.samplesAvailable() >= numSamplesRequested) { + // we have enough samples to pop, so we're good to mix + _lastPopOutput = _ringBuffer.nextOutput(); + _ringBuffer.shiftReadPosition(numSamplesRequested); + + _hasStarted = true; + _lastPopSucceeded = true; + } else { + // we don't have enough samples, so set this stream to starve + // if starveOnFail is true + if (starveOnFail) { + starved(); + _consecutiveNotMixedCount++; + } + _lastPopSucceeded = false; + } } - _framesAvailableStats.update(_ringBuffer.framesAvailable()); - - return popped; -} - -bool InboundAudioStream::popFrames(int16_t* dest, int numFrames, bool starveOnFail) { - bool popped; - int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); - if (popped = shouldPop(numSamplesRequested, starveOnFail)) { - _ringBuffer.readSamples(dest, numSamplesRequested); - } - _framesAvailableStats.update(_ringBuffer.framesAvailable()); - - return popped; -} - -bool InboundAudioStream::popFrames(AudioRingBuffer::ConstIterator* nextOutput, int numFrames, bool starveOnFail) { - bool popped; - int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples(); - if (popped = shouldPop(numSamplesRequested, starveOnFail)) { - *nextOutput = 
_ringBuffer.nextOutput(); - _ringBuffer.shiftReadPosition(numSamplesRequested); - } - _framesAvailableStats.update(_ringBuffer.framesAvailable()); - - return popped; + return _lastPopSucceeded; } bool InboundAudioStream::shouldPop(int numSamples, bool starveOnFail) { diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 375fccae9e..4eaf554ec7 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -53,9 +53,12 @@ public: virtual int parseData(const QByteArray& packet); + bool popFrames(int numFrames, bool starveOnFail = true); - bool popFrames(int16_t* dest, int numFrames, bool starveOnFail = true); - bool popFrames(AudioRingBuffer::ConstIterator* nextOutput, int numFrames, bool starveOnFail = true); + + bool lastPopSucceeded() const { return _lastPopSucceeded; }; + const AudioRingBuffer::ConstIterator& getLastPopOutput() const { return _lastPopOutput; } + void setToStarved(); @@ -105,6 +108,9 @@ protected: AudioRingBuffer _ringBuffer; + bool _lastPopSucceeded; + AudioRingBuffer::ConstIterator _lastPopOutput; + bool _dynamicJitterBuffers; int _desiredJitterBufferFrames; From f7e043f52d90e79742a3525d9fd2c581429aeb23 Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 14:52:39 -0700 Subject: [PATCH 14/62] cleaned up code --- .../src/audio/AvatarAudioRingBuffer.cpp | 4 ++-- interface/src/Audio.cpp | 2 -- libraries/audio/src/AudioRingBuffer.cpp | 4 ++-- libraries/audio/src/AudioRingBuffer.h | 5 ---- libraries/audio/src/InboundAudioStream.cpp | 24 +------------------ libraries/audio/src/InboundAudioStream.h | 2 -- .../audio/src/InjectedAudioRingBuffer.cpp | 8 +++---- 7 files changed, 9 insertions(+), 40 deletions(-) diff --git a/assignment-client/src/audio/AvatarAudioRingBuffer.cpp b/assignment-client/src/audio/AvatarAudioRingBuffer.cpp index 94a95ef177..588d198023 100644 --- a/assignment-client/src/audio/AvatarAudioRingBuffer.cpp +++ 
b/assignment-client/src/audio/AvatarAudioRingBuffer.cpp @@ -14,8 +14,8 @@ #include "AvatarAudioRingBuffer.h" AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBuffer) : -PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo, dynamicJitterBuffer) { - + PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo, dynamicJitterBuffer) +{ } int AvatarAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 808c076cb0..4344f14655 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -902,7 +902,6 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { pushAudioToOutput(); } - void Audio::pushAudioToOutput() { if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) { @@ -1002,7 +1001,6 @@ void Audio::pushAudioToOutput() { } } - void Audio::processProceduralAudio(int16_t* monoInput, int numSamples) { // zero out the locally injected audio in preparation for audio procedural sounds diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index 696f130523..e63c105cd2 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ b/libraries/audio/src/AudioRingBuffer.cpp @@ -25,8 +25,8 @@ AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode, int _sampleCapacity(numFrameSamples * numFramesCapacity), _isFull(false), _numFrameSamples(numFrameSamples), - _overflowCount(0), - _randomAccessMode(randomAccessMode) + _randomAccessMode(randomAccessMode), + _overflowCount(0) { if (numFrameSamples) { _buffer = new int16_t[_sampleCapacity]; diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index cd968bb86e..a4fa906e97 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -94,10 +94,6 @@ protected: int _overflowCount; /// how 
many times has the ring buffer has overwritten old data - //bool _isStarved; - //bool _hasStarted; - - public: class ConstIterator { //public std::iterator < std::forward_iterator_tag, int16_t > { public: @@ -183,7 +179,6 @@ public: int16_t* _at; }; - ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); } }; diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 501f898654..2d889845b2 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -118,36 +118,14 @@ bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) { // if starveOnFail is true if (starveOnFail) { starved(); - _consecutiveNotMixedCount++; } + _consecutiveNotMixedCount++; _lastPopSucceeded = false; } } return _lastPopSucceeded; } -bool InboundAudioStream::shouldPop(int numSamples, bool starveOnFail) { - if (_isStarved) { - // we're still refilling; don't mix - _consecutiveNotMixedCount++; - return false; - } - - if (_ringBuffer.samplesAvailable() >= numSamples) { - // we have enough samples to pop, so we're good to mix - _hasStarted = true; - return true; - } - - // we don't have enough samples, so set this stream to starve - // if starveOnFail is true - if (starveOnFail) { - starved(); - _consecutiveNotMixedCount++; - } - return false; -} - void InboundAudioStream::setToStarved() { if (!_isStarved && _ringBuffer.framesAvailable() < _desiredJitterBufferFrames) { starved(); diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 4eaf554ec7..c052eef2bb 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -85,7 +85,6 @@ public: int getOverflowCount() const { return _ringBuffer.getOverflowCount(); } private: - bool shouldPop(int numSamples, bool starveOnFail); void starved(); protected: @@ -117,7 +116,6 @@ protected: bool _isStarved; bool _hasStarted; - 
// stats int _consecutiveNotMixedCount; diff --git a/libraries/audio/src/InjectedAudioRingBuffer.cpp b/libraries/audio/src/InjectedAudioRingBuffer.cpp index e074d51bd9..e35e428671 100644 --- a/libraries/audio/src/InjectedAudioRingBuffer.cpp +++ b/libraries/audio/src/InjectedAudioRingBuffer.cpp @@ -20,10 +20,10 @@ #include "InjectedAudioRingBuffer.h" InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier, bool dynamicJitterBuffer) : -PositionalAudioRingBuffer(PositionalAudioRingBuffer::Injector, false, dynamicJitterBuffer), -_streamIdentifier(streamIdentifier), -_radius(0.0f), -_attenuationRatio(0) + PositionalAudioRingBuffer(PositionalAudioRingBuffer::Injector, false, dynamicJitterBuffer), + _streamIdentifier(streamIdentifier), + _radius(0.0f), + _attenuationRatio(0) { } From fd9d7baa06df8cb9a7baa6be55e7eef9fefa3a7d Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 15:14:10 -0700 Subject: [PATCH 15/62] updated Agent with audiostream; added initial packets discard to audiostream --- assignment-client/src/Agent.cpp | 13 ------------- assignment-client/src/Agent.h | 1 - interface/src/Audio.cpp | 3 --- libraries/audio/src/InboundAudioStream.cpp | 14 +++++++++----- libraries/audio/src/InboundAudioStream.h | 2 +- 5 files changed, 10 insertions(+), 23 deletions(-) diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index 0449e0d682..0c23cdf12f 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -148,21 +148,8 @@ void Agent::readPendingDatagrams() { } else if (datagramPacketType == PacketTypeMixedAudio) { - QUuid senderUUID = uuidFromPacketHeader(receivedPacket); - - // parse sequence number for this packet - int numBytesPacketHeader = numBytesForPacketHeader(receivedPacket); - const char* sequenceAt = receivedPacket.constData() + numBytesPacketHeader; - quint16 sequence = *(reinterpret_cast(sequenceAt)); - _incomingMixedAudioSequenceNumberStats.sequenceNumberReceived(sequence, 
senderUUID); - - // parse the data and grab the average loudness _receivedAudioBuffer.parseData(receivedPacket); - // pretend like we have read the samples from this buffer so it does not fill - static int16_t garbageAudioBuffer[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO]; - _receivedAudioBuffer.readSamples(garbageAudioBuffer, NETWORK_BUFFER_LENGTH_SAMPLES_STEREO); - // let this continue through to the NodeList so it updates last heard timestamp // for the sending audio mixer NodeList::getInstance()->processNodeData(senderSockAddr, receivedPacket); diff --git a/assignment-client/src/Agent.h b/assignment-client/src/Agent.h index ec8f7c88cb..2398eda0f2 100644 --- a/assignment-client/src/Agent.h +++ b/assignment-client/src/Agent.h @@ -71,7 +71,6 @@ private: ModelTreeHeadlessViewer _modelViewer; MixedAudioRingBuffer _receivedAudioBuffer; - SequenceNumberStats _incomingMixedAudioSequenceNumberStats; AvatarHashMap _avatarHashMap; }; diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 4344f14655..3896c42cc3 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -716,9 +716,6 @@ void Audio::handleAudioInput() { } void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) { - const int NUM_INITIAL_PACKETS_DISCARD = 3; - const int STANDARD_DEVIATION_SAMPLE_COUNT = 500; - if (_audioOutput) { // Audio output must exist and be correctly set up if we're going to process received audio processReceivedAudio(audioByteArray); diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 2d889845b2..105507992c 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -51,8 +51,6 @@ void InboundAudioStream::resetStats() { } int InboundAudioStream::parseData(const QByteArray& packet) { - frameReceivedUpdateTimingStats(); - PacketType packetType = packetTypeForPacket(packet); QUuid senderUUID = uuidFromPacketHeader(packet); @@ -64,7 +62,7 @@ int 
InboundAudioStream::parseData(const QByteArray& packet) { // parse sequence number and track it quint16 sequence = *(reinterpret_cast(sequenceAt)); readBytes += sizeof(quint16); - SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID); + SequenceNumberStats::ArrivalInfo arrivalInfo = frameReceivedUpdateNetworkStats(sequence, senderUUID); // TODO: handle generalized silent packet here????? @@ -150,10 +148,14 @@ int InboundAudioStream::getCalculatedDesiredJitterBufferFrames() const { } -void InboundAudioStream::frameReceivedUpdateTimingStats() { +SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID) { + const int NUM_INITIAL_PACKETS_DISCARD = 3; + + SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequenceNumber, senderUUID); + // update the two time gap stats we're keeping quint64 now = usecTimestampNow(); - if (_lastFrameReceivedTime != 0) { + if (_incomingSequenceNumberStats.getNumReceived() >= NUM_INITIAL_PACKETS_DISCARD) { quint64 gap = now - _lastFrameReceivedTime; _interframeTimeGapStatsForJitterCalc.update(gap); _interframeTimeGapStatsForStatsPacket.update(gap); @@ -174,6 +176,8 @@ void InboundAudioStream::frameReceivedUpdateTimingStats() { } _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag(); } + + return arrivalInfo; } int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) { diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index c052eef2bb..8460a8a92e 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -101,7 +101,7 @@ protected: int writeDroppableSilentSamples(int numSilentSamples); int writeSamplesForDroppedPackets(int numSamples); - void frameReceivedUpdateTimingStats(); + SequenceNumberStats::ArrivalInfo 
frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID); protected: From e17556384d1f3afb59f01921a46ab9f23a37a27d Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 15:51:34 -0700 Subject: [PATCH 16/62] added code to clear agent audiostream frames to prevent buildup --- assignment-client/src/Agent.cpp | 6 +++++- assignment-client/src/Agent.h | 8 +++++--- libraries/audio/src/AudioRingBuffer.cpp | 10 +++++++--- libraries/audio/src/AudioRingBuffer.h | 2 ++ libraries/audio/src/InboundAudioStream.cpp | 7 +++++++ libraries/audio/src/InboundAudioStream.h | 3 +-- libraries/audio/src/InboundMixedAudioStream.h | 3 +++ 7 files changed, 30 insertions(+), 9 deletions(-) diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index 0c23cdf12f..c82f35ff7f 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -36,7 +36,7 @@ Agent::Agent(const QByteArray& packet) : _voxelEditSender(), _particleEditSender(), _modelEditSender(), - _receivedAudioBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO), + _receivedAudioBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false), _avatarHashMap() { // be the parent of the script engine so it gets moved when we do @@ -149,6 +149,10 @@ void Agent::readPendingDatagrams() { } else if (datagramPacketType == PacketTypeMixedAudio) { _receivedAudioBuffer.parseData(receivedPacket); + + _lastReceivedAudioLoudness = _receivedAudioBuffer.getNextOutputFrameLoudness(); + + _receivedAudioBuffer.clearBuffer(); // let this continue through to the NodeList so it updates last heard timestamp // for the sending audio mixer diff --git a/assignment-client/src/Agent.h b/assignment-client/src/Agent.h index 2398eda0f2..b713062840 100644 --- a/assignment-client/src/Agent.h +++ b/assignment-client/src/Agent.h @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -31,6 +30,8 @@ #include #include +#include "InboundMixedAudioStream.h" + class Agent : public 
ThreadedAssignment { Q_OBJECT @@ -51,7 +52,7 @@ public: void setIsListeningToAudioStream(bool isListeningToAudioStream) { _scriptEngine.setIsListeningToAudioStream(isListeningToAudioStream); } - float getLastReceivedAudioLoudness() const { return _receivedAudioBuffer.getLastReadFrameAverageLoudness(); } + float getLastReceivedAudioLoudness() const { return _lastReceivedAudioLoudness; } virtual void aboutToFinish(); @@ -70,7 +71,8 @@ private: VoxelTreeHeadlessViewer _voxelViewer; ModelTreeHeadlessViewer _modelViewer; - MixedAudioRingBuffer _receivedAudioBuffer; + InboundMixedAudioStream _receivedAudioBuffer; + float _lastReceivedAudioLoudness; AvatarHashMap _avatarHashMap; }; diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index e63c105cd2..7ad103642f 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ b/libraries/audio/src/AudioRingBuffer.cpp @@ -47,10 +47,8 @@ AudioRingBuffer::~AudioRingBuffer() { } void AudioRingBuffer::reset() { + clear(); _overflowCount = 0; - _isFull = false; - _endOfLastWrite = _buffer; - _nextOutput = _buffer; } void AudioRingBuffer::resizeForFrameSize(int numFrameSamples) { @@ -64,6 +62,12 @@ void AudioRingBuffer::resizeForFrameSize(int numFrameSamples) { reset(); } +void AudioRingBuffer::clear() { + _isFull = false; + _endOfLastWrite = _buffer; + _nextOutput = _buffer; +} + int AudioRingBuffer::parseData(const QByteArray& packet) { // skip packet header and sequence number int numBytesBeforeAudioData = numBytesForPacketHeader(packet) + sizeof(quint16); diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index a4fa906e97..b788f2aa67 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -46,6 +46,8 @@ public: void reset(); void resizeForFrameSize(int numFrameSamples); + void clear(); + int getSampleCapacity() const { return _sampleCapacity; } int getFrameCapacity() const { return _frameCapacity; } diff --git 
a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 105507992c..33a7c9b093 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -33,6 +33,8 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit void InboundAudioStream::reset() { _ringBuffer.reset(); + _lastPopSucceeded = false; + _lastPopOutput = AudioRingBuffer::ConstIterator(); _isStarved = true; _hasStarted = false; resetStats(); @@ -50,6 +52,11 @@ void InboundAudioStream::resetStats() { _framesAvailableStats.reset(); } +void InboundAudioStream::clearBuffer() { + _ringBuffer.clear(); + _framesAvailableStats.reset(); +} + int InboundAudioStream::parseData(const QByteArray& packet) { PacketType packetType = packetTypeForPacket(packet); QUuid senderUUID = uuidFromPacketHeader(packet); diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 8460a8a92e..1b9f028d6f 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -47,9 +47,8 @@ public: InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers); void reset(); - void flushBuffer() { _ringBuffer.reset(); } void resetStats(); - + void clearBuffer(); virtual int parseData(const QByteArray& packet); diff --git a/libraries/audio/src/InboundMixedAudioStream.h b/libraries/audio/src/InboundMixedAudioStream.h index c23e6559e2..e35b0198f8 100644 --- a/libraries/audio/src/InboundMixedAudioStream.h +++ b/libraries/audio/src/InboundMixedAudioStream.h @@ -5,6 +5,9 @@ class InboundMixedAudioStream : public InboundAudioStream { public: InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers); + + float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); } + protected: int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& 
numAudioSamples); int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples); From 78031622dbdce0c9a38a7768b5fb4b513667d64f Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 16:06:56 -0700 Subject: [PATCH 17/62] removed MixedAudioRingBuffer class files --- libraries/audio/src/MixedAudioRingBuffer.cpp | 52 -------------------- libraries/audio/src/MixedAudioRingBuffer.h | 29 ----------- 2 files changed, 81 deletions(-) delete mode 100644 libraries/audio/src/MixedAudioRingBuffer.cpp delete mode 100644 libraries/audio/src/MixedAudioRingBuffer.h diff --git a/libraries/audio/src/MixedAudioRingBuffer.cpp b/libraries/audio/src/MixedAudioRingBuffer.cpp deleted file mode 100644 index c975d7b68e..0000000000 --- a/libraries/audio/src/MixedAudioRingBuffer.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// -// MixedAudioRingBuffer.cpp -// libraries/audio/src -// -// Created by Stephen Birarda on 2014. -// Copyright 2014 High Fidelity, Inc. -// -// Distributed under the Apache License, Version 2.0. 
-// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html -// - -#include "MixedAudioRingBuffer.h" - -MixedAudioRingBuffer::MixedAudioRingBuffer(int numFrameSamples) : - AudioRingBuffer(numFrameSamples), - _lastReadFrameAverageLoudness(0.0f) -{ - -} - -qint64 MixedAudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) { - // calculate the average loudness for the frame about to go out - - // read from _nextOutput either _numFrameSamples or to the end of the buffer - int samplesFromNextOutput = _buffer + _sampleCapacity - _nextOutput; - if (samplesFromNextOutput > _numFrameSamples) { - samplesFromNextOutput = _numFrameSamples; - } - - float averageLoudness = 0.0f; - - for (int s = 0; s < samplesFromNextOutput; s++) { - averageLoudness += fabsf(_nextOutput[s]); - } - - // read samples from the beginning of the buffer, if any - int samplesFromBeginning = _numFrameSamples - samplesFromNextOutput; - - if (samplesFromBeginning > 0) { - for (int b = 0; b < samplesFromBeginning; b++) { - averageLoudness += fabsf(_buffer[b]); - } - } - - // divide by the number of samples and the MAX_SAMPLE_VALUE to get a float from 0 - 1 - averageLoudness /= (float) _numFrameSamples; - averageLoudness /= (float) MAX_SAMPLE_VALUE; - - _lastReadFrameAverageLoudness = averageLoudness; - - return AudioRingBuffer::readSamples(destination, maxSamples); -} diff --git a/libraries/audio/src/MixedAudioRingBuffer.h b/libraries/audio/src/MixedAudioRingBuffer.h deleted file mode 100644 index 25574a3ea6..0000000000 --- a/libraries/audio/src/MixedAudioRingBuffer.h +++ /dev/null @@ -1,29 +0,0 @@ -// -// MixedAudioRingBuffer.h -// libraries/audio/src -// -// Created by Stephen Birarda on 2014. -// Copyright 2014 High Fidelity, Inc. -// -// Distributed under the Apache License, Version 2.0. 
-// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html -// - -#ifndef hifi_MixedAudioRingBuffer_h -#define hifi_MixedAudioRingBuffer_h - -#include "AudioRingBuffer.h" - -class MixedAudioRingBuffer : public AudioRingBuffer { - Q_OBJECT -public: - MixedAudioRingBuffer(int numFrameSamples); - - float getLastReadFrameAverageLoudness() const { return _lastReadFrameAverageLoudness; } - - qint64 readSamples(int16_t* destination, qint64 maxSamples); -private: - float _lastReadFrameAverageLoudness; -}; - -#endif // hifi_MixedAudioRingBuffer_h From e6f913edb12fc3ad2935410befbdbc4a78e958be Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 16:32:08 -0700 Subject: [PATCH 18/62] dead injected streams are now periodically removed in AudioMixer --- assignment-client/src/audio/AudioMixer.cpp | 2 +- .../src/audio/AudioMixerClientData.cpp | 28 +++++++++++++++++++ .../src/audio/AudioMixerClientData.h | 2 ++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index 5e28ca05e1..f6b3e5118a 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -553,7 +553,7 @@ void AudioMixer::run() { // in InboundAudioStream. 
That's how the popped audio data will be read for mixing nodeData->audioStreamsPopFrameForMixing(); - if (node->getType() == NodeType::Agent //&& node->getActiveSocket() && node->getLinkedData() + if (node->getType() == NodeType::Agent && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioRingBuffer()) { prepareMixForListeningNode(node.data()); diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index c288a4f721..246ff5089e 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -105,7 +105,35 @@ void AudioMixerClientData::audioStreamsPopFrameForMixing() { } } +void AudioMixerClientData::removeDeadInjectedStreams() { + + const int INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD = 100; + + // we have this second threshold in case the injected audio is so short that the ringbuffer + // never even reaches its desired size, which means it will never start. + const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000; + + QHash::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end(); + while (i != end) { + PositionalAudioRingBuffer* audioStream = i.value(); + if (audioStream->getType() == PositionalAudioRingBuffer::Injector && audioStream->isStarved()) { + int notMixedThreshold = audioStream->hasStarted() ? INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD + : INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD; + if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) { + delete audioStream; + i = _ringBuffers.erase(i); + continue; + } + } + ++i; + } +} + void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) { + + // since audio stream stats packets are sent periodically, this is a good place to remove our dead injected streams. 
+ removeDeadInjectedStreams(); + char packet[MAX_PACKET_SIZE]; NodeList* nodeList = NodeList::getInstance(); diff --git a/assignment-client/src/audio/AudioMixerClientData.h b/assignment-client/src/audio/AudioMixerClientData.h index 92dddab7e4..b4ad063c02 100644 --- a/assignment-client/src/audio/AudioMixerClientData.h +++ b/assignment-client/src/audio/AudioMixerClientData.h @@ -29,6 +29,8 @@ public: void audioStreamsPopFrameForMixing(); + void removeDeadInjectedStreams(); + QString getAudioStreamStatsString() const; void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode); From fa500dc7fd0583a7e4c505d4755bd4dd800bf8c6 Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 17:38:36 -0700 Subject: [PATCH 19/62] removed unused var --- tests/networking/src/SequenceNumberStatsTests.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/networking/src/SequenceNumberStatsTests.cpp b/tests/networking/src/SequenceNumberStatsTests.cpp index de487267e0..6f22b3e7d7 100644 --- a/tests/networking/src/SequenceNumberStatsTests.cpp +++ b/tests/networking/src/SequenceNumberStatsTests.cpp @@ -254,8 +254,7 @@ void SequenceNumberStatsTests::pruneTest() { numSent++; numEarly++; numLost += 10; - - const QSet& missingSet = stats.getMissingSet(); + assert(missingSet.size() <= 1000); for (int i = 0; i < 10; i++) { From cf649c8365ae298ff28e832c450ff81cdcfe8312 Mon Sep 17 00:00:00 2001 From: wangyix Date: Fri, 25 Jul 2014 18:20:16 -0700 Subject: [PATCH 20/62] Revert "removed unused var" This reverts commit fa500dc7fd0583a7e4c505d4755bd4dd800bf8c6. 
--- tests/networking/src/SequenceNumberStatsTests.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/networking/src/SequenceNumberStatsTests.cpp b/tests/networking/src/SequenceNumberStatsTests.cpp index 6f22b3e7d7..de487267e0 100644 --- a/tests/networking/src/SequenceNumberStatsTests.cpp +++ b/tests/networking/src/SequenceNumberStatsTests.cpp @@ -254,7 +254,8 @@ void SequenceNumberStatsTests::pruneTest() { numSent++; numEarly++; numLost += 10; - + + const QSet& missingSet = stats.getMissingSet(); assert(missingSet.size() <= 1000); for (int i = 0; i < 10; i++) { From 438606d567eccdf6dabedaa924fa4d22c35a37da Mon Sep 17 00:00:00 2001 From: wangyix Date: Sun, 27 Jul 2014 11:23:47 -0700 Subject: [PATCH 21/62] _consecutiveNotMixedCount++ moved back into if(starveOnFail) --- libraries/audio/src/InboundAudioStream.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 33a7c9b093..f190430b61 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -123,8 +123,8 @@ bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) { // if starveOnFail is true if (starveOnFail) { starved(); + _consecutiveNotMixedCount++; } - _consecutiveNotMixedCount++; _lastPopSucceeded = false; } } From c129db16cbe4b555306aa85907fd97ccebb780f4 Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 09:40:44 -0700 Subject: [PATCH 22/62] formatting fix, AudioRingBuffer no longer NodeData, --- assignment-client/src/audio/AudioMixerClientData.cpp | 9 ++++++--- libraries/audio/src/AudioRingBuffer.cpp | 7 ------- libraries/audio/src/AudioRingBuffer.h | 5 +---- libraries/audio/src/InboundAudioStream.h | 2 +- 4 files changed, 8 insertions(+), 15 deletions(-) diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index 246ff5089e..220246b078 
100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -27,7 +27,8 @@ AudioMixerClientData::AudioMixerClientData() : } AudioMixerClientData::~AudioMixerClientData() { - QHash::ConstIterator i, end = _ringBuffers.constEnd(); + QHash::ConstIterator i; + QHash::ConstIterator end = _ringBuffers.constEnd(); for (i = _ringBuffers.constBegin(); i != end; i++) { // delete this attached InboundAudioStream delete i.value(); @@ -99,7 +100,8 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { } void AudioMixerClientData::audioStreamsPopFrameForMixing() { - QHash::ConstIterator i, end = _ringBuffers.constEnd(); + QHash::ConstIterator i; + QHash::ConstIterator end = _ringBuffers.constEnd(); for (i = _ringBuffers.constBegin(); i != end; i++) { i.value()->popFrames(1); } @@ -224,7 +226,8 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { result = "mic unknown"; } - QHash::ConstIterator i, end = _ringBuffers.constEnd(); + QHash::ConstIterator i; + QHash::ConstIterator end = _ringBuffers.constEnd(); for (i = _ringBuffers.constBegin(); i != end; i++) { if (i.value()->getType() == PositionalAudioRingBuffer::Injector) { AudioStreamStats streamStats = i.value()->getAudioStreamStats(); diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index 7ad103642f..8dbc90883b 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ b/libraries/audio/src/AudioRingBuffer.cpp @@ -20,7 +20,6 @@ AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode, int numFramesCapacity) : - NodeData(), _frameCapacity(numFramesCapacity), _sampleCapacity(numFrameSamples * numFramesCapacity), _isFull(false), @@ -68,12 +67,6 @@ void AudioRingBuffer::clear() { _nextOutput = _buffer; } -int AudioRingBuffer::parseData(const QByteArray& packet) { - // skip packet header and sequence number - int numBytesBeforeAudioData = numBytesForPacketHeader(packet) + 
sizeof(quint16); - return writeData(packet.data() + numBytesBeforeAudioData, packet.size() - numBytesBeforeAudioData); -} - int AudioRingBuffer::readSamples(int16_t* destination, int maxSamples) { return readData((char*) destination, maxSamples * sizeof(int16_t)); } diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index b788f2aa67..824b197c93 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -37,8 +37,7 @@ const int MIN_SAMPLE_VALUE = std::numeric_limits::min(); const int DEFAULT_RING_BUFFER_FRAME_CAPACITY = 10; -class AudioRingBuffer : public NodeData { - Q_OBJECT +class AudioRingBuffer { public: AudioRingBuffer(int numFrameSamples, bool randomAccessMode = false, int numFramesCapacity = DEFAULT_RING_BUFFER_FRAME_CAPACITY); ~AudioRingBuffer(); @@ -51,8 +50,6 @@ public: int getSampleCapacity() const { return _sampleCapacity; } int getFrameCapacity() const { return _frameCapacity; } - int parseData(const QByteArray& packet); - // assume callers using this will never wrap around the end const int16_t* getNextOutput() const { return _nextOutput; } const int16_t* getBuffer() const { return _buffer; } diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 1b9f028d6f..4624a9fd38 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -32,7 +32,7 @@ const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30; // the stats for calculating the average frames available will recalculate every ~1 second // and will include data for the past ~2 seconds const int FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; -const int FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS = 2; +const int FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS = 10; // the internal history buffer of the incoming seq stats will cover 30s to calculate // packet loss % over last 30s From 
45b4777e6056c9893fc66349128dd02abebfcda8 Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 09:47:40 -0700 Subject: [PATCH 23/62] cleaned up QHash iterator code again --- assignment-client/src/audio/AudioMixer.cpp | 4 ++-- assignment-client/src/audio/AudioMixerClientData.cpp | 9 +++------ 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index f6b3e5118a..ee823e4310 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -279,8 +279,8 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { // enumerate the ARBs attached to the otherNode and add all that should be added to mix const QHash& otherNodeRingBuffers = otherNodeClientData->getRingBuffers(); - QHash::ConstIterator i, end = otherNodeRingBuffers.constEnd(); - for (i = otherNodeRingBuffers.begin(); i != end; i++) { + QHash::ConstIterator i; + for (i = otherNodeRingBuffers.begin(); i != otherNodeRingBuffers.constEnd(); i++) { PositionalAudioRingBuffer* otherNodeBuffer = i.value(); if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode()) diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index 220246b078..d6ff7c6f34 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -28,8 +28,7 @@ AudioMixerClientData::AudioMixerClientData() : AudioMixerClientData::~AudioMixerClientData() { QHash::ConstIterator i; - QHash::ConstIterator end = _ringBuffers.constEnd(); - for (i = _ringBuffers.constBegin(); i != end; i++) { + for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) { // delete this attached InboundAudioStream delete i.value(); } @@ -101,8 +100,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { void AudioMixerClientData::audioStreamsPopFrameForMixing() { 
QHash::ConstIterator i; - QHash::ConstIterator end = _ringBuffers.constEnd(); - for (i = _ringBuffers.constBegin(); i != end; i++) { + for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) { i.value()->popFrames(1); } } @@ -227,8 +225,7 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { } QHash::ConstIterator i; - QHash::ConstIterator end = _ringBuffers.constEnd(); - for (i = _ringBuffers.constBegin(); i != end; i++) { + for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd; i++) { if (i.value()->getType() == PositionalAudioRingBuffer::Injector) { AudioStreamStats streamStats = i.value()->getAudioStreamStats(); result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) From 9bbd055404535b40a7f4453da4e300b0c363b7fb Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 11:41:08 -0700 Subject: [PATCH 24/62] added stdev method of jitter calc to InboundAudioStream --- .../src/audio/AudioMixerClientData.cpp | 6 +- interface/src/Audio.cpp | 4 +- libraries/audio/src/InboundAudioStream.cpp | 64 +++++++++++-------- libraries/audio/src/InboundAudioStream.h | 20 +++++- .../audio/src/InboundMixedAudioStream.cpp | 4 +- libraries/audio/src/InboundMixedAudioStream.h | 2 +- libraries/shared/src/StdDev.cpp | 4 +- libraries/shared/src/StdDev.h | 4 +- 8 files changed, 66 insertions(+), 42 deletions(-) diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index d6ff7c6f34..0745536983 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -205,7 +205,7 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { if (avatarRingBuffer) { AudioStreamStats streamStats = avatarRingBuffer->getAudioStreamStats(); result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) - + " desired_calc:" + 
QString::number(avatarRingBuffer->getCalculatedDesiredJitterBufferFrames()) + + " desired_calc:" + QString::number(avatarRingBuffer->getCalculatedJitterBufferFrames()) + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage) + " available:" + QString::number(streamStats._ringBufferFramesAvailable) + " starves:" + QString::number(streamStats._ringBufferStarveCount) @@ -225,11 +225,11 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { } QHash::ConstIterator i; - for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd; i++) { + for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) { if (i.value()->getType() == PositionalAudioRingBuffer::Injector) { AudioStreamStats streamStats = i.value()->getAudioStreamStats(); result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) - + " desired_calc:" + QString::number(i.value()->getCalculatedDesiredJitterBufferFrames()) + + " desired_calc:" + QString::number(i.value()->getCalculatedJitterBufferFrames()) + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage) + " available:" + QString::number(streamStats._ringBufferFramesAvailable) + " starves:" + QString::number(streamStats._ringBufferStarveCount) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 3896c42cc3..0e024ccc58 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -76,9 +76,9 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) : // this delay will slowly add up and the longer someone runs, they more delayed their audio will be. _inputRingBuffer(0), #ifdef _WIN32 - _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true), + _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, true), #else - _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!! 
+ _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!! #endif _isStereoInput(false), _averagedLatency(0.0), diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index f190430b61..f260fd83d1 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -12,11 +12,14 @@ #include "InboundAudioStream.h" #include "PacketHeaders.h" -InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers) : +InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc) : _ringBuffer(numFrameSamples, false, numFramesCapacity), _lastPopSucceeded(false), _lastPopOutput(), _dynamicJitterBuffers(dynamicJitterBuffers), + _useStDevForJitterCalc(useStDevForJitterCalc), + _calculatedJitterBufferFramesUsingMaxGap(0), + _calculatedJitterBufferFramesUsingStDev(0), _desiredJitterBufferFrames(1), _isStarved(true), _hasStarted(false), @@ -143,46 +146,51 @@ void InboundAudioStream::starved() { _starveCount++; } - -int InboundAudioStream::getCalculatedDesiredJitterBufferFrames() const { - const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE; - - int calculatedDesiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME); - if (calculatedDesiredJitterBufferFrames < 1) { - calculatedDesiredJitterBufferFrames = 1; - } - return calculatedDesiredJitterBufferFrames; +int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const { + const int MIN_FRAMES_DESIRED = 0; + const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity(); + return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED); } - SequenceNumberStats::ArrivalInfo 
InboundAudioStream::frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID) { - const int NUM_INITIAL_PACKETS_DISCARD = 3; - + // track the sequence number we received SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequenceNumber, senderUUID); - // update the two time gap stats we're keeping + // update our timegap stats and desired jitter buffer frames if necessary + // discard the first few packets we receive since they usually have gaps that aren't represensative of normal jitter + const int NUM_INITIAL_PACKETS_DISCARD = 3; quint64 now = usecTimestampNow(); - if (_incomingSequenceNumberStats.getNumReceived() >= NUM_INITIAL_PACKETS_DISCARD) { + if (_incomingSequenceNumberStats.getNumReceived() > NUM_INITIAL_PACKETS_DISCARD) { quint64 gap = now - _lastFrameReceivedTime; - _interframeTimeGapStatsForJitterCalc.update(gap); _interframeTimeGapStatsForStatsPacket.update(gap); - } - _lastFrameReceivedTime = now; - // recalculate the _desiredJitterBufferFrames if _interframeTimeGapStatsForJitterCalc has updated stats for us - if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) { - if (!_dynamicJitterBuffers) { - _desiredJitterBufferFrames = 1; // HACK to see if this fixes the audio silence - } else { - _desiredJitterBufferFrames = getCalculatedDesiredJitterBufferFrames(); + const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE; - const int maxDesired = _ringBuffer.getFrameCapacity() - 1; - if (_desiredJitterBufferFrames > maxDesired) { - _desiredJitterBufferFrames = maxDesired; + // update stats for Freddy's method of jitter calc + _interframeTimeGapStatsForJitterCalc.update(gap); + if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) { + _calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME); + 
_interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag(); + + if (_dynamicJitterBuffers && !_useStDevForJitterCalc) { + _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingMaxGap); + } + } + + // update stats for Philip's method of jitter calc + _stdev.addValue(gap); + const int STANDARD_DEVIATION_SAMPLE_COUNT = 500; + if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) { + const float NUM_STANDARD_DEVIATIONS = 3.0f; + _calculatedJitterBufferFramesUsingStDev = (int)ceilf(2 * (NUM_STANDARD_DEVIATIONS * _stdev.getStDev()) / USECS_PER_FRAME) + 1; + _stdev.reset(); + + if (_dynamicJitterBuffers && _useStDevForJitterCalc) { + _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingStDev); } } - _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag(); } + _lastFrameReceivedTime = now; return arrivalInfo; } diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 4624a9fd38..d78f373095 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -18,6 +18,7 @@ #include "SequenceNumberStats.h" #include "AudioStreamStats.h" #include "PacketHeaders.h" +#include "StdDev.h" // the time gaps stats for _desiredJitterBufferFrames calculation // will recalculate the max for the past 5000 samples every 500 samples @@ -44,7 +45,7 @@ const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100; class InboundAudioStream : public NodeData { Q_OBJECT public: - InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers); + InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false); void reset(); void resetStats(); @@ -67,7 +68,15 @@ public: virtual AudioStreamStats getAudioStreamStats() const; - int getCalculatedDesiredJitterBufferFrames() const; + /// returns the desired number of jitter buffer 
frames under the dyanmic jitter buffers scheme + int getCalculatedJitterBufferFrames() const { return _useStDevForJitterCalc ? + _calculatedJitterBufferFramesUsingStDev : _calculatedJitterBufferFramesUsingMaxGap; }; + + /// returns the desired number of jitter buffer frames using Philip's method + int getCalculatedJitterBufferFramesUsingStDev() const { return _calculatedJitterBufferFramesUsingStDev; } + + /// returns the desired number of jitter buffer frames using Freddy's method + int getCalculatedJitterBufferFramesUsingMaxGap() const { return _calculatedJitterBufferFramesUsingMaxGap; } int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; } int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); } @@ -86,6 +95,8 @@ public: private: void starved(); + int clampDesiredJitterBufferFramesValue(int desired) const; + protected: // disallow copying of InboundAudioStream objects InboundAudioStream(const InboundAudioStream&); @@ -110,6 +121,10 @@ protected: AudioRingBuffer::ConstIterator _lastPopOutput; bool _dynamicJitterBuffers; + bool _useStDevForJitterCalc; + + int _calculatedJitterBufferFramesUsingMaxGap; + int _calculatedJitterBufferFramesUsingStDev; int _desiredJitterBufferFrames; bool _isStarved; @@ -125,6 +140,7 @@ protected: quint64 _lastFrameReceivedTime; MovingMinMaxAvg _interframeTimeGapStatsForJitterCalc; + StDev _stdev; MovingMinMaxAvg _interframeTimeGapStatsForStatsPacket; // TODO: change this to time-weighted moving avg diff --git a/libraries/audio/src/InboundMixedAudioStream.cpp b/libraries/audio/src/InboundMixedAudioStream.cpp index a1a753a892..208e5d8dcc 100644 --- a/libraries/audio/src/InboundMixedAudioStream.cpp +++ b/libraries/audio/src/InboundMixedAudioStream.cpp @@ -1,8 +1,8 @@ #include "InboundMixedAudioStream.h" -InboundMixedAudioStream::InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers) - : InboundAudioStream(numFrameSamples, numFramesCapacity, 
dynamicJitterBuffers) +InboundMixedAudioStream::InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc) + : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, useStDevForJitterCalc) { } diff --git a/libraries/audio/src/InboundMixedAudioStream.h b/libraries/audio/src/InboundMixedAudioStream.h index e35b0198f8..d6a8d493ca 100644 --- a/libraries/audio/src/InboundMixedAudioStream.h +++ b/libraries/audio/src/InboundMixedAudioStream.h @@ -4,7 +4,7 @@ class InboundMixedAudioStream : public InboundAudioStream { public: - InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers); + InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false); float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); } diff --git a/libraries/shared/src/StdDev.cpp b/libraries/shared/src/StdDev.cpp index 4528d0fa4f..c5d78345e4 100644 --- a/libraries/shared/src/StdDev.cpp +++ b/libraries/shared/src/StdDev.cpp @@ -29,7 +29,7 @@ void StDev::addValue(float v) { if (sampleCount == MAX_STDEV_SAMPLES) sampleCount = 0; } -float StDev::getAverage() { +float StDev::getAverage() const { float average = 0; for (int i = 0; i < sampleCount; i++) { average += data[i]; @@ -49,7 +49,7 @@ float StDev::getMax() { else return 0; }*/ -float StDev::getStDev() { +float StDev::getStDev() const { float average = getAverage(); float stdev = 0; for (int i = 0; i < sampleCount; i++) { diff --git a/libraries/shared/src/StdDev.h b/libraries/shared/src/StdDev.h index 77873a3549..a05cc32992 100644 --- a/libraries/shared/src/StdDev.h +++ b/libraries/shared/src/StdDev.h @@ -17,8 +17,8 @@ class StDev { StDev(); void reset(); void addValue(float v); - float getAverage(); - float getStDev(); + float getAverage() const; + float getStDev() const; int getSamples() const { return sampleCount; } private: 
float * data; From 32dbc6cbdb9dad28638c9ba117d0bbd1389911dd Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 11:52:05 -0700 Subject: [PATCH 25/62] renamed audioringbuffer classes to audiostream --- assignment-client/src/audio/AudioMixer.cpp | 24 +++++++-------- assignment-client/src/audio/AudioMixer.h | 8 ++--- .../src/audio/AudioMixerClientData.cpp | 30 +++++++++---------- .../src/audio/AudioMixerClientData.h | 10 +++---- ...ioRingBuffer.cpp => AvatarAudioStream.cpp} | 12 ++++---- ...rAudioRingBuffer.h => AvatarAudioStream.h} | 14 ++++----- interface/src/Audio.cpp | 4 +-- libraries/audio/src/InboundAudioStream.h | 2 +- ...RingBuffer.cpp => InjectedAudioStream.cpp} | 16 +++++----- ...udioRingBuffer.h => InjectedAudioStream.h} | 14 ++++----- ...ngBuffer.cpp => PositionalAudioStream.cpp} | 14 ++++----- ...ioRingBuffer.h => PositionalAudioStream.h} | 14 ++++----- 12 files changed, 81 insertions(+), 81 deletions(-) rename assignment-client/src/audio/{AvatarAudioRingBuffer.cpp => AvatarAudioStream.cpp} (76%) rename assignment-client/src/audio/{AvatarAudioRingBuffer.h => AvatarAudioStream.h} (61%) rename libraries/audio/src/{InjectedAudioRingBuffer.cpp => InjectedAudioStream.cpp} (71%) rename libraries/audio/src/{InjectedAudioRingBuffer.h => InjectedAudioStream.h} (69%) rename libraries/audio/src/{PositionalAudioRingBuffer.cpp => PositionalAudioStream.cpp} (82%) rename libraries/audio/src/{PositionalAudioRingBuffer.h => PositionalAudioStream.h} (80%) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index ee823e4310..48789c6415 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -52,8 +52,8 @@ #include "AudioRingBuffer.h" #include "AudioMixerClientData.h" -#include "AvatarAudioRingBuffer.h" -#include "InjectedAudioRingBuffer.h" +#include "AvatarAudioStream.h" +#include "InjectedAudioStream.h" #include "AudioMixer.h" @@ -93,8 +93,8 @@ const float 
ATTENUATION_BEGINS_AT_DISTANCE = 1.0f; const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f; const float ATTENUATION_EPSILON_DISTANCE = 0.1f; -void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd, - AvatarAudioRingBuffer* listeningNodeBuffer) { +void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd, + AvatarAudioStream* listeningNodeBuffer) { float bearingRelativeAngleToSource = 0.0f; float attenuationCoefficient = 1.0f; int numSamplesDelay = 0; @@ -125,8 +125,8 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf shouldAttenuate = !bufferToAdd->getListenerUnattenuatedZone()->contains(listeningNodeBuffer->getPosition()); } - if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) { - attenuationCoefficient *= reinterpret_cast(bufferToAdd)->getAttenuationRatio(); + if (bufferToAdd->getType() == PositionalAudioStream::Injector) { + attenuationCoefficient *= reinterpret_cast(bufferToAdd)->getAttenuationRatio(); } shouldAttenuate = shouldAttenuate && distanceBetween > ATTENUATION_EPSILON_DISTANCE; @@ -137,8 +137,8 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf float distanceSquareToSource = glm::dot(relativePosition, relativePosition); float radius = 0.0f; - if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) { - radius = reinterpret_cast(bufferToAdd)->getRadius(); + if (bufferToAdd->getType() == PositionalAudioStream::Injector) { + radius = reinterpret_cast(bufferToAdd)->getRadius(); } if (radius == 0 || (distanceSquareToSource > radius * radius)) { @@ -265,7 +265,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf } void AudioMixer::prepareMixForListeningNode(Node* node) { - AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer(); + AvatarAudioStream* nodeRingBuffer = 
((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer(); // zero out the client mix for this node memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO); @@ -278,10 +278,10 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { // enumerate the ARBs attached to the otherNode and add all that should be added to mix - const QHash& otherNodeRingBuffers = otherNodeClientData->getRingBuffers(); - QHash::ConstIterator i; + const QHash& otherNodeRingBuffers = otherNodeClientData->getRingBuffers(); + QHash::ConstIterator i; for (i = otherNodeRingBuffers.begin(); i != otherNodeRingBuffers.constEnd(); i++) { - PositionalAudioRingBuffer* otherNodeBuffer = i.value(); + PositionalAudioStream* otherNodeBuffer = i.value(); if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode()) && otherNodeBuffer->lastPopSucceeded() diff --git a/assignment-client/src/audio/AudioMixer.h b/assignment-client/src/audio/AudioMixer.h index afab7d47dc..73b3e0ff94 100644 --- a/assignment-client/src/audio/AudioMixer.h +++ b/assignment-client/src/audio/AudioMixer.h @@ -16,8 +16,8 @@ #include #include -class PositionalAudioRingBuffer; -class AvatarAudioRingBuffer; +class PositionalAudioStream; +class AvatarAudioStream; const int SAMPLE_PHASE_DELAY_AT_90 = 20; @@ -41,8 +41,8 @@ public slots: private: /// adds one buffer to the mix for a listening node - void addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd, - AvatarAudioRingBuffer* listeningNodeBuffer); + void addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd, + AvatarAudioStream* listeningNodeBuffer); /// prepares and sends a mix to one Node void prepareMixForListeningNode(Node* node); diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index 0745536983..d436870a71 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp 
@@ -14,7 +14,7 @@ #include #include -#include "InjectedAudioRingBuffer.h" +#include "InjectedAudioStream.h" #include "AudioMixer.h" #include "AudioMixerClientData.h" @@ -27,16 +27,16 @@ AudioMixerClientData::AudioMixerClientData() : } AudioMixerClientData::~AudioMixerClientData() { - QHash::ConstIterator i; + QHash::ConstIterator i; for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) { // delete this attached InboundAudioStream delete i.value(); } } -AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const { +AvatarAudioStream* AudioMixerClientData::getAvatarAudioRingBuffer() const { if (_ringBuffers.contains(QUuid())) { - return (AvatarAudioRingBuffer*)_ringBuffers.value(QUuid()); + return (AvatarAudioStream*)_ringBuffers.value(QUuid()); } // no mic stream found - return NULL return NULL; @@ -58,7 +58,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { return dataAt - packet.data(); } else { - PositionalAudioRingBuffer* matchingStream = NULL; + PositionalAudioStream* matchingStream = NULL; if (packetType == PacketTypeMicrophoneAudioWithEcho || packetType == PacketTypeMicrophoneAudioNoEcho @@ -74,7 +74,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { bool isStereo = channelFlag == 1; _ringBuffers.insert(nullUUID, - matchingStream = new AvatarAudioRingBuffer(isStereo, AudioMixer::getUseDynamicJitterBuffers())); + matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers())); } else { matchingStream = _ringBuffers.value(nullUUID); } @@ -87,7 +87,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { if (!_ringBuffers.contains(streamIdentifier)) { _ringBuffers.insert(streamIdentifier, - matchingStream = new InjectedAudioRingBuffer(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers())); + matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers())); } else { matchingStream = 
_ringBuffers.value(streamIdentifier); } @@ -99,7 +99,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { } void AudioMixerClientData::audioStreamsPopFrameForMixing() { - QHash::ConstIterator i; + QHash::ConstIterator i; for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) { i.value()->popFrames(1); } @@ -113,10 +113,10 @@ void AudioMixerClientData::removeDeadInjectedStreams() { // never even reaches its desired size, which means it will never start. const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000; - QHash::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end(); + QHash::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end(); while (i != end) { - PositionalAudioRingBuffer* audioStream = i.value(); - if (audioStream->getType() == PositionalAudioRingBuffer::Injector && audioStream->isStarved()) { + PositionalAudioStream* audioStream = i.value(); + if (audioStream->getType() == PositionalAudioStream::Injector && audioStream->isStarved()) { int notMixedThreshold = audioStream->hasStarted() ? 
INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD : INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD; if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) { @@ -152,7 +152,7 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& // pack and send stream stats packets until all ring buffers' stats are sent int numStreamStatsRemaining = _ringBuffers.size(); - QHash::ConstIterator ringBuffersIterator = _ringBuffers.constBegin(); + QHash::ConstIterator ringBuffersIterator = _ringBuffers.constBegin(); while (numStreamStatsRemaining > 0) { char* dataAt = headerEndAt; @@ -201,7 +201,7 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax) + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage); - AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer(); + AvatarAudioStream* avatarRingBuffer = getAvatarAudioRingBuffer(); if (avatarRingBuffer) { AudioStreamStats streamStats = avatarRingBuffer->getAudioStreamStats(); result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) @@ -224,9 +224,9 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { result = "mic unknown"; } - QHash::ConstIterator i; + QHash::ConstIterator i; for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) { - if (i.value()->getType() == PositionalAudioRingBuffer::Injector) { + if (i.value()->getType() == PositionalAudioStream::Injector) { AudioStreamStats streamStats = i.value()->getAudioStreamStats(); result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) + " desired_calc:" + QString::number(i.value()->getCalculatedJitterBufferFrames()) diff --git a/assignment-client/src/audio/AudioMixerClientData.h b/assignment-client/src/audio/AudioMixerClientData.h index b4ad063c02..55fb1355e5 100644 --- 
a/assignment-client/src/audio/AudioMixerClientData.h +++ b/assignment-client/src/audio/AudioMixerClientData.h @@ -14,16 +14,16 @@ #include -#include "PositionalAudioRingBuffer.h" -#include "AvatarAudioRingBuffer.h" +#include "PositionalAudioStream.h" +#include "AvatarAudioStream.h" class AudioMixerClientData : public NodeData { public: AudioMixerClientData(); ~AudioMixerClientData(); - const QHash& getRingBuffers() const { return _ringBuffers; } - AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const; + const QHash& getRingBuffers() const { return _ringBuffers; } + AvatarAudioStream* getAvatarAudioRingBuffer() const; int parseData(const QByteArray& packet); @@ -39,7 +39,7 @@ public: quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; } private: - QHash _ringBuffers; // mic stream stored under key of null UUID + QHash _ringBuffers; // mic stream stored under key of null UUID quint16 _outgoingMixedAudioSequenceNumber; diff --git a/assignment-client/src/audio/AvatarAudioRingBuffer.cpp b/assignment-client/src/audio/AvatarAudioStream.cpp similarity index 76% rename from assignment-client/src/audio/AvatarAudioRingBuffer.cpp rename to assignment-client/src/audio/AvatarAudioStream.cpp index 588d198023..c6a7d31468 100644 --- a/assignment-client/src/audio/AvatarAudioRingBuffer.cpp +++ b/assignment-client/src/audio/AvatarAudioStream.cpp @@ -1,5 +1,5 @@ // -// AvatarAudioRingBuffer.cpp +// AvatarAudioStream.cpp // assignment-client/src/audio // // Created by Stephen Birarda on 6/5/13. 
@@ -11,14 +11,14 @@ #include -#include "AvatarAudioRingBuffer.h" +#include "AvatarAudioStream.h" -AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBuffer) : - PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo, dynamicJitterBuffer) +AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer) : + PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer) { } -int AvatarAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { +int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { _shouldLoopbackForNode = (type == PacketTypeMicrophoneAudioWithEcho); @@ -51,7 +51,7 @@ int AvatarAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArr return readBytes; } -int AvatarAudioRingBuffer::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { +int AvatarAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { int readBytes = 0; if (type == PacketTypeSilentAudioFrame) { writeDroppableSilentSamples(numAudioSamples); diff --git a/assignment-client/src/audio/AvatarAudioRingBuffer.h b/assignment-client/src/audio/AvatarAudioStream.h similarity index 61% rename from assignment-client/src/audio/AvatarAudioRingBuffer.h rename to assignment-client/src/audio/AvatarAudioStream.h index d846748aff..2b5f921299 100644 --- a/assignment-client/src/audio/AvatarAudioRingBuffer.h +++ b/assignment-client/src/audio/AvatarAudioStream.h @@ -1,5 +1,5 @@ // -// AvatarAudioRingBuffer.h +// AvatarAudioStream.h // assignment-client/src/audio // // Created by Stephen Birarda on 6/5/13. 
@@ -14,16 +14,16 @@ #include -#include "PositionalAudioRingBuffer.h" +#include "PositionalAudioStream.h" -class AvatarAudioRingBuffer : public PositionalAudioRingBuffer { +class AvatarAudioStream : public PositionalAudioStream { public: - AvatarAudioRingBuffer(bool isStereo = false, bool dynamicJitterBuffer = false); + AvatarAudioStream(bool isStereo = false, bool dynamicJitterBuffer = false); private: - // disallow copying of AvatarAudioRingBuffer objects - AvatarAudioRingBuffer(const AvatarAudioRingBuffer&); - AvatarAudioRingBuffer& operator= (const AvatarAudioRingBuffer&); + // disallow copying of AvatarAudioStream objects + AvatarAudioStream(const AvatarAudioStream&); + AvatarAudioStream& operator= (const AvatarAudioStream&); int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples); int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples); diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 0e024ccc58..0ec74de0b1 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -42,7 +42,7 @@ #include "Audio.h" #include "Menu.h" #include "Util.h" -#include "PositionalAudioRingBuffer.h" +#include "PositionalAudioStream.h" static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0; @@ -746,7 +746,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) { memcpy(&streamStats, dataAt, sizeof(AudioStreamStats)); dataAt += sizeof(AudioStreamStats); - if (streamStats._streamType == PositionalAudioRingBuffer::Microphone) { + if (streamStats._streamType == PositionalAudioStream::Microphone) { _audioMixerAvatarStreamAudioStats = streamStats; } else { _audioMixerInjectedStreamAudioStatsMap[streamStats._streamIdentifier] = streamStats; diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index d78f373095..76b4ce18e8 100644 --- 
a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -120,7 +120,7 @@ protected: bool _lastPopSucceeded; AudioRingBuffer::ConstIterator _lastPopOutput; - bool _dynamicJitterBuffers; + const bool _dynamicJitterBuffers; bool _useStDevForJitterCalc; int _calculatedJitterBufferFramesUsingMaxGap; diff --git a/libraries/audio/src/InjectedAudioRingBuffer.cpp b/libraries/audio/src/InjectedAudioStream.cpp similarity index 71% rename from libraries/audio/src/InjectedAudioRingBuffer.cpp rename to libraries/audio/src/InjectedAudioStream.cpp index e35e428671..4c23fbd823 100644 --- a/libraries/audio/src/InjectedAudioRingBuffer.cpp +++ b/libraries/audio/src/InjectedAudioStream.cpp @@ -1,5 +1,5 @@ // -// InjectedAudioRingBuffer.cpp +// InjectedAudioStream.cpp // libraries/audio/src // // Created by Stephen Birarda on 6/5/13. @@ -17,10 +17,10 @@ #include #include -#include "InjectedAudioRingBuffer.h" +#include "InjectedAudioStream.h" -InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier, bool dynamicJitterBuffer) : - PositionalAudioRingBuffer(PositionalAudioRingBuffer::Injector, false, dynamicJitterBuffer), +InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer) : + PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer), _streamIdentifier(streamIdentifier), _radius(0.0f), _attenuationRatio(0) @@ -30,7 +30,7 @@ InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier, const uchar MAX_INJECTOR_VOLUME = 255; -int InjectedAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { +int InjectedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { // setup a data stream to read from this packet QDataStream packetStream(packetAfterSeqNum); @@ -58,12 +58,12 @@ int 
InjectedAudioRingBuffer::parseStreamProperties(PacketType type, const QByteA return packetStream.device()->pos(); } -int InjectedAudioRingBuffer::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { +int InjectedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t)); } -AudioStreamStats InjectedAudioRingBuffer::getAudioStreamStats() const { - AudioStreamStats streamStats = PositionalAudioRingBuffer::getAudioStreamStats(); +AudioStreamStats InjectedAudioStream::getAudioStreamStats() const { + AudioStreamStats streamStats = PositionalAudioStream::getAudioStreamStats(); streamStats._streamIdentifier = _streamIdentifier; return streamStats; } diff --git a/libraries/audio/src/InjectedAudioRingBuffer.h b/libraries/audio/src/InjectedAudioStream.h similarity index 69% rename from libraries/audio/src/InjectedAudioRingBuffer.h rename to libraries/audio/src/InjectedAudioStream.h index 0f7c621baa..81659c4836 100644 --- a/libraries/audio/src/InjectedAudioRingBuffer.h +++ b/libraries/audio/src/InjectedAudioStream.h @@ -1,5 +1,5 @@ // -// InjectedAudioRingBuffer.h +// InjectedAudioStream.h // libraries/audio/src // // Created by Stephen Birarda on 6/5/13. 
@@ -14,11 +14,11 @@ #include -#include "PositionalAudioRingBuffer.h" +#include "PositionalAudioStream.h" -class InjectedAudioRingBuffer : public PositionalAudioRingBuffer { +class InjectedAudioStream : public PositionalAudioStream { public: - InjectedAudioRingBuffer(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false); + InjectedAudioStream(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false); float getRadius() const { return _radius; } float getAttenuationRatio() const { return _attenuationRatio; } @@ -26,9 +26,9 @@ public: QUuid getStreamIdentifier() const { return _streamIdentifier; } private: - // disallow copying of InjectedAudioRingBuffer objects - InjectedAudioRingBuffer(const InjectedAudioRingBuffer&); - InjectedAudioRingBuffer& operator= (const InjectedAudioRingBuffer&); + // disallow copying of InjectedAudioStream objects + InjectedAudioStream(const InjectedAudioStream&); + InjectedAudioStream& operator= (const InjectedAudioStream&); AudioStreamStats getAudioStreamStats() const; int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples); diff --git a/libraries/audio/src/PositionalAudioRingBuffer.cpp b/libraries/audio/src/PositionalAudioStream.cpp similarity index 82% rename from libraries/audio/src/PositionalAudioRingBuffer.cpp rename to libraries/audio/src/PositionalAudioStream.cpp index 378fad92cc..b50e339185 100644 --- a/libraries/audio/src/PositionalAudioRingBuffer.cpp +++ b/libraries/audio/src/PositionalAudioStream.cpp @@ -1,5 +1,5 @@ // -// PositionalAudioRingBuffer.cpp +// PositionalAudioStream.cpp // libraries/audio/src // // Created by Stephen Birarda on 6/5/13. 
@@ -9,7 +9,7 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -#include "PositionalAudioRingBuffer.h" +#include "PositionalAudioStream.h" #include "SharedUtil.h" #include @@ -21,7 +21,7 @@ #include #include -PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo, bool dynamicJitterBuffers) : +PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers) : InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL, AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers), _type(type), @@ -34,13 +34,13 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer:: { } -int PositionalAudioRingBuffer::parseData(const QByteArray& packet) { +int PositionalAudioStream::parseData(const QByteArray& packet) { int bytesRead = InboundAudioStream::parseData(packet); updateNextOutputTrailingLoudness(); return bytesRead; } -void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() { +void PositionalAudioStream::updateNextOutputTrailingLoudness() { float nextLoudness = _ringBuffer.getNextOutputFrameLoudness(); const int TRAILING_AVERAGE_FRAMES = 100; @@ -59,7 +59,7 @@ void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() { } } -int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalByteArray) { +int PositionalAudioStream::parsePositionalData(const QByteArray& positionalByteArray) { QDataStream packetStream(positionalByteArray); packetStream.readRawData(reinterpret_cast(&_position), sizeof(_position)); @@ -75,7 +75,7 @@ int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalB return packetStream.device()->pos(); } -AudioStreamStats PositionalAudioRingBuffer::getAudioStreamStats() const { +AudioStreamStats PositionalAudioStream::getAudioStreamStats() const { AudioStreamStats 
streamStats = InboundAudioStream::getAudioStreamStats(); streamStats._streamType = _type; return streamStats; diff --git a/libraries/audio/src/PositionalAudioRingBuffer.h b/libraries/audio/src/PositionalAudioStream.h similarity index 80% rename from libraries/audio/src/PositionalAudioRingBuffer.h rename to libraries/audio/src/PositionalAudioStream.h index e0d6929ec9..de76edaa63 100644 --- a/libraries/audio/src/PositionalAudioRingBuffer.h +++ b/libraries/audio/src/PositionalAudioStream.h @@ -1,5 +1,5 @@ // -// PositionalAudioRingBuffer.h +// PositionalAudioStream.h // libraries/audio/src // // Created by Stephen Birarda on 6/5/13. @@ -19,7 +19,7 @@ const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100; -class PositionalAudioRingBuffer : public InboundAudioStream { +class PositionalAudioStream : public InboundAudioStream { Q_OBJECT public: enum Type { @@ -27,7 +27,7 @@ public: Injector }; - PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false); + PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo = false, bool dynamicJitterBuffers = false); int parseData(const QByteArray& packet); @@ -38,7 +38,7 @@ public: bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; } bool isStereo() const { return _isStereo; } - PositionalAudioRingBuffer::Type getType() const { return _type; } + PositionalAudioStream::Type getType() const { return _type; } const glm::vec3& getPosition() const { return _position; } const glm::quat& getOrientation() const { return _orientation; } AABox* getListenerUnattenuatedZone() const { return _listenerUnattenuatedZone; } @@ -46,9 +46,9 @@ public: void setListenerUnattenuatedZone(AABox* listenerUnattenuatedZone) { _listenerUnattenuatedZone = listenerUnattenuatedZone; } protected: - // disallow copying of PositionalAudioRingBuffer objects - PositionalAudioRingBuffer(const PositionalAudioRingBuffer&); - PositionalAudioRingBuffer& operator= (const 
PositionalAudioRingBuffer&); + // disallow copying of PositionalAudioStream objects + PositionalAudioStream(const PositionalAudioStream&); + PositionalAudioStream& operator= (const PositionalAudioStream&); /// parses the info between the seq num and the audio data in the network packet and calculates /// how many audio samples this packet contains From 6fc5c74c0b6a0d68f014aa5d86caea48cb1ee40e Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 13:08:53 -0700 Subject: [PATCH 26/62] renamed ringbuffer variables to audiostream --- assignment-client/src/Agent.cpp | 8 +- assignment-client/src/Agent.h | 4 +- assignment-client/src/audio/AudioMixer.cpp | 89 +++++++++--------- assignment-client/src/audio/AudioMixer.h | 6 +- .../src/audio/AudioMixerClientData.cpp | 90 +++++++++---------- .../src/audio/AudioMixerClientData.h | 6 +- .../src/audio/AvatarAudioStream.h | 6 +- interface/src/Application.cpp | 10 +-- interface/src/Audio.cpp | 78 ++++++++-------- interface/src/Audio.h | 18 ++-- libraries/audio/src/AudioStreamStats.h | 28 +++--- libraries/audio/src/InboundAudioStream.cpp | 19 ++-- libraries/audio/src/InboundAudioStream.h | 11 ++- .../audio/src/InboundMixedAudioStream.cpp | 17 ---- libraries/audio/src/InboundMixedAudioStream.h | 14 --- libraries/audio/src/InjectedAudioStream.h | 6 +- libraries/audio/src/MixedAudioStream.cpp | 17 ++++ libraries/audio/src/MixedAudioStream.h | 29 ++++++ libraries/audio/src/PositionalAudioStream.h | 6 +- 19 files changed, 237 insertions(+), 225 deletions(-) delete mode 100644 libraries/audio/src/InboundMixedAudioStream.cpp delete mode 100644 libraries/audio/src/InboundMixedAudioStream.h create mode 100644 libraries/audio/src/MixedAudioStream.cpp create mode 100644 libraries/audio/src/MixedAudioStream.h diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index c82f35ff7f..d4da989198 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -36,7 +36,7 @@ Agent::Agent(const 
QByteArray& packet) : _voxelEditSender(), _particleEditSender(), _modelEditSender(), - _receivedAudioBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false), + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false), _avatarHashMap() { // be the parent of the script engine so it gets moved when we do @@ -148,11 +148,11 @@ void Agent::readPendingDatagrams() { } else if (datagramPacketType == PacketTypeMixedAudio) { - _receivedAudioBuffer.parseData(receivedPacket); + _receivedAudioStream.parseData(receivedPacket); - _lastReceivedAudioLoudness = _receivedAudioBuffer.getNextOutputFrameLoudness(); + _lastReceivedAudioLoudness = _receivedAudioStream.getNextOutputFrameLoudness(); - _receivedAudioBuffer.clearBuffer(); + _receivedAudioStream.clearBuffer(); // let this continue through to the NodeList so it updates last heard timestamp // for the sending audio mixer diff --git a/assignment-client/src/Agent.h b/assignment-client/src/Agent.h index b713062840..cd2476fe02 100644 --- a/assignment-client/src/Agent.h +++ b/assignment-client/src/Agent.h @@ -30,7 +30,7 @@ #include #include -#include "InboundMixedAudioStream.h" +#include "MixedAudioStream.h" class Agent : public ThreadedAssignment { @@ -71,7 +71,7 @@ private: VoxelTreeHeadlessViewer _voxelViewer; ModelTreeHeadlessViewer _modelViewer; - InboundMixedAudioStream _receivedAudioBuffer; + MixedAudioStream _receivedAudioStream; float _lastReceivedAudioLoudness; AvatarHashMap _avatarHashMap; diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index 48789c6415..d3ec39ace1 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -61,7 +61,7 @@ const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f; const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer"; -void attachNewBufferToNode(Node *newNode) { +void attachNewNodeDataToNode(Node *newNode) { if (!newNode->getLinkedData()) { newNode->setLinkedData(new 
AudioMixerClientData()); } @@ -93,19 +93,19 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f; const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f; const float ATTENUATION_EPSILON_DISTANCE = 0.1f; -void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd, - AvatarAudioStream* listeningNodeBuffer) { +void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd, + AvatarAudioStream* listeningNodeStream) { float bearingRelativeAngleToSource = 0.0f; float attenuationCoefficient = 1.0f; int numSamplesDelay = 0; float weakChannelAmplitudeRatio = 1.0f; - bool shouldAttenuate = (bufferToAdd != listeningNodeBuffer); + bool shouldAttenuate = (streamToAdd != listeningNodeStream); if (shouldAttenuate) { - // if the two buffer pointers do not match then these are different buffers - glm::vec3 relativePosition = bufferToAdd->getPosition() - listeningNodeBuffer->getPosition(); + // if the two stream pointers do not match then these are different streams + glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition(); float distanceBetween = glm::length(relativePosition); @@ -113,7 +113,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* distanceBetween = EPSILON; } - if (bufferToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) { + if (streamToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) { // according to mixer performance we have decided this does not get to be mixed in // bail out return; @@ -121,24 +121,24 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* ++_sumMixes; - if (bufferToAdd->getListenerUnattenuatedZone()) { - shouldAttenuate = !bufferToAdd->getListenerUnattenuatedZone()->contains(listeningNodeBuffer->getPosition()); + if (streamToAdd->getListenerUnattenuatedZone()) { + shouldAttenuate = 
!streamToAdd->getListenerUnattenuatedZone()->contains(listeningNodeStream->getPosition()); } - if (bufferToAdd->getType() == PositionalAudioStream::Injector) { - attenuationCoefficient *= reinterpret_cast(bufferToAdd)->getAttenuationRatio(); + if (streamToAdd->getType() == PositionalAudioStream::Injector) { + attenuationCoefficient *= reinterpret_cast(streamToAdd)->getAttenuationRatio(); } shouldAttenuate = shouldAttenuate && distanceBetween > ATTENUATION_EPSILON_DISTANCE; if (shouldAttenuate) { - glm::quat inverseOrientation = glm::inverse(listeningNodeBuffer->getOrientation()); + glm::quat inverseOrientation = glm::inverse(listeningNodeStream->getOrientation()); float distanceSquareToSource = glm::dot(relativePosition, relativePosition); float radius = 0.0f; - if (bufferToAdd->getType() == PositionalAudioStream::Injector) { - radius = reinterpret_cast(bufferToAdd)->getRadius(); + if (streamToAdd->getType() == PositionalAudioStream::Injector) { + radius = reinterpret_cast(streamToAdd)->getRadius(); } if (radius == 0 || (distanceSquareToSource > radius * radius)) { @@ -154,7 +154,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* } else { // calculate the angle delivery for off-axis attenuation - glm::vec3 rotatedListenerPosition = glm::inverse(bufferToAdd->getOrientation()) * relativePosition; + glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd->getOrientation()) * relativePosition; float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedListenerPosition)); @@ -203,16 +203,16 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* } } - AudioRingBuffer::ConstIterator bufferPopOutput = bufferToAdd->getLastPopOutput(); + AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput(); - if (!bufferToAdd->isStereo() && shouldAttenuate) { - // this is a mono buffer, which means it gets full attenuation and spatialization + if 
(!streamToAdd->isStereo() && shouldAttenuate) { + // this is a mono stream, which means it gets full attenuation and spatialization // if the bearing relative angle to source is > 0 then the delayed channel is the right one int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0; int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0; - int16_t correctBufferSample[2], delayBufferSample[2]; + int16_t correctStreamSample[2], delayStreamSample[2]; int delayedChannelIndex = 0; const int SINGLE_STEREO_OFFSET = 2; @@ -220,52 +220,51 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) { // setup the int16_t variables for the two sample sets - correctBufferSample[0] = bufferPopOutput[s / 2] * attenuationCoefficient; - correctBufferSample[1] = bufferPopOutput[(s / 2) + 1] * attenuationCoefficient; + correctStreamSample[0] = streamPopOutput[s / 2] * attenuationCoefficient; + correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationCoefficient; delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset; - delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio; - delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio; + delayStreamSample[0] = correctStreamSample[0] * weakChannelAmplitudeRatio; + delayStreamSample[1] = correctStreamSample[1] * weakChannelAmplitudeRatio; - _clientSamples[s + goodChannelOffset] += correctBufferSample[0]; - _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] += correctBufferSample[1]; - _clientSamples[delayedChannelIndex] += delayBufferSample[0]; - _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayBufferSample[1]; + _clientSamples[s + goodChannelOffset] += correctStreamSample[0]; + _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] += correctStreamSample[1]; + _clientSamples[delayedChannelIndex] += delayStreamSample[0]; + 
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayStreamSample[1]; } if (numSamplesDelay > 0) { - // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput + // if there was a sample delay for this stream, we need to pull samples prior to the popped output // to stick at the beginning float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio; - AudioRingBuffer::ConstIterator delayBufferPopOutput = bufferPopOutput - numSamplesDelay; + AudioRingBuffer::ConstIterator delayStreamPopOutput = streamPopOutput - numSamplesDelay; - // TODO: delayBufferPopOutput may be inside the last frame written if the ringbuffer is completely full + // TODO: delayStreamPopOutput may be inside the last frame written if the ringbuffer is completely full // maybe make AudioRingBuffer have 1 extra frame in its buffer for (int i = 0; i < numSamplesDelay; i++) { int parentIndex = i * 2; - _clientSamples[parentIndex + delayedChannelOffset] += *delayBufferPopOutput * attenuationAndWeakChannelRatio; - ++delayBufferPopOutput; + _clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatio; + ++delayStreamPopOutput; } } } else { - - int stereoDivider = bufferToAdd->isStereo() ? 1 : 2; + int stereoDivider = streamToAdd->isStereo() ? 
1 : 2; if (!shouldAttenuate) { attenuationCoefficient = 1.0f; } for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) { - _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(bufferPopOutput[s / stereoDivider] * attenuationCoefficient), + _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationCoefficient), MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); } } } void AudioMixer::prepareMixForListeningNode(Node* node) { - AvatarAudioStream* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer(); + AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream(); // zero out the client mix for this node memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO); @@ -278,16 +277,16 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { // enumerate the ARBs attached to the otherNode and add all that should be added to mix - const QHash& otherNodeRingBuffers = otherNodeClientData->getRingBuffers(); + const QHash& otherNodeAudioStreams = otherNodeClientData->getAudioStreams(); QHash::ConstIterator i; - for (i = otherNodeRingBuffers.begin(); i != otherNodeRingBuffers.constEnd(); i++) { - PositionalAudioStream* otherNodeBuffer = i.value(); + for (i = otherNodeAudioStreams.begin(); i != otherNodeAudioStreams.constEnd(); i++) { + PositionalAudioStream* otherNodeStream = i.value(); - if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode()) - && otherNodeBuffer->lastPopSucceeded() - && otherNodeBuffer->getNextOutputTrailingLoudness() > 0.0f) { + if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) + && otherNodeStream->lastPopSucceeded() + && otherNodeStream->getNextOutputTrailingLoudness() > 0.0f) { - addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer); + addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream); } } } @@ -392,7 +391,7 @@ void AudioMixer::run() { 
nodeList->addNodeTypeToInterestSet(NodeType::Agent); - nodeList->linkedDataCreateCallback = attachNewBufferToNode; + nodeList->linkedDataCreateCallback = attachNewNodeDataToNode; // setup a NetworkAccessManager to ask the domain-server for our settings NetworkAccessManager& networkManager = NetworkAccessManager::getInstance(); @@ -554,7 +553,7 @@ void AudioMixer::run() { nodeData->audioStreamsPopFrameForMixing(); if (node->getType() == NodeType::Agent - && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioRingBuffer()) { + && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioStream()) { prepareMixForListeningNode(node.data()); diff --git a/assignment-client/src/audio/AudioMixer.h b/assignment-client/src/audio/AudioMixer.h index 73b3e0ff94..bfdb49f393 100644 --- a/assignment-client/src/audio/AudioMixer.h +++ b/assignment-client/src/audio/AudioMixer.h @@ -40,9 +40,9 @@ public slots: static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; } private: - /// adds one buffer to the mix for a listening node - void addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd, - AvatarAudioStream* listeningNodeBuffer); + /// adds one stream to the mix for a listening node + void addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd, + AvatarAudioStream* listeningNodeStream); /// prepares and sends a mix to one Node void prepareMixForListeningNode(Node* node); diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index d436870a71..17e46f3692 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -21,22 +21,22 @@ AudioMixerClientData::AudioMixerClientData() : - _ringBuffers(), + _audioStreams(), _outgoingMixedAudioSequenceNumber(0) { } AudioMixerClientData::~AudioMixerClientData() { QHash::ConstIterator i; - for (i = _ringBuffers.constBegin(); i != 
_ringBuffers.constEnd(); i++) { + for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) { // delete this attached InboundAudioStream delete i.value(); } } -AvatarAudioStream* AudioMixerClientData::getAvatarAudioRingBuffer() const { - if (_ringBuffers.contains(QUuid())) { - return (AvatarAudioStream*)_ringBuffers.value(QUuid()); +AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() const { + if (_audioStreams.contains(QUuid())) { + return (AvatarAudioStream*)_audioStreams.value(QUuid()); } // no mic stream found - return NULL return NULL; @@ -65,7 +65,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { || packetType == PacketTypeSilentAudioFrame) { QUuid nullUUID = QUuid(); - if (!_ringBuffers.contains(nullUUID)) { + if (!_audioStreams.contains(nullUUID)) { // we don't have a mic stream yet, so add it // read the channel flag to see if our stream is stereo or not @@ -73,10 +73,10 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { quint8 channelFlag = *(reinterpret_cast(channelFlagAt)); bool isStereo = channelFlag == 1; - _ringBuffers.insert(nullUUID, + _audioStreams.insert(nullUUID, matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers())); } else { - matchingStream = _ringBuffers.value(nullUUID); + matchingStream = _audioStreams.value(nullUUID); } } else if (packetType == PacketTypeInjectAudio) { // this is injected audio @@ -85,11 +85,11 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { int bytesBeforeStreamIdentifier = numBytesForPacketHeader(packet) + sizeof(quint16); QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(bytesBeforeStreamIdentifier, NUM_BYTES_RFC4122_UUID)); - if (!_ringBuffers.contains(streamIdentifier)) { - _ringBuffers.insert(streamIdentifier, + if (!_audioStreams.contains(streamIdentifier)) { + _audioStreams.insert(streamIdentifier, matchingStream = new InjectedAudioStream(streamIdentifier, 
AudioMixer::getUseDynamicJitterBuffers())); } else { - matchingStream = _ringBuffers.value(streamIdentifier); + matchingStream = _audioStreams.value(streamIdentifier); } } @@ -100,7 +100,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { void AudioMixerClientData::audioStreamsPopFrameForMixing() { QHash::ConstIterator i; - for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) { + for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) { i.value()->popFrames(1); } } @@ -109,11 +109,11 @@ void AudioMixerClientData::removeDeadInjectedStreams() { const int INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD = 100; - // we have this second threshold in case the injected audio is so short that the ringbuffer + // we have this second threshold in case the injected audio is so short that the injected stream // never even reaches its desired size, which means it will never start. const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000; - QHash::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end(); + QHash::Iterator i = _audioStreams.begin(), end = _audioStreams.end(); while (i != end) { PositionalAudioStream* audioStream = i.value(); if (audioStream->getType() == PositionalAudioStream::Injector && audioStream->isStarved()) { @@ -121,7 +121,7 @@ void AudioMixerClientData::removeDeadInjectedStreams() { : INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD; if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) { delete audioStream; - i = _ringBuffers.erase(i); + i = _audioStreams.erase(i); continue; } } @@ -150,9 +150,9 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& // calculate how many stream stat structs we can fit in each packet const int numStreamStatsRoomFor = (MAX_PACKET_SIZE - numBytesPacketHeader - sizeof(quint8) - sizeof(quint16)) / sizeof(AudioStreamStats); - // pack and send stream stats packets until all ring buffers' stats are sent - int numStreamStatsRemaining 
= _ringBuffers.size(); - QHash::ConstIterator ringBuffersIterator = _ringBuffers.constBegin(); + // pack and send stream stats packets until all audio streams' stats are sent + int numStreamStatsRemaining = _audioStreams.size(); + QHash::ConstIterator audioStreamsIterator = _audioStreams.constBegin(); while (numStreamStatsRemaining > 0) { char* dataAt = headerEndAt; @@ -169,11 +169,11 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& // pack the calculated number of stream stats for (int i = 0; i < numStreamStatsToPack; i++) { - AudioStreamStats streamStats = ringBuffersIterator.value()->updateSeqHistoryAndGetAudioStreamStats(); + AudioStreamStats streamStats = audioStreamsIterator.value()->updateSeqHistoryAndGetAudioStreamStats(); memcpy(dataAt, &streamStats, sizeof(AudioStreamStats)); dataAt += sizeof(AudioStreamStats); - ringBuffersIterator++; + audioStreamsIterator++; } numStreamStatsRemaining -= numStreamStatsToPack; @@ -185,12 +185,12 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& QString AudioMixerClientData::getAudioStreamStatsString() const { QString result; AudioStreamStats streamStats = _downstreamAudioStreamStats; - result += "DOWNSTREAM.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) - + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage) - + " available:" + QString::number(streamStats._ringBufferFramesAvailable) - + " starves:" + QString::number(streamStats._ringBufferStarveCount) - + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount) - + " overflows:" + QString::number(streamStats._ringBufferOverflowCount) + result += "DOWNSTREAM.desired:" + QString::number(streamStats._desiredJitterBufferFrames) + + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage) + + " available:" + QString::number(streamStats._framesAvailable) + + " starves:" + 
QString::number(streamStats._starveCount) + + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount) + + " overflows:" + QString::number(streamStats._overflowCount) + " silents_dropped: ?" + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2) + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2) @@ -201,17 +201,17 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax) + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage); - AvatarAudioStream* avatarRingBuffer = getAvatarAudioRingBuffer(); - if (avatarRingBuffer) { - AudioStreamStats streamStats = avatarRingBuffer->getAudioStreamStats(); - result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) - + " desired_calc:" + QString::number(avatarRingBuffer->getCalculatedJitterBufferFrames()) - + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage) - + " available:" + QString::number(streamStats._ringBufferFramesAvailable) - + " starves:" + QString::number(streamStats._ringBufferStarveCount) - + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount) - + " overflows:" + QString::number(streamStats._ringBufferOverflowCount) - + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped) + AvatarAudioStream* avatarAudioStream = getAvatarAudioStream(); + if (avatarAudioStream) { + AudioStreamStats streamStats = avatarAudioStream->getAudioStreamStats(); + result += " UPSTREAM.mic.desired:" + QString::number(streamStats._desiredJitterBufferFrames) + + " desired_calc:" + QString::number(avatarAudioStream->getCalculatedJitterBufferFrames()) + + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage) + + " available:" + QString::number(streamStats._framesAvailable) + + " starves:" + 
QString::number(streamStats._starveCount) + + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount) + + " overflows:" + QString::number(streamStats._overflowCount) + + " silents_dropped:" + QString::number(streamStats._silentFramesDropped) + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2) + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2) + " min_gap:" + formatUsecTime(streamStats._timeGapMin) @@ -225,17 +225,17 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { } QHash::ConstIterator i; - for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) { + for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) { if (i.value()->getType() == PositionalAudioStream::Injector) { AudioStreamStats streamStats = i.value()->getAudioStreamStats(); - result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames) + result += " UPSTREAM.inj.desired:" + QString::number(streamStats._desiredJitterBufferFrames) + " desired_calc:" + QString::number(i.value()->getCalculatedJitterBufferFrames()) - + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage) - + " available:" + QString::number(streamStats._ringBufferFramesAvailable) - + " starves:" + QString::number(streamStats._ringBufferStarveCount) - + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount) - + " overflows:" + QString::number(streamStats._ringBufferOverflowCount) - + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped) + + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage) + + " available:" + QString::number(streamStats._framesAvailable) + + " starves:" + QString::number(streamStats._starveCount) + + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount) + + " overflows:" + 
QString::number(streamStats._overflowCount) + + " silents_dropped:" + QString::number(streamStats._silentFramesDropped) + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2) + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2) + " min_gap:" + formatUsecTime(streamStats._timeGapMin) diff --git a/assignment-client/src/audio/AudioMixerClientData.h b/assignment-client/src/audio/AudioMixerClientData.h index 55fb1355e5..287f4f7b65 100644 --- a/assignment-client/src/audio/AudioMixerClientData.h +++ b/assignment-client/src/audio/AudioMixerClientData.h @@ -22,8 +22,8 @@ public: AudioMixerClientData(); ~AudioMixerClientData(); - const QHash& getRingBuffers() const { return _ringBuffers; } - AvatarAudioStream* getAvatarAudioRingBuffer() const; + const QHash& getAudioStreams() const { return _audioStreams; } + AvatarAudioStream* getAvatarAudioStream() const; int parseData(const QByteArray& packet); @@ -39,7 +39,7 @@ public: quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; } private: - QHash _ringBuffers; // mic stream stored under key of null UUID + QHash _audioStreams; // mic stream stored under key of null UUID quint16 _outgoingMixedAudioSequenceNumber; diff --git a/assignment-client/src/audio/AvatarAudioStream.h b/assignment-client/src/audio/AvatarAudioStream.h index 2b5f921299..de7920c278 100644 --- a/assignment-client/src/audio/AvatarAudioStream.h +++ b/assignment-client/src/audio/AvatarAudioStream.h @@ -9,8 +9,8 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -#ifndef hifi_AvatarAudioRingBuffer_h -#define hifi_AvatarAudioRingBuffer_h +#ifndef hifi_AvatarAudioStream_h +#define hifi_AvatarAudioStream_h #include @@ -29,4 +29,4 @@ private: int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples); }; -#endif // hifi_AvatarAudioRingBuffer_h +#endif // 
hifi_AvatarAudioStream_h diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index cab8e6691f..3ef4334fa5 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -103,10 +103,6 @@ const int IDLE_SIMULATE_MSECS = 16; // How often should call simul // in the idle loop? (60 FPS is default) static QTimer* idleTimer = NULL; -const int STARTUP_JITTER_SAMPLES = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / 2; - // Startup optimistically with small jitter buffer that - // will start playback on the second received audio packet. - const QString CHECK_VERSION_URL = "https://highfidelity.io/latestVersion.xml"; const QString SKIP_FILENAME = QStandardPaths::writableLocation(QStandardPaths::DataLocation) + "/hifi.skipversion"; @@ -162,7 +158,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) : _touchAvgY(0.0f), _isTouchPressed(false), _mousePressed(false), - _audio(STARTUP_JITTER_SAMPLES), + _audio(), _enableProcessVoxelsThread(true), _octreeProcessor(), _voxelHideShowThread(&_voxels), @@ -1712,8 +1708,8 @@ void Application::init() { _lastTimeUpdated.start(); Menu::getInstance()->loadSettings(); - if (Menu::getInstance()->getAudioJitterBufferSamples() != 0) { - _audio.setJitterBufferSamples(Menu::getInstance()->getAudioJitterBufferSamples()); + if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) { + _audio.overrideDesiredJitterBufferFramesTo(Menu::getInstance()->getAudioJitterBufferFrames()); } qDebug("Loaded settings"); diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 0ec74de0b1..e830e5f6d4 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -54,7 +54,7 @@ static const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10; static const int MUTE_ICON_SIZE = 24; -Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) : +Audio::Audio(QObject* parent) : AbstractAudioInterface(parent), _audioInput(NULL), _desiredInputFormat(), @@ -76,14 +76,12 @@ 
Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) : // this delay will slowly add up and the longer someone runs, they more delayed their audio will be. _inputRingBuffer(0), #ifdef _WIN32 - _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, true), + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, true), #else - _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!! + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!! #endif _isStereoInput(false), _averagedLatency(0.0), - _measuredJitter(0), - _jitterBufferSamples(initialJitterBufferSamples), _lastInputLoudness(0), _timeSinceLastClip(-1.0), _dcOffset(0), @@ -132,13 +130,13 @@ void Audio::init(QGLWidget *parent) { } void Audio::reset() { - _ringBuffer.reset(); + _receivedAudioStream.reset(); resetStats(); } void Audio::resetStats() { - _ringBuffer.resetStats(); + _receivedAudioStream.resetStats(); _audioMixerAvatarStreamAudioStats = AudioStreamStats(); _audioMixerInjectedStreamAudioStatsMap.clear(); @@ -715,7 +713,7 @@ void Audio::handleAudioInput() { } } -void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) { +void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) { if (_audioOutput) { // Audio output must exist and be correctly set up if we're going to process received audio processReceivedAudio(audioByteArray); @@ -755,7 +753,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) { } AudioStreamStats Audio::getDownstreamAudioStreamStats() const { - return _ringBuffer.getAudioStreamStats(); + return _receivedAudioStream.getAudioStreamStats(); } void Audio::sendDownstreamAudioStatsPacket() { @@ -783,7 +781,7 @@ void Audio::sendDownstreamAudioStatsPacket() { dataAt += sizeof(quint16); // pack downstream audio stream stats - AudioStreamStats 
stats = _ringBuffer.updateSeqHistoryAndGetAudioStreamStats(); + AudioStreamStats stats = _receivedAudioStream.updateSeqHistoryAndGetAudioStreamStats(); memcpy(dataAt, &stats, sizeof(AudioStreamStats)); dataAt += sizeof(AudioStreamStats); @@ -894,7 +892,7 @@ void Audio::toggleStereoInput() { void Audio::processReceivedAudio(const QByteArray& audioByteArray) { // parse audio data - _ringBuffer.parseData(audioByteArray); + _receivedAudioStream.parseData(audioByteArray); pushAudioToOutput(); } @@ -904,7 +902,7 @@ void Audio::pushAudioToOutput() { if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) { // the audio output has no samples to play. set the downstream audio to starved so that it // refills to its desired size before pushing frames - _ringBuffer.setToStarved(); + _receivedAudioStream.setToStarved(); } float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate()) @@ -912,16 +910,16 @@ void Audio::pushAudioToOutput() { int numFramesToPush; if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) { - numFramesToPush = _ringBuffer.getFramesAvailable(); + numFramesToPush = _receivedAudioStream.getFramesAvailable(); } else { // make sure to push a whole number of frames to the audio output - int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _ringBuffer.getNumFrameSamples(); - numFramesToPush = std::min(_ringBuffer.getFramesAvailable(), numFramesAudioOutputRoomFor); + int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _receivedAudioStream.getNumFrameSamples(); + numFramesToPush = std::min(_receivedAudioStream.getFramesAvailable(), numFramesAudioOutputRoomFor); } - // if there is data in the ring buffer and room in the audio output, decide what to do + // if there is data in the received stream and room in the audio output, decide what to do - if 
(numFramesToPush > 0 && _ringBuffer.popFrames(numFramesToPush, false)) { + if (numFramesToPush > 0 && _receivedAudioStream.popFrames(numFramesToPush, false)) { int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio; @@ -929,15 +927,15 @@ void Audio::pushAudioToOutput() { QByteArray outputBuffer; outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t)); - AudioRingBuffer::ConstIterator ringBufferPopOutput = _ringBuffer.getLastPopOutput(); + AudioRingBuffer::ConstIterator receivedAudioStreamPopOutput = _receivedAudioStream.getLastPopOutput(); - int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples]; + int16_t* receivedSamples = new int16_t[numNetworkOutputSamples]; if (_processSpatialAudio) { unsigned int sampleTime = _spatialAudioStart; QByteArray buffer; buffer.resize(numNetworkOutputSamples * sizeof(int16_t)); - ringBufferPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); + receivedAudioStreamPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); // Accumulate direct transmission of audio from sender to receiver if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) { @@ -950,18 +948,18 @@ void Audio::pushAudioToOutput() { // copy the samples we'll resample from the spatial audio ring buffer - this also // pushes the read pointer of the spatial audio ring buffer forwards - _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); + _spatialAudioRingBuffer.readSamples(receivedSamples, numNetworkOutputSamples); // Advance the start point for the next packet of audio to arrive _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount(); } else { // copy the samples we'll resample from the ring buffer - this also // pushes the read pointer of the ring buffer forwards - ringBufferPopOutput.readSamples(ringBufferSamples, 
numNetworkOutputSamples); + receivedAudioStreamPopOutput.readSamples(receivedSamples, numNetworkOutputSamples); } // copy the packet from the RB to the output - linearResampling(ringBufferSamples, + linearResampling(receivedSamples, (int16_t*)outputBuffer.data(), numNetworkOutputSamples, numDeviceOutputSamples, @@ -973,7 +971,7 @@ void Audio::pushAudioToOutput() { if (_scopeEnabled && !_scopeEnabledPause) { unsigned int numAudioChannels = _desiredOutputFormat.channelCount(); - int16_t* samples = ringBufferSamples; + int16_t* samples = receivedSamples; for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) { unsigned int audioChannel = 0; @@ -994,7 +992,7 @@ void Audio::pushAudioToOutput() { } } - delete[] ringBufferSamples; + delete[] receivedSamples; } } @@ -1332,14 +1330,14 @@ void Audio::renderStats(const float* color, int width, int height) { float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f; - AudioStreamStats downstreamAudioStreamStats = _ringBuffer.getAudioStreamStats(); + AudioStreamStats downstreamAudioStreamStats = _receivedAudioStream.getAudioStreamStats(); SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer); if (!audioMixerNodePointer.isNull()) { audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage(); inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable(); networkRoundtripLatency = audioMixerNodePointer->getPingMs(); - mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS; - outputRingBufferLatency = downstreamAudioStreamStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS; + mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._framesAvailableAverage * 
BUFFER_SEND_INTERVAL_MSECS; + outputRingBufferLatency = downstreamAudioStreamStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS; audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage(); } float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency; @@ -1427,26 +1425,26 @@ void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int hori const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC; sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d", - streamStats._ringBufferDesiredJitterBufferFrames, - streamStats._ringBufferFramesAvailableAverage, + streamStats._desiredJitterBufferFrames, + streamStats._framesAvailableAverage, (int)(getAudioOutputAverageMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS), - streamStats._ringBufferFramesAvailable, + streamStats._framesAvailable, (int)(getAudioOutputMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS)); } else { sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u", - streamStats._ringBufferDesiredJitterBufferFrames, - streamStats._ringBufferFramesAvailableAverage, - streamStats._ringBufferFramesAvailable); + streamStats._desiredJitterBufferFrames, + streamStats._framesAvailableAverage, + streamStats._framesAvailable); } verticalOffset += STATS_HEIGHT_PER_LINE; drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color); sprintf(stringBuffer, " Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u", - streamStats._ringBufferStarveCount, - streamStats._ringBufferConsecutiveNotMixedCount, - streamStats._ringBufferSilentFramesDropped, - streamStats._ringBufferOverflowCount); + streamStats._starveCount, + streamStats._consecutiveNotMixedCount, + streamStats._silentFramesDropped, + 
streamStats._overflowCount); verticalOffset += STATS_HEIGHT_PER_LINE; drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color); @@ -1662,8 +1660,8 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo) // setup our general output device for audio-mixer audio _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this); - _audioOutput->setBufferSize(_ringBuffer.getFrameCapacity() * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS)); - qDebug() << "Ring Buffer capacity in frames: " << _ringBuffer.getFrameCapacity(); + _audioOutput->setBufferSize(_receivedAudioStream.getFrameCapacity() * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS)); + qDebug() << "Ring Buffer capacity in frames: " << _receivedAudioStream.getFrameCapacity(); _outputDevice = _audioOutput->start(); // setup a loopback audio output device diff --git a/interface/src/Audio.h b/interface/src/Audio.h index d2bdc748ea..3efdb1d3b1 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -33,7 +33,7 @@ #include #include -#include "InboundMixedAudioStream.h" +#include "MixedAudioStream.h" static const int NUM_AUDIO_CHANNELS = 2; @@ -46,19 +46,19 @@ class Audio : public AbstractAudioInterface { Q_OBJECT public: // setup for audio I/O - Audio(int16_t initialJitterBufferSamples, QObject* parent = 0); + Audio(QObject* parent = 0); float getLastInputLoudness() const { return glm::max(_lastInputLoudness - _noiseGateMeasuredFloor, 0.f); } float getTimeSinceLastClip() const { return _timeSinceLastClip; } float getAudioAverageInputLoudness() const { return _lastInputLoudness; } void setNoiseGateEnabled(bool noiseGateEnabled) { _noiseGateEnabled = noiseGateEnabled; } - - void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; } - int getJitterBufferSamples() { return _jitterBufferSamples; } virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen); 
virtual void startDrumSound(float volume, float frequency, float duration, float decay); + + void overrideDesiredJitterBufferFramesTo(int desired) { _receivedAudioStream.overrideDesiredJitterBufferFramesTo(desired); } + int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); } float getCollisionSoundMagnitude() { return _collisionSoundMagnitude; } @@ -87,7 +87,7 @@ public: public slots: void start(); void stop(); - void addReceivedAudioToBuffer(const QByteArray& audioByteArray); + void addReceivedAudioToStream(const QByteArray& audioByteArray); void parseAudioStreamStatsPacket(const QByteArray& packet); void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples); void handleAudioInput(); @@ -120,8 +120,6 @@ public slots: float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; } void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); } - - int getDesiredJitterBufferFrames() const { return _jitterBufferSamples / _ringBuffer.getNumFrameSamples(); } const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; } const QHash& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; } @@ -151,7 +149,7 @@ private: QAudioOutput* _proceduralAudioOutput; QIODevice* _proceduralOutputDevice; AudioRingBuffer _inputRingBuffer; - InboundMixedAudioStream _ringBuffer; + MixedAudioStream _receivedAudioStream; bool _isStereoInput; QString _inputAudioDeviceName; @@ -160,8 +158,6 @@ private: StDev _stdev; QElapsedTimer _timeSinceLastReceived; float _averagedLatency; - float _measuredJitter; - int16_t _jitterBufferSamples; float _lastInputLoudness; float _timeSinceLastClip; float _dcOffset; diff --git a/libraries/audio/src/AudioStreamStats.h b/libraries/audio/src/AudioStreamStats.h index 4dd537afc0..784e163b3b 100644 --- 
a/libraries/audio/src/AudioStreamStats.h +++ b/libraries/audio/src/AudioStreamStats.h @@ -25,13 +25,13 @@ public: _timeGapWindowMin(0), _timeGapWindowMax(0), _timeGapWindowAverage(0.0f), - _ringBufferFramesAvailable(0), - _ringBufferFramesAvailableAverage(0), - _ringBufferDesiredJitterBufferFrames(0), - _ringBufferStarveCount(0), - _ringBufferConsecutiveNotMixedCount(0), - _ringBufferOverflowCount(0), - _ringBufferSilentFramesDropped(0), + _framesAvailable(0), + _framesAvailableAverage(0), + _desiredJitterBufferFrames(0), + _starveCount(0), + _consecutiveNotMixedCount(0), + _overflowCount(0), + _silentFramesDropped(0), _packetStreamStats(), _packetStreamWindowStats() {} @@ -46,13 +46,13 @@ public: quint64 _timeGapWindowMax; float _timeGapWindowAverage; - quint32 _ringBufferFramesAvailable; - quint16 _ringBufferFramesAvailableAverage; - quint16 _ringBufferDesiredJitterBufferFrames; - quint32 _ringBufferStarveCount; - quint32 _ringBufferConsecutiveNotMixedCount; - quint32 _ringBufferOverflowCount; - quint32 _ringBufferSilentFramesDropped; + quint32 _framesAvailable; + quint16 _framesAvailableAverage; + quint16 _desiredJitterBufferFrames; + quint32 _starveCount; + quint32 _consecutiveNotMixedCount; + quint32 _overflowCount; + quint32 _silentFramesDropped; PacketStreamStats _packetStreamStats; PacketStreamStats _packetStreamWindowStats; diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index f260fd83d1..a22a002548 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -146,6 +146,11 @@ void InboundAudioStream::starved() { _starveCount++; } +void InboundAudioStream::overrideDesiredJitterBufferFramesTo(int desired) { + _dynamicJitterBuffers = false; + _desiredJitterBufferFrames = desired; +} + int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const { const int MIN_FRAMES_DESIRED = 0; const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity(); 
@@ -244,13 +249,13 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const { streamStats._timeGapWindowMax = _interframeTimeGapStatsForStatsPacket.getWindowMax(); streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage(); - streamStats._ringBufferFramesAvailable = _ringBuffer.framesAvailable(); - streamStats._ringBufferFramesAvailableAverage = _framesAvailableStats.getWindowAverage(); - streamStats._ringBufferDesiredJitterBufferFrames = _desiredJitterBufferFrames; - streamStats._ringBufferStarveCount = _starveCount; - streamStats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount; - streamStats._ringBufferOverflowCount = _ringBuffer.getOverflowCount(); - streamStats._ringBufferSilentFramesDropped = _silentFramesDropped; + streamStats._framesAvailable = _ringBuffer.framesAvailable(); + streamStats._framesAvailableAverage = _framesAvailableStats.getWindowAverage(); + streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames; + streamStats._starveCount = _starveCount; + streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount; + streamStats._overflowCount = _ringBuffer.getOverflowCount(); + streamStats._silentFramesDropped = _silentFramesDropped; streamStats._packetStreamStats = _incomingSequenceNumberStats.getStats(); streamStats._packetStreamWindowStats = _incomingSequenceNumberStats.getStatsForHistoryWindow(); diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 76b4ce18e8..82ebbc5ab0 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -62,6 +62,8 @@ public: void setToStarved(); + /// turns off dyanmic jitter buffers and sets the desired jitter buffer frames to specified value + void overrideDesiredJitterBufferFramesTo(int desired); /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds AudioStreamStats 
updateSeqHistoryAndGetAudioStreamStats(); @@ -95,8 +97,11 @@ public: private: void starved(); + SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID); int clampDesiredJitterBufferFramesValue(int desired) const; + int writeSamplesForDroppedPackets(int numSamples); + protected: // disallow copying of InboundAudioStream objects InboundAudioStream(const InboundAudioStream&); @@ -110,9 +115,7 @@ protected: virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0; int writeDroppableSilentSamples(int numSilentSamples); - int writeSamplesForDroppedPackets(int numSamples); - SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID); - + protected: AudioRingBuffer _ringBuffer; @@ -120,7 +123,7 @@ protected: bool _lastPopSucceeded; AudioRingBuffer::ConstIterator _lastPopOutput; - const bool _dynamicJitterBuffers; + bool _dynamicJitterBuffers; bool _useStDevForJitterCalc; int _calculatedJitterBufferFramesUsingMaxGap; diff --git a/libraries/audio/src/InboundMixedAudioStream.cpp b/libraries/audio/src/InboundMixedAudioStream.cpp deleted file mode 100644 index 208e5d8dcc..0000000000 --- a/libraries/audio/src/InboundMixedAudioStream.cpp +++ /dev/null @@ -1,17 +0,0 @@ - -#include "InboundMixedAudioStream.h" - -InboundMixedAudioStream::InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc) - : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, useStDevForJitterCalc) -{ -} - -int InboundMixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { - // mixed audio packets do not have any info between the seq num and the audio data. 
- numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t); - return 0; -} - -int InboundMixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { - return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t)); -} diff --git a/libraries/audio/src/InboundMixedAudioStream.h b/libraries/audio/src/InboundMixedAudioStream.h deleted file mode 100644 index d6a8d493ca..0000000000 --- a/libraries/audio/src/InboundMixedAudioStream.h +++ /dev/null @@ -1,14 +0,0 @@ - -#include "InboundAudioStream.h" -#include "PacketHeaders.h" - -class InboundMixedAudioStream : public InboundAudioStream { -public: - InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false); - - float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); } - -protected: - int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples); - int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples); -}; diff --git a/libraries/audio/src/InjectedAudioStream.h b/libraries/audio/src/InjectedAudioStream.h index 81659c4836..b92736b0ba 100644 --- a/libraries/audio/src/InjectedAudioStream.h +++ b/libraries/audio/src/InjectedAudioStream.h @@ -9,8 +9,8 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -#ifndef hifi_InjectedAudioRingBuffer_h -#define hifi_InjectedAudioRingBuffer_h +#ifndef hifi_InjectedAudioStream_h +#define hifi_InjectedAudioStream_h #include @@ -39,4 +39,4 @@ private: float _attenuationRatio; }; -#endif // hifi_InjectedAudioRingBuffer_h +#endif // hifi_InjectedAudioStream_h diff --git a/libraries/audio/src/MixedAudioStream.cpp b/libraries/audio/src/MixedAudioStream.cpp new file mode 100644 index 0000000000..b2c57c46d6 --- /dev/null +++ 
b/libraries/audio/src/MixedAudioStream.cpp @@ -0,0 +1,17 @@ + +#include "MixedAudioStream.h" + +MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc) + : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, useStDevForJitterCalc) +{ +} + +int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { + // mixed audio packets do not have any info between the seq num and the audio data. + numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t); + return 0; +} + +int MixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { + return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t)); +} diff --git a/libraries/audio/src/MixedAudioStream.h b/libraries/audio/src/MixedAudioStream.h new file mode 100644 index 0000000000..3f52a3c979 --- /dev/null +++ b/libraries/audio/src/MixedAudioStream.h @@ -0,0 +1,29 @@ +// +// MixedAudioStream.h +// libraries/audio/src +// +// Created by Stephen Birarda on 6/5/13. +// Copyright 2013 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_MixedAudioStream_h +#define hifi_MixedAudioStream + +#include "InboundAudioStream.h" +#include "PacketHeaders.h" + +class MixedAudioStream : public InboundAudioStream { +public: + MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false); + + float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); } + +protected: + int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples); + int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples); +}; + +#endif // hifi_MixedAudioStream_h diff --git a/libraries/audio/src/PositionalAudioStream.h b/libraries/audio/src/PositionalAudioStream.h index de76edaa63..06835b93a8 100644 --- a/libraries/audio/src/PositionalAudioStream.h +++ b/libraries/audio/src/PositionalAudioStream.h @@ -9,8 +9,8 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -#ifndef hifi_PositionalAudioRingBuffer_h -#define hifi_PositionalAudioRingBuffer_h +#ifndef hifi_PositionalAudioStream_h +#define hifi_PositionalAudioStream_h #include #include @@ -71,4 +71,4 @@ protected: AABox* _listenerUnattenuatedZone; }; -#endif // hifi_PositionalAudioRingBuffer_h +#endif // hifi_PositionalAudioStream_h From 145b7f8a0e27abc47d88dd4fc0fe92c12656b398 Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 14:05:17 -0700 Subject: [PATCH 27/62] got Preferences jitter buffer frames override working --- interface/src/DatagramProcessor.cpp | 2 +- interface/src/Menu.cpp | 6 +++--- interface/src/Menu.h | 6 +++--- interface/src/ui/PreferencesDialog.cpp | 10 +++++++--- interface/src/ui/Stats.cpp | 5 ++--- libraries/audio/src/InboundAudioStream.h | 2 ++ 6 files changed, 18 insertions(+), 13 deletions(-) diff --git 
a/interface/src/DatagramProcessor.cpp b/interface/src/DatagramProcessor.cpp index 6c39994bf3..8fda094f42 100644 --- a/interface/src/DatagramProcessor.cpp +++ b/interface/src/DatagramProcessor.cpp @@ -48,7 +48,7 @@ void DatagramProcessor::processDatagrams() { // only process this packet if we have a match on the packet version switch (packetTypeForPacket(incomingPacket)) { case PacketTypeMixedAudio: - QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToBuffer", Qt::QueuedConnection, + QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToStream", Qt::QueuedConnection, Q_ARG(QByteArray, incomingPacket)); break; case PacketTypeAudioStreamStats: diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index f0fcc20201..fb1bbd07cf 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -82,7 +82,7 @@ const int CONSOLE_HEIGHT = 200; Menu::Menu() : _actionHash(), - _audioJitterBufferSamples(0), + _audioJitterBufferFrames(0), _bandwidthDialog(NULL), _fieldOfView(DEFAULT_FIELD_OF_VIEW_DEGREES), _realWorldFieldOfView(DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES), @@ -627,7 +627,7 @@ void Menu::loadSettings(QSettings* settings) { lockedSettings = true; } - _audioJitterBufferSamples = loadSetting(settings, "audioJitterBufferSamples", 0); + _audioJitterBufferFrames = loadSetting(settings, "audioJitterBufferFrames", 0); _fieldOfView = loadSetting(settings, "fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES); _realWorldFieldOfView = loadSetting(settings, "realWorldFieldOfView", DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES); _faceshiftEyeDeflection = loadSetting(settings, "faceshiftEyeDeflection", DEFAULT_FACESHIFT_EYE_DEFLECTION); @@ -677,7 +677,7 @@ void Menu::saveSettings(QSettings* settings) { lockedSettings = true; } - settings->setValue("audioJitterBufferSamples", _audioJitterBufferSamples); + settings->setValue("audioJitterBufferFrames", _audioJitterBufferFrames); settings->setValue("fieldOfView", _fieldOfView); 
settings->setValue("faceshiftEyeDeflection", _faceshiftEyeDeflection); settings->setValue("maxVoxels", _maxVoxels); diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 81dd26dc01..6d5ad4e78c 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -85,8 +85,8 @@ public: void triggerOption(const QString& menuOption); QAction* getActionForOption(const QString& menuOption); - float getAudioJitterBufferSamples() const { return _audioJitterBufferSamples; } - void setAudioJitterBufferSamples(float audioJitterBufferSamples) { _audioJitterBufferSamples = audioJitterBufferSamples; } + float getAudioJitterBufferFrames() const { return _audioJitterBufferFrames; } + void setAudioJitterBufferFrames(float audioJitterBufferSamples) { _audioJitterBufferFrames = audioJitterBufferSamples; } float getFieldOfView() const { return _fieldOfView; } void setFieldOfView(float fieldOfView) { _fieldOfView = fieldOfView; } float getRealWorldFieldOfView() const { return _realWorldFieldOfView; } @@ -257,7 +257,7 @@ private: QHash _actionHash; - int _audioJitterBufferSamples; /// number of extra samples to wait before starting audio playback + int _audioJitterBufferFrames; /// number of extra samples to wait before starting audio playback BandwidthDialog* _bandwidthDialog; float _fieldOfView; /// in Degrees, doesn't apply to HMD like Oculus float _realWorldFieldOfView; // The actual FOV set by the user's monitor size and view distance diff --git a/interface/src/ui/PreferencesDialog.cpp b/interface/src/ui/PreferencesDialog.cpp index 01e80ae5e5..6f87b08093 100644 --- a/interface/src/ui/PreferencesDialog.cpp +++ b/interface/src/ui/PreferencesDialog.cpp @@ -149,7 +149,7 @@ void PreferencesDialog::loadPreferences() { ui.faceshiftEyeDeflectionSider->setValue(menuInstance->getFaceshiftEyeDeflection() * ui.faceshiftEyeDeflectionSider->maximum()); - ui.audioJitterSpin->setValue(menuInstance->getAudioJitterBufferSamples()); + 
ui.audioJitterSpin->setValue(menuInstance->getAudioJitterBufferFrames()); ui.realWorldFieldOfViewSpin->setValue(menuInstance->getRealWorldFieldOfView()); @@ -239,8 +239,12 @@ void PreferencesDialog::savePreferences() { Menu::getInstance()->setInvertSixenseButtons(ui.invertSixenseButtonsCheckBox->isChecked()); - Menu::getInstance()->setAudioJitterBufferSamples(ui.audioJitterSpin->value()); - Application::getInstance()->getAudio()->setJitterBufferSamples(ui.audioJitterSpin->value()); + Menu::getInstance()->setAudioJitterBufferFrames(ui.audioJitterSpin->value()); + if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) { + Application::getInstance()->getAudio()->overrideDesiredJitterBufferFramesTo(ui.audioJitterSpin->value()); + } else { + Application::getInstance()->getAudio()->unoverrideDesiredJitterBufferFrames(); + } Application::getInstance()->resizeGL(Application::getInstance()->getGLWidget()->width(), Application::getInstance()->getGLWidget()->height()); diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp index 77598e0c5e..15e441c638 100644 --- a/interface/src/ui/Stats.cpp +++ b/interface/src/ui/Stats.cpp @@ -285,9 +285,8 @@ void Stats::display( char audioJitter[30]; sprintf(audioJitter, - "Buffer msecs %.1f", - (float) (audio->getNetworkBufferLengthSamplesPerChannel() + (float) audio->getJitterBufferSamples()) / - (float) audio->getNetworkSampleRate() * 1000.f); + "Buffer msecs %.1f", + audio->getDesiredJitterBufferFrames() * BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC); drawText(30, glWidget->height() - 22, scale, rotation, font, audioJitter, color); diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 82ebbc5ab0..958491bca1 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -64,6 +64,7 @@ public: /// turns off dyanmic jitter buffers and sets the desired jitter buffer frames to specified value void 
overrideDesiredJitterBufferFramesTo(int desired); + void unoverrideDesiredJitterBufferFrames(); /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds AudioStreamStats updateSeqHistoryAndGetAudioStreamStats(); @@ -124,6 +125,7 @@ protected: AudioRingBuffer::ConstIterator _lastPopOutput; bool _dynamicJitterBuffers; + bool _dynamicJitterBuffersOverride; bool _useStDevForJitterCalc; int _calculatedJitterBufferFramesUsingMaxGap; From ed9bfdc50369b08907709ab4c14631a5eeaee7a6 Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 14:08:45 -0700 Subject: [PATCH 28/62] these didn't commit for some reason --- interface/src/Audio.h | 1 + libraries/audio/src/InboundAudioStream.cpp | 16 ++++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 3efdb1d3b1..87472740d0 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -58,6 +58,7 @@ public: virtual void startDrumSound(float volume, float frequency, float duration, float decay); void overrideDesiredJitterBufferFramesTo(int desired) { _receivedAudioStream.overrideDesiredJitterBufferFramesTo(desired); } + void unoverrideDesiredJitterBufferFrames() { _receivedAudioStream.unoverrideDesiredJitterBufferFrames(); } int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); } float getCollisionSoundMagnitude() { return _collisionSoundMagnitude; } diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index a22a002548..c7e2b145a9 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -17,6 +17,7 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit _lastPopSucceeded(false), _lastPopOutput(), _dynamicJitterBuffers(dynamicJitterBuffers), + _dynamicJitterBuffersOverride(false), _useStDevForJitterCalc(useStDevForJitterCalc), 
_calculatedJitterBufferFramesUsingMaxGap(0), _calculatedJitterBufferFramesUsingStDev(0), @@ -147,10 +148,17 @@ void InboundAudioStream::starved() { } void InboundAudioStream::overrideDesiredJitterBufferFramesTo(int desired) { - _dynamicJitterBuffers = false; + _dynamicJitterBuffersOverride = true; _desiredJitterBufferFrames = desired; } +void InboundAudioStream::unoverrideDesiredJitterBufferFrames() { + _dynamicJitterBuffersOverride = false; + if (!_dynamicJitterBuffers) { + _desiredJitterBufferFrames = 1; + } +} + int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const { const int MIN_FRAMES_DESIRED = 0; const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity(); @@ -177,7 +185,7 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS _calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME); _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag(); - if (_dynamicJitterBuffers && !_useStDevForJitterCalc) { + if (_dynamicJitterBuffers && !_dynamicJitterBuffersOverride && !_useStDevForJitterCalc) { _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingMaxGap); } } @@ -187,10 +195,10 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS const int STANDARD_DEVIATION_SAMPLE_COUNT = 500; if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) { const float NUM_STANDARD_DEVIATIONS = 3.0f; - _calculatedJitterBufferFramesUsingStDev = (int)ceilf(2 * (NUM_STANDARD_DEVIATIONS * _stdev.getStDev()) / USECS_PER_FRAME) + 1; + _calculatedJitterBufferFramesUsingStDev = (int)ceilf(NUM_STANDARD_DEVIATIONS * _stdev.getStDev() / USECS_PER_FRAME) + 1; _stdev.reset(); - if (_dynamicJitterBuffers && _useStDevForJitterCalc) { + if (_dynamicJitterBuffers && !_dynamicJitterBuffersOverride && _useStDevForJitterCalc) { _desiredJitterBufferFrames = 
clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingStDev); } } From 97054d668b3717d984c3a8b6d3f633412d4e1816 Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 14:15:14 -0700 Subject: [PATCH 29/62] fixed header macros --- libraries/audio/src/MixedAudioStream.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/audio/src/MixedAudioStream.h b/libraries/audio/src/MixedAudioStream.h index 3f52a3c979..17769be128 100644 --- a/libraries/audio/src/MixedAudioStream.h +++ b/libraries/audio/src/MixedAudioStream.h @@ -10,7 +10,7 @@ // #ifndef hifi_MixedAudioStream_h -#define hifi_MixedAudioStream +#define hifi_MixedAudioStream_h #include "InboundAudioStream.h" #include "PacketHeaders.h" From d0a1d732c9b2e15ffad8739d9664c9b56ecc929e Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 14:18:22 -0700 Subject: [PATCH 30/62] updated preferences UI "samples" to "frames" --- interface/ui/preferencesDialog.ui | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/interface/ui/preferencesDialog.ui b/interface/ui/preferencesDialog.ui index a1a794bc34..d610b4e8bd 100644 --- a/interface/ui/preferencesDialog.ui +++ b/interface/ui/preferencesDialog.ui @@ -1489,7 +1489,7 @@ padding: 10px;margin-top:10px color: rgb(51, 51, 51) - Audio Jitter Buffer Samples (0 for automatic) + Audio Jitter Buffer Frames (0 for automatic) 15 From d221f69767ae1e6b3d64fbeeb887d47bc17546c9 Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 14:22:04 -0700 Subject: [PATCH 31/62] clamp overridden desired jitter buffer frames value --- libraries/audio/src/InboundAudioStream.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index c7e2b145a9..12326fb01d 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -149,7 +149,7 @@ void InboundAudioStream::starved() { void 
InboundAudioStream::overrideDesiredJitterBufferFramesTo(int desired) { _dynamicJitterBuffersOverride = true; - _desiredJitterBufferFrames = desired; + _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(desired); } void InboundAudioStream::unoverrideDesiredJitterBufferFrames() { From 7e59723522b233ec98b1fef405b61c656e925435 Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 14:24:58 -0700 Subject: [PATCH 32/62] removed +1 for philip's jitter frames calculation --- libraries/audio/src/InboundAudioStream.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 12326fb01d..bfaa4c6d63 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -195,7 +195,7 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS const int STANDARD_DEVIATION_SAMPLE_COUNT = 500; if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) { const float NUM_STANDARD_DEVIATIONS = 3.0f; - _calculatedJitterBufferFramesUsingStDev = (int)ceilf(NUM_STANDARD_DEVIATIONS * _stdev.getStDev() / USECS_PER_FRAME) + 1; + _calculatedJitterBufferFramesUsingStDev = (int)ceilf(NUM_STANDARD_DEVIATIONS * _stdev.getStDev() / USECS_PER_FRAME); _stdev.reset(); if (_dynamicJitterBuffers && !_dynamicJitterBuffersOverride && _useStDevForJitterCalc) { From 5c47e9013c696b9baf13fb9f5289ab8c75f4bf69 Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Mon, 28 Jul 2014 18:36:25 -0700 Subject: [PATCH 33/62] Working procedural walk animation with two keyframes --- examples/dancer.js | 561 +++++++++++++++++++++++++++ libraries/script-engine/src/Quat.cpp | 4 + libraries/script-engine/src/Quat.h | 1 + 3 files changed, 566 insertions(+) create mode 100644 examples/dancer.js diff --git a/examples/dancer.js b/examples/dancer.js new file mode 100644 index 0000000000..e7a75f7596 --- /dev/null +++ b/examples/dancer.js @@ -0,0 +1,561 
@@ +// +// dancer.js +// hifi +// +// Created by Stephen Birarda on 2/20/14. +// Modified by Philip on 3/3/14 +// Copyright (c) 2014 HighFidelity, Inc. All rights reserved. +// +// This is an example script that demonstrates an NPC avatar. +// +// + +function getRandomFloat(min, max) { +return Math.random() * (max - min) + min; +} + +function getRandomInt (min, max) { +return Math.floor(Math.random() * (max - min + 1)) + min; +} + +function printVector(string, vector) { +print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); +} + +var CHANCE_OF_MOVING = 0.01; +var CHANCE_OF_SOUND = 0; +var CHANCE_OF_HEAD_TURNING = 0.05; +var CHANCE_OF_BIG_MOVE = 0.1; +var CHANCE_OF_WAVING = 0.009; + +var isMoving = true; +var isTurningHead = false; +var isPlay +ingAudio = false; +var isWaving = false; +var waveFrequency = 0.0; +var waveAmplitude = 0.0; + +var X_MIN = 5.50; +var X_MAX = 5.60; +var Z_MIN = 5.00; +var Z_MAX = 5.10; +var Y_PELVIS = 1.0; +var MAX_PELVIS_DELTA = 2.5; + +var AVATAR_PELVIS_HEIGHT = 0.75; + +var MOVE_RANGE_SMALL = 1.0; +var TURN_RANGE = 70.0; +var STOP_TOLERANCE = 0.05; +var MOVE_RATE = 0.05; +var TURN_RATE = 0.15; +var PITCH_RATE = 0.20; +var PITCH_RANGE = 30.0; + +var firstPosition = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) }; +var targetPosition = { x: 0, y: 0, z: 0 }; +var targetDirection = { x: 0, y: 0, z: 0, w: 0 }; +var currentDirection = { x: 0, y: 0, z: 0, w: 0 }; +var targetHeadPitch = 0.0; + +var cumulativeTime = 0.0; + +var basePelvisHeight = 0.0; +var pelvisOscillatorPosition = 0.0; +var pelvisOscillatorVelocity = 0.0; + +function clamp(val, min, max){ + return Math.max(min, Math.min(max, val)) +} + +// pick an integer between 1 and 100 that is not 28 for the face model for this bot +botNumber = 28; + +while (botNumber == 28) { + botNumber = getRandomInt(1, 100); +} + +if (botNumber <= 20) { + newFaceFilePrefix = "ron"; + newBodyFilePrefix = "defaultAvatar_body" +} else { + if (botNumber <= 
40) { + newFaceFilePrefix = "superhero"; + } else if (botNumber <= 60) { + newFaceFilePrefix = "amber"; + } else if (botNumber <= 80) { + newFaceFilePrefix = "ron"; + } else { + newFaceFilePrefix = "angie"; + } + + newBodyFilePrefix = "bot" + botNumber; +} + + newFaceFilePrefix = "ron"; + newBodyFilePrefix = "bot" + 63; + +// set the face model fst using the bot number +// there is no need to change the body model - we're using the default +Avatar.faceModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newFaceFilePrefix + ".fst"; +Avatar.skeletonModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newBodyFilePrefix + "_a.fst"; +Avatar.billboardURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/billboards/bot" + botNumber + ".png"; + +Agent.isAvatar = true; +Agent.isListeningToAudioStream = true; + +// change the avatar's position to the random one +Avatar.position = firstPosition; +basePelvisHeight = firstPosition.y; +printVector("New dancer, position = ", Avatar.position); + +function loadSounds() { + var sound_filenames = ["AB1.raw", "Anchorman2.raw", "B1.raw", "B1.raw", "Bale1.raw", "Bandcamp.raw", + "Big1.raw", "Big2.raw", "Brian1.raw", "Buster1.raw", "CES1.raw", "CES2.raw", "CES3.raw", "CES4.raw", + "Carrie1.raw", "Carrie3.raw", "Charlotte1.raw", "EN1.raw", "EN2.raw", "EN3.raw", "Eugene1.raw", "Francesco1.raw", + "Italian1.raw", "Japanese1.raw", "Leigh1.raw", "Lucille1.raw", "Lucille2.raw", "MeanGirls.raw", "Murray2.raw", + "Nigel1.raw", "PennyLane.raw", "Pitt1.raw", "Ricardo.raw", "SN.raw", "Sake1.raw", "Samantha1.raw", "Samantha2.raw", + "Spicoli1.raw", "Supernatural.raw", "Swearengen1.raw", "TheDude.raw", "Tony.raw", "Triumph1.raw", "Uma1.raw", + "Walken1.raw", "Walken2.raw", "Z1.raw", "Z2.raw" + ]; + + var SOUND_BASE_URL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Cocktail+Party+Snippets/Raws/"; + + for (var i = 0; i < sound_filenames.length; i++) { + sounds.push(new 
Sound(SOUND_BASE_URL + sound_filenames[i])); + } +} + +var sounds = []; +loadSounds(); + +function loadAnimations() { + + var animation_filenames = []; + var ANIMATION_BASE_URL = "http://highfidelity-dev.s3.amazonaws.com/animations/"; + + if (botNumber < 20) { + animation_filenames = ["robot/wave_hip_hop_dance.fbx", "robot/robot_hip_hop_dance.fbx"]; + } else if (botNumber <= 40) { + animation_filenames = ["superhero/house_dancing_2.fbx", "superhero/house_dancing_3.fbx", "superhero/house_dancing_4.fbx"]; + } else if (botNumber <= 60) { + animation_filenames = ["amber/house_dancing.fbx"] + } else if (botNumber <= 80) { + animation_filenames = ["ron/hip_hop_dancing.fbx", "ron/gangnam_style.fbx"]; + } else { + animation_filenames = ["angie/hip_hop_dancing_6.fbx"]; + } + + for (var i = 0; i < animation_filenames.length; i++) { + animations.push(AnimationCache.getAnimation(ANIMATION_BASE_URL + animation_filenames[i])); + } +} + +var animations = []; +loadAnimations(); + +function playRandomSound() { + if (!Agent.isPlayingAvatarSound) { + var whichSound = Math.floor((Math.random() * sounds.length) % sounds.length); + Agent.playAvatarSound(sounds[whichSound]); + } +} + +function stopWaving() { + isWaving = false; + Avatar.clearJointData(SHOULDER_JOINT_NUMBER); + Avatar.clearJointData(ELBOW_JOINT_NUMBER); + Avatar.clearJointData(JOINT_SPINE); +} + +//Animation KeyFrame constructor. 
rightJoints and leftJoints must be the same size +function WalkKeyFrame(rightJoints, leftJoints, singleJoints) { + this.rotations = []; + + for (var i = 0; i < rightJoints.length; i++) { + this.rotations[this.rotations.length] = rightJoints[i]; + this.rotations[this.rotations.length] = leftJoints[i]; + } + for (var i = 0; i < singleJoints.length; i++) { + this.rotations[this.rotations.length] = singleJoints[i]; + } +} + +//Procedural walk animation using two keyframes +//We use a separate array for front and back joints +var frontKeyFrames = []; +var backKeyFrames = []; +//for non mirrored joints such as the spine +var singleKeyFrames = []; +//Pitch, yaw, and roll for the joints +var frontAngles = []; +var backAngles = []; +//for non mirrored joints such as the spine +var singleAngles = []; + + + +//Actual joint mappings +var SHOULDER_JOINT_NUMBER = 15; +var ELBOW_JOINT_NUMBER = 16; +var JOINT_R_HIP = 1; +var JOINT_R_KNEE = 2; +var JOINT_L_HIP = 6; +var JOINT_L_KNEE = 7; +var JOINT_R_ARM = 15; +var JOINT_R_FOREARM = 16; +var JOINT_L_ARM = 39; +var JOINT_L_FOREARM = 40; +var JOINT_SPINE = 11; + +// ******************************* Animation Is Defined Below ************************************* + +var NUM_FRAMES = 2; +for (var i = 0; i < NUM_FRAMES; i++) { + frontAngles[i] = []; + backAngles[i] = []; + singleAngles[i] = []; + frontKeyFrames[i] = []; + backKeyFrames[i] = []; + singleKeyFrames[i] = []; +} +//Joint order for actual joint mappings, should be interleaved R,L,R,L,...S,S,S for R = right, L = left, S = single +var JOINT_ORDER = [JOINT_R_HIP, JOINT_L_HIP, JOINT_R_KNEE, JOINT_L_KNEE, JOINT_R_ARM, JOINT_L_ARM, JOINT_R_FOREARM, JOINT_L_FOREARM, JOINT_SPINE]; + +//Joint indices for joints that are duplicated, such as arms, It must match JOINT_ORDER +var HIP = 0; +var KNEE = 1; +var ARM = 2; +var FOREARM = 3; +//Joint indices for single joints +var SPINE = 0; + +//Symmetry multipliers for dthe left half [pitch, roll, yaw]. 
-1 means reflect, 1 means no reflect +var SYMMETRY = []; +SYMMETRY[HIP] = [1, -1, -1]; +SYMMETRY[KNEE] = [1, -1, -1]; +SYMMETRY[ARM] = [1, -1, -1]; +SYMMETRY[FOREARM] = [1, -1, -1]; + +//We have to store the angles so we can invert yaw and roll when making the animation +//symmetrical + + +//Front refers to leg, not arm. +//Legs Extending +frontAngles[0][HIP] = [30.0, 0.0, 8.0]; +frontAngles[0][KNEE] = [-15.0, 0.0, 0.0]; +frontAngles[0][ARM] = [85.0, -25.0, 0.0]; +frontAngles[0][FOREARM] = [0.0, 0.0, -15.0]; + +backAngles[0][HIP] = [-15, 0.0, 8.0]; +backAngles[0][KNEE] = [-28, 0.0, 0.0]; +backAngles[0][ARM] = [85.0, 20.0, 0.0]; +backAngles[0][FOREARM] = [10.0, 0.0, -25.0]; + +singleAngles[0][SPINE] = [-0.0, 0.0, 0.0]; + +//Legs Passing +frontAngles[1][HIP] = [6.0, 0.0, 8.0]; +frontAngles[1][KNEE] = [-12.0, 0.0, 0.0]; +frontAngles[1][ARM] = [85.0, 0.0, 0.0]; +frontAngles[1][FOREARM] = [0.0, 0.0, -15.0]; + +backAngles[1][HIP] = [10.0, 0.0, 8.0]; +backAngles[1][KNEE] = [-55.0, 0.0, 0.0]; +backAngles[1][ARM] = [85.0, 0.0, 0.0]; +backAngles[1][FOREARM] = [0.0, 0.0, -15.0]; + +singleAngles[1][SPINE] = [0.0, 0.0, 0.0]; + +// ******************************* Animation Is Defined Above ************************************* + +//Actual keyframes for the animation +var walkKeyFrames = []; +//Generate quaternions from the angles +for (var i = 0; i < frontAngles.length; i++) { + for (var j = 0; j < frontAngles[i].length; j++) { + frontKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(frontAngles[i][j][0], frontAngles[i][j][1], frontAngles[i][j][2]); + backKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(SYMMETRY[j][0] * backAngles[i][j][0], SYMMETRY[j][1] * backAngles[i][j][1], SYMMETRY[j][2] * backAngles[i][j][2]); + } +} +for (var i = 0; i < singleAngles.length; i++) { + for (var j = 0; j < singleAngles[i].length; j++) { + singleKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(singleAngles[i][j][0], singleAngles[i][j][1], singleAngles[i][j][2]); + } +} +walkKeyFrames[0] = new 
WalkKeyFrame(frontKeyFrames[0], backKeyFrames[0], singleKeyFrames[0]); +walkKeyFrames[1] = new WalkKeyFrame(frontKeyFrames[1], backKeyFrames[1], singleKeyFrames[1]); + +//Generate mirrored quaternions for the other half of the body +for (var i = 0; i < frontAngles.length; i++) { + for (var j = 0; j < frontAngles[i].length; j++) { + frontKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(SYMMETRY[j][0] * frontAngles[i][j][0], SYMMETRY[j][1] * frontAngles[i][j][1], SYMMETRY[j][2] * frontAngles[i][j][2]); + backKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(backAngles[i][j][0], backAngles[i][j][1], backAngles[i][j][2]); + } +} +for (var i = 0; i < singleAngles.length; i++) { + for (var j = 0; j < singleAngles[i].length; j++) { + singleKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(-singleAngles[i][j][0], -singleAngles[i][j][1], -singleAngles[i][j][2]); + } +} +walkKeyFrames[2] = new WalkKeyFrame(backKeyFrames[0], frontKeyFrames[0], singleKeyFrames[0]); +walkKeyFrames[3] = new WalkKeyFrame(backKeyFrames[1], frontKeyFrames[1], singleKeyFrames[1]); + +//Hook up pointers to the next keyframe +for (var i = 0; i < walkKeyFrames.length - 1; i++) { + walkKeyFrames[i].nextFrame = walkKeyFrames[i+1]; +} +walkKeyFrames[walkKeyFrames.length-1].nextFrame = walkKeyFrames[0]; + +//Set up the bezier curve control points using technique described at +//https://www.cs.tcd.ie/publications/tech-reports/reports.94/TCD-CS-94-18.pdf +//Set up all C1 +for (var i = 0; i < walkKeyFrames.length; i++) { + walkKeyFrames[i].nextFrame.controlPoints = []; + for (var j = 0; j < walkKeyFrames[i].rotations.length; j++) { + walkKeyFrames[i].nextFrame.controlPoints[j] = []; + var R = Quat.slerp(walkKeyFrames[i].rotations[j], walkKeyFrames[i].nextFrame.rotations[j], 2.0); + var T = Quat.slerp(R, walkKeyFrames[i].nextFrame.nextFrame.rotations[j], 0.5); + walkKeyFrames[i].nextFrame.controlPoints[j][0] = Quat.slerp(walkKeyFrames[i].nextFrame.rotations[j], T, 0.33333); + } +} +//Set up all C2 +for (var i = 0; i < 
walkKeyFrames.length; i++) { + for (var j = 0; j < walkKeyFrames[i].rotations.length; j++) { + walkKeyFrames[i].controlPoints[j][1] = Quat.slerp(walkKeyFrames[i].nextFrame.rotations[j], walkKeyFrames[i].nextFrame.controlPoints[j][0], -1.0); + } +} +//DeCasteljau evaluation to evaluate the bezier curve +function deCasteljau(k1, k2, c1, c2, f) { + var a = Quat.slerp(k1, c1, f); + var b = Quat.slerp(c1, c2, f); + var c = Quat.slerp(c2, k2, f); + var d = Quat.slerp(a, b, f); + var e = Quat.slerp(b, c, f); + return Quat.slerp(d, e, f); +} + +var currentFrame = 0; + +var walkTime = 0.0; +var walkFrequency = 3.0; + +function keepWalking(deltaTime) { + + walkTime += walkFrequency * deltaTime; + if (walkTime > 1.0) { + walkTime = 0.0; + currentFrame++; + if (currentFrame > 3) { + currentFrame = 0; + } + } + + var frame = walkKeyFrames[currentFrame]; + + for (var i = 0; i < JOINT_ORDER.length; i++) { + Avatar.setJointData(JOINT_ORDER[i], deCasteljau(frame.rotations[i], frame.nextFrame.rotations[i], frame.controlPoints[i][0], frame.controlPoints[i][1], walkTime)); + } +} + +function stopWalking() { + Avatar.clearJointData(JOINT_R_HIP); + Avatar.clearJointData(JOINT_R_KNEE); + Avatar.clearJointData(JOINT_L_HIP); + Avatar.clearJointData(JOINT_L_KNEE); +} + +var trailingAverageLoudness = 0; +var MAX_SAMPLE = 32767; +var DB_METER_BASE = Math.log(MAX_SAMPLE); + +var RAND_RATIO_LAST = getRandomFloat(0.1, 0.3); +var RAND_TRAILING = 1 - RAND_RATIO_LAST; + +function jumpWithLoudness(deltaTime) { + // potentially change pelvis height depending on trailing average loudness + + pelvisOscillatorVelocity += deltaTime * Agent.lastReceivedAudioLoudness * 700.0 ; + + pelvisOscillatorVelocity -= pelvisOscillatorPosition * 0.75; + pelvisOscillatorVelocity *= 0.97; + pelvisOscillatorPosition += deltaTime * pelvisOscillatorVelocity; + Avatar.headPitch = pelvisOscillatorPosition * 60.0; + + var pelvisPosition = Avatar.position; + pelvisPosition.y = (Y_PELVIS - 0.35) + pelvisOscillatorPosition; + + 
if (pelvisPosition.y < Y_PELVIS) { + pelvisPosition.y = Y_PELVIS; + } else if (pelvisPosition.y > Y_PELVIS + 1.0) { + pelvisPosition.y = Y_PELVIS + 1.0; + } + + Avatar.position = pelvisPosition; +} + +var jointMapping = null; +var frameIndex = 0.0; +var isPlayingDanceAnimation = false; +var randomAnimation = null; +var animationLoops = 1; +var forcedMove = false; + +var FRAME_RATE = 30.0; + +var wasMovingLastFrame = false; +var wasDancing = false; + +function danceAnimation(deltaTime) { + + var flooredFrame = Math.floor(frameIndex); + + if (jointMapping === null || flooredFrame >= randomAnimation.frames.length * animationLoops) { + // we've run our animation for our number of loops, start a new one + frameIndex = 0.0; + jointMapping = null; + randomAnimation = null; + } + + if (isMoving || (!wasMovingLastFrame && frameIndex === 0)) { + if (!isMoving) { + forcedMove = true; + possiblyStopDancing(); + } + + wasMovingLastFrame = true; + handleWalking(); + } else { + if (jointMapping === null) { + // pick a random animation + var whichAnimation = Math.floor((Math.random() * animations.length) % animations.length); + randomAnimation = animations[whichAnimation]; + + var avatarJointNames = Avatar.jointNames; + var animationJointNames = randomAnimation.jointNames; + if (avatarJointNames === 0 || animationJointNames.length === 0) { + return; + } + jointMapping = new Array(avatarJointNames.length); + for (var i = 0; i < avatarJointNames.length; i++) { + jointMapping[i] = animationJointNames.indexOf(avatarJointNames[i]); + } + } + + frameIndex += deltaTime * FRAME_RATE; + var frames = randomAnimation.frames; + var rotations = frames[flooredFrame % frames.length].rotations; + for (var j = 0; j < jointMapping.length; j++) { + var rotationIndex = jointMapping[j]; + if (rotationIndex != -1) { + Avatar.setJointData(j, rotations[rotationIndex]); + } + } + + wasMovingLastFrame = false; + wasDancing = true; + } +} + +function handleHeadTurn() { + if (!isTurningHead && (Math.random() 
< CHANCE_OF_HEAD_TURNING)) { + targetHeadPitch = getRandomFloat(-PITCH_RANGE, PITCH_RANGE); + isTurningHead = true; + } else { + Avatar.headPitch = Avatar.headPitch + (targetHeadPitch - Avatar.headPitch) * PITCH_RATE; + if (Math.abs(Avatar.headPitch - targetHeadPitch) < STOP_TOLERANCE) { + isTurningHead = false; + } + } +} + +var currentShoulderQuat = Avatar.getJointRotation(SHOULDER_JOINT_NUMBER); +var targetShoulderQuat = currentShoulderQuat; +var idleShoulderQuat = currentShoulderQuat; +var currentSpineQuat = Avatar.getJointRotation(JOINT_SPINE); +var targetSpineQuat = currentSpineQuat; +var idleSpineQuat = currentSpineQuat; +var currentElbowQuat = Avatar.getJointRotation(ELBOW_JOINT_NUMBER); +var targetElbowQuat = currentElbowQuat; +var idleElbowQuat = currentElbowQuat; + +function handleWalking(deltaTime) { + if (forcedMove || (!isMoving && Math.random() < CHANCE_OF_MOVING)) { + // Set new target location + targetDirection = Quat.multiply(Avatar.orientation, Quat.angleAxis(getRandomFloat(-TURN_RANGE, TURN_RANGE), { x:0, y:1, z:0 })); + var front = Quat.getFront(targetDirection); + + targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, MOVE_RANGE_SMALL))); + + targetPosition.x = clamp(targetPosition.x, X_MIN, X_MAX); + targetPosition.z = clamp(targetPosition.z, Z_MIN, Z_MAX); + targetPosition.y = Y_PELVIS; + + wasMovingLastFrame = true; + isMoving = true; + forcedMove = false; + } else if (isMoving) { + keepWalking(deltaTime); + // Avatar.position = Vec3.sum(Avatar.position, Vec3.multiply(Vec3.subtract(targetPosition, Avatar.position), MOVE_RATE)); + Avatar.orientation = Quat.slerp(Avatar.orientation, targetDirection, TURN_RATE); + var diff = Vec3.subtract(Avatar.position, targetPosition); + diff.y = 0.0; + + wasMovingLastFrame = true; + + if (Vec3.length(diff) < STOP_TOLERANCE) { + isMoving = false; + stopWalking(); + } + } +} + +function handleTalking() { + if (Math.random() < CHANCE_OF_SOUND) { + playRandomSound(); + } +} + 
+function changePelvisHeight(newHeight) { + var newPosition = Avatar.position; + newPosition.y = newHeight; + Avatar.position = newPosition; +} + +function possiblyStopDancing() { + if (wasDancing) { + for (var j = 0; j < Avatar.jointNames.length; j++) { + Avatar.clearJointData(j); + } + + changePelvisHeight(Y_PELVIS); + } +} + +function updateBehavior(deltaTime) { + cumulativeTime += deltaTime; + + if (AvatarList.containsAvatarWithDisplayName("mrdj")) { + if (wasMovingLastFrame && !wasDancing) { + isMoving = false; + } + + // we have a DJ, shouldn't we be dancing? + jumpWithLoudness(deltaTime); + danceAnimation(deltaTime); + } else { + // make sure we're not dancing anymore + possiblyStopDancing(); + + wasDancing = false; + + // no DJ, let's just chill on the dancefloor - randomly walking and talking + handleHeadTurn(); + handleWalking(deltaTime); + handleTalking(); + } +} + +Script.update.connect(updateBehavior); \ No newline at end of file diff --git a/libraries/script-engine/src/Quat.cpp b/libraries/script-engine/src/Quat.cpp index 8308536f97..66281883f0 100644 --- a/libraries/script-engine/src/Quat.cpp +++ b/libraries/script-engine/src/Quat.cpp @@ -76,6 +76,10 @@ glm::quat Quat::squad(const glm::quat& q1, const glm::quat& q2, const glm::quat& return glm::squad(q1, q2, s1, s2, h); } +float Quat::dot(const glm::quat& q1, const glm::quat& q2) { + return glm::dot(q1, q2); +} + void Quat::print(const QString& lable, const glm::quat& q) { qDebug() << qPrintable(lable) << q.x << "," << q.y << "," << q.z << "," << q.w; } diff --git a/libraries/script-engine/src/Quat.h b/libraries/script-engine/src/Quat.h index 190c823118..faae636f02 100644 --- a/libraries/script-engine/src/Quat.h +++ b/libraries/script-engine/src/Quat.h @@ -38,6 +38,7 @@ public slots: glm::quat mix(const glm::quat& q1, const glm::quat& q2, float alpha); glm::quat slerp(const glm::quat& q1, const glm::quat& q2, float alpha); glm::quat squad(const glm::quat& q1, const glm::quat& q2, const glm::quat& s1, 
const glm::quat& s2, float h); + float dot(const glm::quat& q1, const glm::quat& q2); void print(const QString& lable, const glm::quat& q); }; From aead7a682398b047a8fa7bc3a881f8509b84e97b Mon Sep 17 00:00:00 2001 From: wangyix Date: Mon, 28 Jul 2014 22:48:29 -0700 Subject: [PATCH 34/62] removed get pointer methods from AudioRingBUffer --- libraries/audio/src/AudioRingBuffer.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index 824b197c93..00be17f02b 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -50,10 +50,6 @@ public: int getSampleCapacity() const { return _sampleCapacity; } int getFrameCapacity() const { return _frameCapacity; } - // assume callers using this will never wrap around the end - const int16_t* getNextOutput() const { return _nextOutput; } - const int16_t* getBuffer() const { return _buffer; } - int readSamples(int16_t* destination, int maxSamples); int writeSamples(const int16_t* source, int maxSamples); From ccedb1bd207c4702d58dacc11f3a0a21aaf81338 Mon Sep 17 00:00:00 2001 From: wangyix Date: Tue, 29 Jul 2014 10:40:01 -0700 Subject: [PATCH 35/62] forgot some code in checkBuffersBeforeFrameSend corrected updateLastPopOutputTrailingLoudness behavior to match what the old code did. 
--- assignment-client/src/audio/AudioMixer.cpp | 20 ++++++------- .../src/audio/AudioMixerClientData.cpp | 17 +++++++++-- .../src/audio/AudioMixerClientData.h | 2 +- libraries/audio/src/AudioRingBuffer.cpp | 29 ++++++++++++------- libraries/audio/src/AudioRingBuffer.h | 10 +++++-- libraries/audio/src/PositionalAudioStream.cpp | 22 +++++--------- libraries/audio/src/PositionalAudioStream.h | 15 ++-------- 7 files changed, 64 insertions(+), 51 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index d3ec39ace1..d8d9006a1d 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -113,7 +113,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* distanceBetween = EPSILON; } - if (streamToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) { + if (streamToAdd->getLastPopOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) { // according to mixer performance we have decided this does not get to be mixed in // bail out return; @@ -284,7 +284,7 @@ void AudioMixer::prepareMixForListeningNode(Node* node) { if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) && otherNodeStream->lastPopSucceeded() - && otherNodeStream->getNextOutputTrailingLoudness() > 0.0f) { + && otherNodeStream->getLastPopOutputTrailingLoudness() > 0.0f) { addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream); } @@ -544,16 +544,16 @@ void AudioMixer::run() { } foreach (const SharedNodePointer& node, nodeList->getNodeHash()) { - if (node->getActiveSocket() && node->getLinkedData()) { - + if (node->getLinkedData()) { AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData(); - // request a frame from each audio stream. a pointer to the popped data is stored as a member - // in InboundAudioStream. 
That's how the popped audio data will be read for mixing - nodeData->audioStreamsPopFrameForMixing(); - - if (node->getType() == NodeType::Agent - && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioStream()) { + // this function will request a frame from each audio stream. + // a pointer to the popped data is stored as a member in InboundAudioStream. + // That's how the popped audio data will be read for mixing (but only if the pop was successful) + nodeData->checkBuffersBeforeFrameSend(_sourceUnattenuatedZone, _listenerUnattenuatedZone); + + if (node->getType() == NodeType::Agent && node->getActiveSocket() + && nodeData->getAvatarAudioStream()) { prepareMixForListeningNode(node.data()); diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index 17e46f3692..e681a6ccbf 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -98,10 +98,23 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { return 0; } -void AudioMixerClientData::audioStreamsPopFrameForMixing() { +void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone) { QHash::ConstIterator i; for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) { - i.value()->popFrames(1); + PositionalAudioStream* stream = i.value(); + if (stream->popFrames(1)) { + // this is a ring buffer that is ready to go + + // calculate the trailing avg loudness for the next frame + // that would be mixed in + stream->updateLastPopOutputTrailingLoudness(); + + if (checkSourceZone && checkSourceZone->contains(stream->getPosition())) { + stream->setListenerUnattenuatedZone(listenerZone); + } else { + stream->setListenerUnattenuatedZone(NULL); + } + } } } diff --git a/assignment-client/src/audio/AudioMixerClientData.h b/assignment-client/src/audio/AudioMixerClientData.h index 287f4f7b65..80f3f9e3ca 100644 --- 
a/assignment-client/src/audio/AudioMixerClientData.h +++ b/assignment-client/src/audio/AudioMixerClientData.h @@ -27,7 +27,7 @@ public: int parseData(const QByteArray& packet); - void audioStreamsPopFrameForMixing(); + void checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone); void removeDeadInjectedStreams(); diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index 8dbc90883b..c687ab8648 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ b/libraries/audio/src/AudioRingBuffer.cpp @@ -218,17 +218,26 @@ int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int } } -float AudioRingBuffer::getNextOutputFrameLoudness() const { +float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const { float loudness = 0.0f; - int16_t* sampleAt = _nextOutput; - int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1; - if (samplesAvailable() >= _numFrameSamples) { - for (int i = 0; i < _numFrameSamples; ++i) { - loudness += fabsf(*sampleAt); - sampleAt = sampleAt == _bufferLastAt ? _buffer : sampleAt + 1; - } - loudness /= _numFrameSamples; - loudness /= MAX_SAMPLE_VALUE; + const int16_t* sampleAt = frameStart; + const int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1; + + for (int i = 0; i < _numFrameSamples; ++i) { + loudness += fabsf(*sampleAt); + sampleAt = sampleAt == _bufferLastAt ? 
_buffer : sampleAt + 1; } + loudness /= _numFrameSamples; + loudness /= MAX_SAMPLE_VALUE; + return loudness; } + +float AudioRingBuffer::getFrameLoudness(ConstIterator frameStart) const { + return getFrameLoudness(&(*frameStart)); +} + +float AudioRingBuffer::getNextOutputFrameLoudness() const { + return getFrameLoudness(_nextOutput); +} + diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index 00be17f02b..ed680b18b1 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -71,6 +71,10 @@ public: int getOverflowCount() const { return _overflowCount; } /// how many times has the ring buffer has overwritten old data int addSilentFrame(int numSilentSamples); + +private: + float getFrameLoudness(const int16_t* frameStart) const; + protected: // disallow copying of AudioRingBuffer objects AudioRingBuffer(const AudioRingBuffer&); @@ -106,7 +110,7 @@ public: bool operator==(const ConstIterator& rhs) { return _at == rhs._at; } bool operator!=(const ConstIterator& rhs) { return _at != rhs._at; } - int16_t operator*() { return *_at; } + const int16_t& operator*() { return *_at; } ConstIterator& operator=(const ConstIterator& rhs) { _capacity = rhs._capacity; @@ -138,7 +142,7 @@ public: return tmp; } - int16_t operator[] (int i) { + const int16_t& operator[] (int i) { return *atShiftedBy(i); } @@ -175,6 +179,8 @@ public: }; ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); } + + float getFrameLoudness(ConstIterator frameStart) const; }; #endif // hifi_AudioRingBuffer_h diff --git a/libraries/audio/src/PositionalAudioStream.cpp b/libraries/audio/src/PositionalAudioStream.cpp index b50e339185..1e465b925d 100644 --- a/libraries/audio/src/PositionalAudioStream.cpp +++ b/libraries/audio/src/PositionalAudioStream.cpp @@ -29,32 +29,26 @@ PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, b _orientation(0.0f, 0.0f, 0.0f, 0.0f), 
_shouldLoopbackForNode(false), _isStereo(isStereo), - _nextOutputTrailingLoudness(0.0f), + _lastPopOutputTrailingLoudness(0.0f), _listenerUnattenuatedZone(NULL) { } -int PositionalAudioStream::parseData(const QByteArray& packet) { - int bytesRead = InboundAudioStream::parseData(packet); - updateNextOutputTrailingLoudness(); - return bytesRead; -} - -void PositionalAudioStream::updateNextOutputTrailingLoudness() { - float nextLoudness = _ringBuffer.getNextOutputFrameLoudness(); +void PositionalAudioStream::updateLastPopOutputTrailingLoudness() { + float lastPopLoudness = _ringBuffer.getFrameLoudness(_lastPopOutput); const int TRAILING_AVERAGE_FRAMES = 100; const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES; const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO; const float LOUDNESS_EPSILON = 0.000001f; - if (nextLoudness >= _nextOutputTrailingLoudness) { - _nextOutputTrailingLoudness = nextLoudness; + if (lastPopLoudness >= _lastPopOutputTrailingLoudness) { + _lastPopOutputTrailingLoudness = lastPopLoudness; } else { - _nextOutputTrailingLoudness = (_nextOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * nextLoudness); + _lastPopOutputTrailingLoudness = (_lastPopOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * lastPopLoudness); - if (_nextOutputTrailingLoudness < LOUDNESS_EPSILON) { - _nextOutputTrailingLoudness = 0; + if (_lastPopOutputTrailingLoudness < LOUDNESS_EPSILON) { + _lastPopOutputTrailingLoudness = 0; } } } diff --git a/libraries/audio/src/PositionalAudioStream.h b/libraries/audio/src/PositionalAudioStream.h index 06835b93a8..c9739d9588 100644 --- a/libraries/audio/src/PositionalAudioStream.h +++ b/libraries/audio/src/PositionalAudioStream.h @@ -29,12 +29,10 @@ public: PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo = false, bool dynamicJitterBuffers = false); - int parseData(const QByteArray& packet); - virtual AudioStreamStats getAudioStreamStats() const; - void 
updateNextOutputTrailingLoudness(); - float getNextOutputTrailingLoudness() const { return _nextOutputTrailingLoudness; } + void updateLastPopOutputTrailingLoudness(); + float getLastPopOutputTrailingLoudness() const { return _lastPopOutputTrailingLoudness; } bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; } bool isStereo() const { return _isStereo; } @@ -50,13 +48,6 @@ protected: PositionalAudioStream(const PositionalAudioStream&); PositionalAudioStream& operator= (const PositionalAudioStream&); - /// parses the info between the seq num and the audio data in the network packet and calculates - /// how many audio samples this packet contains - virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0; - - /// parses the audio data in the network packet - virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0; - int parsePositionalData(const QByteArray& positionalByteArray); protected: @@ -67,7 +58,7 @@ protected: bool _shouldLoopbackForNode; bool _isStereo; - float _nextOutputTrailingLoudness; + float _lastPopOutputTrailingLoudness; AABox* _listenerUnattenuatedZone; }; From c709a103ad63790c7b3c78bdf712c11d6c4edb8e Mon Sep 17 00:00:00 2001 From: wangyix Date: Tue, 29 Jul 2014 10:42:30 -0700 Subject: [PATCH 36/62] minor comment change --- assignment-client/src/audio/AudioMixer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index d8d9006a1d..9c86c2faf2 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -547,7 +547,7 @@ void AudioMixer::run() { if (node->getLinkedData()) { AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData(); - // this function will request a frame from each audio stream. 
+ // this function will attempt to pop a frame from each audio stream. // a pointer to the popped data is stored as a member in InboundAudioStream. // That's how the popped audio data will be read for mixing (but only if the pop was successful) nodeData->checkBuffersBeforeFrameSend(_sourceUnattenuatedZone, _listenerUnattenuatedZone); From 71c23eac1e33d1efc98bc3994e4ee6b070b50901 Mon Sep 17 00:00:00 2001 From: wangyix Date: Tue, 29 Jul 2014 13:51:46 -0700 Subject: [PATCH 37/62] added TimeWeightedAvg to InboundAudioStream _maxFramesOverDesired hardcoded right now --- .../src/audio/AudioMixerClientData.cpp | 4 +- interface/src/Audio.cpp | 2 +- libraries/audio/src/AudioStreamStats.h | 4 +- libraries/audio/src/InboundAudioStream.cpp | 85 ++++++++++++------- libraries/audio/src/InboundAudioStream.h | 38 ++++++--- libraries/shared/src/TimeWeightedAvg.h | 80 +++++++++++++++++ 6 files changed, 164 insertions(+), 49 deletions(-) create mode 100644 libraries/shared/src/TimeWeightedAvg.h diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index e681a6ccbf..d3c16dc04e 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -224,7 +224,7 @@ QString AudioMixerClientData::getAudioStreamStatsString() const { + " starves:" + QString::number(streamStats._starveCount) + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount) + " overflows:" + QString::number(streamStats._overflowCount) - + " silents_dropped:" + QString::number(streamStats._silentFramesDropped) + + " silents_dropped:" + QString::number(streamStats._framesDropped) + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2) + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2) + " min_gap:" + formatUsecTime(streamStats._timeGapMin) @@ -248,7 +248,7 @@ QString 
AudioMixerClientData::getAudioStreamStatsString() const { + " starves:" + QString::number(streamStats._starveCount) + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount) + " overflows:" + QString::number(streamStats._overflowCount) - + " silents_dropped:" + QString::number(streamStats._silentFramesDropped) + + " silents_dropped:" + QString::number(streamStats._framesDropped) + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2) + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2) + " min_gap:" + formatUsecTime(streamStats._timeGapMin) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index e830e5f6d4..1be38377a6 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -1443,7 +1443,7 @@ void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int hori sprintf(stringBuffer, " Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u", streamStats._starveCount, streamStats._consecutiveNotMixedCount, - streamStats._silentFramesDropped, + streamStats._framesDropped, streamStats._overflowCount); verticalOffset += STATS_HEIGHT_PER_LINE; drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color); diff --git a/libraries/audio/src/AudioStreamStats.h b/libraries/audio/src/AudioStreamStats.h index 784e163b3b..148fad6557 100644 --- a/libraries/audio/src/AudioStreamStats.h +++ b/libraries/audio/src/AudioStreamStats.h @@ -31,7 +31,7 @@ public: _starveCount(0), _consecutiveNotMixedCount(0), _overflowCount(0), - _silentFramesDropped(0), + _framesDropped(0), _packetStreamStats(), _packetStreamWindowStats() {} @@ -52,7 +52,7 @@ public: quint32 _starveCount; quint32 _consecutiveNotMixedCount; quint32 _overflowCount; - quint32 _silentFramesDropped; + quint32 _framesDropped; PacketStreamStats _packetStreamStats; PacketStreamStats _packetStreamWindowStats; diff --git 
a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index bfaa4c6d63..c70fd090ed 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -12,7 +12,8 @@ #include "InboundAudioStream.h" #include "PacketHeaders.h" -InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc) : +InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, + bool dynamicJitterBuffers, /*int maxFramesOverDesired,*/ bool useStDevForJitterCalc) : _ringBuffer(numFrameSamples, false, numFramesCapacity), _lastPopSucceeded(false), _lastPopOutput(), @@ -22,16 +23,19 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit _calculatedJitterBufferFramesUsingMaxGap(0), _calculatedJitterBufferFramesUsingStDev(0), _desiredJitterBufferFrames(1), + _maxFramesOverDesired(20),//maxFramesOverDesired), // PLACEHOLDER!!!!!!!!! 
_isStarved(true), _hasStarted(false), _consecutiveNotMixedCount(0), _starveCount(0), _silentFramesDropped(0), + _oldFramesDropped(0), _incomingSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS), _lastFrameReceivedTime(0), _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS), _interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS), - _framesAvailableStats(FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES, FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS) + _framesAvailableStat(), + _framesAvailableAvg(0) { } @@ -49,16 +53,19 @@ void InboundAudioStream::resetStats() { _consecutiveNotMixedCount = 0; _starveCount = 0; _silentFramesDropped = 0; + _oldFramesDropped = 0; _incomingSequenceNumberStats.reset(); _lastFrameReceivedTime = 0; _interframeTimeGapStatsForJitterCalc.reset(); _interframeTimeGapStatsForStatsPacket.reset(); - _framesAvailableStats.reset(); + _framesAvailableStat.reset(); + _framesAvailableAvg = 0; } void InboundAudioStream::clearBuffer() { _ringBuffer.clear(); - _framesAvailableStats.reset(); + _framesAvailableStat.reset(); + _framesAvailableAvg = 0; } int InboundAudioStream::parseData(const QByteArray& packet) { @@ -99,11 +106,24 @@ int InboundAudioStream::parseData(const QByteArray& packet) { } } - if (_isStarved && _ringBuffer.framesAvailable() >= _desiredJitterBufferFrames) { + int framesAvailable = _ringBuffer.framesAvailable(); + // if this stream was starved, check if we're still starved. + if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) { _isStarved = false; } + // if the ringbuffer exceeds the desired size by more than the threshold specified, + // drop the oldest frames so the ringbuffer is down to the desired size. 
+ if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) { + int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING); + _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples()); + printf("dropped %d old frames\n", framesToDrop); + _framesAvailableStat.reset(); + _framesAvailableAvg = 0; - _framesAvailableStats.update(_ringBuffer.framesAvailable()); + _oldFramesDropped += framesToDrop; + } + + framesAvailableChanged(); return readBytes; } @@ -119,6 +139,7 @@ bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) { // we have enough samples to pop, so we're good to mix _lastPopOutput = _ringBuffer.nextOutput(); _ringBuffer.shiftReadPosition(numSamplesRequested); + framesAvailableChanged(); _hasStarted = true; _lastPopSucceeded = true; @@ -135,6 +156,15 @@ bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) { return _lastPopSucceeded; } +void InboundAudioStream::framesAvailableChanged() { + _framesAvailableStat.updateWithSample(_ringBuffer.framesAvailable()); + if (_framesAvailableStat.getElapsedUsecs() >= FRAMES_AVAILABLE_STATS_WINDOW_USECS) { + _framesAvailableAvg = (int)ceil(_framesAvailableStat.getAverage()); + _framesAvailableStat.reset(); + printf("10s samples filled; frames avail avg = %d\n", _framesAvailableAvg); + } +} + void InboundAudioStream::setToStarved() { if (!_isStarved && _ringBuffer.framesAvailable() < _desiredJitterBufferFrames) { starved(); @@ -209,37 +239,26 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS } int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) { - - // This adds some number of frames to the desired jitter buffer frames target we use. - // The larger this value is, the less aggressive we are about reducing the jitter buffer length. 
- // Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames long, - // which could lead immediately to a starve. - const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1; - - // calculate how many silent frames we should drop. We only drop silent frames if - // the running avg num frames available has stabilized and it's more than - // our desired number of frames by the margin defined above. + + // calculate how many silent frames we should drop. int samplesPerFrame = _ringBuffer.getNumFrameSamples(); + int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING; int numSilentFramesToDrop = 0; - if (_framesAvailableStats.getNewStatsAvailableFlag() && _framesAvailableStats.isWindowFilled() - && numSilentSamples >= samplesPerFrame) { - _framesAvailableStats.clearNewStatsAvailableFlag(); - int averageJitterBufferFrames = (int)getFramesAvailableAverage(); - int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING; - if (averageJitterBufferFrames > desiredJitterBufferFramesPlusPadding) { - // our avg jitter buffer size exceeds its desired value, so ignore some silent - // frames to get that size as close to desired as possible - int numSilentFramesToDropDesired = averageJitterBufferFrames - desiredJitterBufferFramesPlusPadding; - int numSilentFramesReceived = numSilentSamples / samplesPerFrame; - numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived); + if (numSilentSamples >= samplesPerFrame && _framesAvailableAvg > desiredJitterBufferFramesPlusPadding) { - // since we now have a new jitter buffer length, reset the frames available stats. 
- _framesAvailableStats.reset(); + // our avg jitter buffer size exceeds its desired value, so ignore some silent + // frames to get that size as close to desired as possible + int numSilentFramesToDropDesired = _framesAvailableAvg - desiredJitterBufferFramesPlusPadding; + int numSilentFramesReceived = numSilentSamples / samplesPerFrame; + numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived); - _silentFramesDropped += numSilentFramesToDrop; - } + // dont reset _framesAvailableAvg here; we want to be able to drop further silent frames + // without waiting for _framesAvailableStat to fill up to 10s of samples. + _framesAvailableAvg -= numSilentFramesToDrop; + _framesAvailableStat.reset(); } + return _ringBuffer.addSilentFrame(numSilentSamples - numSilentFramesToDrop * samplesPerFrame); } @@ -258,12 +277,12 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const { streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage(); streamStats._framesAvailable = _ringBuffer.framesAvailable(); - streamStats._framesAvailableAverage = _framesAvailableStats.getWindowAverage(); + streamStats._framesAvailableAverage = _framesAvailableAvg; streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames; streamStats._starveCount = _starveCount; streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount; streamStats._overflowCount = _ringBuffer.getOverflowCount(); - streamStats._silentFramesDropped = _silentFramesDropped; + streamStats._framesDropped = _silentFramesDropped + _oldFramesDropped; // TODO: add separate stat for old frames dropped streamStats._packetStreamStats = _incomingSequenceNumberStats.getStats(); streamStats._packetStreamWindowStats = _incomingSequenceNumberStats.getStatsForHistoryWindow(); diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 958491bca1..c8d854eeb1 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ 
b/libraries/audio/src/InboundAudioStream.h @@ -19,6 +19,13 @@ #include "AudioStreamStats.h" #include "PacketHeaders.h" #include "StdDev.h" +#include "TimeWeightedAvg.h" + +// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames. +// The larger this value is, the less aggressive we are about reducing the jitter buffer length. +// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames long when dropping frames, +// which could lead to a starve soon after. +const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1; // the time gaps stats for _desiredJitterBufferFrames calculation // will recalculate the max for the past 5000 samples every 500 samples @@ -30,10 +37,7 @@ const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10; const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30; -// the stats for calculating the average frames available will recalculate every ~1 second -// and will include data for the past ~2 seconds -const int FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; -const int FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS = 10; +const int FRAMES_AVAILABLE_STATS_WINDOW_USECS = 10 * USECS_PER_SECOND; // the internal history buffer of the incoming seq stats will cover 30s to calculate // packet loss % over last 30s @@ -45,7 +49,9 @@ const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100; class InboundAudioStream : public NodeData { Q_OBJECT public: - InboundAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false); + InboundAudioStream(int numFrameSamples, int numFramesCapacity, + bool dynamicJitterBuffers, //int maxFramesOverDesired, + bool useStDevForJitterCalc = false); void reset(); void resetStats(); @@ -85,7 +91,7 @@ public: int getNumFrameSamples() const { return 
_ringBuffer.getNumFrameSamples(); } int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); } int getFramesAvailable() const { return _ringBuffer.framesAvailable(); } - double getFramesAvailableAverage() const { return _framesAvailableStats.getWindowAverage(); } + double getFramesAvailableAverage() const { return _framesAvailableAvg; } bool isStarved() const { return _isStarved; } bool hasStarted() const { return _hasStarted; } @@ -103,6 +109,8 @@ private: int writeSamplesForDroppedPackets(int numSamples); + void framesAvailableChanged(); + protected: // disallow copying of InboundAudioStream objects InboundAudioStream(const InboundAudioStream&); @@ -124,14 +132,21 @@ protected: bool _lastPopSucceeded; AudioRingBuffer::ConstIterator _lastPopOutput; - bool _dynamicJitterBuffers; - bool _dynamicJitterBuffersOverride; + bool _dynamicJitterBuffers; // if false, _desiredJitterBufferFrames is locked at 1 (old behavior) + bool _dynamicJitterBuffersOverride; // used for locking the _desiredJitterBufferFrames to some number while running + + // if jitter buffer is dynamic, this determines what method of calculating _desiredJitterBufferFrames + // if true, Philip's timegap std dev calculation is used. 
Otherwise, Freddy's max timegap calculation is used bool _useStDevForJitterCalc; - int _calculatedJitterBufferFramesUsingMaxGap; int _calculatedJitterBufferFramesUsingStDev; + int _desiredJitterBufferFrames; + // if there are more than _desiredJitterBufferFrames + _maxFramesOverDesired frames, old ringbuffer frames + // will be dropped to keep audio delay from building up + int _maxFramesOverDesired; + bool _isStarved; bool _hasStarted; @@ -140,6 +155,7 @@ protected: int _consecutiveNotMixedCount; int _starveCount; int _silentFramesDropped; + int _oldFramesDropped; SequenceNumberStats _incomingSequenceNumberStats; @@ -148,8 +164,8 @@ protected: StDev _stdev; MovingMinMaxAvg _interframeTimeGapStatsForStatsPacket; - // TODO: change this to time-weighted moving avg - MovingMinMaxAvg _framesAvailableStats; + TimeWeightedAvg _framesAvailableStat; + int _framesAvailableAvg; }; #endif // hifi_InboundAudioStream_h diff --git a/libraries/shared/src/TimeWeightedAvg.h b/libraries/shared/src/TimeWeightedAvg.h new file mode 100644 index 0000000000..9412078413 --- /dev/null +++ b/libraries/shared/src/TimeWeightedAvg.h @@ -0,0 +1,80 @@ +// +// TimeWeightedAvg.h +// libraries/shared/src +// +// Created by Yixin Wang on 7/29/2014 +// Copyright 2013 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_TimeWeightedAvg_h +#define hifi_TimeWeightedAvg_h + +#include "SharedUtil.h" + +template +class TimeWeightedAvg { + +public: + + TimeWeightedAvg() + : _firstSampleTime(0), + _lastSample(), + _lastSampleTime(0), + _weightedSampleSumExcludingLastSample(0.0) + {} + + void reset() { + _firstSampleTime = 0; + _lastSampleTime = 0; + _weightedSampleSumExcludingLastSample = 0.0; + } + + void updateWithSample(T sample) { + quint64 now = usecTimestampNow(); + + if (_firstSampleTime == 0) { + _firstSampleTime = now; + } else { + _weightedSampleSumExcludingLastSample = getWeightedSampleSum(now); + } + + _lastSample = sample; + _lastSampleTime = now; + } + + double getAverage() const { + if (_firstSampleTime == 0) { + return 0.0; + } + quint64 now = usecTimestampNow(); + quint64 elapsed = now - _firstSampleTime; + return getWeightedSampleSum(now) / (double)elapsed; + } + + quint64 getElapsedUsecs() const { + if (_firstSampleTime == 0) { + return 0; + } + return usecTimestampNow() - _firstSampleTime; + } + +private: + // if no sample has been collected yet, the return value is undefined + double getWeightedSampleSum(quint64 now) const { + quint64 lastSampleLasted = now - _lastSampleTime; + return _weightedSampleSumExcludingLastSample + (double)_lastSample * (double)lastSampleLasted; + } + +private: + quint64 _firstSampleTime; + + T _lastSample; + quint64 _lastSampleTime; + + double _weightedSampleSumExcludingLastSample; +}; + +#endif // hifi_TimeWeightedAvg_h From bb2adb2721a5e35c42b085349b60c6b4efa1e67d Mon Sep 17 00:00:00 2001 From: wangyix Date: Tue, 29 Jul 2014 14:02:18 -0700 Subject: [PATCH 38/62] minor refactoring --- libraries/audio/src/InboundAudioStream.cpp | 26 +++++++++++----------- libraries/audio/src/InboundAudioStream.h | 9 +++++--- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/libraries/audio/src/InboundAudioStream.cpp 
b/libraries/audio/src/InboundAudioStream.cpp index c70fd090ed..a1d76655ac 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -35,7 +35,7 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS), _interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS), _framesAvailableStat(), - _framesAvailableAvg(0) + _currentJitterBufferFrames(0) { } @@ -59,13 +59,13 @@ void InboundAudioStream::resetStats() { _interframeTimeGapStatsForJitterCalc.reset(); _interframeTimeGapStatsForStatsPacket.reset(); _framesAvailableStat.reset(); - _framesAvailableAvg = 0; + _currentJitterBufferFrames = 0; } void InboundAudioStream::clearBuffer() { _ringBuffer.clear(); _framesAvailableStat.reset(); - _framesAvailableAvg = 0; + _currentJitterBufferFrames = 0; } int InboundAudioStream::parseData(const QByteArray& packet) { @@ -116,9 +116,9 @@ int InboundAudioStream::parseData(const QByteArray& packet) { if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) { int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING); _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples()); - printf("dropped %d old frames\n", framesToDrop); + _framesAvailableStat.reset(); - _framesAvailableAvg = 0; + _currentJitterBufferFrames = 0; _oldFramesDropped += framesToDrop; } @@ -158,10 +158,10 @@ bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) { void InboundAudioStream::framesAvailableChanged() { _framesAvailableStat.updateWithSample(_ringBuffer.framesAvailable()); - if (_framesAvailableStat.getElapsedUsecs() >= FRAMES_AVAILABLE_STATS_WINDOW_USECS) { - _framesAvailableAvg = (int)ceil(_framesAvailableStat.getAverage()); + + if 
(_framesAvailableStat.getElapsedUsecs() >= FRAMES_AVAILABLE_STAT_WINDOW_USECS) { + _currentJitterBufferFrames = (int)ceil(_framesAvailableStat.getAverage()); _framesAvailableStat.reset(); - printf("10s samples filled; frames avail avg = %d\n", _framesAvailableAvg); } } @@ -245,17 +245,17 @@ int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) { int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING; int numSilentFramesToDrop = 0; - if (numSilentSamples >= samplesPerFrame && _framesAvailableAvg > desiredJitterBufferFramesPlusPadding) { + if (numSilentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) { // our avg jitter buffer size exceeds its desired value, so ignore some silent // frames to get that size as close to desired as possible - int numSilentFramesToDropDesired = _framesAvailableAvg - desiredJitterBufferFramesPlusPadding; + int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding; int numSilentFramesReceived = numSilentSamples / samplesPerFrame; numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived); - // dont reset _framesAvailableAvg here; we want to be able to drop further silent frames + // dont reset _currentJitterBufferFrames here; we want to be able to drop further silent frames // without waiting for _framesAvailableStat to fill up to 10s of samples. 
- _framesAvailableAvg -= numSilentFramesToDrop; + _currentJitterBufferFrames -= numSilentFramesToDrop; _framesAvailableStat.reset(); } @@ -277,7 +277,7 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const { streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage(); streamStats._framesAvailable = _ringBuffer.framesAvailable(); - streamStats._framesAvailableAverage = _framesAvailableAvg; + streamStats._framesAvailableAverage = _currentJitterBufferFrames; streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames; streamStats._starveCount = _starveCount; streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount; diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index c8d854eeb1..1a880d565f 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -37,7 +37,7 @@ const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10; const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30; -const int FRAMES_AVAILABLE_STATS_WINDOW_USECS = 10 * USECS_PER_SECOND; +const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND; // the internal history buffer of the incoming seq stats will cover 30s to calculate // packet loss % over last 30s @@ -91,7 +91,7 @@ public: int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); } int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); } int getFramesAvailable() const { return _ringBuffer.framesAvailable(); } - double getFramesAvailableAverage() const { return _framesAvailableAvg; } + double getFramesAvailableAverage() const { return _framesAvailableStat.getAverage(); } bool isStarved() const { return _isStarved; } bool hasStarted() const { return _hasStarted; } @@ -165,7 +165,10 @@ protected: MovingMinMaxAvg 
_interframeTimeGapStatsForStatsPacket; TimeWeightedAvg _framesAvailableStat; - int _framesAvailableAvg; + + // this value is based on the time-weighted avg from _framesAvailableStat. it is only used for + // dropping silent frames right now. + int _currentJitterBufferFrames; }; #endif // hifi_InboundAudioStream_h From 6be62842e3e1ede19769946c806ff84b7937c617 Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Tue, 29 Jul 2014 15:45:12 -0700 Subject: [PATCH 39/62] Added walkWheel for syncinc animation speed, plus other improvements. --- examples/proceduralAnimation.js | 0 examples/{dancer.js => proceduralBot.js} | 214 +++++++++-------------- 2 files changed, 81 insertions(+), 133 deletions(-) create mode 100644 examples/proceduralAnimation.js rename examples/{dancer.js => proceduralBot.js} (74%) diff --git a/examples/proceduralAnimation.js b/examples/proceduralAnimation.js new file mode 100644 index 0000000000..e69de29bb2 diff --git a/examples/dancer.js b/examples/proceduralBot.js similarity index 74% rename from examples/dancer.js rename to examples/proceduralBot.js index e7a75f7596..ce88f596b6 100644 --- a/examples/dancer.js +++ b/examples/proceduralBot.js @@ -1,15 +1,18 @@ // -// dancer.js +// proceduralBot.js // hifi // -// Created by Stephen Birarda on 2/20/14. -// Modified by Philip on 3/3/14 +// Created by Ben Arnold on 7/29/2013 +// // Copyright (c) 2014 HighFidelity, Inc. All rights reserved. // // This is an example script that demonstrates an NPC avatar. 
// // +//For procedural walk animation +Script.include("proceduralAnimation.js"); + function getRandomFloat(min, max) { return Math.random() * (max - min) + min; } @@ -22,41 +25,41 @@ function printVector(string, vector) { print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); } -var CHANCE_OF_MOVING = 0.01; +var CHANCE_OF_MOVING = 0.1; var CHANCE_OF_SOUND = 0; var CHANCE_OF_HEAD_TURNING = 0.05; var CHANCE_OF_BIG_MOVE = 0.1; var CHANCE_OF_WAVING = 0.009; -var isMoving = true; +var isMoving = false; var isTurningHead = false; -var isPlay -ingAudio = false; +var isPlayingAudio = false; var isWaving = false; var waveFrequency = 0.0; var waveAmplitude = 0.0; -var X_MIN = 5.50; -var X_MAX = 5.60; -var Z_MIN = 5.00; -var Z_MAX = 5.10; +var X_MIN = 0.50; +var X_MAX = 15.60; +var Z_MIN = 0.50; +var Z_MAX = 15.10; var Y_PELVIS = 1.0; var MAX_PELVIS_DELTA = 2.5; var AVATAR_PELVIS_HEIGHT = 0.75; -var MOVE_RANGE_SMALL = 1.0; +var MOVE_RANGE_SMALL = 10.0; var TURN_RANGE = 70.0; var STOP_TOLERANCE = 0.05; var MOVE_RATE = 0.05; -var TURN_RATE = 0.15; -var PITCH_RATE = 0.20; -var PITCH_RANGE = 30.0; +var TURN_RATE = 0.2; +var PITCH_RATE = 0.10; +var PITCH_RANGE = 20.0; -var firstPosition = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) }; +//var firstPosition = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) }; +var firstPosition = { x: 0.5, y: Y_PELVIS, z: 0.5 }; var targetPosition = { x: 0, y: 0, z: 0 }; -var targetDirection = { x: 0, y: 0, z: 0, w: 0 }; -var currentDirection = { x: 0, y: 0, z: 0, w: 0 }; +var targetOrientation = { x: 0, y: 0, z: 0, w: 0 }; +var currentOrientation = { x: 0, y: 0, z: 0, w: 0 }; var targetHeadPitch = 0.0; var cumulativeTime = 0.0; @@ -130,30 +133,6 @@ function loadSounds() { var sounds = []; loadSounds(); -function loadAnimations() { - - var animation_filenames = []; - var ANIMATION_BASE_URL = "http://highfidelity-dev.s3.amazonaws.com/animations/"; - - if (botNumber < 
20) { - animation_filenames = ["robot/wave_hip_hop_dance.fbx", "robot/robot_hip_hop_dance.fbx"]; - } else if (botNumber <= 40) { - animation_filenames = ["superhero/house_dancing_2.fbx", "superhero/house_dancing_3.fbx", "superhero/house_dancing_4.fbx"]; - } else if (botNumber <= 60) { - animation_filenames = ["amber/house_dancing.fbx"] - } else if (botNumber <= 80) { - animation_filenames = ["ron/hip_hop_dancing.fbx", "ron/gangnam_style.fbx"]; - } else { - animation_filenames = ["angie/hip_hop_dancing_6.fbx"]; - } - - for (var i = 0; i < animation_filenames.length; i++) { - animations.push(AnimationCache.getAnimation(ANIMATION_BASE_URL + animation_filenames[i])); - } -} - -var animations = []; -loadAnimations(); function playRandomSound() { if (!Agent.isPlayingAvatarSound) { @@ -162,13 +141,6 @@ function playRandomSound() { } } -function stopWaving() { - isWaving = false; - Avatar.clearJointData(SHOULDER_JOINT_NUMBER); - Avatar.clearJointData(ELBOW_JOINT_NUMBER); - Avatar.clearJointData(JOINT_SPINE); -} - //Animation KeyFrame constructor. rightJoints and leftJoints must be the same size function WalkKeyFrame(rightJoints, leftJoints, singleJoints) { this.rotations = []; @@ -231,13 +203,6 @@ var FOREARM = 3; //Joint indices for single joints var SPINE = 0; -//Symmetry multipliers for dthe left half [pitch, roll, yaw]. 
-1 means reflect, 1 means no reflect -var SYMMETRY = []; -SYMMETRY[HIP] = [1, -1, -1]; -SYMMETRY[KNEE] = [1, -1, -1]; -SYMMETRY[ARM] = [1, -1, -1]; -SYMMETRY[FOREARM] = [1, -1, -1]; - //We have to store the angles so we can invert yaw and roll when making the animation //symmetrical @@ -254,7 +219,7 @@ backAngles[0][KNEE] = [-28, 0.0, 0.0]; backAngles[0][ARM] = [85.0, 20.0, 0.0]; backAngles[0][FOREARM] = [10.0, 0.0, -25.0]; -singleAngles[0][SPINE] = [-0.0, 0.0, 0.0]; +singleAngles[0][SPINE] = [0.0, -15.0, 5.0]; //Legs Passing frontAngles[1][HIP] = [6.0, 0.0, 8.0]; @@ -277,7 +242,7 @@ var walkKeyFrames = []; for (var i = 0; i < frontAngles.length; i++) { for (var j = 0; j < frontAngles[i].length; j++) { frontKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(frontAngles[i][j][0], frontAngles[i][j][1], frontAngles[i][j][2]); - backKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(SYMMETRY[j][0] * backAngles[i][j][0], SYMMETRY[j][1] * backAngles[i][j][1], SYMMETRY[j][2] * backAngles[i][j][2]); + backKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(backAngles[i][j][0], -backAngles[i][j][1], -backAngles[i][j][2]); } } for (var i = 0; i < singleAngles.length; i++) { @@ -291,7 +256,7 @@ walkKeyFrames[1] = new WalkKeyFrame(frontKeyFrames[1], backKeyFrames[1], singleK //Generate mirrored quaternions for the other half of the body for (var i = 0; i < frontAngles.length; i++) { for (var j = 0; j < frontAngles[i].length; j++) { - frontKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(SYMMETRY[j][0] * frontAngles[i][j][0], SYMMETRY[j][1] * frontAngles[i][j][1], SYMMETRY[j][2] * frontAngles[i][j][2]); + frontKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(frontAngles[i][j][0], -frontAngles[i][j][1], -frontAngles[i][j][2]); backKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(backAngles[i][j][0], backAngles[i][j][1], backAngles[i][j][2]); } } @@ -340,12 +305,18 @@ function deCasteljau(k1, k2, c1, c2, f) { var currentFrame = 0; var walkTime = 0.0; -var walkFrequency = 3.0; + +var walkWheelRadius 
= 0.5; +var walkWheelRate = 2.0 * 3.141592 * walkWheelRadius / 8.0; + +var avatarAcceleration = 0.75; +var avatarVelocity = 0.0; +var avatarMaxVelocity = 1.4; function keepWalking(deltaTime) { - walkTime += walkFrequency * deltaTime; - if (walkTime > 1.0) { + walkTime += avatarVelocity * deltaTime; + if (walkTime > walkWheelRate) { walkTime = 0.0; currentFrame++; if (currentFrame > 3) { @@ -355,18 +326,13 @@ function keepWalking(deltaTime) { var frame = walkKeyFrames[currentFrame]; + var interp = walkTime / walkWheelRate; + for (var i = 0; i < JOINT_ORDER.length; i++) { - Avatar.setJointData(JOINT_ORDER[i], deCasteljau(frame.rotations[i], frame.nextFrame.rotations[i], frame.controlPoints[i][0], frame.controlPoints[i][1], walkTime)); + Avatar.setJointData(JOINT_ORDER[i], deCasteljau(frame.rotations[i], frame.nextFrame.rotations[i], frame.controlPoints[i][0], frame.controlPoints[i][1], interp)); } } -function stopWalking() { - Avatar.clearJointData(JOINT_R_HIP); - Avatar.clearJointData(JOINT_R_KNEE); - Avatar.clearJointData(JOINT_L_HIP); - Avatar.clearJointData(JOINT_L_KNEE); -} - var trailingAverageLoudness = 0; var MAX_SAMPLE = 32767; var DB_METER_BASE = Math.log(MAX_SAMPLE); @@ -408,56 +374,6 @@ var FRAME_RATE = 30.0; var wasMovingLastFrame = false; var wasDancing = false; -function danceAnimation(deltaTime) { - - var flooredFrame = Math.floor(frameIndex); - - if (jointMapping === null || flooredFrame >= randomAnimation.frames.length * animationLoops) { - // we've run our animation for our number of loops, start a new one - frameIndex = 0.0; - jointMapping = null; - randomAnimation = null; - } - - if (isMoving || (!wasMovingLastFrame && frameIndex === 0)) { - if (!isMoving) { - forcedMove = true; - possiblyStopDancing(); - } - - wasMovingLastFrame = true; - handleWalking(); - } else { - if (jointMapping === null) { - // pick a random animation - var whichAnimation = Math.floor((Math.random() * animations.length) % animations.length); - randomAnimation = 
animations[whichAnimation]; - - var avatarJointNames = Avatar.jointNames; - var animationJointNames = randomAnimation.jointNames; - if (avatarJointNames === 0 || animationJointNames.length === 0) { - return; - } - jointMapping = new Array(avatarJointNames.length); - for (var i = 0; i < avatarJointNames.length; i++) { - jointMapping[i] = animationJointNames.indexOf(avatarJointNames[i]); - } - } - - frameIndex += deltaTime * FRAME_RATE; - var frames = randomAnimation.frames; - var rotations = frames[flooredFrame % frames.length].rotations; - for (var j = 0; j < jointMapping.length; j++) { - var rotationIndex = jointMapping[j]; - if (rotationIndex != -1) { - Avatar.setJointData(j, rotations[rotationIndex]); - } - } - - wasMovingLastFrame = false; - wasDancing = true; - } -} function handleHeadTurn() { if (!isTurningHead && (Math.random() < CHANCE_OF_HEAD_TURNING)) { @@ -481,14 +397,32 @@ var currentElbowQuat = Avatar.getJointRotation(ELBOW_JOINT_NUMBER); var targetElbowQuat = currentElbowQuat; var idleElbowQuat = currentElbowQuat; +function stopWalking() { + Avatar.clearJointData(JOINT_R_HIP); + Avatar.clearJointData(JOINT_R_KNEE); + Avatar.clearJointData(JOINT_L_HIP); + Avatar.clearJointData(JOINT_L_KNEE); + avatarVelocity = 0.0; + isMoving = false; +} + +var MAX_ATTEMPTS = 40; function handleWalking(deltaTime) { + if (forcedMove || (!isMoving && Math.random() < CHANCE_OF_MOVING)) { // Set new target location - targetDirection = Quat.multiply(Avatar.orientation, Quat.angleAxis(getRandomFloat(-TURN_RANGE, TURN_RANGE), { x:0, y:1, z:0 })); - var front = Quat.getFront(targetDirection); - - targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, MOVE_RANGE_SMALL))); + //Keep trying new orientations if the desired target location is out of bounds + var attempts = 0; + do { + targetOrientation = Quat.multiply(Avatar.orientation, Quat.angleAxis(getRandomFloat(-TURN_RANGE, TURN_RANGE), { x:0, y:1, z:0 })); + var front = 
Quat.getFront(targetOrientation); + + targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, MOVE_RANGE_SMALL))); + } + while ((targetPosition.x < X_MIN || targetPosition.x > X_MAX || targetPosition.z < Z_MIN || targetPosition.z > Z_MAX) + && attempts++ < MAX_ATTEMPTS); + targetPosition.x = clamp(targetPosition.x, X_MIN, X_MAX); targetPosition.z = clamp(targetPosition.z, Z_MIN, Z_MAX); targetPosition.y = Y_PELVIS; @@ -498,16 +432,31 @@ function handleWalking(deltaTime) { forcedMove = false; } else if (isMoving) { keepWalking(deltaTime); - // Avatar.position = Vec3.sum(Avatar.position, Vec3.multiply(Vec3.subtract(targetPosition, Avatar.position), MOVE_RATE)); - Avatar.orientation = Quat.slerp(Avatar.orientation, targetDirection, TURN_RATE); - var diff = Vec3.subtract(Avatar.position, targetPosition); - diff.y = 0.0; - wasMovingLastFrame = true; + var targetVector = Vec3.subtract(targetPosition, Avatar.position); + var distance = Vec3.length(targetVector); + if (distance <= avatarVelocity * deltaTime) { + Avatar.position = targetPosition; + stopWalking(); + } else { + var direction = Vec3.normalize(targetVector); + //Figure out if we should be slowing down + var t = avatarVelocity / avatarAcceleration; + var d = (avatarVelocity / 2.0) * t; + if (distance < d) { + avatarVelocity -= avatarAcceleration * deltaTime; + if (avatarVelocity <= 0) { + stopWalking(); + } + } else { + avatarVelocity += avatarAcceleration * deltaTime; + if (avatarVelocity > avatarMaxVelocity) avatarVelocity = avatarMaxVelocity; + } + Avatar.position = Vec3.sum(Avatar.position, Vec3.multiply(direction, avatarVelocity * deltaTime)); + Avatar.orientation = Quat.mix(Avatar.orientation, targetOrientation, TURN_RATE); + + wasMovingLastFrame = true; - if (Vec3.length(diff) < STOP_TOLERANCE) { - isMoving = false; - stopWalking(); } } } @@ -544,7 +493,6 @@ function updateBehavior(deltaTime) { // we have a DJ, shouldn't we be dancing? 
jumpWithLoudness(deltaTime); - danceAnimation(deltaTime); } else { // make sure we're not dancing anymore possiblyStopDancing(); From d49970d01001226bf45ca6ee7b44063472b9464f Mon Sep 17 00:00:00 2001 From: wangyix Date: Tue, 29 Jul 2014 15:57:32 -0700 Subject: [PATCH 40/62] maxFramesOverDesired added; settable from domain page and preferences --- assignment-client/src/Agent.cpp | 2 +- assignment-client/src/audio/AudioMixer.cpp | 5 + assignment-client/src/audio/AudioMixer.h | 3 + .../src/audio/AudioMixerClientData.cpp | 4 +- .../src/audio/AvatarAudioStream.cpp | 4 +- .../src/audio/AvatarAudioStream.h | 2 +- .../resources/web/settings/describe.json | 6 ++ interface/src/Application.cpp | 3 + interface/src/Audio.cpp | 4 +- interface/src/Audio.h | 3 + interface/src/Menu.cpp | 3 + interface/src/Menu.h | 3 + interface/src/ui/PreferencesDialog.cpp | 7 +- interface/ui/preferencesDialog.ui | 95 ++++++++++++++++++- libraries/audio/src/InboundAudioStream.cpp | 4 +- libraries/audio/src/InboundAudioStream.h | 5 +- libraries/audio/src/InjectedAudioStream.cpp | 4 +- libraries/audio/src/InjectedAudioStream.h | 2 +- libraries/audio/src/MixedAudioStream.cpp | 4 +- libraries/audio/src/MixedAudioStream.h | 2 +- libraries/audio/src/PositionalAudioStream.cpp | 4 +- libraries/audio/src/PositionalAudioStream.h | 2 +- 22 files changed, 149 insertions(+), 22 deletions(-) diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index d4da989198..90009636f6 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -36,7 +36,7 @@ Agent::Agent(const QByteArray& packet) : _voxelEditSender(), _particleEditSender(), _modelEditSender(), - _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false), + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false, 0), _avatarHashMap() { // be the parent of the script engine so it gets moved when we do diff --git a/assignment-client/src/audio/AudioMixer.cpp 
b/assignment-client/src/audio/AudioMixer.cpp index 9c86c2faf2..5f52a175cc 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -68,6 +68,7 @@ void attachNewNodeDataToNode(Node *newNode) { } bool AudioMixer::_useDynamicJitterBuffers = false; +int AudioMixer::_maxFramesOverDesired = 20; AudioMixer::AudioMixer(const QByteArray& packet) : ThreadedAssignment(packet), @@ -469,6 +470,10 @@ void AudioMixer::run() { qDebug() << "Dynamic jitter buffers disabled, using old behavior."; _useDynamicJitterBuffers = false; } + + const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "max-frames-over-desired"; + _maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(); + qDebug() << "Max frames over desired:" << _maxFramesOverDesired; } int nextFrame = 0; diff --git a/assignment-client/src/audio/AudioMixer.h b/assignment-client/src/audio/AudioMixer.h index bfdb49f393..9aca4d3cee 100644 --- a/assignment-client/src/audio/AudioMixer.h +++ b/assignment-client/src/audio/AudioMixer.h @@ -38,6 +38,7 @@ public slots: void sendStatsPacket(); static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; } + static int getMaxFramesOverDesired() { return _maxFramesOverDesired; } private: /// adds one stream to the mix for a listening node @@ -59,7 +60,9 @@ private: int _sumMixes; AABox* _sourceUnattenuatedZone; AABox* _listenerUnattenuatedZone; + static bool _useDynamicJitterBuffers; + static int _maxFramesOverDesired; quint64 _lastSendAudioStreamStatsTime; }; diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index d3c16dc04e..79c5d6c3a8 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -74,7 +74,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { bool isStereo = channelFlag == 1; _audioStreams.insert(nullUUID, - matchingStream = 
new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers())); + matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers(), AudioMixer::getMaxFramesOverDesired())); } else { matchingStream = _audioStreams.value(nullUUID); } @@ -87,7 +87,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { if (!_audioStreams.contains(streamIdentifier)) { _audioStreams.insert(streamIdentifier, - matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers())); + matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(), AudioMixer::getMaxFramesOverDesired())); } else { matchingStream = _audioStreams.value(streamIdentifier); } diff --git a/assignment-client/src/audio/AvatarAudioStream.cpp b/assignment-client/src/audio/AvatarAudioStream.cpp index c6a7d31468..88a2276ddb 100644 --- a/assignment-client/src/audio/AvatarAudioStream.cpp +++ b/assignment-client/src/audio/AvatarAudioStream.cpp @@ -13,8 +13,8 @@ #include "AvatarAudioStream.h" -AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer) : - PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer) +AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int maxFramesOverDesired) : + PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer, maxFramesOverDesired) { } diff --git a/assignment-client/src/audio/AvatarAudioStream.h b/assignment-client/src/audio/AvatarAudioStream.h index de7920c278..545bee4e0a 100644 --- a/assignment-client/src/audio/AvatarAudioStream.h +++ b/assignment-client/src/audio/AvatarAudioStream.h @@ -18,7 +18,7 @@ class AvatarAudioStream : public PositionalAudioStream { public: - AvatarAudioStream(bool isStereo = false, bool dynamicJitterBuffer = false); + AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int maxFramesOverDesired); private: // disallow copying of 
AvatarAudioStream objects diff --git a/domain-server/resources/web/settings/describe.json b/domain-server/resources/web/settings/describe.json index 227b6bf0cd..7cba68e795 100644 --- a/domain-server/resources/web/settings/describe.json +++ b/domain-server/resources/web/settings/describe.json @@ -8,6 +8,12 @@ "help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)", "placeholder": "no zone", "default": "" + }, + "max-frames-over-desired": { + "label": "Max Frames Over Desired", + "help": "The highest number of frames an AudioMixer's ringbuffer can exceed the desired jitter buffer frames by", + "placeholder": "20", + "default": "" }, "dynamic-jitter-buffer": { "type": "checkbox", diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index 3ef4334fa5..f4e67fea31 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -1711,6 +1711,9 @@ void Application::init() { if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) { _audio.overrideDesiredJitterBufferFramesTo(Menu::getInstance()->getAudioJitterBufferFrames()); } + + _audio.setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired()); + qDebug("Loaded settings"); // initialize our face trackers after loading the menu settings diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 1be38377a6..2a801eb009 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -76,9 +76,9 @@ Audio::Audio(QObject* parent) : // this delay will slowly add up and the longer someone runs, they more delayed their audio will be. 
_inputRingBuffer(0), #ifdef _WIN32 - _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, true), + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, 20, true), #else - _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!! + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, 20, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!! #endif _isStereoInput(false), _averagedLatency(0.0), diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 87472740d0..cb042ef25e 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -59,6 +59,9 @@ public: void overrideDesiredJitterBufferFramesTo(int desired) { _receivedAudioStream.overrideDesiredJitterBufferFramesTo(desired); } void unoverrideDesiredJitterBufferFrames() { _receivedAudioStream.unoverrideDesiredJitterBufferFrames(); } + + void setMaxFramesOverDesired(int maxFramesOverDesired) { _receivedAudioStream.setMaxFramesOverDesired(maxFramesOverDesired); } + int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); } float getCollisionSoundMagnitude() { return _collisionSoundMagnitude; } diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index fb1bbd07cf..3fe2ac9e5d 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -83,6 +83,7 @@ const int CONSOLE_HEIGHT = 200; Menu::Menu() : _actionHash(), _audioJitterBufferFrames(0), + _maxFramesOverDesired(0), _bandwidthDialog(NULL), _fieldOfView(DEFAULT_FIELD_OF_VIEW_DEGREES), _realWorldFieldOfView(DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES), @@ -628,6 +629,7 @@ void Menu::loadSettings(QSettings* settings) { } _audioJitterBufferFrames = loadSetting(settings, "audioJitterBufferFrames", 0); + _maxFramesOverDesired = loadSetting(settings, "maxFramesOverDesired", 0); _fieldOfView = loadSetting(settings, "fieldOfView", 
DEFAULT_FIELD_OF_VIEW_DEGREES); _realWorldFieldOfView = loadSetting(settings, "realWorldFieldOfView", DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES); _faceshiftEyeDeflection = loadSetting(settings, "faceshiftEyeDeflection", DEFAULT_FACESHIFT_EYE_DEFLECTION); @@ -678,6 +680,7 @@ void Menu::saveSettings(QSettings* settings) { } settings->setValue("audioJitterBufferFrames", _audioJitterBufferFrames); + settings->setValue("maxFramesOverDesired", _maxFramesOverDesired); settings->setValue("fieldOfView", _fieldOfView); settings->setValue("faceshiftEyeDeflection", _faceshiftEyeDeflection); settings->setValue("maxVoxels", _maxVoxels); diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 6d5ad4e78c..2bc750007a 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -87,6 +87,8 @@ public: float getAudioJitterBufferFrames() const { return _audioJitterBufferFrames; } void setAudioJitterBufferFrames(float audioJitterBufferSamples) { _audioJitterBufferFrames = audioJitterBufferSamples; } + int getMaxFramesOverDesired() const { return _maxFramesOverDesired; } + void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; } float getFieldOfView() const { return _fieldOfView; } void setFieldOfView(float fieldOfView) { _fieldOfView = fieldOfView; } float getRealWorldFieldOfView() const { return _realWorldFieldOfView; } @@ -258,6 +260,7 @@ private: QHash _actionHash; int _audioJitterBufferFrames; /// number of extra samples to wait before starting audio playback + int _maxFramesOverDesired; BandwidthDialog* _bandwidthDialog; float _fieldOfView; /// in Degrees, doesn't apply to HMD like Oculus float _realWorldFieldOfView; // The actual FOV set by the user's monitor size and view distance diff --git a/interface/src/ui/PreferencesDialog.cpp b/interface/src/ui/PreferencesDialog.cpp index 6f87b08093..7d18ae4490 100644 --- a/interface/src/ui/PreferencesDialog.cpp +++ b/interface/src/ui/PreferencesDialog.cpp @@ -151,6 +151,8 @@ void 
PreferencesDialog::loadPreferences() { ui.audioJitterSpin->setValue(menuInstance->getAudioJitterBufferFrames()); + ui.maxFramesOverDesiredSpin->setValue(menuInstance->getMaxFramesOverDesired()); + ui.realWorldFieldOfViewSpin->setValue(menuInstance->getRealWorldFieldOfView()); ui.fieldOfViewSpin->setValue(menuInstance->getFieldOfView()); @@ -241,11 +243,14 @@ void PreferencesDialog::savePreferences() { Menu::getInstance()->setAudioJitterBufferFrames(ui.audioJitterSpin->value()); if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) { - Application::getInstance()->getAudio()->overrideDesiredJitterBufferFramesTo(ui.audioJitterSpin->value()); + Application::getInstance()->getAudio()->overrideDesiredJitterBufferFramesTo(Menu::getInstance()->getAudioJitterBufferFrames()); } else { Application::getInstance()->getAudio()->unoverrideDesiredJitterBufferFrames(); } + Menu::getInstance()->setMaxFramesOverDesired(ui.maxFramesOverDesiredSpin->value()); + Application::getInstance()->getAudio()->setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired()); + Application::getInstance()->resizeGL(Application::getInstance()->getGLWidget()->width(), Application::getInstance()->getGLWidget()->height()); } diff --git a/interface/ui/preferencesDialog.ui b/interface/ui/preferencesDialog.ui index d610b4e8bd..566c24e4e3 100644 --- a/interface/ui/preferencesDialog.ui +++ b/interface/ui/preferencesDialog.ui @@ -1543,7 +1543,7 @@ padding: 10px;margin-top:10px - -10000 + 0 10000 @@ -1555,6 +1555,99 @@ padding: 10px;margin-top:10px + + + + 0 + + + 10 + + + 0 + + + 10 + + + + + + Arial + + + + color: rgb(51, 51, 51) + + + Max Frames Over Desired + + + 15 + + + maxFramesOverDesiredSpin + + + + + + + + Arial + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + + 0 + 0 + + + + + 95 + 36 + + + + + 70 + 16777215 + + + + + Arial + + + + 0 + + + 10000 + + + 1 + + + + + + + diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 
a1d76655ac..cbf9e5a0bb 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -13,7 +13,7 @@ #include "PacketHeaders.h" InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, - bool dynamicJitterBuffers, /*int maxFramesOverDesired,*/ bool useStDevForJitterCalc) : + bool dynamicJitterBuffers, int maxFramesOverDesired, bool useStDevForJitterCalc) : _ringBuffer(numFrameSamples, false, numFramesCapacity), _lastPopSucceeded(false), _lastPopOutput(), @@ -23,7 +23,7 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit _calculatedJitterBufferFramesUsingMaxGap(0), _calculatedJitterBufferFramesUsingStDev(0), _desiredJitterBufferFrames(1), - _maxFramesOverDesired(20),//maxFramesOverDesired), // PLACEHOLDER!!!!!!!!! + _maxFramesOverDesired(maxFramesOverDesired), _isStarved(true), _hasStarted(false), _consecutiveNotMixedCount(0), diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 1a880d565f..4b1db354c1 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -50,7 +50,7 @@ class InboundAudioStream : public NodeData { Q_OBJECT public: InboundAudioStream(int numFrameSamples, int numFramesCapacity, - bool dynamicJitterBuffers, //int maxFramesOverDesired, + bool dynamicJitterBuffers, int maxFramesOverDesired, bool useStDevForJitterCalc = false); void reset(); @@ -75,6 +75,8 @@ public: /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds AudioStreamStats updateSeqHistoryAndGetAudioStreamStats(); + void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; } + virtual AudioStreamStats getAudioStreamStats() const; /// returns the desired number of jitter buffer frames under the dyanmic jitter buffers scheme @@ -88,6 +90,7 @@ public: int getCalculatedJitterBufferFramesUsingMaxGap() 
const { return _calculatedJitterBufferFramesUsingMaxGap; } int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; } + int getMaxFramesOverDesired() const { return _maxFramesOverDesired; } int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); } int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); } int getFramesAvailable() const { return _ringBuffer.framesAvailable(); } diff --git a/libraries/audio/src/InjectedAudioStream.cpp b/libraries/audio/src/InjectedAudioStream.cpp index 4c23fbd823..c50b609be0 100644 --- a/libraries/audio/src/InjectedAudioStream.cpp +++ b/libraries/audio/src/InjectedAudioStream.cpp @@ -19,8 +19,8 @@ #include "InjectedAudioStream.h" -InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer) : - PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer), +InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int maxFramesOverDesired) : + PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer, maxFramesOverDesired), _streamIdentifier(streamIdentifier), _radius(0.0f), _attenuationRatio(0) diff --git a/libraries/audio/src/InjectedAudioStream.h b/libraries/audio/src/InjectedAudioStream.h index b92736b0ba..d856c4cbd4 100644 --- a/libraries/audio/src/InjectedAudioStream.h +++ b/libraries/audio/src/InjectedAudioStream.h @@ -18,7 +18,7 @@ class InjectedAudioStream : public PositionalAudioStream { public: - InjectedAudioStream(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false); + InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int maxFramesOverDesired); float getRadius() const { return _radius; } float getAttenuationRatio() const { return _attenuationRatio; } diff --git a/libraries/audio/src/MixedAudioStream.cpp b/libraries/audio/src/MixedAudioStream.cpp index b2c57c46d6..4a388d2b14 100644 --- 
a/libraries/audio/src/MixedAudioStream.cpp +++ b/libraries/audio/src/MixedAudioStream.cpp @@ -1,8 +1,8 @@ #include "MixedAudioStream.h" -MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc) - : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, useStDevForJitterCalc) +MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int maxFramesOverDesired, bool useStDevForJitterCalc) + : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, maxFramesOverDesired, useStDevForJitterCalc) { } diff --git a/libraries/audio/src/MixedAudioStream.h b/libraries/audio/src/MixedAudioStream.h index 17769be128..30b3061548 100644 --- a/libraries/audio/src/MixedAudioStream.h +++ b/libraries/audio/src/MixedAudioStream.h @@ -17,7 +17,7 @@ class MixedAudioStream : public InboundAudioStream { public: - MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false); + MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int maxFramesOverDesired, bool useStDevForJitterCalc = false); float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); } diff --git a/libraries/audio/src/PositionalAudioStream.cpp b/libraries/audio/src/PositionalAudioStream.cpp index 1e465b925d..cc6a9add90 100644 --- a/libraries/audio/src/PositionalAudioStream.cpp +++ b/libraries/audio/src/PositionalAudioStream.cpp @@ -21,9 +21,9 @@ #include #include -PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers) : +PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int maxFramesOverDesired) : InboundAudioStream(isStereo ? 
NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL, - AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers), + AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers, maxFramesOverDesired), _type(type), _position(0.0f, 0.0f, 0.0f), _orientation(0.0f, 0.0f, 0.0f, 0.0f), diff --git a/libraries/audio/src/PositionalAudioStream.h b/libraries/audio/src/PositionalAudioStream.h index c9739d9588..5df9972311 100644 --- a/libraries/audio/src/PositionalAudioStream.h +++ b/libraries/audio/src/PositionalAudioStream.h @@ -27,7 +27,7 @@ public: Injector }; - PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo = false, bool dynamicJitterBuffers = false); + PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int maxFramesOverDesired); virtual AudioStreamStats getAudioStreamStats() const; From 9be9f1417acad39c8ca5d1d5d0be24dc28562347 Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Tue, 29 Jul 2014 16:51:57 -0700 Subject: [PATCH 41/62] Made some API changes for procedural animation --- examples/proceduralAnimation.js | 134 ++++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) diff --git a/examples/proceduralAnimation.js b/examples/proceduralAnimation.js index e69de29bb2..7eb0873994 100644 --- a/examples/proceduralAnimation.js +++ b/examples/proceduralAnimation.js @@ -0,0 +1,134 @@ +// +// proceduralAnimation.js +// hifi +// +// Created by Ben Arnold on 7/29/14. +// Copyright (c) 2014 HighFidelity, Inc. All rights reserved. +// +// This is a Procedural Animation API for creating procedural animations in JS. +// To include it in your JS files, simply use the following line at the top: +// Script.include("proceduralAnimation.js"); + +// You can see a usage example in proceduralBot.js + +ProcAnimAPI = function() { + + // generateKeyFrames(rightAngles, leftAngles, middleAngles, numFrames) + // + // Parameters: + // rightAngles - An array of tuples. 
The angles in degrees for the joints + // on the right side of the body + // leftAngles - An array of tuples. The angles in degrees for the joints + // on the left side of the body + // middleAngles - An array of tuples. The angles in degrees for the joints + // on the left side of the body + // numFrames - The number of frames in the animation, before mirroring. + // for a 4 frame walk animation, simply supply 2 frames + // and generateKeyFrames will return 4 frames. + // + // Return Value: + // Returns an array of KeyFrames. Each KeyFrame has an array of quaternions + // for each of the joints, generated from the input angles. They will be ordered + // R,L,R,L,...M,M,M,M where R ~ rightAngles, L ~ leftAngles, M ~ middlesAngles. + // The size of the returned array will be numFrames * 2 + this.generateKeyframes = function(rightAngles, leftAngles, middleAngles, numFrames) { + + if (rightAngles.length != leftAngles.length) { + print("ERROR: generateKeyFrames(...) rightAngles and leftAngles must have equal length."); + } + + //for mirrored joints, such as the arms or legs + var rightQuats = []; + var leftQuats = []; + //for non mirrored joints such as the spine + var middleQuats = []; + + for (var i = 0; i < numFrames; i++) { + rightQuats[i] = []; + leftQuats[i] = []; + middleQuats[i] = []; + } + + var finalKeyFrames = []; + //Generate quaternions + for (var i = 0; i < rightAngles.length; i++) { + for (var j = 0; j < rightAngles[i].length; j++) { + rightQuats[i][j] = Quat.fromPitchYawRollDegrees(rightAngles[i][j][0], rightAngles[i][j][1], rightAngles[i][j][2]); + leftQuats[i][j] = Quat.fromPitchYawRollDegrees(leftAngles[i][j][0], -leftAngles[i][j][1], -leftAngles[i][j][2]); + } + } + for (var i = 0; i < middleAngles.length; i++) { + for (var j = 0; j < middleAngles[i].length; j++) { + middleQuats[i][j] = Quat.fromPitchYawRollDegrees(middleAngles[i][j][0], middleAngles[i][j][1], middleAngles[i][j][2]); + } + } + finalKeyFrames[0] = new KeyFrame(rightQuats[0], 
leftQuats[0], middleQuats[0]); + finalKeyFrames[1] = new KeyFrame(rightQuats[1], leftQuats[1], middleQuats[1]); + + //Generate mirrored quaternions for the other half of the animation + for (var i = 0; i < rightAngles.length; i++) { + for (var j = 0; j < rightAngles[i].length; j++) { + rightQuats[i][j] = Quat.fromPitchYawRollDegrees(rightAngles[i][j][0], -rightAngles[i][j][1], -rightAngles[i][j][2]); + leftQuats[i][j] = Quat.fromPitchYawRollDegrees(leftAngles[i][j][0], leftAngles[i][j][1], leftAngles[i][j][2]); + } + } + for (var i = 0; i < middleAngles.length; i++) { + for (var j = 0; j < middleAngles[i].length; j++) { + middleQuats[i][j] = Quat.fromPitchYawRollDegrees(-middleAngles[i][j][0], -middleAngles[i][j][1], -middleAngles[i][j][2]); + } + } + finalKeyFrames[2] = new KeyFrame(leftQuats[0], rightQuats[0], middleQuats[0]); + finalKeyFrames[3] = new KeyFrame(leftQuats[1], rightQuats[1], middleQuats[1]); + + //Hook up pointers to the next keyframe + for (var i = 0; i < finalKeyFrames.length - 1; i++) { + finalKeyFrames[i].nextFrame = finalKeyFrames[i+1]; + } + finalKeyFrames[finalKeyFrames.length-1].nextFrame = finalKeyFrames[0]; + + //Set up the bezier curve control points using technique described at + //https://www.cs.tcd.ie/publications/tech-reports/reports.94/TCD-CS-94-18.pdf + //Set up all C1 + for (var i = 0; i < finalKeyFrames.length; i++) { + finalKeyFrames[i].nextFrame.controlPoints = []; + for (var j = 0; j < finalKeyFrames[i].rotations.length; j++) { + finalKeyFrames[i].nextFrame.controlPoints[j] = []; + var R = Quat.slerp(finalKeyFrames[i].rotations[j], finalKeyFrames[i].nextFrame.rotations[j], 2.0); + var T = Quat.slerp(R, finalKeyFrames[i].nextFrame.nextFrame.rotations[j], 0.5); + finalKeyFrames[i].nextFrame.controlPoints[j][0] = Quat.slerp(finalKeyFrames[i].nextFrame.rotations[j], T, 0.33333); + } + } + //Set up all C2 + for (var i = 0; i < finalKeyFrames.length; i++) { + for (var j = 0; j < finalKeyFrames[i].rotations.length; j++) { + 
finalKeyFrames[i].controlPoints[j][1] = Quat.slerp(finalKeyFrames[i].nextFrame.rotations[j], finalKeyFrames[i].nextFrame.controlPoints[j][0], -1.0); + } + } + + return finalKeyFrames; + } + + // Animation KeyFrame constructor. rightJoints and leftJoints must be the same size + this.KeyFrame = function(rightJoints, leftJoints, middleJoints) { + this.rotations = []; + + for (var i = 0; i < rightJoints.length; i++) { + this.rotations[this.rotations.length] = rightJoints[i]; + this.rotations[this.rotations.length] = leftJoints[i]; + } + for (var i = 0; i < middleJoints.length; i++) { + this.rotations[this.rotations.length] = middleJoints[i]; + } + } + + // DeCasteljau evaluation to evaluate the bezier curve. + // This is a very natural looking interpolation + this.deCasteljau = function(k1, k2, c1, c2, f) { + var a = Quat.slerp(k1, c1, f); + var b = Quat.slerp(c1, c2, f); + var c = Quat.slerp(c2, k2, f); + var d = Quat.slerp(a, b, f); + var e = Quat.slerp(b, c, f); + return Quat.slerp(d, e, f); + } +} \ No newline at end of file From bcf9a3c20c92a8adc5b332d5723ca0263de79f18 Mon Sep 17 00:00:00 2001 From: wangyix Date: Tue, 29 Jul 2014 17:17:10 -0700 Subject: [PATCH 42/62] changed default max frames over desired in client to 20 --- interface/src/Menu.cpp | 2 +- interface/src/Menu.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index 3fe2ac9e5d..e72c929a44 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -629,7 +629,7 @@ void Menu::loadSettings(QSettings* settings) { } _audioJitterBufferFrames = loadSetting(settings, "audioJitterBufferFrames", 0); - _maxFramesOverDesired = loadSetting(settings, "maxFramesOverDesired", 0); + _maxFramesOverDesired = loadSetting(settings, "maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED); _fieldOfView = loadSetting(settings, "fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES); _realWorldFieldOfView = loadSetting(settings, "realWorldFieldOfView", 
DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES); _faceshiftEyeDeflection = loadSetting(settings, "faceshiftEyeDeflection", DEFAULT_FACESHIFT_EYE_DEFLECTION); diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 2bc750007a..66c4115927 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -47,6 +47,8 @@ const float ADJUST_LOD_MAX_SIZE_SCALE = DEFAULT_OCTREE_SIZE_SCALE; const float MINIMUM_AVATAR_LOD_DISTANCE_MULTIPLIER = 0.1f; const float MAXIMUM_AVATAR_LOD_DISTANCE_MULTIPLIER = 15.0f; +const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 20; + enum FrustumDrawMode { FRUSTUM_DRAW_MODE_ALL, FRUSTUM_DRAW_MODE_VECTORS, From ecfd5c275710f436fe6523544e05c7b3e376081c Mon Sep 17 00:00:00 2001 From: wangyix Date: Tue, 29 Jul 2014 17:37:59 -0700 Subject: [PATCH 43/62] default max frames over desired changed to 10 --- assignment-client/src/audio/AudioMixer.cpp | 8 ++++++-- domain-server/resources/web/settings/describe.json | 2 +- interface/src/Menu.h | 2 -- libraries/audio/src/InboundAudioStream.h | 2 ++ 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index 5f52a175cc..d81b8336f0 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -68,7 +68,7 @@ void attachNewNodeDataToNode(Node *newNode) { } bool AudioMixer::_useDynamicJitterBuffers = false; -int AudioMixer::_maxFramesOverDesired = 20; +int AudioMixer::_maxFramesOverDesired = 0; AudioMixer::AudioMixer(const QByteArray& packet) : ThreadedAssignment(packet), @@ -472,7 +472,11 @@ void AudioMixer::run() { } const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "max-frames-over-desired"; - _maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(); + bool ok; + _maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok); + if (!ok) { + _maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED; + } qDebug() 
<< "Max frames over desired:" << _maxFramesOverDesired; } diff --git a/domain-server/resources/web/settings/describe.json b/domain-server/resources/web/settings/describe.json index 7cba68e795..3eff806c1a 100644 --- a/domain-server/resources/web/settings/describe.json +++ b/domain-server/resources/web/settings/describe.json @@ -12,7 +12,7 @@ "max-frames-over-desired": { "label": "Max Frames Over Desired", "help": "The highest number of frames an AudioMixer's ringbuffer can exceed the desired jitter buffer frames by", - "placeholder": "20", + "placeholder": "10", "default": "" }, "dynamic-jitter-buffer": { diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 66c4115927..2bc750007a 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -47,8 +47,6 @@ const float ADJUST_LOD_MAX_SIZE_SCALE = DEFAULT_OCTREE_SIZE_SCALE; const float MINIMUM_AVATAR_LOD_DISTANCE_MULTIPLIER = 0.1f; const float MAXIMUM_AVATAR_LOD_DISTANCE_MULTIPLIER = 15.0f; -const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 20; - enum FrustumDrawMode { FRUSTUM_DRAW_MODE_ALL, FRUSTUM_DRAW_MODE_VECTORS, diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 4b1db354c1..a63e09320a 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -45,6 +45,8 @@ const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30; const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100; +const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10; + class InboundAudioStream : public NodeData { Q_OBJECT From 1c6834cdd6c93b19d8f676c26d8e3e9a13cbc624 Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Tue, 29 Jul 2014 17:49:33 -0700 Subject: [PATCH 44/62] Improved the API and cleaned up the code --- ...Animation.js => proceduralAnimationAPI.js} | 16 +- examples/proceduralBot.js | 198 ++++-------------- 2 files changed, 44 insertions(+), 170 deletions(-) rename examples/{proceduralAnimation.js => proceduralAnimationAPI.js} (91%) diff --git 
a/examples/proceduralAnimation.js b/examples/proceduralAnimationAPI.js similarity index 91% rename from examples/proceduralAnimation.js rename to examples/proceduralAnimationAPI.js index 7eb0873994..ae6b5c3d31 100644 --- a/examples/proceduralAnimation.js +++ b/examples/proceduralAnimationAPI.js @@ -10,6 +10,8 @@ // Script.include("proceduralAnimation.js"); // You can see a usage example in proceduralBot.js +// The current implementation is quite simple. If you would like a feature +// to be added or expanded, you can contact Ben at brb555@vols.utk.edu ProcAnimAPI = function() { @@ -62,8 +64,8 @@ ProcAnimAPI = function() { middleQuats[i][j] = Quat.fromPitchYawRollDegrees(middleAngles[i][j][0], middleAngles[i][j][1], middleAngles[i][j][2]); } } - finalKeyFrames[0] = new KeyFrame(rightQuats[0], leftQuats[0], middleQuats[0]); - finalKeyFrames[1] = new KeyFrame(rightQuats[1], leftQuats[1], middleQuats[1]); + finalKeyFrames[0] = new this.KeyFrame(rightQuats[0], leftQuats[0], middleQuats[0]); + finalKeyFrames[1] = new this.KeyFrame(rightQuats[1], leftQuats[1], middleQuats[1]); //Generate mirrored quaternions for the other half of the animation for (var i = 0; i < rightAngles.length; i++) { @@ -77,8 +79,8 @@ ProcAnimAPI = function() { middleQuats[i][j] = Quat.fromPitchYawRollDegrees(-middleAngles[i][j][0], -middleAngles[i][j][1], -middleAngles[i][j][2]); } } - finalKeyFrames[2] = new KeyFrame(leftQuats[0], rightQuats[0], middleQuats[0]); - finalKeyFrames[3] = new KeyFrame(leftQuats[1], rightQuats[1], middleQuats[1]); + finalKeyFrames[2] = new this.KeyFrame(leftQuats[0], rightQuats[0], middleQuats[0]); + finalKeyFrames[3] = new this.KeyFrame(leftQuats[1], rightQuats[1], middleQuats[1]); //Hook up pointers to the next keyframe for (var i = 0; i < finalKeyFrames.length - 1; i++) { @@ -106,7 +108,7 @@ ProcAnimAPI = function() { } return finalKeyFrames; - } + }; // Animation KeyFrame constructor. 
rightJoints and leftJoints must be the same size this.KeyFrame = function(rightJoints, leftJoints, middleJoints) { @@ -119,7 +121,7 @@ ProcAnimAPI = function() { for (var i = 0; i < middleJoints.length; i++) { this.rotations[this.rotations.length] = middleJoints[i]; } - } + }; // DeCasteljau evaluation to evaluate the bezier curve. // This is a very natural looking interpolation @@ -130,5 +132,5 @@ ProcAnimAPI = function() { var d = Quat.slerp(a, b, f); var e = Quat.slerp(b, c, f); return Quat.slerp(d, e, f); - } + }; } \ No newline at end of file diff --git a/examples/proceduralBot.js b/examples/proceduralBot.js index ce88f596b6..05838da095 100644 --- a/examples/proceduralBot.js +++ b/examples/proceduralBot.js @@ -11,32 +11,30 @@ // //For procedural walk animation -Script.include("proceduralAnimation.js"); +Script.include("http://s3-us-west-1.amazonaws.com/highfidelity-public/scripts/proceduralAnimationAPI.js"); + +var procAnimAPI = new ProcAnimAPI(); function getRandomFloat(min, max) { -return Math.random() * (max - min) + min; + return Math.random() * (max - min) + min; } function getRandomInt (min, max) { -return Math.floor(Math.random() * (max - min + 1)) + min; + return Math.floor(Math.random() * (max - min + 1)) + min; } function printVector(string, vector) { -print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); + print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); } var CHANCE_OF_MOVING = 0.1; var CHANCE_OF_SOUND = 0; var CHANCE_OF_HEAD_TURNING = 0.05; var CHANCE_OF_BIG_MOVE = 0.1; -var CHANCE_OF_WAVING = 0.009; var isMoving = false; var isTurningHead = false; var isPlayingAudio = false; -var isWaving = false; -var waveFrequency = 0.0; -var waveAmplitude = 0.0; var X_MIN = 0.50; var X_MAX = 15.60; @@ -141,32 +139,13 @@ function playRandomSound() { } } -//Animation KeyFrame constructor. 
rightJoints and leftJoints must be the same size -function WalkKeyFrame(rightJoints, leftJoints, singleJoints) { - this.rotations = []; - - for (var i = 0; i < rightJoints.length; i++) { - this.rotations[this.rotations.length] = rightJoints[i]; - this.rotations[this.rotations.length] = leftJoints[i]; - } - for (var i = 0; i < singleJoints.length; i++) { - this.rotations[this.rotations.length] = singleJoints[i]; - } -} - //Procedural walk animation using two keyframes //We use a separate array for front and back joints -var frontKeyFrames = []; -var backKeyFrames = []; -//for non mirrored joints such as the spine -var singleKeyFrames = []; //Pitch, yaw, and roll for the joints -var frontAngles = []; -var backAngles = []; +var rightAngles = []; +var leftAngles = []; //for non mirrored joints such as the spine -var singleAngles = []; - - +var middleAngles = []; //Actual joint mappings var SHOULDER_JOINT_NUMBER = 15; @@ -185,12 +164,9 @@ var JOINT_SPINE = 11; var NUM_FRAMES = 2; for (var i = 0; i < NUM_FRAMES; i++) { - frontAngles[i] = []; - backAngles[i] = []; - singleAngles[i] = []; - frontKeyFrames[i] = []; - backKeyFrames[i] = []; - singleKeyFrames[i] = []; + rightAngles[i] = []; + leftAngles[i] = []; + middleAngles[i] = []; } //Joint order for actual joint mappings, should be interleaved R,L,R,L,...S,S,S for R = right, L = left, S = single var JOINT_ORDER = [JOINT_R_HIP, JOINT_L_HIP, JOINT_R_KNEE, JOINT_L_KNEE, JOINT_R_ARM, JOINT_L_ARM, JOINT_R_FOREARM, JOINT_L_FOREARM, JOINT_SPINE]; @@ -206,101 +182,37 @@ var SPINE = 0; //We have to store the angles so we can invert yaw and roll when making the animation //symmetrical - //Front refers to leg, not arm. 
//Legs Extending -frontAngles[0][HIP] = [30.0, 0.0, 8.0]; -frontAngles[0][KNEE] = [-15.0, 0.0, 0.0]; -frontAngles[0][ARM] = [85.0, -25.0, 0.0]; -frontAngles[0][FOREARM] = [0.0, 0.0, -15.0]; +rightAngles[0][HIP] = [30.0, 0.0, 8.0]; +rightAngles[0][KNEE] = [-15.0, 0.0, 0.0]; +rightAngles[0][ARM] = [85.0, -25.0, 0.0]; +rightAngles[0][FOREARM] = [0.0, 0.0, -15.0]; -backAngles[0][HIP] = [-15, 0.0, 8.0]; -backAngles[0][KNEE] = [-28, 0.0, 0.0]; -backAngles[0][ARM] = [85.0, 20.0, 0.0]; -backAngles[0][FOREARM] = [10.0, 0.0, -25.0]; +leftAngles[0][HIP] = [-15, 0.0, 8.0]; +leftAngles[0][KNEE] = [-28, 0.0, 0.0]; +leftAngles[0][ARM] = [85.0, 20.0, 0.0]; +leftAngles[0][FOREARM] = [10.0, 0.0, -25.0]; -singleAngles[0][SPINE] = [0.0, -15.0, 5.0]; +middleAngles[0][SPINE] = [0.0, -15.0, 5.0]; //Legs Passing -frontAngles[1][HIP] = [6.0, 0.0, 8.0]; -frontAngles[1][KNEE] = [-12.0, 0.0, 0.0]; -frontAngles[1][ARM] = [85.0, 0.0, 0.0]; -frontAngles[1][FOREARM] = [0.0, 0.0, -15.0]; +rightAngles[1][HIP] = [6.0, 0.0, 8.0]; +rightAngles[1][KNEE] = [-12.0, 0.0, 0.0]; +rightAngles[1][ARM] = [85.0, 0.0, 0.0]; +rightAngles[1][FOREARM] = [0.0, 0.0, -15.0]; -backAngles[1][HIP] = [10.0, 0.0, 8.0]; -backAngles[1][KNEE] = [-55.0, 0.0, 0.0]; -backAngles[1][ARM] = [85.0, 0.0, 0.0]; -backAngles[1][FOREARM] = [0.0, 0.0, -15.0]; +leftAngles[1][HIP] = [10.0, 0.0, 8.0]; +leftAngles[1][KNEE] = [-55.0, 0.0, 0.0]; +leftAngles[1][ARM] = [85.0, 0.0, 0.0]; +leftAngles[1][FOREARM] = [0.0, 0.0, -15.0]; -singleAngles[1][SPINE] = [0.0, 0.0, 0.0]; +middleAngles[1][SPINE] = [0.0, 0.0, 0.0]; // ******************************* Animation Is Defined Above ************************************* //Actual keyframes for the animation -var walkKeyFrames = []; -//Generate quaternions from the angles -for (var i = 0; i < frontAngles.length; i++) { - for (var j = 0; j < frontAngles[i].length; j++) { - frontKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(frontAngles[i][j][0], frontAngles[i][j][1], frontAngles[i][j][2]); - 
backKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(backAngles[i][j][0], -backAngles[i][j][1], -backAngles[i][j][2]); - } -} -for (var i = 0; i < singleAngles.length; i++) { - for (var j = 0; j < singleAngles[i].length; j++) { - singleKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(singleAngles[i][j][0], singleAngles[i][j][1], singleAngles[i][j][2]); - } -} -walkKeyFrames[0] = new WalkKeyFrame(frontKeyFrames[0], backKeyFrames[0], singleKeyFrames[0]); -walkKeyFrames[1] = new WalkKeyFrame(frontKeyFrames[1], backKeyFrames[1], singleKeyFrames[1]); - -//Generate mirrored quaternions for the other half of the body -for (var i = 0; i < frontAngles.length; i++) { - for (var j = 0; j < frontAngles[i].length; j++) { - frontKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(frontAngles[i][j][0], -frontAngles[i][j][1], -frontAngles[i][j][2]); - backKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(backAngles[i][j][0], backAngles[i][j][1], backAngles[i][j][2]); - } -} -for (var i = 0; i < singleAngles.length; i++) { - for (var j = 0; j < singleAngles[i].length; j++) { - singleKeyFrames[i][j] = Quat.fromPitchYawRollDegrees(-singleAngles[i][j][0], -singleAngles[i][j][1], -singleAngles[i][j][2]); - } -} -walkKeyFrames[2] = new WalkKeyFrame(backKeyFrames[0], frontKeyFrames[0], singleKeyFrames[0]); -walkKeyFrames[3] = new WalkKeyFrame(backKeyFrames[1], frontKeyFrames[1], singleKeyFrames[1]); - -//Hook up pointers to the next keyframe -for (var i = 0; i < walkKeyFrames.length - 1; i++) { - walkKeyFrames[i].nextFrame = walkKeyFrames[i+1]; -} -walkKeyFrames[walkKeyFrames.length-1].nextFrame = walkKeyFrames[0]; - -//Set up the bezier curve control points using technique described at -//https://www.cs.tcd.ie/publications/tech-reports/reports.94/TCD-CS-94-18.pdf -//Set up all C1 -for (var i = 0; i < walkKeyFrames.length; i++) { - walkKeyFrames[i].nextFrame.controlPoints = []; - for (var j = 0; j < walkKeyFrames[i].rotations.length; j++) { - walkKeyFrames[i].nextFrame.controlPoints[j] = []; - var R 
= Quat.slerp(walkKeyFrames[i].rotations[j], walkKeyFrames[i].nextFrame.rotations[j], 2.0); - var T = Quat.slerp(R, walkKeyFrames[i].nextFrame.nextFrame.rotations[j], 0.5); - walkKeyFrames[i].nextFrame.controlPoints[j][0] = Quat.slerp(walkKeyFrames[i].nextFrame.rotations[j], T, 0.33333); - } -} -//Set up all C2 -for (var i = 0; i < walkKeyFrames.length; i++) { - for (var j = 0; j < walkKeyFrames[i].rotations.length; j++) { - walkKeyFrames[i].controlPoints[j][1] = Quat.slerp(walkKeyFrames[i].nextFrame.rotations[j], walkKeyFrames[i].nextFrame.controlPoints[j][0], -1.0); - } -} -//DeCasteljau evaluation to evaluate the bezier curve -function deCasteljau(k1, k2, c1, c2, f) { - var a = Quat.slerp(k1, c1, f); - var b = Quat.slerp(c1, c2, f); - var c = Quat.slerp(c2, k2, f); - var d = Quat.slerp(a, b, f); - var e = Quat.slerp(b, c, f); - return Quat.slerp(d, e, f); -} +var walkKeyFrames = procAnimAPI.generateKeyframes(rightAngles, leftAngles, middleAngles, NUM_FRAMES); var currentFrame = 0; @@ -314,7 +226,7 @@ var avatarVelocity = 0.0; var avatarMaxVelocity = 1.4; function keepWalking(deltaTime) { - + walkTime += avatarVelocity * deltaTime; if (walkTime > walkWheelRate) { walkTime = 0.0; @@ -329,17 +241,10 @@ function keepWalking(deltaTime) { var interp = walkTime / walkWheelRate; for (var i = 0; i < JOINT_ORDER.length; i++) { - Avatar.setJointData(JOINT_ORDER[i], deCasteljau(frame.rotations[i], frame.nextFrame.rotations[i], frame.controlPoints[i][0], frame.controlPoints[i][1], interp)); + Avatar.setJointData(JOINT_ORDER[i], procAnimAPI.deCasteljau(frame.rotations[i], frame.nextFrame.rotations[i], frame.controlPoints[i][0], frame.controlPoints[i][1], interp)); } } -var trailingAverageLoudness = 0; -var MAX_SAMPLE = 32767; -var DB_METER_BASE = Math.log(MAX_SAMPLE); - -var RAND_RATIO_LAST = getRandomFloat(0.1, 0.3); -var RAND_TRAILING = 1 - RAND_RATIO_LAST; - function jumpWithLoudness(deltaTime) { // potentially change pelvis height depending on trailing average loudness @@ 
-362,18 +267,9 @@ function jumpWithLoudness(deltaTime) { Avatar.position = pelvisPosition; } -var jointMapping = null; -var frameIndex = 0.0; -var isPlayingDanceAnimation = false; -var randomAnimation = null; -var animationLoops = 1; var forcedMove = false; -var FRAME_RATE = 30.0; - var wasMovingLastFrame = false; -var wasDancing = false; - function handleHeadTurn() { if (!isTurningHead && (Math.random() < CHANCE_OF_HEAD_TURNING)) { @@ -387,16 +283,6 @@ function handleHeadTurn() { } } -var currentShoulderQuat = Avatar.getJointRotation(SHOULDER_JOINT_NUMBER); -var targetShoulderQuat = currentShoulderQuat; -var idleShoulderQuat = currentShoulderQuat; -var currentSpineQuat = Avatar.getJointRotation(JOINT_SPINE); -var targetSpineQuat = currentSpineQuat; -var idleSpineQuat = currentSpineQuat; -var currentElbowQuat = Avatar.getJointRotation(ELBOW_JOINT_NUMBER); -var targetElbowQuat = currentElbowQuat; -var idleElbowQuat = currentElbowQuat; - function stopWalking() { Avatar.clearJointData(JOINT_R_HIP); Avatar.clearJointData(JOINT_R_KNEE); @@ -421,7 +307,7 @@ function handleWalking(deltaTime) { targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, MOVE_RANGE_SMALL))); } while ((targetPosition.x < X_MIN || targetPosition.x > X_MAX || targetPosition.z < Z_MIN || targetPosition.z > Z_MAX) - && attempts < MAX_ATEMPTS); + && attempts < MAX_ATTEMPTS); targetPosition.x = clamp(targetPosition.x, X_MIN, X_MAX); targetPosition.z = clamp(targetPosition.z, Z_MIN, Z_MAX); @@ -473,31 +359,17 @@ function changePelvisHeight(newHeight) { Avatar.position = newPosition; } -function possiblyStopDancing() { - if (wasDancing) { - for (var j = 0; j < Avatar.jointNames.length; j++) { - Avatar.clearJointData(j); - } - - changePelvisHeight(Y_PELVIS); - } -} - function updateBehavior(deltaTime) { cumulativeTime += deltaTime; if (AvatarList.containsAvatarWithDisplayName("mrdj")) { - if (wasMovingLastFrame && !wasDancing) { + if (wasMovingLastFrame) { isMoving = false; } 
// we have a DJ, shouldn't we be dancing? jumpWithLoudness(deltaTime); } else { - // make sure we're not dancing anymore - possiblyStopDancing(); - - wasDancing = false; // no DJ, let's just chill on the dancefloor - randomly walking and talking handleHeadTurn(); From 656131b7eec947e8646a7e29bb9e2d23f24c53e2 Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 10:24:24 -0700 Subject: [PATCH 45/62] made frames available avg window 2s; added missing _silentFramesDropped update --- libraries/audio/src/InboundAudioStream.cpp | 4 +++- libraries/audio/src/InboundAudioStream.h | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index cbf9e5a0bb..fd20f24588 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -256,6 +256,8 @@ int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) { // dont reset _currentJitterBufferFrames here; we want to be able to drop further silent frames // without waiting for _framesAvailableStat to fill up to 10s of samples. 
_currentJitterBufferFrames -= numSilentFramesToDrop; + _silentFramesDropped += numSilentFramesToDrop; + _framesAvailableStat.reset(); } @@ -277,7 +279,7 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const { streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage(); streamStats._framesAvailable = _ringBuffer.framesAvailable(); - streamStats._framesAvailableAverage = _currentJitterBufferFrames; + streamStats._framesAvailableAverage = _framesAvailableStat.getAverage(); streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames; streamStats._starveCount = _starveCount; streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount; diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index a63e09320a..268b9b5e76 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -37,7 +37,9 @@ const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10; const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS; const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30; -const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND; +// this controls the window size of the time-weighted avg of frames available. Every time the window fills up, +// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset. 
+const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 2 * USECS_PER_SECOND; // the internal history buffer of the incoming seq stats will cover 30s to calculate // packet loss % over last 30s From 5b0662f1d068e0b9c966c48d851d458595aee1e9 Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 10:51:50 -0700 Subject: [PATCH 46/62] fixed setToStarved() behavior of InboundAudioStream --- libraries/audio/src/InboundAudioStream.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index fd20f24588..39cc8757f4 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -166,8 +166,9 @@ void InboundAudioStream::framesAvailableChanged() { } void InboundAudioStream::setToStarved() { - if (!_isStarved && _ringBuffer.framesAvailable() < _desiredJitterBufferFrames) { - starved(); + starved(); + if (_ringBuffer.framesAvailable() >= _desiredJitterBufferFrames) { + _isStarved = false; } } From 30c17067a1ff9d6c2676edac254c562588a01c5a Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 11:02:10 -0700 Subject: [PATCH 47/62] moved pushAudioOutput to handleAudioInput --- interface/src/Audio.cpp | 6 ++++-- libraries/audio/src/InboundAudioStream.cpp | 5 +++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 2a801eb009..32d35ff797 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -711,6 +711,8 @@ void Audio::handleAudioInput() { } delete[] inputAudioSamples; } + + pushAudioToOutput(); } void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) { @@ -894,7 +896,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { // parse audio data _receivedAudioStream.parseData(audioByteArray); - pushAudioToOutput(); + //pushAudioToOutput(); } void Audio::pushAudioToOutput() { @@ -1660,7 +1662,7 @@ bool 
Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo) // setup our general output device for audio-mixer audio _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this); - _audioOutput->setBufferSize(_receivedAudioStream.getFrameCapacity() * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS)); + _audioOutput->setBufferSize(/*_receivedAudioStream.getFrameCapacity()*/ 10 * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS)); qDebug() << "Ring Buffer capacity in frames: " << _receivedAudioStream.getFrameCapacity(); _outputDevice = _audioOutput->start(); diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index fd20f24588..39cc8757f4 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -166,8 +166,9 @@ void InboundAudioStream::framesAvailableChanged() { } void InboundAudioStream::setToStarved() { - if (!_isStarved && _ringBuffer.framesAvailable() < _desiredJitterBufferFrames) { - starved(); + starved(); + if (_ringBuffer.framesAvailable() >= _desiredJitterBufferFrames) { + _isStarved = false; } } From 2b77a72aba3a2cf761e9eb09b86c194f170dc6d2 Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 11:13:15 -0700 Subject: [PATCH 48/62] changed Mac downstream ringbuffer to 100 frames --- interface/src/Audio.cpp | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 32d35ff797..e47582efc7 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -75,11 +75,7 @@ Audio::Audio(QObject* parent) : // slower than real time (or at least the desired sample rate). If you increase the size of the ring buffer, then it // this delay will slowly add up and the longer someone runs, they more delayed their audio will be. 
_inputRingBuffer(0), -#ifdef _WIN32 - _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, 20, true), -#else - _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, 20, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!! -#endif + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, 0, true), _isStereoInput(false), _averagedLatency(0.0), _lastInputLoudness(0), @@ -896,6 +892,12 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { // parse audio data _receivedAudioStream.parseData(audioByteArray); + + // This call has been moved to handleAudioInput. handleAudioInput is called at a much more regular interval + // than processReceivedAudio since handleAudioInput does not experience network-related jitter. + // This way, we reduce the jitter of the frames being pushed to the audio output, allowing us to use a reduced + // buffer size for it, which reduces latency. + //pushAudioToOutput(); } @@ -1659,11 +1661,13 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo) if (adjustedFormatForAudioDevice(outputDeviceInfo, _desiredOutputFormat, _outputFormat)) { qDebug() << "The format to be used for audio output is" << _outputFormat; - + + const int AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 10; + // setup our general output device for audio-mixer audio _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this); - _audioOutput->setBufferSize(/*_receivedAudioStream.getFrameCapacity()*/ 10 * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS)); - qDebug() << "Ring Buffer capacity in frames: " << _receivedAudioStream.getFrameCapacity(); + _audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS)); + qDebug() << "Ring Buffer capacity in frames: " << AUDIO_OUTPUT_BUFFER_SIZE_FRAMES; _outputDevice = _audioOutput->start(); // setup a loopback audio output device From 
af5eedb50e7b116796c4feb069cae946836defe2 Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 11:39:29 -0700 Subject: [PATCH 49/62] fixed sequenceNumberStats sender changed msgs --- libraries/networking/src/SequenceNumberStats.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/libraries/networking/src/SequenceNumberStats.cpp b/libraries/networking/src/SequenceNumberStats.cpp index 421d8ddc9a..66d57500a5 100644 --- a/libraries/networking/src/SequenceNumberStats.cpp +++ b/libraries/networking/src/SequenceNumberStats.cpp @@ -36,9 +36,11 @@ SequenceNumberStats::ArrivalInfo SequenceNumberStats::sequenceNumberReceived(qui // if the sender node has changed, reset all stats if (senderUUID != _lastSenderUUID) { - qDebug() << "sequence number stats was reset due to new sender node"; - qDebug() << "previous:" << _lastSenderUUID << "current:" << senderUUID; - reset(); + if (_stats._numReceived > 0) { + qDebug() << "sequence number stats was reset due to new sender node"; + qDebug() << "previous:" << _lastSenderUUID << "current:" << senderUUID; + reset(); + } _lastSenderUUID = senderUUID; } From 81e5419287a9772f8080c64be8dd7c48f50e341e Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 11:43:33 -0700 Subject: [PATCH 50/62] added CTRL+A shortcut to display audio stats --- interface/src/Menu.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index e72c929a44..6af6cb0d38 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -595,7 +595,7 @@ Menu::Menu() : false); addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioStats, - 0, + Qt::CTRL | Qt::Key_A, false, appInstance->getAudio(), SLOT(toggleStats())); From efd4b7ad676ccae66bed8a61470aaaf0a5c3c2ee Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 12:04:03 -0700 Subject: [PATCH 51/62] switched upstream/downstream stats; added menu item to disable injected stats --- 
interface/src/Audio.cpp | 45 ++++++++++++++++++++++++----------------- interface/src/Audio.h | 2 ++ interface/src/Menu.cpp | 6 ++++++ interface/src/Menu.h | 1 + 4 files changed, 35 insertions(+), 19 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index e47582efc7..b4d34c2716 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -108,6 +108,7 @@ Audio::Audio(QObject* parent) : _scopeOutputLeft(0), _scopeOutputRight(0), _statsEnabled(false), + _statsShowInjectedStreams(false), _outgoingAvatarAudioSequenceNumber(0), _audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AUDIO_CALLBACK_MSECS * CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS), _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS), @@ -1220,6 +1221,10 @@ void Audio::toggleStats() { _statsEnabled = !_statsEnabled; } +void Audio::toggleStatsShowInjectedStreams() { + _statsShowInjectedStreams = !_statsShowInjectedStreams; +} + void Audio::selectAudioScopeFiveFrames() { if (Menu::getInstance()->isOptionChecked(MenuOption::AudioScopeFiveFrames)) { reallocateScope(5); @@ -1304,10 +1309,10 @@ void Audio::renderStats(const float* color, int width, int height) { return; } - const int LINES_WHEN_CENTERED = 30; - const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * LINES_WHEN_CENTERED; + const int linesWhenCentered = _statsShowInjectedStreams ? 30 : 23; + const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * linesWhenCentered; - int lines = _audioMixerInjectedStreamAudioStatsMap.size() * 7 + 23; + int lines = _statsShowInjectedStreams ? 
_audioMixerInjectedStreamAudioStatsMap.size() * 7 + 23 : 23; int statsHeight = STATS_HEIGHT_PER_LINE * lines; @@ -1381,6 +1386,14 @@ void Audio::renderStats(const float* color, int width, int height) { verticalOffset += STATS_HEIGHT_PER_LINE; // blank line + char upstreamMicLabelString[] = "Upstream mic audio stats:"; + verticalOffset += STATS_HEIGHT_PER_LINE; + drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamMicLabelString, color); + + renderAudioStreamStats(_audioMixerAvatarStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color); + + + verticalOffset += STATS_HEIGHT_PER_LINE; // blank line char downstreamLabelString[] = "Downstream mixed audio stats:"; verticalOffset += STATS_HEIGHT_PER_LINE; @@ -1389,26 +1402,20 @@ void Audio::renderStats(const float* color, int width, int height) { renderAudioStreamStats(downstreamAudioStreamStats, horizontalOffset, verticalOffset, scale, rotation, font, color, true); - verticalOffset += STATS_HEIGHT_PER_LINE; // blank line + if (_statsShowInjectedStreams) { - char upstreamMicLabelString[] = "Upstream mic audio stats:"; - verticalOffset += STATS_HEIGHT_PER_LINE; - drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamMicLabelString, color); + foreach(const AudioStreamStats& injectedStreamAudioStats, _audioMixerInjectedStreamAudioStatsMap) { - renderAudioStreamStats(_audioMixerAvatarStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color); + verticalOffset += STATS_HEIGHT_PER_LINE; // blank line + char upstreamInjectedLabelString[512]; + sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s", + injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data()); + verticalOffset += STATS_HEIGHT_PER_LINE; + drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color); - foreach(const AudioStreamStats& injectedStreamAudioStats, 
_audioMixerInjectedStreamAudioStatsMap) { - - verticalOffset += STATS_HEIGHT_PER_LINE; // blank line - - char upstreamInjectedLabelString[512]; - sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s", - injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data()); - verticalOffset += STATS_HEIGHT_PER_LINE; - drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color); - - renderAudioStreamStats(injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color); + renderAudioStreamStats(injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color); + } } } diff --git a/interface/src/Audio.h b/interface/src/Audio.h index cb042ef25e..99ca3a6b8b 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -104,6 +104,7 @@ public slots: void toggleScope(); void toggleScopePause(); void toggleStats(); + void toggleStatsShowInjectedStreams(); void toggleAudioSpatialProcessing(); void toggleStereoInput(); void selectAudioScopeFiveFrames(); @@ -268,6 +269,7 @@ private: #endif static const unsigned int STATS_HEIGHT_PER_LINE = 20; bool _statsEnabled; + bool _statsShowInjectedStreams; AudioStreamStats _audioMixerAvatarStreamAudioStats; QHash _audioMixerInjectedStreamAudioStatsMap; diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index 6af6cb0d38..3279aff249 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -600,6 +600,12 @@ Menu::Menu() : appInstance->getAudio(), SLOT(toggleStats())); + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioStatsShowInjectedStreams, + 0, + false, + appInstance->getAudio(), + SLOT(toggleStatsShowInjectedStreams())); + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, true); addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel, diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 
2bc750007a..cba9703e32 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -319,6 +319,7 @@ namespace MenuOption { const QString AudioScopePause = "Pause Audio Scope"; const QString AudioScopeTwentyFrames = "Twenty"; const QString AudioStats = "Audio Stats"; + const QString AudioStatsShowInjectedStreams = "Audio Stats Show Injected Streams"; const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation"; const QString AudioSpatialProcessing = "Audio Spatial Processing"; const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation"; From 303f4bf4f1f106f704819525a81f3608de876b5b Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Wed, 30 Jul 2014 12:08:07 -0700 Subject: [PATCH 52/62] Improved API, added footstep sounds --- examples/proceduralAnimationAPI.js | 54 +++--- examples/proceduralBot.js | 264 ++++++++++++++++------------- 2 files changed, 176 insertions(+), 142 deletions(-) diff --git a/examples/proceduralAnimationAPI.js b/examples/proceduralAnimationAPI.js index ae6b5c3d31..61397ec180 100644 --- a/examples/proceduralAnimationAPI.js +++ b/examples/proceduralAnimationAPI.js @@ -64,8 +64,10 @@ ProcAnimAPI = function() { middleQuats[i][j] = Quat.fromPitchYawRollDegrees(middleAngles[i][j][0], middleAngles[i][j][1], middleAngles[i][j][2]); } } - finalKeyFrames[0] = new this.KeyFrame(rightQuats[0], leftQuats[0], middleQuats[0]); - finalKeyFrames[1] = new this.KeyFrame(rightQuats[1], leftQuats[1], middleQuats[1]); + + for (var i = 0; i < numFrames; i++) { + finalKeyFrames[i] = new this.KeyFrame(rightQuats[i], leftQuats[i], middleQuats[i]); + } //Generate mirrored quaternions for the other half of the animation for (var i = 0; i < rightAngles.length; i++) { @@ -79,35 +81,41 @@ ProcAnimAPI = function() { middleQuats[i][j] = Quat.fromPitchYawRollDegrees(-middleAngles[i][j][0], -middleAngles[i][j][1], -middleAngles[i][j][2]); } } - finalKeyFrames[2] = new this.KeyFrame(leftQuats[0], 
rightQuats[0], middleQuats[0]); - finalKeyFrames[3] = new this.KeyFrame(leftQuats[1], rightQuats[1], middleQuats[1]); - - //Hook up pointers to the next keyframe - for (var i = 0; i < finalKeyFrames.length - 1; i++) { - finalKeyFrames[i].nextFrame = finalKeyFrames[i+1]; + for (var i = 0; i < numFrames; i++) { + finalKeyFrames[numFrames + i] = new this.KeyFrame(leftQuats[i], rightQuats[i], middleQuats[i]); } - finalKeyFrames[finalKeyFrames.length-1].nextFrame = finalKeyFrames[0]; + + //Generate control points + this.computeBezierControlPoints(finalKeyFrames); + + return finalKeyFrames; + }; + + //Computes 2 controlPoints to each keyframe to be used in the bezier evaluation. + //Technique is described at: //https://www.cs.tcd.ie/publications/tech-reports/reports.94/TCD-CS-94-18.pdf + this.computeBezierControlPoints = function(keyFrames) { + //Hook up pointers to the next keyframe + for (var i = 0; i < keyFrames.length - 1; i++) { + keyFrames[i].nextFrame = keyFrames[i+1]; + } + keyFrames[keyFrames.length-1].nextFrame = keyFrames[0]; - //Set up the bezier curve control points using technique described at - //https://www.cs.tcd.ie/publications/tech-reports/reports.94/TCD-CS-94-18.pdf //Set up all C1 - for (var i = 0; i < finalKeyFrames.length; i++) { - finalKeyFrames[i].nextFrame.controlPoints = []; - for (var j = 0; j < finalKeyFrames[i].rotations.length; j++) { - finalKeyFrames[i].nextFrame.controlPoints[j] = []; - var R = Quat.slerp(finalKeyFrames[i].rotations[j], finalKeyFrames[i].nextFrame.rotations[j], 2.0); - var T = Quat.slerp(R, finalKeyFrames[i].nextFrame.nextFrame.rotations[j], 0.5); - finalKeyFrames[i].nextFrame.controlPoints[j][0] = Quat.slerp(finalKeyFrames[i].nextFrame.rotations[j], T, 0.33333); + for (var i = 0; i < keyFrames.length; i++) { + keyFrames[i].nextFrame.controlPoints = []; + for (var j = 0; j < keyFrames[i].rotations.length; j++) { + keyFrames[i].nextFrame.controlPoints[j] = []; + var R = Quat.slerp(keyFrames[i].rotations[j], 
keyFrames[i].nextFrame.rotations[j], 2.0); + var T = Quat.slerp(R, keyFrames[i].nextFrame.nextFrame.rotations[j], 0.5); + keyFrames[i].nextFrame.controlPoints[j][0] = Quat.slerp(keyFrames[i].nextFrame.rotations[j], T, 0.33333); } } //Set up all C2 - for (var i = 0; i < finalKeyFrames.length; i++) { - for (var j = 0; j < finalKeyFrames[i].rotations.length; j++) { - finalKeyFrames[i].controlPoints[j][1] = Quat.slerp(finalKeyFrames[i].nextFrame.rotations[j], finalKeyFrames[i].nextFrame.controlPoints[j][0], -1.0); + for (var i = 0; i < keyFrames.length; i++) { + for (var j = 0; j < keyFrames[i].rotations.length; j++) { + keyFrames[i].controlPoints[j][1] = Quat.slerp(keyFrames[i].nextFrame.rotations[j], keyFrames[i].nextFrame.controlPoints[j][0], -1.0); } } - - return finalKeyFrames; }; // Animation KeyFrame constructor. rightJoints and leftJoints must be the same size diff --git a/examples/proceduralBot.js b/examples/proceduralBot.js index 05838da095..d8218c8341 100644 --- a/examples/proceduralBot.js +++ b/examples/proceduralBot.js @@ -27,7 +27,7 @@ function printVector(string, vector) { print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); } -var CHANCE_OF_MOVING = 0.1; +var CHANCE_OF_MOVING = 0.01; var CHANCE_OF_SOUND = 0; var CHANCE_OF_HEAD_TURNING = 0.05; var CHANCE_OF_BIG_MOVE = 0.1; @@ -60,8 +60,6 @@ var targetOrientation = { x: 0, y: 0, z: 0, w: 0 }; var currentOrientation = { x: 0, y: 0, z: 0, w: 0 }; var targetHeadPitch = 0.0; -var cumulativeTime = 0.0; - var basePelvisHeight = 0.0; var pelvisOscillatorPosition = 0.0; var pelvisOscillatorVelocity = 0.0; @@ -209,11 +207,34 @@ leftAngles[1][FOREARM] = [0.0, 0.0, -15.0]; middleAngles[1][SPINE] = [0.0, 0.0, 0.0]; -// ******************************* Animation Is Defined Above ************************************* - //Actual keyframes for the animation var walkKeyFrames = procAnimAPI.generateKeyframes(rightAngles, leftAngles, middleAngles, NUM_FRAMES); +// ******************************* Animation Is 
Defined Above ************************************* + +// ********************************** Standing Key Frame ****************************************** +//We don't have to do any mirroring or anything, since this is just a single pose. +var rightQuats = []; +var leftQuats = []; +var middleQuats = []; + +rightQuats[HIP] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 7.0); +rightQuats[KNEE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0); +rightQuats[ARM] = Quat.fromPitchYawRollDegrees(85.0, 0.0, 0.0); +rightQuats[FOREARM] = Quat.fromPitchYawRollDegrees(0.0, 0.0, -10.0); + +leftQuats[HIP] = Quat.fromPitchYawRollDegrees(0, 0.0, -7.0); +leftQuats[KNEE] = Quat.fromPitchYawRollDegrees(0, 0.0, 0.0); +leftQuats[ARM] = Quat.fromPitchYawRollDegrees(85.0, 0.0, 0.0); +leftQuats[FOREARM] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 10.0); + +middleQuats[SPINE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0); + +var standingKeyFrame = new procAnimAPI.KeyFrame(rightQuats, leftQuats, middleQuats); + +// ************************************************************************************************ + + var currentFrame = 0; var walkTime = 0.0; @@ -225,46 +246,56 @@ var avatarAcceleration = 0.75; var avatarVelocity = 0.0; var avatarMaxVelocity = 1.4; -function keepWalking(deltaTime) { - - walkTime += avatarVelocity * deltaTime; - if (walkTime > walkWheelRate) { - walkTime = 0.0; - currentFrame++; - if (currentFrame > 3) { - currentFrame = 0; - } - } +function handleAnimation(deltaTime) { - var frame = walkKeyFrames[currentFrame]; + if (avatarVelocity == 0.0) { + walkTime = 0.0; + currentFrame = 0; + } else { + walkTime += avatarVelocity * deltaTime; + if (walkTime > walkWheelRate) { + walkTime = 0.0; + currentFrame++; + if (currentFrame > 3) { + currentFrame = 0; + } + } + } + + var frame = walkKeyFrames[currentFrame]; - var interp = walkTime / walkWheelRate; + var walkInterp = walkTime / walkWheelRate; + var animInterp = avatarVelocity / (avatarMaxVelocity / 2.0); + if (animInterp > 1.0) 
animInterp = 1.0; - for (var i = 0; i < JOINT_ORDER.length; i++) { - Avatar.setJointData(JOINT_ORDER[i], procAnimAPI.deCasteljau(frame.rotations[i], frame.nextFrame.rotations[i], frame.controlPoints[i][0], frame.controlPoints[i][1], interp)); - } + for (var i = 0; i < JOINT_ORDER.length; i++) { + var walkJoint = procAnimAPI.deCasteljau(frame.rotations[i], frame.nextFrame.rotations[i], frame.controlPoints[i][0], frame.controlPoints[i][1], walkInterp); + var standJoint = standingKeyFrame.rotations[i]; + var finalJoint = Quat.mix(standJoint, walkJoint, animInterp); + Avatar.setJointData(JOINT_ORDER[i], finalJoint); + } } function jumpWithLoudness(deltaTime) { - // potentially change pelvis height depending on trailing average loudness - - pelvisOscillatorVelocity += deltaTime * Agent.lastReceivedAudioLoudness * 700.0 ; + // potentially change pelvis height depending on trailing average loudness - pelvisOscillatorVelocity -= pelvisOscillatorPosition * 0.75; - pelvisOscillatorVelocity *= 0.97; - pelvisOscillatorPosition += deltaTime * pelvisOscillatorVelocity; - Avatar.headPitch = pelvisOscillatorPosition * 60.0; + pelvisOscillatorVelocity += deltaTime * Agent.lastReceivedAudioLoudness * 700.0 ; - var pelvisPosition = Avatar.position; - pelvisPosition.y = (Y_PELVIS - 0.35) + pelvisOscillatorPosition; - - if (pelvisPosition.y < Y_PELVIS) { - pelvisPosition.y = Y_PELVIS; - } else if (pelvisPosition.y > Y_PELVIS + 1.0) { - pelvisPosition.y = Y_PELVIS + 1.0; - } - - Avatar.position = pelvisPosition; + pelvisOscillatorVelocity -= pelvisOscillatorPosition * 0.75; + pelvisOscillatorVelocity *= 0.97; + pelvisOscillatorPosition += deltaTime * pelvisOscillatorVelocity; + Avatar.headPitch = pelvisOscillatorPosition * 60.0; + + var pelvisPosition = Avatar.position; + pelvisPosition.y = (Y_PELVIS - 0.35) + pelvisOscillatorPosition; + + if (pelvisPosition.y < Y_PELVIS) { + pelvisPosition.y = Y_PELVIS; + } else if (pelvisPosition.y > Y_PELVIS + 1.0) { + pelvisPosition.y = Y_PELVIS + 
1.0; + } + + Avatar.position = pelvisPosition; } var forcedMove = false; @@ -272,110 +303,105 @@ var forcedMove = false; var wasMovingLastFrame = false; function handleHeadTurn() { - if (!isTurningHead && (Math.random() < CHANCE_OF_HEAD_TURNING)) { - targetHeadPitch = getRandomFloat(-PITCH_RANGE, PITCH_RANGE); - isTurningHead = true; - } else { - Avatar.headPitch = Avatar.headPitch + (targetHeadPitch - Avatar.headPitch) * PITCH_RATE; - if (Math.abs(Avatar.headPitch - targetHeadPitch) < STOP_TOLERANCE) { - isTurningHead = false; + if (!isTurningHead && (Math.random() < CHANCE_OF_HEAD_TURNING)) { + targetHeadPitch = getRandomFloat(-PITCH_RANGE, PITCH_RANGE); + isTurningHead = true; + } else { + Avatar.headPitch = Avatar.headPitch + (targetHeadPitch - Avatar.headPitch) * PITCH_RATE; + if (Math.abs(Avatar.headPitch - targetHeadPitch) < STOP_TOLERANCE) { + isTurningHead = false; + } } - } } function stopWalking() { - Avatar.clearJointData(JOINT_R_HIP); - Avatar.clearJointData(JOINT_R_KNEE); - Avatar.clearJointData(JOINT_L_HIP); - Avatar.clearJointData(JOINT_L_KNEE); - avatarVelocity = 0.0; - isMoving = false; + avatarVelocity = 0.0; + isMoving = false; } var MAX_ATTEMPTS = 40; function handleWalking(deltaTime) { - if (forcedMove || (!isMoving && Math.random() < CHANCE_OF_MOVING)) { - // Set new target location - - //Keep trying new orientations if the desired target location is out of bounds - var attempts = 0; - do { - targetOrientation = Quat.multiply(Avatar.orientation, Quat.angleAxis(getRandomFloat(-TURN_RANGE, TURN_RANGE), { x:0, y:1, z:0 })); - var front = Quat.getFront(targetOrientation); - - targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, MOVE_RANGE_SMALL))); - } - while ((targetPosition.x < X_MIN || targetPosition.x > X_MAX || targetPosition.z < Z_MIN || targetPosition.z > Z_MAX) - && attempts < MAX_ATTEMPTS); - - targetPosition.x = clamp(targetPosition.x, X_MIN, X_MAX); - targetPosition.z = clamp(targetPosition.z, Z_MIN, 
Z_MAX); - targetPosition.y = Y_PELVIS; - - wasMovingLastFrame = true; - isMoving = true; - forcedMove = false; - } else if (isMoving) { - keepWalking(deltaTime); - - var targetVector = Vec3.subtract(targetPosition, Avatar.position); - var distance = Vec3.length(targetVector); - if (distance <= avatarVelocity * deltaTime) { - Avatar.position = targetPosition; - stopWalking(); - } else { - var direction = Vec3.normalize(targetVector); - //Figure out if we should be slowing down - var t = avatarVelocity / avatarAcceleration; - var d = (avatarVelocity / 2.0) * t; - if (distance < d) { - avatarVelocity -= avatarAcceleration * deltaTime; - if (avatarVelocity <= 0) { - stopWalking(); - } - } else { - avatarVelocity += avatarAcceleration * deltaTime; - if (avatarVelocity > avatarMaxVelocity) avatarVelocity = avatarMaxVelocity; + if (forcedMove || (!isMoving && Math.random() < CHANCE_OF_MOVING)) { + // Set new target location + + //Keep trying new orientations if the desired target location is out of bounds + var attempts = 0; + do { + targetOrientation = Quat.multiply(Avatar.orientation, Quat.angleAxis(getRandomFloat(-TURN_RANGE, TURN_RANGE), { x:0, y:1, z:0 })); + var front = Quat.getFront(targetOrientation); + + targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, MOVE_RANGE_SMALL))); + } + while ((targetPosition.x < X_MIN || targetPosition.x > X_MAX || targetPosition.z < Z_MIN || targetPosition.z > Z_MAX) + && attempts < MAX_ATTEMPTS); + + targetPosition.x = clamp(targetPosition.x, X_MIN, X_MAX); + targetPosition.z = clamp(targetPosition.z, Z_MIN, Z_MAX); + targetPosition.y = Y_PELVIS; + + wasMovingLastFrame = true; + isMoving = true; + forcedMove = false; + } else if (isMoving) { + + var targetVector = Vec3.subtract(targetPosition, Avatar.position); + var distance = Vec3.length(targetVector); + if (distance <= avatarVelocity * deltaTime) { + Avatar.position = targetPosition; + stopWalking(); + } else { + var direction = 
Vec3.normalize(targetVector); + //Figure out if we should be slowing down + var t = avatarVelocity / avatarAcceleration; + var d = (avatarVelocity / 2.0) * t; + if (distance < d) { + avatarVelocity -= avatarAcceleration * deltaTime; + if (avatarVelocity <= 0) { + stopWalking(); + } + } else { + avatarVelocity += avatarAcceleration * deltaTime; + if (avatarVelocity > avatarMaxVelocity) avatarVelocity = avatarMaxVelocity; + } + Avatar.position = Vec3.sum(Avatar.position, Vec3.multiply(direction, avatarVelocity * deltaTime)); + Avatar.orientation = Quat.mix(Avatar.orientation, targetOrientation, TURN_RATE); + + wasMovingLastFrame = true; + } - Avatar.position = Vec3.sum(Avatar.position, Vec3.multiply(direction, avatarVelocity * deltaTime)); - Avatar.orientation = Quat.mix(Avatar.orientation, targetOrientation, TURN_RATE); - - wasMovingLastFrame = true; - } - } } function handleTalking() { - if (Math.random() < CHANCE_OF_SOUND) { - playRandomSound(); - } + if (Math.random() < CHANCE_OF_SOUND) { + playRandomSound(); + } } function changePelvisHeight(newHeight) { - var newPosition = Avatar.position; - newPosition.y = newHeight; - Avatar.position = newPosition; + var newPosition = Avatar.position; + newPosition.y = newHeight; + Avatar.position = newPosition; } function updateBehavior(deltaTime) { - cumulativeTime += deltaTime; - if (AvatarList.containsAvatarWithDisplayName("mrdj")) { - if (wasMovingLastFrame) { - isMoving = false; + if (AvatarList.containsAvatarWithDisplayName("mrdj")) { + if (wasMovingLastFrame) { + isMoving = false; + } + + // we have a DJ, shouldn't we be dancing? + jumpWithLoudness(deltaTime); + } else { + + // no DJ, let's just chill on the dancefloor - randomly walking and talking + handleHeadTurn(); + handleAnimation(deltaTime); + handleWalking(deltaTime); + handleTalking(); } - - // we have a DJ, shouldn't we be dancing? 
- jumpWithLoudness(deltaTime); - } else { - - // no DJ, let's just chill on the dancefloor - randomly walking and talking - handleHeadTurn(); - handleWalking(deltaTime); - handleTalking(); - } } Script.update.connect(updateBehavior); \ No newline at end of file From a2945eaa96bd36b3cc5e38f3a527f2f7a1908b5a Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 12:52:35 -0700 Subject: [PATCH 53/62] qaudiooutput overflow check enabled by default --- interface/src/Menu.cpp | 2 +- interface/src/Menu.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index 3279aff249..ca74ad8aa6 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -606,7 +606,7 @@ Menu::Menu() : appInstance->getAudio(), SLOT(toggleStatsShowInjectedStreams())); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, true); + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, false); addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel, Qt::CTRL | Qt::SHIFT | Qt::Key_V, diff --git a/interface/src/Menu.h b/interface/src/Menu.h index cba9703e32..35e3e75d6a 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -353,7 +353,7 @@ namespace MenuOption { const QString DisableActivityLogger = "Disable Activity Logger"; const QString DisableAutoAdjustLOD = "Disable Automatically Adjusting LOD"; const QString DisableNackPackets = "Disable NACK Packets"; - const QString DisableQAudioOutputOverflowCheck = "Disable Audio Output Overflow Check"; + const QString DisableQAudioOutputOverflowCheck = "Disable Audio Output Device Overflow Check"; const QString DisplayFrustum = "Display Frustum"; const QString DisplayHands = "Display Hands"; const QString DisplayHandTargets = "Display Hand Targets"; From 055a97fc2d634db805a5730c22ae783a71fe8cef Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Wed, 30 Jul 2014 
14:18:38 -0700 Subject: [PATCH 54/62] Added facial animation to the procedural bot --- examples/proceduralBot.js | 288 ++++++++++++++++++++++++++++++++++---- 1 file changed, 263 insertions(+), 25 deletions(-) diff --git a/examples/proceduralBot.js b/examples/proceduralBot.js index d8218c8341..f02ca934dc 100644 --- a/examples/proceduralBot.js +++ b/examples/proceduralBot.js @@ -27,10 +27,10 @@ function printVector(string, vector) { print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); } -var CHANCE_OF_MOVING = 0.01; -var CHANCE_OF_SOUND = 0; +var CHANCE_OF_MOVING = 0.00; +var CHANCE_OF_SOUND = 0.005; var CHANCE_OF_HEAD_TURNING = 0.05; -var CHANCE_OF_BIG_MOVE = 0.1; +var CHANCE_OF_BIG_MOVE = 1.0; var isMoving = false; var isTurningHead = false; @@ -45,12 +45,13 @@ var MAX_PELVIS_DELTA = 2.5; var AVATAR_PELVIS_HEIGHT = 0.75; -var MOVE_RANGE_SMALL = 10.0; +var MOVE_RANGE_SMALL = 3.0; +var MOVE_RANGE_BIG = 10.0; var TURN_RANGE = 70.0; var STOP_TOLERANCE = 0.05; var MOVE_RATE = 0.05; var TURN_RATE = 0.2; -var PITCH_RATE = 0.10; +var PITCH_RATE = 0.05; var PITCH_RANGE = 20.0; //var firstPosition = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) }; @@ -110,33 +111,254 @@ basePelvisHeight = firstPosition.y; printVector("New dancer, position = ", Avatar.position); function loadSounds() { - var sound_filenames = ["AB1.raw", "Anchorman2.raw", "B1.raw", "B1.raw", "Bale1.raw", "Bandcamp.raw", + var sound_filenames = ["AB1.raw", "Anchorman2.raw", "B1.raw", "B1.raw", "Bale1.raw", "Bandcamp.raw", "Big1.raw", "Big2.raw", "Brian1.raw", "Buster1.raw", "CES1.raw", "CES2.raw", "CES3.raw", "CES4.raw", "Carrie1.raw", "Carrie3.raw", "Charlotte1.raw", "EN1.raw", "EN2.raw", "EN3.raw", "Eugene1.raw", "Francesco1.raw", "Italian1.raw", "Japanese1.raw", "Leigh1.raw", "Lucille1.raw", "Lucille2.raw", "MeanGirls.raw", "Murray2.raw", "Nigel1.raw", "PennyLane.raw", "Pitt1.raw", "Ricardo.raw", "SN.raw", "Sake1.raw", "Samantha1.raw", "Samantha2.raw", 
"Spicoli1.raw", "Supernatural.raw", "Swearengen1.raw", "TheDude.raw", "Tony.raw", "Triumph1.raw", "Uma1.raw", "Walken1.raw", "Walken2.raw", "Z1.raw", "Z2.raw" - ]; - - var SOUND_BASE_URL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Cocktail+Party+Snippets/Raws/"; - - for (var i = 0; i < sound_filenames.length; i++) { - sounds.push(new Sound(SOUND_BASE_URL + sound_filenames[i])); - } + ]; + + var footstep_filenames = ["FootstepW2Left-12db.wav", "FootstepW2Right-12db.wav", "FootstepW3Left-12db.wav", "FootstepW3Right-12db.wav", + "FootstepW5Left-12db.wav", "FootstepW5Right-12db.wav"]; + + var SOUND_BASE_URL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Cocktail+Party+Snippets/Raws/"; + + var FOOTSTEP_BASE_URL = "http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/Footsteps/"; + + for (var i = 0; i < sound_filenames.length; i++) { + sounds.push(new Sound(SOUND_BASE_URL + sound_filenames[i])); + } + + for (var i = 0; i < footstep_filenames.length; i++) { + footstepSounds.push(new Sound(FOOTSTEP_BASE_URL + footstep_filenames[i])); + } } var sounds = []; +var footstepSounds = []; loadSounds(); function playRandomSound() { - if (!Agent.isPlayingAvatarSound) { - var whichSound = Math.floor((Math.random() * sounds.length) % sounds.length); - Agent.playAvatarSound(sounds[whichSound]); - } + if (!Agent.isPlayingAvatarSound) { + var whichSound = Math.floor((Math.random() * sounds.length)); + Agent.playAvatarSound(sounds[whichSound]); + } } +function playRandomFootstepSound() { + + var whichSound = Math.floor((Math.random() * footstepSounds.length)); + var options = new AudioInjectionOptions(); + options.position = Avatar.position; + options.volume = 1.0; + Audio.playSound(footstepSounds[whichSound], options); + +} + +// ************************************ Facial Animation ********************************** +var allBlendShapes = []; +var targetBlendCoefficient = []; +var currentBlendCoefficient = []; + +//Blendshape constructor 
+function addBlendshapeToPose(pose, shapeIndex, val) { + var index = pose.blendShapes.length; + pose.blendShapes[index] = {shapeIndex: shapeIndex, val: val }; +} +//The mood of the avatar, determines face. 0 = happy, 1 = angry, 2 = sad. +var avatarMood = 0; +var currentExpression = -1; +//Face pose constructor +var happyPoses = []; + +happyPoses[0] = {blendShapes: []}; +addBlendshapeToPose(happyPoses[0], 28, 0.7); //MouthSmile_L +addBlendshapeToPose(happyPoses[0], 29, 0.7); //MouthSmile_R + +happyPoses[1] = {blendShapes: []}; +addBlendshapeToPose(happyPoses[1], 28, 1.0); //MouthSmile_L +addBlendshapeToPose(happyPoses[1], 29, 1.0); //MouthSmile_R +addBlendshapeToPose(happyPoses[1], 21, 0.2); //JawOpen + +happyPoses[2] = {blendShapes: []}; +addBlendshapeToPose(happyPoses[2], 28, 1.0); //MouthSmile_L +addBlendshapeToPose(happyPoses[2], 29, 1.0); //MouthSmile_R +addBlendshapeToPose(happyPoses[2], 21, 0.5); //JawOpen +addBlendshapeToPose(happyPoses[2], 46, 1.0); //CheekSquint_L +addBlendshapeToPose(happyPoses[2], 47, 1.0); //CheekSquint_R +addBlendshapeToPose(happyPoses[2], 17, 1.0); //BrowsU_L +addBlendshapeToPose(happyPoses[2], 18, 1.0); //BrowsU_R + +var angryPoses = []; + +angryPoses[0] = {blendShapes: []}; +addBlendshapeToPose(angryPoses[0], 26, 0.6); //MouthFrown_L +addBlendshapeToPose(angryPoses[0], 27, 0.6); //MouthFrown_R +addBlendshapeToPose(angryPoses[0], 14, 0.6); //BrowsD_L +addBlendshapeToPose(angryPoses[0], 15, 0.6); //BrowsD_R + +angryPoses[1] = {blendShapes: []}; +addBlendshapeToPose(angryPoses[1], 26, 0.9); //MouthFrown_L +addBlendshapeToPose(angryPoses[1], 27, 0.9); //MouthFrown_R +addBlendshapeToPose(angryPoses[1], 14, 0.9); //BrowsD_L +addBlendshapeToPose(angryPoses[1], 15, 0.9); //BrowsD_R + +angryPoses[2] = {blendShapes: []}; +addBlendshapeToPose(angryPoses[2], 26, 1.0); //MouthFrown_L +addBlendshapeToPose(angryPoses[2], 27, 1.0); //MouthFrown_R +addBlendshapeToPose(angryPoses[2], 14, 1.0); //BrowsD_L +addBlendshapeToPose(angryPoses[2], 15, 1.0); 
//BrowsD_R +addBlendshapeToPose(angryPoses[2], 21, 0.5); //JawOpen +addBlendshapeToPose(angryPoses[2], 46, 1.0); //CheekSquint_L +addBlendshapeToPose(angryPoses[2], 47, 1.0); //CheekSquint_R + +var sadPoses = []; + +sadPoses[0] = {blendShapes: []}; +addBlendshapeToPose(sadPoses[0], 26, 0.6); //MouthFrown_L +addBlendshapeToPose(sadPoses[0], 27, 0.6); //MouthFrown_R +addBlendshapeToPose(sadPoses[0], 16, 0.2); //BrowsU_C +addBlendshapeToPose(sadPoses[0], 2, 0.6); //EyeSquint_L +addBlendshapeToPose(sadPoses[0], 3, 0.6); //EyeSquint_R + +sadPoses[1] = {blendShapes: []}; +addBlendshapeToPose(sadPoses[1], 26, 0.9); //MouthFrown_L +addBlendshapeToPose(sadPoses[1], 27, 0.9); //MouthFrown_R +addBlendshapeToPose(sadPoses[1], 16, 0.6); //BrowsU_C +addBlendshapeToPose(sadPoses[1], 2, 0.9); //EyeSquint_L +addBlendshapeToPose(sadPoses[1], 3, 0.9); //EyeSquint_R + +sadPoses[2] = {blendShapes: []}; +addBlendshapeToPose(sadPoses[2], 26, 1.0); //MouthFrown_L +addBlendshapeToPose(sadPoses[2], 27, 1.0); //MouthFrown_R +addBlendshapeToPose(sadPoses[2], 16, 0.1); //BrowsU_C +addBlendshapeToPose(sadPoses[2], 2, 1.0); //EyeSquint_L +addBlendshapeToPose(sadPoses[2], 3, 1.0); //EyeSquint_R +addBlendshapeToPose(sadPoses[2], 21, 0.3); //JawOpen + +var facePoses = []; +facePoses[0] = happyPoses; +facePoses[1] = angryPoses; +facePoses[2] = sadPoses; + + +function addBlendShape(s) { + allBlendShapes[allBlendShapes.length] = s; +} + +//It is imperative that the following blendshapes are all present and are in the correct order +addBlendShape("EyeBlink_L"); //0 +addBlendShape("EyeBlink_R"); //1 +addBlendShape("EyeSquint_L"); //2 +addBlendShape("EyeSquint_R"); //3 +addBlendShape("EyeDown_L"); //4 +addBlendShape("EyeDown_R"); //5 +addBlendShape("EyeIn_L"); //6 +addBlendShape("EyeIn_R"); //7 +addBlendShape("EyeOpen_L"); //8 +addBlendShape("EyeOpen_R"); //9 +addBlendShape("EyeOut_L"); //10 +addBlendShape("EyeOut_R"); //11 +addBlendShape("EyeUp_L"); //12 +addBlendShape("EyeUp_R"); //13 
+addBlendShape("BrowsD_L"); //14 +addBlendShape("BrowsD_R"); //15 +addBlendShape("BrowsU_C"); //16 +addBlendShape("BrowsU_L"); //17 +addBlendShape("BrowsU_R"); //18 +addBlendShape("JawFwd"); //19 +addBlendShape("JawLeft"); //20 +addBlendShape("JawOpen"); //21 +addBlendShape("JawChew"); //22 +addBlendShape("JawRight"); //23 +addBlendShape("MouthLeft"); //24 +addBlendShape("MouthRight"); //25 +addBlendShape("MouthFrown_L"); //26 +addBlendShape("MouthFrown_R"); //27 +addBlendShape("MouthSmile_L"); //28 +addBlendShape("MouthSmile_R"); //29 +addBlendShape("MouthDimple_L"); //30 +addBlendShape("MouthDimple_R"); //31 +addBlendShape("LipsStretch_L"); //32 +addBlendShape("LipsStretch_R"); //33 +addBlendShape("LipsUpperClose"); //34 +addBlendShape("LipsLowerClose"); //35 +addBlendShape("LipsUpperUp"); //36 +addBlendShape("LipsLowerDown"); //37 +addBlendShape("LipsUpperOpen"); //38 +addBlendShape("LipsLowerOpen"); //39 +addBlendShape("LipsFunnel"); //40 +addBlendShape("LipsPucker"); //41 +addBlendShape("ChinLowerRaise"); //42 +addBlendShape("ChinUpperRaise"); //43 +addBlendShape("Sneer"); //44 +addBlendShape("Puff"); //45 +addBlendShape("CheekSquint_L"); //46 +addBlendShape("CheekSquint_R"); //47 + +for (var i = 0; i < allBlendShapes.length; i++) { + targetBlendCoefficient[i] = 0; + currentBlendCoefficient[i] = 0; +} + +function setRandomExpression() { + + //Clear all expression data for current expression + if (currentExpression != -1) { + var expression = facePoses[avatarMood][currentExpression]; + for (var i = 0; i < expression.blendShapes.length; i++) { + targetBlendCoefficient[expression.blendShapes[i].shapeIndex] = 0.0; + } + } + //Get a new current expression + currentExpression = Math.floor(Math.random() * facePoses[avatarMood].length); + var expression = facePoses[avatarMood][currentExpression]; + for (var i = 0; i < expression.blendShapes.length; i++) { + targetBlendCoefficient[expression.blendShapes[i].shapeIndex] = expression.blendShapes[i].val; + } +} + +var 
expressionChangeSpeed = 0.1; +function updateBlendShapes(deltaTime) { + + for (var i = 0; i < allBlendShapes.length; i++) { + currentBlendCoefficient[i] += (targetBlendCoefficient[i] - currentBlendCoefficient[i]) * expressionChangeSpeed; + Avatar.setBlendshape(allBlendShapes[i], currentBlendCoefficient[i]); + } +} + +var BLINK_SPEED = 0.15; +var CHANCE_TO_BLINK = 0.0025; +var MAX_BLINK = 0.85; +var blink = 0.0; +var isBlinking = false; +function updateBlinking(deltaTime) { + if (isBlinking == false) { + if (Math.random() < CHANCE_TO_BLINK) { + isBlinking = true; + } else { + blink -= BLINK_SPEED; + if (blink < 0.0) blink = 0.0; + } + } else { + blink += BLINK_SPEED; + if (blink > MAX_BLINK) { + blink = MAX_BLINK; + isBlinking = false; + } + } + + currentBlendCoefficient[0] = blink; + currentBlendCoefficient[1] = blink; + targetBlendCoefficient[0] = blink; + targetBlendCoefficient[1] = blink; +} + +// ************************************************************************************* + //Procedural walk animation using two keyframes //We use a separate array for front and back joints //Pitch, yaw, and roll for the joints @@ -248,24 +470,33 @@ var avatarMaxVelocity = 1.4; function handleAnimation(deltaTime) { + updateBlinking(deltaTime); + updateBlendShapes(deltaTime); + + if (Math.random() < 0.01) { + setRandomExpression(); + } + if (avatarVelocity == 0.0) { walkTime = 0.0; currentFrame = 0; } else { - walkTime += avatarVelocity * deltaTime; - if (walkTime > walkWheelRate) { - walkTime = 0.0; - currentFrame++; - if (currentFrame > 3) { + walkTime += avatarVelocity * deltaTime; + if (walkTime > walkWheelRate) { + walkTime = 0.0; + currentFrame++; + if (currentFrame % 2 == 1) { + playRandomFootstepSound(); + } + if (currentFrame > 3) { currentFrame = 0; } } } - var frame = walkKeyFrames[currentFrame]; var walkInterp = walkTime / walkWheelRate; - var animInterp = avatarVelocity / (avatarMaxVelocity / 2.0); + var animInterp = avatarVelocity / (avatarMaxVelocity / 
1.3); if (animInterp > 1.0) animInterp = 1.0; for (var i = 0; i < JOINT_ORDER.length; i++) { @@ -325,13 +556,20 @@ function handleWalking(deltaTime) { if (forcedMove || (!isMoving && Math.random() < CHANCE_OF_MOVING)) { // Set new target location + var moveRange; + if (Math.random() < CHANCE_OF_BIG_MOVE) { + moveRange = MOVE_RANGE_BIG; + } else { + moveRange = MOVE_RANGE_SMALL; + } + //Keep trying new orientations if the desired target location is out of bounds var attempts = 0; do { targetOrientation = Quat.multiply(Avatar.orientation, Quat.angleAxis(getRandomFloat(-TURN_RANGE, TURN_RANGE), { x:0, y:1, z:0 })); var front = Quat.getFront(targetOrientation); - targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, MOVE_RANGE_SMALL))); + targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, moveRange))); } while ((targetPosition.x < X_MIN || targetPosition.x > X_MAX || targetPosition.z < Z_MIN || targetPosition.z > Z_MAX) && attempts < MAX_ATTEMPTS); From 66c1aba7f7de493965a2bf23a236f89533f726ef Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Wed, 30 Jul 2014 15:23:03 -0700 Subject: [PATCH 55/62] Animation improvements, exposed roll/yaw head control. 
--- examples/proceduralBot.js | 78 ++++++++++++++++++++++-------- libraries/avatars/src/AvatarData.h | 10 +++- 2 files changed, 66 insertions(+), 22 deletions(-) diff --git a/examples/proceduralBot.js b/examples/proceduralBot.js index f02ca934dc..9b10b28243 100644 --- a/examples/proceduralBot.js +++ b/examples/proceduralBot.js @@ -27,9 +27,9 @@ function printVector(string, vector) { print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); } -var CHANCE_OF_MOVING = 0.00; -var CHANCE_OF_SOUND = 0.005; -var CHANCE_OF_HEAD_TURNING = 0.05; +var CHANCE_OF_MOVING = 0.000; +var CHANCE_OF_SOUND = 0;//0.005; +var CHANCE_OF_HEAD_TURNING = 0.01; var CHANCE_OF_BIG_MOVE = 1.0; var isMoving = false; @@ -51,8 +51,9 @@ var TURN_RANGE = 70.0; var STOP_TOLERANCE = 0.05; var MOVE_RATE = 0.05; var TURN_RATE = 0.2; -var PITCH_RATE = 0.05; -var PITCH_RANGE = 20.0; +var HEAD_TURN_RATE = 0.05; +var PITCH_RANGE = 15.0; +var YAW_RANGE = 35.0; //var firstPosition = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) }; var firstPosition = { x: 0.5, y: Y_PELVIS, z: 0.5 }; @@ -60,6 +61,7 @@ var targetPosition = { x: 0, y: 0, z: 0 }; var targetOrientation = { x: 0, y: 0, z: 0, w: 0 }; var currentOrientation = { x: 0, y: 0, z: 0, w: 0 }; var targetHeadPitch = 0.0; +var targetHeadYaw = 0.0; var basePelvisHeight = 0.0; var pelvisOscillatorPosition = 0.0; @@ -93,8 +95,8 @@ if (botNumber <= 20) { newBodyFilePrefix = "bot" + botNumber; } - newFaceFilePrefix = "ron"; - newBodyFilePrefix = "bot" + 63; +// newFaceFilePrefix = "ron"; +// newBodyFilePrefix = "bot" + 63; // set the face model fst using the bot number // there is no need to change the body model - we're using the default @@ -379,6 +381,10 @@ var JOINT_R_FOREARM = 16; var JOINT_L_ARM = 39; var JOINT_L_FOREARM = 40; var JOINT_SPINE = 11; +var JOINT_R_FOOT = 3; +var JOINT_L_FOOT = 8; +var JOINT_R_TOE = 4; +var JOINT_L_TOE = 9; // ******************************* Animation Is Defined Below 
************************************* @@ -389,15 +395,29 @@ for (var i = 0; i < NUM_FRAMES; i++) { middleAngles[i] = []; } //Joint order for actual joint mappings, should be interleaved R,L,R,L,...S,S,S for R = right, L = left, S = single -var JOINT_ORDER = [JOINT_R_HIP, JOINT_L_HIP, JOINT_R_KNEE, JOINT_L_KNEE, JOINT_R_ARM, JOINT_L_ARM, JOINT_R_FOREARM, JOINT_L_FOREARM, JOINT_SPINE]; - -//Joint indices for joints that are duplicated, such as arms, It must match JOINT_ORDER +var JOINT_ORDER = []; +//*** right / left joints *** var HIP = 0; +JOINT_ORDER.push(JOINT_R_HIP); +JOINT_ORDER.push(JOINT_L_HIP); var KNEE = 1; +JOINT_ORDER.push(JOINT_R_KNEE); +JOINT_ORDER.push(JOINT_L_KNEE); var ARM = 2; +JOINT_ORDER.push(JOINT_R_ARM); +JOINT_ORDER.push(JOINT_L_ARM); var FOREARM = 3; -//Joint indices for single joints +JOINT_ORDER.push(JOINT_R_FOREARM); +JOINT_ORDER.push(JOINT_L_FOREARM); +var FOOT = 4; +JOINT_ORDER.push(JOINT_R_FOOT); +JOINT_ORDER.push(JOINT_L_FOOT); +var TOE = 5; +JOINT_ORDER.push(JOINT_R_TOE); +JOINT_ORDER.push(JOINT_L_TOE); +//*** middle joints *** var SPINE = 0; +JOINT_ORDER.push(JOINT_SPINE); //We have to store the angles so we can invert yaw and roll when making the animation //symmetrical @@ -408,11 +428,15 @@ rightAngles[0][HIP] = [30.0, 0.0, 8.0]; rightAngles[0][KNEE] = [-15.0, 0.0, 0.0]; rightAngles[0][ARM] = [85.0, -25.0, 0.0]; rightAngles[0][FOREARM] = [0.0, 0.0, -15.0]; +rightAngles[0][FOOT] = [0.0, 0.0, 0.0]; +rightAngles[0][TOE] = [0.0, 0.0, 0.0]; leftAngles[0][HIP] = [-15, 0.0, 8.0]; -leftAngles[0][KNEE] = [-28, 0.0, 0.0]; +leftAngles[0][KNEE] = [-26, 0.0, 0.0]; leftAngles[0][ARM] = [85.0, 20.0, 0.0]; leftAngles[0][FOREARM] = [10.0, 0.0, -25.0]; +leftAngles[0][FOOT] = [-13.0, 0.0, 0.0]; +leftAngles[0][TOE] = [34.0, 0.0, 0.0]; middleAngles[0][SPINE] = [0.0, -15.0, 5.0]; @@ -421,11 +445,15 @@ rightAngles[1][HIP] = [6.0, 0.0, 8.0]; rightAngles[1][KNEE] = [-12.0, 0.0, 0.0]; rightAngles[1][ARM] = [85.0, 0.0, 0.0]; rightAngles[1][FOREARM] = [0.0, 
0.0, -15.0]; +rightAngles[1][FOOT] = [6.0, -8.0, 0.0]; +rightAngles[1][TOE] = [0.0, 0.0, 0.0]; leftAngles[1][HIP] = [10.0, 0.0, 8.0]; -leftAngles[1][KNEE] = [-55.0, 0.0, 0.0]; +leftAngles[1][KNEE] = [-60.0, 0.0, 0.0]; leftAngles[1][ARM] = [85.0, 0.0, 0.0]; leftAngles[1][FOREARM] = [0.0, 0.0, -15.0]; +leftAngles[1][FOOT] = [0.0, 0.0, 0.0]; +leftAngles[1][TOE] = [0.0, 0.0, 0.0]; middleAngles[1][SPINE] = [0.0, 0.0, 0.0]; @@ -444,11 +472,15 @@ rightQuats[HIP] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 7.0); rightQuats[KNEE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0); rightQuats[ARM] = Quat.fromPitchYawRollDegrees(85.0, 0.0, 0.0); rightQuats[FOREARM] = Quat.fromPitchYawRollDegrees(0.0, 0.0, -10.0); +rightQuats[FOOT] = Quat.fromPitchYawRollDegrees(0.0, -8.0, 0.0); +rightQuats[TOE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0); leftQuats[HIP] = Quat.fromPitchYawRollDegrees(0, 0.0, -7.0); leftQuats[KNEE] = Quat.fromPitchYawRollDegrees(0, 0.0, 0.0); leftQuats[ARM] = Quat.fromPitchYawRollDegrees(85.0, 0.0, 0.0); leftQuats[FOREARM] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 10.0); +leftQuats[FOOT] = Quat.fromPitchYawRollDegrees(0.0, 8.0, 0.0); +leftQuats[TOE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0); middleQuats[SPINE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0); @@ -477,12 +509,12 @@ function handleAnimation(deltaTime) { setRandomExpression(); } - if (avatarVelocity == 0.0) { - walkTime = 0.0; - currentFrame = 0; - } else { - walkTime += avatarVelocity * deltaTime; - if (walkTime > walkWheelRate) { + if (avatarVelocity == 0.0) { + walkTime = 0.0; + currentFrame = 0; + } else { + walkTime += avatarVelocity * deltaTime; + if (walkTime > walkWheelRate) { walkTime = 0.0; currentFrame++; if (currentFrame % 2 == 1) { @@ -493,6 +525,7 @@ function handleAnimation(deltaTime) { } } } + var frame = walkKeyFrames[currentFrame]; var walkInterp = walkTime / walkWheelRate; @@ -536,10 +569,13 @@ var wasMovingLastFrame = false; function handleHeadTurn() { if (!isTurningHead && 
(Math.random() < CHANCE_OF_HEAD_TURNING)) { targetHeadPitch = getRandomFloat(-PITCH_RANGE, PITCH_RANGE); + targetHeadYaw = getRandomFloat(-YAW_RANGE, YAW_RANGE); isTurningHead = true; } else { - Avatar.headPitch = Avatar.headPitch + (targetHeadPitch - Avatar.headPitch) * PITCH_RATE; - if (Math.abs(Avatar.headPitch - targetHeadPitch) < STOP_TOLERANCE) { + Avatar.headPitch = Avatar.headPitch + (targetHeadPitch - Avatar.headPitch) * HEAD_TURN_RATE; + Avatar.headYaw = Avatar.headYaw + (targetHeadYaw - Avatar.headYaw) * HEAD_TURN_RATE; + if (Math.abs(Avatar.headPitch - targetHeadPitch) < STOP_TOLERANCE && + Math.abs(Avatar.headYaw - targetHeadYaw) < STOP_TOLERANCE) { isTurningHead = false; } } diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 008aecc817..8533b8b0e8 100755 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -120,6 +120,8 @@ class AvatarData : public QObject { Q_PROPERTY(glm::quat orientation READ getOrientation WRITE setOrientation) Q_PROPERTY(glm::quat headOrientation READ getHeadOrientation WRITE setHeadOrientation) Q_PROPERTY(float headPitch READ getHeadPitch WRITE setHeadPitch) + Q_PROPERTY(float headYaw READ getHeadYaw WRITE setHeadYaw) + Q_PROPERTY(float headRoll READ getHeadRoll WRITE setHeadRoll) Q_PROPERTY(float audioLoudness READ getAudioLoudness WRITE setAudioLoudness) Q_PROPERTY(float audioAverageLoudness READ getAudioAverageLoudness WRITE setAudioAverageLoudness) @@ -171,7 +173,13 @@ public: // access to Head().set/getMousePitch (degrees) float getHeadPitch() const { return _headData->getBasePitch(); } - void setHeadPitch(float value) { _headData->setBasePitch(value); }; + void setHeadPitch(float value) { _headData->setBasePitch(value); } + + float getHeadYaw() const { return _headData->getBaseYaw(); } + void setHeadYaw(float value) { _headData->setBaseYaw(value); } + + float getHeadRoll() const { return _headData->getBaseRoll(); } + void setHeadRoll(float value) { 
_headData->setBaseRoll(value); } // access to Head().set/getAverageLoudness float getAudioLoudness() const { return _headData->getAudioLoudness(); } From daeb2a898dfea1b046d969fad0e80134206452d9 Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 15:35:40 -0700 Subject: [PATCH 56/62] added staticDesiredJitterBufferFrames feature; AudioMixer crashing moved some callbacks from Application::updateAvatar() to Application::update() --- assignment-client/src/audio/AudioMixer.cpp | 65 +++++++++++-------- assignment-client/src/audio/AudioMixer.h | 2 + .../src/audio/AudioMixerClientData.cpp | 8 +-- .../src/audio/AvatarAudioStream.cpp | 4 +- .../src/audio/AvatarAudioStream.h | 2 +- .../resources/web/settings/describe.json | 32 +++++---- interface/src/Application.cpp | 38 ++++++----- interface/src/Audio.cpp | 4 -- interface/src/Audio.h | 5 +- interface/src/ui/PreferencesDialog.cpp | 5 +- libraries/audio/src/InboundAudioStream.cpp | 31 ++++----- libraries/audio/src/InboundAudioStream.h | 12 ++-- libraries/audio/src/InjectedAudioStream.cpp | 4 +- libraries/audio/src/InjectedAudioStream.h | 2 +- libraries/audio/src/PositionalAudioStream.cpp | 5 +- libraries/audio/src/PositionalAudioStream.h | 3 +- 16 files changed, 122 insertions(+), 100 deletions(-) diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index d81b8336f0..c1d75af8ba 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -68,6 +68,7 @@ void attachNewNodeDataToNode(Node *newNode) { } bool AudioMixer::_useDynamicJitterBuffers = false; +int AudioMixer::_staticDesiredJitterBufferFrames = 0; int AudioMixer::_maxFramesOverDesired = 0; AudioMixer::AudioMixer(const QByteArray& packet) : @@ -436,48 +437,58 @@ void AudioMixer::run() { if (settingsObject.contains(AUDIO_GROUP_KEY)) { QJsonObject audioGroupObject = settingsObject[AUDIO_GROUP_KEY].toObject(); - const QString UNATTENUATED_ZONE_KEY = "unattenuated-zone"; 
- - QString unattenuatedZoneString = audioGroupObject[UNATTENUATED_ZONE_KEY].toString(); - if (!unattenuatedZoneString.isEmpty()) { - QStringList zoneStringList = unattenuatedZoneString.split(','); - - glm::vec3 sourceCorner(zoneStringList[0].toFloat(), zoneStringList[1].toFloat(), zoneStringList[2].toFloat()); - glm::vec3 sourceDimensions(zoneStringList[3].toFloat(), zoneStringList[4].toFloat(), zoneStringList[5].toFloat()); - - glm::vec3 listenerCorner(zoneStringList[6].toFloat(), zoneStringList[7].toFloat(), zoneStringList[8].toFloat()); - glm::vec3 listenerDimensions(zoneStringList[9].toFloat(), zoneStringList[10].toFloat(), zoneStringList[11].toFloat()); - - _sourceUnattenuatedZone = new AABox(sourceCorner, sourceDimensions); - _listenerUnattenuatedZone = new AABox(listenerCorner, listenerDimensions); - - glm::vec3 sourceCenter = _sourceUnattenuatedZone->calcCenter(); - glm::vec3 destinationCenter = _listenerUnattenuatedZone->calcCenter(); - - qDebug() << "There is an unattenuated zone with source center at" - << QString("%1, %2, %3").arg(sourceCenter.x).arg(sourceCenter.y).arg(sourceCenter.z); - qDebug() << "Buffers inside this zone will not be attenuated inside a box with center at" - << QString("%1, %2, %3").arg(destinationCenter.x).arg(destinationCenter.y).arg(destinationCenter.z); - } - // check the payload to see if we have asked for dynamicJitterBuffer support - const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "dynamic-jitter-buffer"; + const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "A-dynamic-jitter-buffer"; bool shouldUseDynamicJitterBuffers = audioGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool(); if (shouldUseDynamicJitterBuffers) { qDebug() << "Enable dynamic jitter buffers."; _useDynamicJitterBuffers = true; } else { - qDebug() << "Dynamic jitter buffers disabled, using old behavior."; + qDebug() << "Dynamic jitter buffers disabled."; _useDynamicJitterBuffers = false; } - const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "max-frames-over-desired"; 
bool ok; + + const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "B-desired-jitter-buffer-frames"; + _staticDesiredJitterBufferFrames = audioGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok); + if (!ok) { + _staticDesiredJitterBufferFrames = DEFAULT_DESIRED_JITTER_BUFFER_FRAMES; + } + qDebug() << "Static desired jitter buffer frames:" << _staticDesiredJitterBufferFrames; + + const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "C-max-frames-over-desired"; _maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok); if (!ok) { _maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED; } qDebug() << "Max frames over desired:" << _maxFramesOverDesired; + + + + const QString UNATTENUATED_ZONE_KEY = "D-unattenuated-zone"; + + QString unattenuatedZoneString = audioGroupObject[UNATTENUATED_ZONE_KEY].toString(); + if (!unattenuatedZoneString.isEmpty()) { + QStringList zoneStringList = unattenuatedZoneString.split(','); + + glm::vec3 sourceCorner(zoneStringList[0].toFloat(), zoneStringList[1].toFloat(), zoneStringList[2].toFloat()); + glm::vec3 sourceDimensions(zoneStringList[3].toFloat(), zoneStringList[4].toFloat(), zoneStringList[5].toFloat()); + + glm::vec3 listenerCorner(zoneStringList[6].toFloat(), zoneStringList[7].toFloat(), zoneStringList[8].toFloat()); + glm::vec3 listenerDimensions(zoneStringList[9].toFloat(), zoneStringList[10].toFloat(), zoneStringList[11].toFloat()); + + _sourceUnattenuatedZone = new AABox(sourceCorner, sourceDimensions); + _listenerUnattenuatedZone = new AABox(listenerCorner, listenerDimensions); + + glm::vec3 sourceCenter = _sourceUnattenuatedZone->calcCenter(); + glm::vec3 destinationCenter = _listenerUnattenuatedZone->calcCenter(); + + qDebug() << "There is an unattenuated zone with source center at" + << QString("%1, %2, %3").arg(sourceCenter.x).arg(sourceCenter.y).arg(sourceCenter.z); + qDebug() << "Buffers inside this zone will not be attenuated inside a box with center at" + << QString("%1, 
%2, %3").arg(destinationCenter.x).arg(destinationCenter.y).arg(destinationCenter.z); + } } int nextFrame = 0; diff --git a/assignment-client/src/audio/AudioMixer.h b/assignment-client/src/audio/AudioMixer.h index 9aca4d3cee..83769a4209 100644 --- a/assignment-client/src/audio/AudioMixer.h +++ b/assignment-client/src/audio/AudioMixer.h @@ -38,6 +38,7 @@ public slots: void sendStatsPacket(); static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; } + static int getStaticDesiredJitterBufferFrames() { return _staticDesiredJitterBufferFrames; } static int getMaxFramesOverDesired() { return _maxFramesOverDesired; } private: @@ -62,6 +63,7 @@ private: AABox* _listenerUnattenuatedZone; static bool _useDynamicJitterBuffers; + static int _staticDesiredJitterBufferFrames; static int _maxFramesOverDesired; quint64 _lastSendAudioStreamStatsTime; diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index 79c5d6c3a8..fc6878eaa3 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -74,7 +74,8 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { bool isStereo = channelFlag == 1; _audioStreams.insert(nullUUID, - matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers(), AudioMixer::getMaxFramesOverDesired())); + matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers(), + AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired())); } else { matchingStream = _audioStreams.value(nullUUID); } @@ -87,9 +88,8 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { if (!_audioStreams.contains(streamIdentifier)) { _audioStreams.insert(streamIdentifier, - matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(), AudioMixer::getMaxFramesOverDesired())); - } else { - 
matchingStream = _audioStreams.value(streamIdentifier); + matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(), + AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired())); } } diff --git a/assignment-client/src/audio/AvatarAudioStream.cpp b/assignment-client/src/audio/AvatarAudioStream.cpp index 88a2276ddb..fcb78d7a6c 100644 --- a/assignment-client/src/audio/AvatarAudioStream.cpp +++ b/assignment-client/src/audio/AvatarAudioStream.cpp @@ -13,8 +13,8 @@ #include "AvatarAudioStream.h" -AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int maxFramesOverDesired) : - PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer, maxFramesOverDesired) +AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) : + PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired) { } diff --git a/assignment-client/src/audio/AvatarAudioStream.h b/assignment-client/src/audio/AvatarAudioStream.h index 545bee4e0a..ebad4585e0 100644 --- a/assignment-client/src/audio/AvatarAudioStream.h +++ b/assignment-client/src/audio/AvatarAudioStream.h @@ -18,7 +18,7 @@ class AvatarAudioStream : public PositionalAudioStream { public: - AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int maxFramesOverDesired); + AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired); private: // disallow copying of AvatarAudioStream objects diff --git a/domain-server/resources/web/settings/describe.json b/domain-server/resources/web/settings/describe.json index 3eff806c1a..f4920a7b50 100644 --- a/domain-server/resources/web/settings/describe.json +++ b/domain-server/resources/web/settings/describe.json @@ -3,23 +3,29 @@ "label": "Audio", 
"assignment-types": [0], "settings": { - "unattenuated-zone": { - "label": "Unattenuated Zone", - "help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)", - "placeholder": "no zone", - "default": "" - }, - "max-frames-over-desired": { - "label": "Max Frames Over Desired", - "help": "The highest number of frames an AudioMixer's ringbuffer can exceed the desired jitter buffer frames by", - "placeholder": "10", - "default": "" - }, - "dynamic-jitter-buffer": { + "A-dynamic-jitter-buffer": { "type": "checkbox", "label": "Dynamic Jitter Buffers", "help": "Dynamically buffer client audio based on perceived jitter in packet receipt timing", "default": false + }, + "B-desired-jitter-buffer-frames": { + "label": "Desired Jitter Buffer Frames", + "help": "If dynamic jitter buffers is disabled, this determines the target number of frames maintained by the AudioMixer's jitter buffers", + "placeholder": "1", + "default": "1" + }, + "C-max-frames-over-desired": { + "label": "Max Frames Over Desired", + "help": "The highest number of frames an AudioMixer's ringbuffer can exceed the desired jitter buffer frames by", + "placeholder": "10", + "default": "10" + }, + "D-unattenuated-zone": { + "label": "Unattenuated Zone", + "help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)", + "placeholder": "no zone", + "default": "" } } } diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index f4e67fea31..35be9c356d 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -1709,7 +1709,10 @@ void Application::init() { Menu::getInstance()->loadSettings(); if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) { - _audio.overrideDesiredJitterBufferFramesTo(Menu::getInstance()->getAudioJitterBufferFrames()); + _audio.setDynamicJitterBuffers(false); + 
_audio.setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames()); + } else { + _audio.setDynamicJitterBuffers(true); } _audio.setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired()); @@ -2107,21 +2110,6 @@ void Application::update(float deltaTime) { // let external parties know we're updating emit simulating(deltaTime); } -} - -void Application::updateMyAvatar(float deltaTime) { - bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings); - PerformanceWarning warn(showWarnings, "Application::updateMyAvatar()"); - - _myAvatar->update(deltaTime); - - { - // send head/hand data to the avatar mixer and voxel server - PerformanceTimer perfTimer("send"); - QByteArray packet = byteArrayWithPopulatedHeader(PacketTypeAvatarData); - packet.append(_myAvatar->toByteArray()); - controlledBroadcastToNodes(packet, NodeSet() << NodeType::AvatarMixer); - } // Update _viewFrustum with latest camera and view frustum data... // NOTE: we get this from the view frustum, to make it simpler, since the @@ -2164,16 +2152,32 @@ void Application::updateMyAvatar(float deltaTime) { } } + // send packet containing downstream audio stats to the AudioMixer { quint64 sinceLastNack = now - _lastSendDownstreamAudioStats; if (sinceLastNack > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) { _lastSendDownstreamAudioStats = now; - + QMetaObject::invokeMethod(&_audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection); } } } +void Application::updateMyAvatar(float deltaTime) { + bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings); + PerformanceWarning warn(showWarnings, "Application::updateMyAvatar()"); + + _myAvatar->update(deltaTime); + + { + // send head/hand data to the avatar mixer and voxel server + PerformanceTimer perfTimer("send"); + QByteArray packet = byteArrayWithPopulatedHeader(PacketTypeAvatarData); + packet.append(_myAvatar->toByteArray()); + controlledBroadcastToNodes(packet, 
NodeSet() << NodeType::AvatarMixer); + } +} + int Application::sendNackPackets() { if (Menu::getInstance()->isOptionChecked(MenuOption::DisableNackPackets)) { diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index b4d34c2716..57c6dd7a5f 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -751,10 +751,6 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) { } } -AudioStreamStats Audio::getDownstreamAudioStreamStats() const { - return _receivedAudioStream.getAudioStreamStats(); -} - void Audio::sendDownstreamAudioStatsPacket() { // since this function is called every second, we'll sample some of our stats here diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 99ca3a6b8b..3006446db1 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -57,8 +57,8 @@ public: virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen); virtual void startDrumSound(float volume, float frequency, float duration, float decay); - void overrideDesiredJitterBufferFramesTo(int desired) { _receivedAudioStream.overrideDesiredJitterBufferFramesTo(desired); } - void unoverrideDesiredJitterBufferFrames() { _receivedAudioStream.unoverrideDesiredJitterBufferFrames(); } + void setDynamicJitterBuffers(bool dynamicJitterBuffers) { _receivedAudioStream.setDynamicJitterBuffers(dynamicJitterBuffers); } + void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { _receivedAudioStream.setStaticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames); } void setMaxFramesOverDesired(int maxFramesOverDesired) { _receivedAudioStream.setMaxFramesOverDesired(maxFramesOverDesired); } @@ -113,7 +113,6 @@ public slots: virtual void handleAudioByteArray(const QByteArray& audioByteArray); - AudioStreamStats getDownstreamAudioStreamStats() const; void sendDownstreamAudioStatsPacket(); bool switchInputToAudioDevice(const QString& inputDeviceName); diff --git 
a/interface/src/ui/PreferencesDialog.cpp b/interface/src/ui/PreferencesDialog.cpp index 7d18ae4490..4ebd5f4c1a 100644 --- a/interface/src/ui/PreferencesDialog.cpp +++ b/interface/src/ui/PreferencesDialog.cpp @@ -243,9 +243,10 @@ void PreferencesDialog::savePreferences() { Menu::getInstance()->setAudioJitterBufferFrames(ui.audioJitterSpin->value()); if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) { - Application::getInstance()->getAudio()->overrideDesiredJitterBufferFramesTo(Menu::getInstance()->getAudioJitterBufferFrames()); + Application::getInstance()->getAudio()->setDynamicJitterBuffers(false); + Application::getInstance()->getAudio()->setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames()); } else { - Application::getInstance()->getAudio()->unoverrideDesiredJitterBufferFrames(); + Application::getInstance()->getAudio()->setDynamicJitterBuffers(true); } Menu::getInstance()->setMaxFramesOverDesired(ui.maxFramesOverDesiredSpin->value()); diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 39cc8757f4..467c88a2d4 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -13,16 +13,16 @@ #include "PacketHeaders.h" InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, - bool dynamicJitterBuffers, int maxFramesOverDesired, bool useStDevForJitterCalc) : + bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc) : _ringBuffer(numFrameSamples, false, numFramesCapacity), _lastPopSucceeded(false), _lastPopOutput(), _dynamicJitterBuffers(dynamicJitterBuffers), - _dynamicJitterBuffersOverride(false), + _staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames), _useStDevForJitterCalc(useStDevForJitterCalc), _calculatedJitterBufferFramesUsingMaxGap(0), _calculatedJitterBufferFramesUsingStDev(0), - _desiredJitterBufferFrames(1), + 
_desiredJitterBufferFrames(dynamicJitterBuffers ? 1 : staticDesiredJitterBufferFrames), _maxFramesOverDesired(maxFramesOverDesired), _isStarved(true), _hasStarted(false), @@ -49,7 +49,9 @@ void InboundAudioStream::reset() { } void InboundAudioStream::resetStats() { - _desiredJitterBufferFrames = 1; + if (_dynamicJitterBuffers) { + _desiredJitterBufferFrames = 1; + } _consecutiveNotMixedCount = 0; _starveCount = 0; _silentFramesDropped = 0; @@ -178,16 +180,15 @@ void InboundAudioStream::starved() { _starveCount++; } -void InboundAudioStream::overrideDesiredJitterBufferFramesTo(int desired) { - _dynamicJitterBuffersOverride = true; - _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(desired); -} - -void InboundAudioStream::unoverrideDesiredJitterBufferFrames() { - _dynamicJitterBuffersOverride = false; - if (!_dynamicJitterBuffers) { - _desiredJitterBufferFrames = 1; +void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) { + if (!dynamicJitterBuffers) { + _desiredJitterBufferFrames = _staticDesiredJitterBufferFrames; + } else { + if (!_dynamicJitterBuffers) { + _desiredJitterBufferFrames = 1; + } } + _dynamicJitterBuffers = dynamicJitterBuffers; } int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const { @@ -216,7 +217,7 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS _calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME); _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag(); - if (_dynamicJitterBuffers && !_dynamicJitterBuffersOverride && !_useStDevForJitterCalc) { + if (_dynamicJitterBuffers && !_useStDevForJitterCalc) { _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingMaxGap); } } @@ -229,7 +230,7 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS _calculatedJitterBufferFramesUsingStDev = 
(int)ceilf(NUM_STANDARD_DEVIATIONS * _stdev.getStDev() / USECS_PER_FRAME); _stdev.reset(); - if (_dynamicJitterBuffers && !_dynamicJitterBuffersOverride && _useStDevForJitterCalc) { + if (_dynamicJitterBuffers && _useStDevForJitterCalc) { _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingStDev); } } diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index 268b9b5e76..ac24bed2f5 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -48,13 +48,13 @@ const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30; const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100; const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10; - +const int DEFAULT_DESIRED_JITTER_BUFFER_FRAMES = 1; class InboundAudioStream : public NodeData { Q_OBJECT public: InboundAudioStream(int numFrameSamples, int numFramesCapacity, - bool dynamicJitterBuffers, int maxFramesOverDesired, + bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc = false); void reset(); @@ -72,9 +72,9 @@ public: void setToStarved(); - /// turns off dyanmic jitter buffers and sets the desired jitter buffer frames to specified value - void overrideDesiredJitterBufferFramesTo(int desired); - void unoverrideDesiredJitterBufferFrames(); + + void setDynamicJitterBuffers(bool dynamicJitterBuffers); + void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { _staticDesiredJitterBufferFrames = staticDesiredJitterBufferFrames; } /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds AudioStreamStats updateSeqHistoryAndGetAudioStreamStats(); @@ -140,7 +140,7 @@ protected: AudioRingBuffer::ConstIterator _lastPopOutput; bool _dynamicJitterBuffers; // if false, _desiredJitterBufferFrames is locked at 1 (old behavior) - bool _dynamicJitterBuffersOverride; // used for locking the 
_desiredJitterBufferFrames to some number while running + int _staticDesiredJitterBufferFrames; // if jitter buffer is dynamic, this determines what method of calculating _desiredJitterBufferFrames // if true, Philip's timegap std dev calculation is used. Otherwise, Freddy's max timegap calculation is used diff --git a/libraries/audio/src/InjectedAudioStream.cpp b/libraries/audio/src/InjectedAudioStream.cpp index c50b609be0..37190abc73 100644 --- a/libraries/audio/src/InjectedAudioStream.cpp +++ b/libraries/audio/src/InjectedAudioStream.cpp @@ -19,8 +19,8 @@ #include "InjectedAudioStream.h" -InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int maxFramesOverDesired) : - PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer, maxFramesOverDesired), +InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) : + PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired), _streamIdentifier(streamIdentifier), _radius(0.0f), _attenuationRatio(0) diff --git a/libraries/audio/src/InjectedAudioStream.h b/libraries/audio/src/InjectedAudioStream.h index d856c4cbd4..3cbfad9276 100644 --- a/libraries/audio/src/InjectedAudioStream.h +++ b/libraries/audio/src/InjectedAudioStream.h @@ -18,7 +18,7 @@ class InjectedAudioStream : public PositionalAudioStream { public: - InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int maxFramesOverDesired); + InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired); float getRadius() const { return _radius; } float getAttenuationRatio() const { return _attenuationRatio; } diff --git a/libraries/audio/src/PositionalAudioStream.cpp b/libraries/audio/src/PositionalAudioStream.cpp index 
cc6a9add90..7b407ba62c 100644 --- a/libraries/audio/src/PositionalAudioStream.cpp +++ b/libraries/audio/src/PositionalAudioStream.cpp @@ -21,9 +21,10 @@ #include #include -PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int maxFramesOverDesired) : +PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, + int staticDesiredJitterBufferFrames, int maxFramesOverDesired) : InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL, - AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers, maxFramesOverDesired), + AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired), _type(type), _position(0.0f, 0.0f, 0.0f), _orientation(0.0f, 0.0f, 0.0f, 0.0f), diff --git a/libraries/audio/src/PositionalAudioStream.h b/libraries/audio/src/PositionalAudioStream.h index 5df9972311..f99dc3a464 100644 --- a/libraries/audio/src/PositionalAudioStream.h +++ b/libraries/audio/src/PositionalAudioStream.h @@ -27,7 +27,8 @@ public: Injector }; - PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int maxFramesOverDesired); + PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, + int maxFramesOverDesired); virtual AudioStreamStats getAudioStreamStats() const; From 24be668d36720b4101e956c7d1d8f31c6fed1a3f Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 15:47:49 -0700 Subject: [PATCH 57/62] added check before pushAudioToOutput to prevent starvecount buildup --- interface/src/Audio.cpp | 4 +++- libraries/audio/src/InboundAudioStream.h | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 57c6dd7a5f..ddabb71e63 100644 --- 
a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -709,7 +709,9 @@ void Audio::handleAudioInput() { delete[] inputAudioSamples; } - pushAudioToOutput(); + if (_receivedAudioStream.getPacketReceived() > 0) { + pushAudioToOutput(); + } } void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) { diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index ac24bed2f5..ecf36bc4c1 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -108,6 +108,8 @@ public: int getSilentFramesDropped() const { return _silentFramesDropped; } int getOverflowCount() const { return _ringBuffer.getOverflowCount(); } + int getPacketReceived() const { return _incomingSequenceNumberStats.getNumReceived(); } + private: void starved(); From 8bb5c2b84c1b04070adf280d95ae5273ae8cc9b3 Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Wed, 30 Jul 2014 16:59:15 -0700 Subject: [PATCH 58/62] Small improvements to script, renamed to bot_procedural --- .../{proceduralBot.js => bot_procedural.js.} | 39 ++++++------------- 1 file changed, 11 insertions(+), 28 deletions(-) rename examples/{proceduralBot.js => bot_procedural.js.} (96%) diff --git a/examples/proceduralBot.js b/examples/bot_procedural.js. similarity index 96% rename from examples/proceduralBot.js rename to examples/bot_procedural.js. index 9b10b28243..17e54007cc 100644 --- a/examples/proceduralBot.js +++ b/examples/bot_procedural.js. 
@@ -27,8 +27,8 @@ function printVector(string, vector) { print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); } -var CHANCE_OF_MOVING = 0.000; -var CHANCE_OF_SOUND = 0;//0.005; +var CHANCE_OF_MOVING = 0.005; +var CHANCE_OF_SOUND = 0.005; var CHANCE_OF_HEAD_TURNING = 0.01; var CHANCE_OF_BIG_MOVE = 1.0; @@ -40,11 +40,11 @@ var X_MIN = 0.50; var X_MAX = 15.60; var Z_MIN = 0.50; var Z_MAX = 15.10; -var Y_PELVIS = 1.0; +var Y_FEET = 0.0; +var AVATAR_PELVIS_HEIGHT = 0.84; +var Y_PELVIS = Y_FEET + AVATAR_PELVIS_HEIGHT; var MAX_PELVIS_DELTA = 2.5; -var AVATAR_PELVIS_HEIGHT = 0.75; - var MOVE_RANGE_SMALL = 3.0; var MOVE_RANGE_BIG = 10.0; var TURN_RANGE = 70.0; @@ -71,32 +71,15 @@ function clamp(val, min, max){ return Math.max(min, Math.min(max, val)) } -// pick an integer between 1 and 100 that is not 28 for the face model for this bot -botNumber = 28; +//Array of all valid bot numbers +var validBotNumbers = []; -while (botNumber == 28) { - botNumber = getRandomInt(1, 100); -} +// right now we only use bot 63, since many other bots have messed up skeletons and LOD issues +var botNumber = 63;//getRandomInt(0, 99); -if (botNumber <= 20) { - newFaceFilePrefix = "ron"; - newBodyFilePrefix = "defaultAvatar_body" -} else { - if (botNumber <= 40) { - newFaceFilePrefix = "superhero"; - } else if (botNumber <= 60) { - newFaceFilePrefix = "amber"; - } else if (botNumber <= 80) { - newFaceFilePrefix = "ron"; - } else { - newFaceFilePrefix = "angie"; - } +var newFaceFilePrefix = "ron"; - newBodyFilePrefix = "bot" + botNumber; -} - -// newFaceFilePrefix = "ron"; -// newBodyFilePrefix = "bot" + 63; +var newBodyFilePrefix = "bot" + botNumber; // set the face model fst using the bot number // there is no need to change the body model - we're using the default From 7281eca5a4cbf7a392382e016a12dd6282563c18 Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 17:20:05 -0700 Subject: [PATCH 59/62] fixed inj crash; added statcDesired to MixedAudioStream --- 
assignment-client/src/Agent.cpp | 2 +- assignment-client/src/audio/AudioMixerClientData.cpp | 2 ++ interface/src/Audio.cpp | 2 +- libraries/audio/src/InboundAudioStream.cpp | 7 +++++++ libraries/audio/src/InboundAudioStream.h | 2 +- libraries/audio/src/MixedAudioStream.cpp | 4 ++-- libraries/audio/src/MixedAudioStream.h | 2 +- 7 files changed, 15 insertions(+), 6 deletions(-) diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index 90009636f6..096b482398 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -36,7 +36,7 @@ Agent::Agent(const QByteArray& packet) : _voxelEditSender(), _particleEditSender(), _modelEditSender(), - _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false, 0), + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false, 1, 0), _avatarHashMap() { // be the parent of the script engine so it gets moved when we do diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp index fc6878eaa3..54c723243c 100644 --- a/assignment-client/src/audio/AudioMixerClientData.cpp +++ b/assignment-client/src/audio/AudioMixerClientData.cpp @@ -90,6 +90,8 @@ int AudioMixerClientData::parseData(const QByteArray& packet) { _audioStreams.insert(streamIdentifier, matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(), AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired())); + } else { + matchingStream = _audioStreams.value(streamIdentifier); } } diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index ddabb71e63..4ed1f7aeb3 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -75,7 +75,7 @@ Audio::Audio(QObject* parent) : // slower than real time (or at least the desired sample rate). 
If you increase the size of the ring buffer, then it // this delay will slowly add up and the longer someone runs, they more delayed their audio will be. _inputRingBuffer(0), - _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, 0, true), + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, 0, 0, true), _isStereoInput(false), _averagedLatency(0.0), _lastInputLoudness(0), diff --git a/libraries/audio/src/InboundAudioStream.cpp b/libraries/audio/src/InboundAudioStream.cpp index 467c88a2d4..6ade4b17e9 100644 --- a/libraries/audio/src/InboundAudioStream.cpp +++ b/libraries/audio/src/InboundAudioStream.cpp @@ -191,6 +191,13 @@ void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) { _dynamicJitterBuffers = dynamicJitterBuffers; } +void InboundAudioStream::setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { + _staticDesiredJitterBufferFrames = staticDesiredJitterBufferFrames; + if (!_dynamicJitterBuffers) { + _desiredJitterBufferFrames = _staticDesiredJitterBufferFrames; + } +} + int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const { const int MIN_FRAMES_DESIRED = 0; const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity(); diff --git a/libraries/audio/src/InboundAudioStream.h b/libraries/audio/src/InboundAudioStream.h index ecf36bc4c1..06bd329fee 100644 --- a/libraries/audio/src/InboundAudioStream.h +++ b/libraries/audio/src/InboundAudioStream.h @@ -74,7 +74,7 @@ public: void setDynamicJitterBuffers(bool dynamicJitterBuffers); - void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { _staticDesiredJitterBufferFrames = staticDesiredJitterBufferFrames; } + void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames); /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds AudioStreamStats updateSeqHistoryAndGetAudioStreamStats(); diff --git 
a/libraries/audio/src/MixedAudioStream.cpp b/libraries/audio/src/MixedAudioStream.cpp index 4a388d2b14..b9e2abfe0b 100644 --- a/libraries/audio/src/MixedAudioStream.cpp +++ b/libraries/audio/src/MixedAudioStream.cpp @@ -1,8 +1,8 @@ #include "MixedAudioStream.h" -MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int maxFramesOverDesired, bool useStDevForJitterCalc) - : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, maxFramesOverDesired, useStDevForJitterCalc) +MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc) + : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc) { } diff --git a/libraries/audio/src/MixedAudioStream.h b/libraries/audio/src/MixedAudioStream.h index 30b3061548..5b79519ac5 100644 --- a/libraries/audio/src/MixedAudioStream.h +++ b/libraries/audio/src/MixedAudioStream.h @@ -17,7 +17,7 @@ class MixedAudioStream : public InboundAudioStream { public: - MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int maxFramesOverDesired, bool useStDevForJitterCalc = false); + MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc); float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); } From 8d6896a1da4843ace8524ca94659f6758cf5b86a Mon Sep 17 00:00:00 2001 From: wangyix Date: Wed, 30 Jul 2014 17:26:58 -0700 Subject: [PATCH 60/62] forgot an arg in Agent.cpp --- assignment-client/src/Agent.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index 
096b482398..1946375fae 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -36,7 +36,7 @@ Agent::Agent(const QByteArray& packet) : _voxelEditSender(), _particleEditSender(), _modelEditSender(), - _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false, 1, 0), + _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false, 1, 0, false), _avatarHashMap() { // be the parent of the script engine so it gets moved when we do From b6c0b4fd218543a7356a6d4c49ba6aa2a2b81477 Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Wed, 30 Jul 2014 17:39:24 -0700 Subject: [PATCH 61/62] Random mood in procedural bot --- examples/bot_procedural.js. | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/examples/bot_procedural.js. b/examples/bot_procedural.js. index 17e54007cc..00c2829924 100644 --- a/examples/bot_procedural.js. +++ b/examples/bot_procedural.js. @@ -1,5 +1,5 @@ // -// proceduralBot.js +// bot_procedural.js // hifi // // Created by Ben Arnold on 7/29/2013 @@ -154,7 +154,18 @@ function addBlendshapeToPose(pose, shapeIndex, val) { pose.blendShapes[index] = {shapeIndex: shapeIndex, val: val }; } //The mood of the avatar, determines face. 0 = happy, 1 = angry, 2 = sad. -var avatarMood = 0; + +//Randomly pick avatar mood. 80% happy, 10% mad 10% sad +var randMood = Math.floor(Math.random() * 11); +var avatarMood; +if (randMood == 0) { + avatarMood = 1; +} else if (randMood == 2) { + avatarMood = 2; +} else { + avatarMood = 0; +} + var currentExpression = -1; //Face pose constructor var happyPoses = []; From a65f81c71aa26cfeb0f49a6721cf47d0707d09ed Mon Sep 17 00:00:00 2001 From: barnold1953 Date: Wed, 30 Jul 2014 18:25:58 -0700 Subject: [PATCH 62/62] Random start position for bot --- examples/bot_procedural.js. | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/bot_procedural.js. b/examples/bot_procedural.js. 
index 00c2829924..265b887e0a 100644 --- a/examples/bot_procedural.js. +++ b/examples/bot_procedural.js. @@ -55,8 +55,7 @@ var HEAD_TURN_RATE = 0.05; var PITCH_RANGE = 15.0; var YAW_RANGE = 35.0; -//var firstPosition = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) }; -var firstPosition = { x: 0.5, y: Y_PELVIS, z: 0.5 }; +var firstPosition = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) }; var targetPosition = { x: 0, y: 0, z: 0 }; var targetOrientation = { x: 0, y: 0, z: 0, w: 0 }; var currentOrientation = { x: 0, y: 0, z: 0, w: 0 };