renamed ringbuffer variables to audiostream

commit 6fc5c74c0b
parent 32dbc6cbdb
Author: wangyix
Date: 2014-07-28 13:08:53 -07:00

19 changed files with 237 additions and 225 deletions

View file

@@ -36,7 +36,7 @@ Agent::Agent(const QByteArray& packet) :
     _voxelEditSender(),
     _particleEditSender(),
     _modelEditSender(),
-    _receivedAudioBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false),
+    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false),
     _avatarHashMap()
 {
     // be the parent of the script engine so it gets moved when we do
@@ -148,11 +148,11 @@ void Agent::readPendingDatagrams() {
     } else if (datagramPacketType == PacketTypeMixedAudio) {
-        _receivedAudioBuffer.parseData(receivedPacket);
-        _lastReceivedAudioLoudness = _receivedAudioBuffer.getNextOutputFrameLoudness();
-        _receivedAudioBuffer.clearBuffer();
+        _receivedAudioStream.parseData(receivedPacket);
+        _lastReceivedAudioLoudness = _receivedAudioStream.getNextOutputFrameLoudness();
+        _receivedAudioStream.clearBuffer();
         // let this continue through to the NodeList so it updates last heard timestamp
         // for the sending audio mixer

View file

@@ -30,7 +30,7 @@
 #include <VoxelEditPacketSender.h>
 #include <VoxelTreeHeadlessViewer.h>
-#include "InboundMixedAudioStream.h"
+#include "MixedAudioStream.h"
 class Agent : public ThreadedAssignment {
@@ -71,7 +71,7 @@ private:
     VoxelTreeHeadlessViewer _voxelViewer;
     ModelTreeHeadlessViewer _modelViewer;
-    InboundMixedAudioStream _receivedAudioBuffer;
+    MixedAudioStream _receivedAudioStream;
     float _lastReceivedAudioLoudness;
     AvatarHashMap _avatarHashMap;

View file

@@ -61,7 +61,7 @@ const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f;
 const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
-void attachNewBufferToNode(Node *newNode) {
+void attachNewNodeDataToNode(Node *newNode) {
     if (!newNode->getLinkedData()) {
         newNode->setLinkedData(new AudioMixerClientData());
     }
@@ -93,19 +93,19 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
 const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
 const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
-void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd,
-                                                          AvatarAudioStream* listeningNodeBuffer) {
+void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+                                                          AvatarAudioStream* listeningNodeStream) {
     float bearingRelativeAngleToSource = 0.0f;
     float attenuationCoefficient = 1.0f;
     int numSamplesDelay = 0;
     float weakChannelAmplitudeRatio = 1.0f;
-    bool shouldAttenuate = (bufferToAdd != listeningNodeBuffer);
+    bool shouldAttenuate = (streamToAdd != listeningNodeStream);
     if (shouldAttenuate) {
-        // if the two buffer pointers do not match then these are different buffers
-        glm::vec3 relativePosition = bufferToAdd->getPosition() - listeningNodeBuffer->getPosition();
+        // if the two stream pointers do not match then these are different streams
+        glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition();
         float distanceBetween = glm::length(relativePosition);
@@ -113,7 +113,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
            distanceBetween = EPSILON;
        }
-        if (bufferToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
+        if (streamToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
            // according to mixer performance we have decided this does not get to be mixed in
            // bail out
            return;
@@ -121,24 +121,24 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
        ++_sumMixes;
-        if (bufferToAdd->getListenerUnattenuatedZone()) {
-            shouldAttenuate = !bufferToAdd->getListenerUnattenuatedZone()->contains(listeningNodeBuffer->getPosition());
+        if (streamToAdd->getListenerUnattenuatedZone()) {
+            shouldAttenuate = !streamToAdd->getListenerUnattenuatedZone()->contains(listeningNodeStream->getPosition());
        }
-        if (bufferToAdd->getType() == PositionalAudioStream::Injector) {
-            attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(bufferToAdd)->getAttenuationRatio();
+        if (streamToAdd->getType() == PositionalAudioStream::Injector) {
+            attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getAttenuationRatio();
        }
        shouldAttenuate = shouldAttenuate && distanceBetween > ATTENUATION_EPSILON_DISTANCE;
        if (shouldAttenuate) {
-            glm::quat inverseOrientation = glm::inverse(listeningNodeBuffer->getOrientation());
+            glm::quat inverseOrientation = glm::inverse(listeningNodeStream->getOrientation());
            float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
            float radius = 0.0f;
-            if (bufferToAdd->getType() == PositionalAudioStream::Injector) {
-                radius = reinterpret_cast<InjectedAudioStream*>(bufferToAdd)->getRadius();
+            if (streamToAdd->getType() == PositionalAudioStream::Injector) {
+                radius = reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getRadius();
            }
            if (radius == 0 || (distanceSquareToSource > radius * radius)) {
@@ -154,7 +154,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
            } else {
                // calculate the angle delivery for off-axis attenuation
-                glm::vec3 rotatedListenerPosition = glm::inverse(bufferToAdd->getOrientation()) * relativePosition;
+                glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd->getOrientation()) * relativePosition;
                float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                                   glm::normalize(rotatedListenerPosition));
@@ -203,16 +203,16 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
        }
    }
-    AudioRingBuffer::ConstIterator bufferPopOutput = bufferToAdd->getLastPopOutput();
-    if (!bufferToAdd->isStereo() && shouldAttenuate) {
-        // this is a mono buffer, which means it gets full attenuation and spatialization
+    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();
+    if (!streamToAdd->isStereo() && shouldAttenuate) {
+        // this is a mono stream, which means it gets full attenuation and spatialization
        // if the bearing relative angle to source is > 0 then the delayed channel is the right one
        int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
        int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
-        int16_t correctBufferSample[2], delayBufferSample[2];
+        int16_t correctStreamSample[2], delayStreamSample[2];
        int delayedChannelIndex = 0;
        const int SINGLE_STEREO_OFFSET = 2;
@@ -220,52 +220,51 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
        for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
            // setup the int16_t variables for the two sample sets
-            correctBufferSample[0] = bufferPopOutput[s / 2] * attenuationCoefficient;
-            correctBufferSample[1] = bufferPopOutput[(s / 2) + 1] * attenuationCoefficient;
+            correctStreamSample[0] = streamPopOutput[s / 2] * attenuationCoefficient;
+            correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationCoefficient;
            delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
-            delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
-            delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
-            _clientSamples[s + goodChannelOffset] += correctBufferSample[0];
-            _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] += correctBufferSample[1];
-            _clientSamples[delayedChannelIndex] += delayBufferSample[0];
-            _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayBufferSample[1];
+            delayStreamSample[0] = correctStreamSample[0] * weakChannelAmplitudeRatio;
+            delayStreamSample[1] = correctStreamSample[1] * weakChannelAmplitudeRatio;
+            _clientSamples[s + goodChannelOffset] += correctStreamSample[0];
+            _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] += correctStreamSample[1];
+            _clientSamples[delayedChannelIndex] += delayStreamSample[0];
+            _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayStreamSample[1];
        }
        if (numSamplesDelay > 0) {
-            // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
+            // if there was a sample delay for this stream, we need to pull samples prior to the popped output
            // to stick at the beginning
            float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
-            AudioRingBuffer::ConstIterator delayBufferPopOutput = bufferPopOutput - numSamplesDelay;
-            // TODO: delayBufferPopOutput may be inside the last frame written if the ringbuffer is completely full
+            AudioRingBuffer::ConstIterator delayStreamPopOutput = streamPopOutput - numSamplesDelay;
+            // TODO: delayStreamPopOutput may be inside the last frame written if the ringbuffer is completely full
            // maybe make AudioRingBuffer have 1 extra frame in its buffer
            for (int i = 0; i < numSamplesDelay; i++) {
                int parentIndex = i * 2;
-                _clientSamples[parentIndex + delayedChannelOffset] += *delayBufferPopOutput * attenuationAndWeakChannelRatio;
-                ++delayBufferPopOutput;
+                _clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatio;
+                ++delayStreamPopOutput;
            }
        }
    } else {
-        int stereoDivider = bufferToAdd->isStereo() ? 1 : 2;
+        int stereoDivider = streamToAdd->isStereo() ? 1 : 2;
        if (!shouldAttenuate) {
            attenuationCoefficient = 1.0f;
        }
        for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) {
-            _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(bufferPopOutput[s / stereoDivider] * attenuationCoefficient),
+            _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationCoefficient),
                                           MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
        }
    }
 }
 void AudioMixer::prepareMixForListeningNode(Node* node) {
-    AvatarAudioStream* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();
+    AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
    // zero out the client mix for this node
    memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
@@ -278,16 +277,16 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
-            const QHash<QUuid, PositionalAudioStream*>& otherNodeRingBuffers = otherNodeClientData->getRingBuffers();
+            const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
            QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-            for (i = otherNodeRingBuffers.begin(); i != otherNodeRingBuffers.constEnd(); i++) {
-                PositionalAudioStream* otherNodeBuffer = i.value();
-                if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode())
-                    && otherNodeBuffer->lastPopSucceeded()
-                    && otherNodeBuffer->getNextOutputTrailingLoudness() > 0.0f) {
-                    addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
+            for (i = otherNodeAudioStreams.begin(); i != otherNodeAudioStreams.constEnd(); i++) {
+                PositionalAudioStream* otherNodeStream = i.value();
+                if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode())
+                    && otherNodeStream->lastPopSucceeded()
+                    && otherNodeStream->getNextOutputTrailingLoudness() > 0.0f) {
+                    addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
                }
            }
        }
    }
@@ -392,7 +391,7 @@ void AudioMixer::run() {
    nodeList->addNodeTypeToInterestSet(NodeType::Agent);
-    nodeList->linkedDataCreateCallback = attachNewBufferToNode;
+    nodeList->linkedDataCreateCallback = attachNewNodeDataToNode;
    // setup a NetworkAccessManager to ask the domain-server for our settings
    NetworkAccessManager& networkManager = NetworkAccessManager::getInstance();
@@ -554,7 +553,7 @@ void AudioMixer::run() {
            nodeData->audioStreamsPopFrameForMixing();
            if (node->getType() == NodeType::Agent
-                && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioRingBuffer()) {
+                && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioStream()) {
                prepareMixForListeningNode(node.data());
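A side note for readers following the mono path above: it pans one source into the interleaved stereo mix and delays the channel facing away from the source. A minimal standalone sketch of that idea (names, types, and sizing here are illustrative, not the mixer's actual API):

    #include <cstdint>

    // Mix a mono source into an interleaved stereo accumulator (L,R,L,R,...).
    // The channel facing the source gets the full attenuated sample; the other
    // channel gets a weakened copy delayed by numSamplesDelay frames. The caller
    // must size stereoMix for numMonoSamples + numSamplesDelay frames; a wide
    // accumulator stands in for the mixer's clamping.
    void mixMonoSpatialized(int32_t* stereoMix, const int16_t* monoSource, int numMonoSamples,
                            float attenuation, float weakChannelRatio,
                            int numSamplesDelay, bool delayRightChannel) {
        int delayedOffset = delayRightChannel ? 1 : 0; // delayed channel within each frame
        int goodOffset = 1 - delayedOffset;            // the louder, undelayed channel
        for (int i = 0; i < numMonoSamples; i++) {
            float sample = monoSource[i] * attenuation;
            stereoMix[i * 2 + goodOffset] += (int32_t)sample;
            stereoMix[(i + numSamplesDelay) * 2 + delayedOffset] += (int32_t)(sample * weakChannelRatio);
        }
    }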

View file

@@ -40,9 +40,9 @@ public slots:
     static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; }
 private:
-    /// adds one buffer to the mix for a listening node
-    void addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd,
-                                                  AvatarAudioStream* listeningNodeBuffer);
+    /// adds one stream to the mix for a listening node
+    void addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+                                                  AvatarAudioStream* listeningNodeStream);
     /// prepares and sends a mix to one Node
     void prepareMixForListeningNode(Node* node);

View file

@@ -21,22 +21,22 @@
 AudioMixerClientData::AudioMixerClientData() :
-    _ringBuffers(),
+    _audioStreams(),
     _outgoingMixedAudioSequenceNumber(0)
 {
 }
 AudioMixerClientData::~AudioMixerClientData() {
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-    for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
+    for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
         // delete this attached InboundAudioStream
         delete i.value();
     }
 }
-AvatarAudioStream* AudioMixerClientData::getAvatarAudioRingBuffer() const {
-    if (_ringBuffers.contains(QUuid())) {
-        return (AvatarAudioStream*)_ringBuffers.value(QUuid());
+AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() const {
+    if (_audioStreams.contains(QUuid())) {
+        return (AvatarAudioStream*)_audioStreams.value(QUuid());
     }
     // no mic stream found - return NULL
     return NULL;
@@ -65,7 +65,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
        || packetType == PacketTypeSilentAudioFrame) {
        QUuid nullUUID = QUuid();
-        if (!_ringBuffers.contains(nullUUID)) {
+        if (!_audioStreams.contains(nullUUID)) {
            // we don't have a mic stream yet, so add it
            // read the channel flag to see if our stream is stereo or not
@@ -73,10 +73,10 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
            quint8 channelFlag = *(reinterpret_cast<const quint8*>(channelFlagAt));
            bool isStereo = channelFlag == 1;
-            _ringBuffers.insert(nullUUID,
+            _audioStreams.insert(nullUUID,
                matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers()));
        } else {
-            matchingStream = _ringBuffers.value(nullUUID);
+            matchingStream = _audioStreams.value(nullUUID);
        }
    } else if (packetType == PacketTypeInjectAudio) {
        // this is injected audio
@@ -85,11 +85,11 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
        int bytesBeforeStreamIdentifier = numBytesForPacketHeader(packet) + sizeof(quint16);
        QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(bytesBeforeStreamIdentifier, NUM_BYTES_RFC4122_UUID));
-        if (!_ringBuffers.contains(streamIdentifier)) {
-            _ringBuffers.insert(streamIdentifier,
+        if (!_audioStreams.contains(streamIdentifier)) {
+            _audioStreams.insert(streamIdentifier,
                matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers()));
        } else {
-            matchingStream = _ringBuffers.value(streamIdentifier);
+            matchingStream = _audioStreams.value(streamIdentifier);
        }
    }
@@ -100,7 +100,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
 void AudioMixerClientData::audioStreamsPopFrameForMixing() {
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-    for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
+    for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
        i.value()->popFrames(1);
    }
 }
@@ -109,11 +109,11 @@ void AudioMixerClientData::removeDeadInjectedStreams() {
    const int INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD = 100;
-    // we have this second threshold in case the injected audio is so short that the ringbuffer
+    // we have this second threshold in case the injected audio is so short that the injected stream
    // never even reaches its desired size, which means it will never start.
    const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000;
-    QHash<QUuid, PositionalAudioStream*>::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end();
+    QHash<QUuid, PositionalAudioStream*>::Iterator i = _audioStreams.begin(), end = _audioStreams.end();
    while (i != end) {
        PositionalAudioStream* audioStream = i.value();
        if (audioStream->getType() == PositionalAudioStream::Injector && audioStream->isStarved()) {
@@ -121,7 +121,7 @@ void AudioMixerClientData::removeDeadInjectedStreams() {
                : INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD;
            if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) {
                delete audioStream;
-                i = _ringBuffers.erase(i);
+                i = _audioStreams.erase(i);
                continue;
            }
        }
@@ -150,9 +150,9 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
    // calculate how many stream stat structs we can fit in each packet
    const int numStreamStatsRoomFor = (MAX_PACKET_SIZE - numBytesPacketHeader - sizeof(quint8) - sizeof(quint16)) / sizeof(AudioStreamStats);
-    // pack and send stream stats packets until all ring buffers' stats are sent
-    int numStreamStatsRemaining = _ringBuffers.size();
-    QHash<QUuid, PositionalAudioStream*>::ConstIterator ringBuffersIterator = _ringBuffers.constBegin();
+    // pack and send stream stats packets until all audio streams' stats are sent
+    int numStreamStatsRemaining = _audioStreams.size();
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator audioStreamsIterator = _audioStreams.constBegin();
    while (numStreamStatsRemaining > 0) {
        char* dataAt = headerEndAt;
@@ -169,11 +169,11 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
        // pack the calculated number of stream stats
        for (int i = 0; i < numStreamStatsToPack; i++) {
-            AudioStreamStats streamStats = ringBuffersIterator.value()->updateSeqHistoryAndGetAudioStreamStats();
+            AudioStreamStats streamStats = audioStreamsIterator.value()->updateSeqHistoryAndGetAudioStreamStats();
            memcpy(dataAt, &streamStats, sizeof(AudioStreamStats));
            dataAt += sizeof(AudioStreamStats);
-            ringBuffersIterator++;
+            audioStreamsIterator++;
        }
        numStreamStatsRemaining -= numStreamStatsToPack;
@@ -185,12 +185,12 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
 QString AudioMixerClientData::getAudioStreamStatsString() const {
    QString result;
    AudioStreamStats streamStats = _downstreamAudioStreamStats;
-    result += "DOWNSTREAM.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
-        + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage)
-        + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
-        + " starves:" + QString::number(streamStats._ringBufferStarveCount)
-        + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
-        + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
+    result += "DOWNSTREAM.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
+        + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
+        + " available:" + QString::number(streamStats._framesAvailable)
+        + " starves:" + QString::number(streamStats._starveCount)
+        + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
+        + " overflows:" + QString::number(streamStats._overflowCount)
        + " silents_dropped: ?"
        + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
        + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
@@ -201,17 +201,17 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
        + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
        + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
-    AvatarAudioStream* avatarRingBuffer = getAvatarAudioRingBuffer();
-    if (avatarRingBuffer) {
-        AudioStreamStats streamStats = avatarRingBuffer->getAudioStreamStats();
-        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
-            + " desired_calc:" + QString::number(avatarRingBuffer->getCalculatedJitterBufferFrames())
-            + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage)
-            + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
-            + " starves:" + QString::number(streamStats._ringBufferStarveCount)
-            + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
-            + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
-            + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
+    AvatarAudioStream* avatarAudioStream = getAvatarAudioStream();
+    if (avatarAudioStream) {
+        AudioStreamStats streamStats = avatarAudioStream->getAudioStreamStats();
+        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
+            + " desired_calc:" + QString::number(avatarAudioStream->getCalculatedJitterBufferFrames())
+            + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
+            + " available:" + QString::number(streamStats._framesAvailable)
+            + " starves:" + QString::number(streamStats._starveCount)
+            + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
+            + " overflows:" + QString::number(streamStats._overflowCount)
+            + " silents_dropped:" + QString::number(streamStats._silentFramesDropped)
            + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
            + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
            + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
@@ -225,17 +225,17 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
    }
    QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-    for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
+    for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
        if (i.value()->getType() == PositionalAudioStream::Injector) {
            AudioStreamStats streamStats = i.value()->getAudioStreamStats();
-            result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
+            result += " UPSTREAM.inj.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
                + " desired_calc:" + QString::number(i.value()->getCalculatedJitterBufferFrames())
-                + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage)
-                + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
-                + " starves:" + QString::number(streamStats._ringBufferStarveCount)
-                + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
-                + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
-                + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
+                + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
+                + " available:" + QString::number(streamStats._framesAvailable)
+                + " starves:" + QString::number(streamStats._starveCount)
+                + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
+                + " overflows:" + QString::number(streamStats._overflowCount)
+                + " silents_dropped:" + QString::number(streamStats._silentFramesDropped)
                + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
                + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
                + " min_gap:" + formatUsecTime(streamStats._timeGapMin)

View file

@@ -22,8 +22,8 @@ public:
     AudioMixerClientData();
     ~AudioMixerClientData();
-    const QHash<QUuid, PositionalAudioStream*>& getRingBuffers() const { return _ringBuffers; }
-    AvatarAudioStream* getAvatarAudioRingBuffer() const;
+    const QHash<QUuid, PositionalAudioStream*>& getAudioStreams() const { return _audioStreams; }
+    AvatarAudioStream* getAvatarAudioStream() const;
     int parseData(const QByteArray& packet);
@@ -39,7 +39,7 @@ public:
     quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }
 private:
-    QHash<QUuid, PositionalAudioStream*> _ringBuffers; // mic stream stored under key of null UUID
+    QHash<QUuid, PositionalAudioStream*> _audioStreams; // mic stream stored under key of null UUID
     quint16 _outgoingMixedAudioSequenceNumber;

View file

@@ -9,8 +9,8 @@
 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
-#ifndef hifi_AvatarAudioRingBuffer_h
-#define hifi_AvatarAudioRingBuffer_h
+#ifndef hifi_AvatarAudioStream_h
+#define hifi_AvatarAudioStream_h
 #include <QtCore/QUuid>
@@ -29,4 +29,4 @@ private:
     int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
 };
-#endif // hifi_AvatarAudioRingBuffer_h
+#endif // hifi_AvatarAudioStream_h

View file

@@ -103,10 +103,6 @@ const int IDLE_SIMULATE_MSECS = 16; // How often should call simul
                                     // in the idle loop? (60 FPS is default)
 static QTimer* idleTimer = NULL;
-const int STARTUP_JITTER_SAMPLES = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / 2;
-// Startup optimistically with small jitter buffer that
-// will start playback on the second received audio packet.
 const QString CHECK_VERSION_URL = "https://highfidelity.io/latestVersion.xml";
 const QString SKIP_FILENAME = QStandardPaths::writableLocation(QStandardPaths::DataLocation) + "/hifi.skipversion";
@@ -162,7 +158,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
     _touchAvgY(0.0f),
     _isTouchPressed(false),
     _mousePressed(false),
-    _audio(STARTUP_JITTER_SAMPLES),
+    _audio(),
     _enableProcessVoxelsThread(true),
     _octreeProcessor(),
     _voxelHideShowThread(&_voxels),
@@ -1712,8 +1708,8 @@ void Application::init() {
     _lastTimeUpdated.start();
     Menu::getInstance()->loadSettings();
-    if (Menu::getInstance()->getAudioJitterBufferSamples() != 0) {
-        _audio.setJitterBufferSamples(Menu::getInstance()->getAudioJitterBufferSamples());
+    if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
+        _audio.overrideDesiredJitterBufferFramesTo(Menu::getInstance()->getAudioJitterBufferFrames());
     }
     qDebug("Loaded settings");

View file

@@ -54,7 +54,7 @@ static const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10;
 static const int MUTE_ICON_SIZE = 24;
-Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
+Audio::Audio(QObject* parent) :
     AbstractAudioInterface(parent),
     _audioInput(NULL),
     _desiredInputFormat(),
@@ -76,14 +76,12 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     // this delay will slowly add up and the longer someone runs, they more delayed their audio will be.
     _inputRingBuffer(0),
 #ifdef _WIN32
-    _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, true),
+    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, true),
 #else
-    _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!!
+    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!!
 #endif
     _isStereoInput(false),
     _averagedLatency(0.0),
-    _measuredJitter(0),
-    _jitterBufferSamples(initialJitterBufferSamples),
     _lastInputLoudness(0),
     _timeSinceLastClip(-1.0),
     _dcOffset(0),
@@ -132,13 +130,13 @@ void Audio::init(QGLWidget *parent) {
 }
 void Audio::reset() {
-    _ringBuffer.reset();
+    _receivedAudioStream.reset();
     resetStats();
 }
 void Audio::resetStats() {
-    _ringBuffer.resetStats();
+    _receivedAudioStream.resetStats();
     _audioMixerAvatarStreamAudioStats = AudioStreamStats();
     _audioMixerInjectedStreamAudioStatsMap.clear();
@@ -715,7 +713,7 @@ void Audio::handleAudioInput() {
    }
 }
-void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
+void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
    if (_audioOutput) {
        // Audio output must exist and be correctly set up if we're going to process received audio
        processReceivedAudio(audioByteArray);
@@ -755,7 +753,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
 }
 AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
-    return _ringBuffer.getAudioStreamStats();
+    return _receivedAudioStream.getAudioStreamStats();
 }
 void Audio::sendDownstreamAudioStatsPacket() {
@@ -783,7 +781,7 @@ void Audio::sendDownstreamAudioStatsPacket() {
    dataAt += sizeof(quint16);
    // pack downstream audio stream stats
-    AudioStreamStats stats = _ringBuffer.updateSeqHistoryAndGetAudioStreamStats();
+    AudioStreamStats stats = _receivedAudioStream.updateSeqHistoryAndGetAudioStreamStats();
    memcpy(dataAt, &stats, sizeof(AudioStreamStats));
    dataAt += sizeof(AudioStreamStats);
@@ -894,7 +892,7 @@ void Audio::toggleStereoInput() {
 void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
    // parse audio data
-    _ringBuffer.parseData(audioByteArray);
+    _receivedAudioStream.parseData(audioByteArray);
    pushAudioToOutput();
 }
@@ -904,7 +902,7 @@ void Audio::pushAudioToOutput() {
    if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
        // the audio output has no samples to play. set the downstream audio to starved so that it
        // refills to its desired size before pushing frames
-        _ringBuffer.setToStarved();
+        _receivedAudioStream.setToStarved();
    }
    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
@@ -912,16 +910,16 @@
    int numFramesToPush;
    if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) {
-        numFramesToPush = _ringBuffer.getFramesAvailable();
+        numFramesToPush = _receivedAudioStream.getFramesAvailable();
    } else {
        // make sure to push a whole number of frames to the audio output
-        int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _ringBuffer.getNumFrameSamples();
-        numFramesToPush = std::min(_ringBuffer.getFramesAvailable(), numFramesAudioOutputRoomFor);
+        int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _receivedAudioStream.getNumFrameSamples();
+        numFramesToPush = std::min(_receivedAudioStream.getFramesAvailable(), numFramesAudioOutputRoomFor);
    }
-    // if there is data in the ring buffer and room in the audio output, decide what to do
-    if (numFramesToPush > 0 && _ringBuffer.popFrames(numFramesToPush, false)) {
+    // if there is data in the received stream and room in the audio output, decide what to do
+    if (numFramesToPush > 0 && _receivedAudioStream.popFrames(numFramesToPush, false)) {
        int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
        int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
@@ -929,15 +927,15 @@
        QByteArray outputBuffer;
        outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
-        AudioRingBuffer::ConstIterator ringBufferPopOutput = _ringBuffer.getLastPopOutput();
-        int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
+        AudioRingBuffer::ConstIterator receivedAudioStreamPopOutput = _receivedAudioStream.getLastPopOutput();
+        int16_t* receivedSamples = new int16_t[numNetworkOutputSamples];
        if (_processSpatialAudio) {
            unsigned int sampleTime = _spatialAudioStart;
            QByteArray buffer;
            buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
-            ringBufferPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
+            receivedAudioStreamPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
            // Accumulate direct transmission of audio from sender to receiver
            if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
@@ -950,18 +948,18 @@
            // copy the samples we'll resample from the spatial audio ring buffer - this also
            // pushes the read pointer of the spatial audio ring buffer forwards
-            _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
+            _spatialAudioRingBuffer.readSamples(receivedSamples, numNetworkOutputSamples);
            // Advance the start point for the next packet of audio to arrive
            _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
        } else {
            // copy the samples we'll resample from the ring buffer - this also
            // pushes the read pointer of the ring buffer forwards
-            ringBufferPopOutput.readSamples(ringBufferSamples, numNetworkOutputSamples);
+            receivedAudioStreamPopOutput.readSamples(receivedSamples, numNetworkOutputSamples);
        }
        // copy the packet from the RB to the output
-        linearResampling(ringBufferSamples,
+        linearResampling(receivedSamples,
                         (int16_t*)outputBuffer.data(),
                         numNetworkOutputSamples,
                         numDeviceOutputSamples,
@@ -973,7 +971,7 @@
        if (_scopeEnabled && !_scopeEnabledPause) {
            unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
-            int16_t* samples = ringBufferSamples;
+            int16_t* samples = receivedSamples;
            for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
                unsigned int audioChannel = 0;
@@ -994,7 +992,7 @@
            }
        }
-        delete[] ringBufferSamples;
+        delete[] receivedSamples;
    }
 }
@@ -1332,14 +1330,14 @@ void Audio::renderStats(const float* color, int width, int height) {
    float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f;
-    AudioStreamStats downstreamAudioStreamStats = _ringBuffer.getAudioStreamStats();
+    AudioStreamStats downstreamAudioStreamStats = _receivedAudioStream.getAudioStreamStats();
    SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer);
    if (!audioMixerNodePointer.isNull()) {
        audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage();
        inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable();
        networkRoundtripLatency = audioMixerNodePointer->getPingMs();
-        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
-        outputRingBufferLatency = downstreamAudioStreamStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
+        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
+        outputRingBufferLatency = downstreamAudioStreamStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
        audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage();
    }
    float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency;
@@ -1427,26 +1425,26 @@ void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int hori
        const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
        sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d",
-            streamStats._ringBufferDesiredJitterBufferFrames,
-            streamStats._ringBufferFramesAvailableAverage,
+            streamStats._desiredJitterBufferFrames,
+            streamStats._framesAvailableAverage,
            (int)(getAudioOutputAverageMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS),
-            streamStats._ringBufferFramesAvailable,
+            streamStats._framesAvailable,
            (int)(getAudioOutputMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS));
    } else {
        sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u",
-            streamStats._ringBufferDesiredJitterBufferFrames,
-            streamStats._ringBufferFramesAvailableAverage,
-            streamStats._ringBufferFramesAvailable);
+            streamStats._desiredJitterBufferFrames,
+            streamStats._framesAvailableAverage,
+            streamStats._framesAvailable);
    }
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
    sprintf(stringBuffer, " Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u",
-        streamStats._ringBufferStarveCount,
-        streamStats._ringBufferConsecutiveNotMixedCount,
-        streamStats._ringBufferSilentFramesDropped,
-        streamStats._ringBufferOverflowCount);
+        streamStats._starveCount,
+        streamStats._consecutiveNotMixedCount,
+        streamStats._silentFramesDropped,
+        streamStats._overflowCount);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
@@ -1662,8 +1660,8 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
    // setup our general output device for audio-mixer audio
    _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
-    _audioOutput->setBufferSize(_ringBuffer.getFrameCapacity() * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS));
-    qDebug() << "Ring Buffer capacity in frames: " << _ringBuffer.getFrameCapacity();
+    _audioOutput->setBufferSize(_receivedAudioStream.getFrameCapacity() * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS));
+    qDebug() << "Ring Buffer capacity in frames: " << _receivedAudioStream.getFrameCapacity();
    _outputDevice = _audioOutput->start();
    // setup a loopback audio output device
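A side note on the numFramesToPush computation above: it converts the output device's free byte count into whole network frames before popping. The same arithmetic in isolation, with made-up numbers (the real ratio and frame size come from the negotiated audio formats):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        int bytesFree = 19200;                   // illustrative free space in the audio output
        float networkOutputToOutputRatio = 0.5f; // illustrative: 24000 Hz network / 48000 Hz device
        int numFrameSamples = 512;               // illustrative stereo samples per network frame
        int framesAvailable = 6;                 // frames buffered in the received stream
        // device samples that fit -> network samples -> whole network frames
        int roomFor = (int)(bytesFree / sizeof(int16_t) * networkOutputToOutputRatio) / numFrameSamples;
        int numFramesToPush = std::min(framesAvailable, roomFor);
        printf("room for %d frames, pushing %d\n", roomFor, numFramesToPush); // room for 9, pushing 6
        return 0;
    }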

View file

@@ -33,7 +33,7 @@
 #include <AbstractAudioInterface.h>
 #include <StdDev.h>
-#include "InboundMixedAudioStream.h"
+#include "MixedAudioStream.h"
 static const int NUM_AUDIO_CHANNELS = 2;
@@ -46,19 +46,19 @@ class Audio : public AbstractAudioInterface {
     Q_OBJECT
 public:
     // setup for audio I/O
-    Audio(int16_t initialJitterBufferSamples, QObject* parent = 0);
+    Audio(QObject* parent = 0);
     float getLastInputLoudness() const { return glm::max(_lastInputLoudness - _noiseGateMeasuredFloor, 0.f); }
     float getTimeSinceLastClip() const { return _timeSinceLastClip; }
     float getAudioAverageInputLoudness() const { return _lastInputLoudness; }
     void setNoiseGateEnabled(bool noiseGateEnabled) { _noiseGateEnabled = noiseGateEnabled; }
-    void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; }
-    int getJitterBufferSamples() { return _jitterBufferSamples; }
     virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
     virtual void startDrumSound(float volume, float frequency, float duration, float decay);
+    void overrideDesiredJitterBufferFramesTo(int desired) { _receivedAudioStream.overrideDesiredJitterBufferFramesTo(desired); }
+    int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); }
     float getCollisionSoundMagnitude() { return _collisionSoundMagnitude; }
@@ -87,7 +87,7 @@ public:
 public slots:
     void start();
     void stop();
-    void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
+    void addReceivedAudioToStream(const QByteArray& audioByteArray);
     void parseAudioStreamStatsPacket(const QByteArray& packet);
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
     void handleAudioInput();
@@ -120,8 +120,6 @@ public slots:
     float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
     void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }
-    int getDesiredJitterBufferFrames() const { return _jitterBufferSamples / _ringBuffer.getNumFrameSamples(); }
     const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
     const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }
@@ -151,7 +149,7 @@ private:
     QAudioOutput* _proceduralAudioOutput;
     QIODevice* _proceduralOutputDevice;
     AudioRingBuffer _inputRingBuffer;
-    InboundMixedAudioStream _ringBuffer;
+    MixedAudioStream _receivedAudioStream;
     bool _isStereoInput;
     QString _inputAudioDeviceName;
@@ -160,8 +158,6 @@ private:
     StDev _stdev;
     QElapsedTimer _timeSinceLastReceived;
     float _averagedLatency;
-    float _measuredJitter;
-    int16_t _jitterBufferSamples;
     float _lastInputLoudness;
     float _timeSinceLastClip;
     float _dcOffset;

View file

@@ -25,13 +25,13 @@ public:
         _timeGapWindowMin(0),
         _timeGapWindowMax(0),
         _timeGapWindowAverage(0.0f),
-        _ringBufferFramesAvailable(0),
-        _ringBufferFramesAvailableAverage(0),
-        _ringBufferDesiredJitterBufferFrames(0),
-        _ringBufferStarveCount(0),
-        _ringBufferConsecutiveNotMixedCount(0),
-        _ringBufferOverflowCount(0),
-        _ringBufferSilentFramesDropped(0),
+        _framesAvailable(0),
+        _framesAvailableAverage(0),
+        _desiredJitterBufferFrames(0),
+        _starveCount(0),
+        _consecutiveNotMixedCount(0),
+        _overflowCount(0),
+        _silentFramesDropped(0),
         _packetStreamStats(),
         _packetStreamWindowStats()
     {}
@@ -46,13 +46,13 @@ public:
     quint64 _timeGapWindowMax;
     float _timeGapWindowAverage;
-    quint32 _ringBufferFramesAvailable;
-    quint16 _ringBufferFramesAvailableAverage;
-    quint16 _ringBufferDesiredJitterBufferFrames;
-    quint32 _ringBufferStarveCount;
-    quint32 _ringBufferConsecutiveNotMixedCount;
-    quint32 _ringBufferOverflowCount;
-    quint32 _ringBufferSilentFramesDropped;
+    quint32 _framesAvailable;
+    quint16 _framesAvailableAverage;
+    quint16 _desiredJitterBufferFrames;
+    quint32 _starveCount;
+    quint32 _consecutiveNotMixedCount;
+    quint32 _overflowCount;
+    quint32 _silentFramesDropped;
     PacketStreamStats _packetStreamStats;
     PacketStreamStats _packetStreamWindowStats;

View file

@ -146,6 +146,11 @@ void InboundAudioStream::starved() {
_starveCount++; _starveCount++;
} }
void InboundAudioStream::overrideDesiredJitterBufferFramesTo(int desired) {
_dynamicJitterBuffers = false;
_desiredJitterBufferFrames = desired;
}
int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const { int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
const int MIN_FRAMES_DESIRED = 0; const int MIN_FRAMES_DESIRED = 0;
const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity(); const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity();
@ -244,13 +249,13 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
streamStats._timeGapWindowMax = _interframeTimeGapStatsForStatsPacket.getWindowMax(); streamStats._timeGapWindowMax = _interframeTimeGapStatsForStatsPacket.getWindowMax();
streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage(); streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage();
streamStats._ringBufferFramesAvailable = _ringBuffer.framesAvailable(); streamStats._framesAvailable = _ringBuffer.framesAvailable();
streamStats._ringBufferFramesAvailableAverage = _framesAvailableStats.getWindowAverage(); streamStats._framesAvailableAverage = _framesAvailableStats.getWindowAverage();
streamStats._ringBufferDesiredJitterBufferFrames = _desiredJitterBufferFrames; streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames;
streamStats._ringBufferStarveCount = _starveCount; streamStats._starveCount = _starveCount;
streamStats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount; streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount;
streamStats._ringBufferOverflowCount = _ringBuffer.getOverflowCount(); streamStats._overflowCount = _ringBuffer.getOverflowCount();
streamStats._ringBufferSilentFramesDropped = _silentFramesDropped; streamStats._silentFramesDropped = _silentFramesDropped;
streamStats._packetStreamStats = _incomingSequenceNumberStats.getStats(); streamStats._packetStreamStats = _incomingSequenceNumberStats.getStats();
streamStats._packetStreamWindowStats = _incomingSequenceNumberStats.getStatsForHistoryWindow(); streamStats._packetStreamWindowStats = _incomingSequenceNumberStats.getStatsForHistoryWindow();
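
With the ringBuffer prefix gone, downstream consumers read the same data under the shorter names. A hypothetical debug dump of the renamed fields (the function itself is illustrative, not part of this commit):

    #include <QtCore/QDebug>

    void dumpStreamStats(const AudioStreamStats& stats) {
        qDebug() << "frames available:" << stats._framesAvailable
                 << "avg available:" << stats._framesAvailableAverage
                 << "desired jitter frames:" << stats._desiredJitterBufferFrames
                 << "starves:" << stats._starveCount
                 << "overflows:" << stats._overflowCount
                 << "silent frames dropped:" << stats._silentFramesDropped;
    }
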

View file

@ -62,6 +62,8 @@ public:
void setToStarved(); void setToStarved();
/// turns off dynamic jitter buffers and sets the desired jitter buffer frames to the specified value
void overrideDesiredJitterBufferFramesTo(int desired);
/// this function should be called once per second to ensure the seq num stats history spans ~30 seconds /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds
AudioStreamStats updateSeqHistoryAndGetAudioStreamStats(); AudioStreamStats updateSeqHistoryAndGetAudioStreamStats();
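
To honor the once-per-second contract in that comment, a caller would typically drive the method from a timer. A minimal sketch with a hypothetical QObject owner (class and slot names are not from this commit):

    #include <QtCore/QObject>
    #include <QtCore/QTimer>
    #include "MixedAudioStream.h"

    class AudioStatsReporter : public QObject {
        Q_OBJECT
    public:
        AudioStatsReporter(MixedAudioStream* stream) : _stream(stream) {
            QTimer* timer = new QTimer(this);
            connect(timer, SIGNAL(timeout()), this, SLOT(reportStats()));
            timer->start(1000); // once per second, so the seq num history spans ~30 seconds
        }
    private slots:
        void reportStats() {
            AudioStreamStats stats = _stream->updateSeqHistoryAndGetAudioStreamStats();
            // ... serialize `stats` into a stats packet here
        }
    private:
        MixedAudioStream* _stream;
    };
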
@ -95,8 +97,11 @@ public:
private: private:
void starved(); void starved();
SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID);
int clampDesiredJitterBufferFramesValue(int desired) const; int clampDesiredJitterBufferFramesValue(int desired) const;
int writeSamplesForDroppedPackets(int numSamples);
protected: protected:
// disallow copying of InboundAudioStream objects // disallow copying of InboundAudioStream objects
InboundAudioStream(const InboundAudioStream&); InboundAudioStream(const InboundAudioStream&);
@ -110,9 +115,7 @@ protected:
virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0; virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0;
int writeDroppableSilentSamples(int numSilentSamples); int writeDroppableSilentSamples(int numSilentSamples);
int writeSamplesForDroppedPackets(int numSamples);
SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID);
protected: protected:
AudioRingBuffer _ringBuffer; AudioRingBuffer _ringBuffer;
@ -120,7 +123,7 @@ protected:
bool _lastPopSucceeded; bool _lastPopSucceeded;
AudioRingBuffer::ConstIterator _lastPopOutput; AudioRingBuffer::ConstIterator _lastPopOutput;
const bool _dynamicJitterBuffers; bool _dynamicJitterBuffers;
bool _useStDevForJitterCalc; bool _useStDevForJitterCalc;
int _calculatedJitterBufferFramesUsingMaxGap; int _calculatedJitterBufferFramesUsingMaxGap;

View file

@ -1,17 +0,0 @@
#include "InboundMixedAudioStream.h"
InboundMixedAudioStream::InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc)
: InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, useStDevForJitterCalc)
{
}
int InboundMixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
// mixed audio packets do not have any info between the seq num and the audio data.
numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
return 0;
}
int InboundMixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}

View file

@ -1,14 +0,0 @@
#include "InboundAudioStream.h"
#include "PacketHeaders.h"
class InboundMixedAudioStream : public InboundAudioStream {
public:
InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false);
float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
protected:
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
};

View file

@ -9,8 +9,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
// //
#ifndef hifi_InjectedAudioRingBuffer_h #ifndef hifi_InjectedAudioStream_h
#define hifi_InjectedAudioRingBuffer_h #define hifi_InjectedAudioStream_h
#include <QtCore/QUuid> #include <QtCore/QUuid>
@ -39,4 +39,4 @@ private:
float _attenuationRatio; float _attenuationRatio;
}; };
#endif // hifi_InjectedAudioRingBuffer_h #endif // hifi_InjectedAudioStream_h

View file

@ -0,0 +1,17 @@
#include "MixedAudioStream.h"
MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc)
: InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, useStDevForJitterCalc)
{
}
int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
// mixed audio packets do not have any info between the seq num and the audio data.
numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
return 0;
}
int MixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}
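
Since a mixed-audio payload carries nothing between the sequence number and the samples, the sample count is just the payload size over sizeof(int16_t). A self-contained check of that arithmetic (the 1024-byte payload is illustrative):

    #include <QtCore/QByteArray>
    #include <cassert>
    #include <cstdint>

    int main() {
        QByteArray payload(1024, '\0');  // hypothetical payload after the seq num
        int numAudioSamples = payload.size() / (int)sizeof(int16_t);
        assert(numAudioSamples == 512);  // two bytes per 16-bit sample
        return 0;
    }
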

View file

@ -0,0 +1,29 @@
//
// MixedAudioStream.h
// libraries/audio/src
//
// Created by Stephen Birarda on 6/5/13.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_MixedAudioStream_h
#define hifi_MixedAudioStream_h
#include "InboundAudioStream.h"
#include "PacketHeaders.h"
class MixedAudioStream : public InboundAudioStream {
public:
MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false);
float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
protected:
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
};
#endif // hifi_MixedAudioStream_h
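
A minimal usage sketch of the new class, mirroring how a client would feed it packets; NETWORK_BUFFER_LENGTH_SAMPLES_STEREO and the received datagram are assumed to exist as elsewhere in the codebase:

    #include "MixedAudioStream.h"

    // construct with a 10-frame capacity and a static jitter buffer (both illustrative)
    MixedAudioStream mixedStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, false);

    // after a PacketTypeMixedAudio datagram arrives:
    mixedStream.parseData(receivedPacket);
    float loudness = mixedStream.getNextOutputFrameLoudness();
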

View file

@ -9,8 +9,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
// //
#ifndef hifi_PositionalAudioRingBuffer_h #ifndef hifi_PositionalAudioStream_h
#define hifi_PositionalAudioRingBuffer_h #define hifi_PositionalAudioStream_h
#include <glm/gtx/quaternion.hpp> #include <glm/gtx/quaternion.hpp>
#include <AABox.h> #include <AABox.h>
@ -71,4 +71,4 @@ protected:
AABox* _listenerUnattenuatedZone; AABox* _listenerUnattenuatedZone;
}; };
#endif // hifi_PositionalAudioRingBuffer_h #endif // hifi_PositionalAudioStream_h