renamed AudioRingBuffer classes to AudioStream

wangyix 2014-07-28 11:52:05 -07:00
parent 9bbd055404
commit 32dbc6cbdb
12 changed files with 81 additions and 81 deletions

View file

@@ -52,8 +52,8 @@
 #include "AudioRingBuffer.h"
 #include "AudioMixerClientData.h"
-#include "AvatarAudioRingBuffer.h"
-#include "InjectedAudioRingBuffer.h"
+#include "AvatarAudioStream.h"
+#include "InjectedAudioStream.h"
 #include "AudioMixer.h"
@@ -93,8 +93,8 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
 const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
 const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
-void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
-                                                          AvatarAudioRingBuffer* listeningNodeBuffer) {
+void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd,
+                                                          AvatarAudioStream* listeningNodeBuffer) {
     float bearingRelativeAngleToSource = 0.0f;
     float attenuationCoefficient = 1.0f;
     int numSamplesDelay = 0;
@@ -125,8 +125,8 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
         shouldAttenuate = !bufferToAdd->getListenerUnattenuatedZone()->contains(listeningNodeBuffer->getPosition());
     }
-    if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) {
-        attenuationCoefficient *= reinterpret_cast<InjectedAudioRingBuffer*>(bufferToAdd)->getAttenuationRatio();
+    if (bufferToAdd->getType() == PositionalAudioStream::Injector) {
+        attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(bufferToAdd)->getAttenuationRatio();
     }
     shouldAttenuate = shouldAttenuate && distanceBetween > ATTENUATION_EPSILON_DISTANCE;
@@ -137,8 +137,8 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
     float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
     float radius = 0.0f;
-    if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) {
-        radius = reinterpret_cast<InjectedAudioRingBuffer*>(bufferToAdd)->getRadius();
+    if (bufferToAdd->getType() == PositionalAudioStream::Injector) {
+        radius = reinterpret_cast<InjectedAudioStream*>(bufferToAdd)->getRadius();
     }
     if (radius == 0 || (distanceSquareToSource > radius * radius)) {
@@ -265,7 +265,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
 }
 void AudioMixer::prepareMixForListeningNode(Node* node) {
-    AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();
+    AvatarAudioStream* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();
     // zero out the client mix for this node
     memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
@@ -278,10 +278,10 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
             // enumerate the ARBs attached to the otherNode and add all that should be added to mix
-            const QHash<QUuid, PositionalAudioRingBuffer*>& otherNodeRingBuffers = otherNodeClientData->getRingBuffers();
-            QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator i;
+            const QHash<QUuid, PositionalAudioStream*>& otherNodeRingBuffers = otherNodeClientData->getRingBuffers();
+            QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
             for (i = otherNodeRingBuffers.begin(); i != otherNodeRingBuffers.constEnd(); i++) {
-                PositionalAudioRingBuffer* otherNodeBuffer = i.value();
+                PositionalAudioStream* otherNodeBuffer = i.value();
                 if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode())
                     && otherNodeBuffer->lastPopSucceeded()
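The hunks above touch the mixer's per-listener loop: prepareMixForListeningNode() walks every other node's positional streams and hands each one that successfully popped a frame to addBufferToMixForListeningNodeWithBuffer(), skipping the listener's own mic unless loopback was requested. Below is a minimal, self-contained C++ sketch of that loop shape, not the actual AudioMixer code: std::map and stub stream types stand in for the Qt containers and the real classes.

    // Sketch of the per-listener mixing loop structure (simplified stand-in types).
    #include <iostream>
    #include <map>
    #include <string>

    struct PositionalAudioStream {
        virtual ~PositionalAudioStream() {}
        bool lastPopSucceeded() const { return true; }        // did popFrames(1) yield a frame this interval?
        bool shouldLoopbackForNode() const { return false; }  // echo the sender's own audio back to them?
    };
    struct AvatarAudioStream : PositionalAudioStream {};

    // One entry per source stream; the empty key stands in for the null-UUID mic stream.
    using StreamMap = std::map<std::string, PositionalAudioStream*>;

    void prepareMixForListener(const StreamMap& otherNodeStreams, bool sameNode,
                               AvatarAudioStream* listenerStream) {
        for (const auto& entry : otherNodeStreams) {
            PositionalAudioStream* source = entry.second;
            // Skip the listener's own mic unless it asked for loopback, and skip
            // streams that had no frame available to pop.
            if ((!sameNode || source->shouldLoopbackForNode()) && source->lastPopSucceeded()) {
                std::cout << "mixing stream '" << entry.first << "' for listener\n";
                (void)listenerStream; // attenuation and delay against the listener would be applied here
            }
        }
    }

    int main() {
        AvatarAudioStream listenerMic;
        AvatarAudioStream otherMic;
        StreamMap streams{{"", &otherMic}};
        prepareMixForListener(streams, /*sameNode=*/false, &listenerMic);
        return 0;
    }

The real loop additionally computes an attenuation coefficient and a sample phase delay per source before accumulating into _clientSamples, as the earlier hunks in this file show.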

View file

@@ -16,8 +16,8 @@
 #include <AudioRingBuffer.h>
 #include <ThreadedAssignment.h>
-class PositionalAudioRingBuffer;
-class AvatarAudioRingBuffer;
+class PositionalAudioStream;
+class AvatarAudioStream;
 const int SAMPLE_PHASE_DELAY_AT_90 = 20;
@@ -41,8 +41,8 @@ public slots:
 private:
     /// adds one buffer to the mix for a listening node
-    void addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
-                                                  AvatarAudioRingBuffer* listeningNodeBuffer);
+    void addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd,
+                                                  AvatarAudioStream* listeningNodeBuffer);
     /// prepares and sends a mix to one Node
     void prepareMixForListeningNode(Node* node);

View file

@@ -14,7 +14,7 @@
 #include <PacketHeaders.h>
 #include <UUID.h>
-#include "InjectedAudioRingBuffer.h"
+#include "InjectedAudioStream.h"
 #include "AudioMixer.h"
 #include "AudioMixerClientData.h"
@@ -27,16 +27,16 @@ AudioMixerClientData::AudioMixerClientData() :
 }
 AudioMixerClientData::~AudioMixerClientData() {
-    QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator i;
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
         // delete this attached InboundAudioStream
         delete i.value();
     }
 }
-AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const {
+AvatarAudioStream* AudioMixerClientData::getAvatarAudioRingBuffer() const {
     if (_ringBuffers.contains(QUuid())) {
-        return (AvatarAudioRingBuffer*)_ringBuffers.value(QUuid());
+        return (AvatarAudioStream*)_ringBuffers.value(QUuid());
     }
     // no mic stream found - return NULL
     return NULL;
@@ -58,7 +58,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         return dataAt - packet.data();
     } else {
-        PositionalAudioRingBuffer* matchingStream = NULL;
+        PositionalAudioStream* matchingStream = NULL;
         if (packetType == PacketTypeMicrophoneAudioWithEcho
             || packetType == PacketTypeMicrophoneAudioNoEcho
@@ -74,7 +74,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
                 bool isStereo = channelFlag == 1;
                 _ringBuffers.insert(nullUUID,
-                    matchingStream = new AvatarAudioRingBuffer(isStereo, AudioMixer::getUseDynamicJitterBuffers()));
+                    matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers()));
             } else {
                 matchingStream = _ringBuffers.value(nullUUID);
             }
@@ -87,7 +87,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
             if (!_ringBuffers.contains(streamIdentifier)) {
                 _ringBuffers.insert(streamIdentifier,
-                    matchingStream = new InjectedAudioRingBuffer(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers()));
+                    matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers()));
             } else {
                 matchingStream = _ringBuffers.value(streamIdentifier);
             }
@@ -99,7 +99,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
 }
 void AudioMixerClientData::audioStreamsPopFrameForMixing() {
-    QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator i;
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
         i.value()->popFrames(1);
     }
@@ -113,10 +113,10 @@ void AudioMixerClientData::removeDeadInjectedStreams() {
     // never even reaches its desired size, which means it will never start.
     const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000;
-    QHash<QUuid, PositionalAudioRingBuffer*>::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end();
+    QHash<QUuid, PositionalAudioStream*>::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end();
     while (i != end) {
-        PositionalAudioRingBuffer* audioStream = i.value();
-        if (audioStream->getType() == PositionalAudioRingBuffer::Injector && audioStream->isStarved()) {
+        PositionalAudioStream* audioStream = i.value();
+        if (audioStream->getType() == PositionalAudioStream::Injector && audioStream->isStarved()) {
            int notMixedThreshold = audioStream->hasStarted() ? INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD
                                                              : INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD;
            if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) {
@@ -152,7 +152,7 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
     // pack and send stream stats packets until all ring buffers' stats are sent
     int numStreamStatsRemaining = _ringBuffers.size();
-    QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator ringBuffersIterator = _ringBuffers.constBegin();
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator ringBuffersIterator = _ringBuffers.constBegin();
     while (numStreamStatsRemaining > 0) {
         char* dataAt = headerEndAt;
@@ -201,7 +201,7 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
         + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
         + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
-    AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
+    AvatarAudioStream* avatarRingBuffer = getAvatarAudioRingBuffer();
     if (avatarRingBuffer) {
         AudioStreamStats streamStats = avatarRingBuffer->getAudioStreamStats();
         result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
@@ -224,9 +224,9 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
         result = "mic unknown";
     }
-    QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator i;
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
-        if (i.value()->getType() == PositionalAudioRingBuffer::Injector) {
+        if (i.value()->getType() == PositionalAudioStream::Injector) {
             AudioStreamStats streamStats = i.value()->getAudioStreamStats();
             result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
                 + " desired_calc:" + QString::number(i.value()->getCalculatedJitterBufferFrames())

View file

@@ -14,16 +14,16 @@
 #include <AABox.h>
-#include "PositionalAudioRingBuffer.h"
-#include "AvatarAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
+#include "AvatarAudioStream.h"
 class AudioMixerClientData : public NodeData {
 public:
     AudioMixerClientData();
     ~AudioMixerClientData();
-    const QHash<QUuid, PositionalAudioRingBuffer*>& getRingBuffers() const { return _ringBuffers; }
-    AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
+    const QHash<QUuid, PositionalAudioStream*>& getRingBuffers() const { return _ringBuffers; }
+    AvatarAudioStream* getAvatarAudioRingBuffer() const;
     int parseData(const QByteArray& packet);
@@ -39,7 +39,7 @@ public:
     quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }
 private:
-    QHash<QUuid, PositionalAudioRingBuffer*> _ringBuffers; // mic stream stored under key of null UUID
+    QHash<QUuid, PositionalAudioStream*> _ringBuffers; // mic stream stored under key of null UUID
     quint16 _outgoingMixedAudioSequenceNumber;

View file

@@ -1,5 +1,5 @@
 //
-// AvatarAudioRingBuffer.cpp
+// AvatarAudioStream.cpp
 // assignment-client/src/audio
 //
 // Created by Stephen Birarda on 6/5/13.
@@ -11,14 +11,14 @@
 #include <PacketHeaders.h>
-#include "AvatarAudioRingBuffer.h"
+#include "AvatarAudioStream.h"
-AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBuffer) :
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo, dynamicJitterBuffer)
+AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer) :
+    PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer)
 {
 }
-int AvatarAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
     _shouldLoopbackForNode = (type == PacketTypeMicrophoneAudioWithEcho);
@@ -51,7 +51,7 @@ int AvatarAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArr
     return readBytes;
 }
-int AvatarAudioRingBuffer::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+int AvatarAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
     int readBytes = 0;
     if (type == PacketTypeSilentAudioFrame) {
         writeDroppableSilentSamples(numAudioSamples);

View file

@@ -1,5 +1,5 @@
 //
-// AvatarAudioRingBuffer.h
+// AvatarAudioStream.h
 // assignment-client/src/audio
 //
 // Created by Stephen Birarda on 6/5/13.
@@ -14,16 +14,16 @@
 #include <QtCore/QUuid>
-#include "PositionalAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
-class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
+class AvatarAudioStream : public PositionalAudioStream {
 public:
-    AvatarAudioRingBuffer(bool isStereo = false, bool dynamicJitterBuffer = false);
+    AvatarAudioStream(bool isStereo = false, bool dynamicJitterBuffer = false);
 private:
-    // disallow copying of AvatarAudioRingBuffer objects
-    AvatarAudioRingBuffer(const AvatarAudioRingBuffer&);
-    AvatarAudioRingBuffer& operator= (const AvatarAudioRingBuffer&);
+    // disallow copying of AvatarAudioStream objects
+    AvatarAudioStream(const AvatarAudioStream&);
+    AvatarAudioStream& operator= (const AvatarAudioStream&);
     int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
     int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);

View file

@@ -42,7 +42,7 @@
 #include "Audio.h"
 #include "Menu.h"
 #include "Util.h"
-#include "PositionalAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
 static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
@@ -746,7 +746,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
         memcpy(&streamStats, dataAt, sizeof(AudioStreamStats));
         dataAt += sizeof(AudioStreamStats);
-        if (streamStats._streamType == PositionalAudioRingBuffer::Microphone) {
+        if (streamStats._streamType == PositionalAudioStream::Microphone) {
             _audioMixerAvatarStreamAudioStats = streamStats;
         } else {
             _audioMixerInjectedStreamAudioStatsMap[streamStats._streamIdentifier] = streamStats;

View file

@@ -120,7 +120,7 @@ protected:
     bool _lastPopSucceeded;
     AudioRingBuffer::ConstIterator _lastPopOutput;
-    bool _dynamicJitterBuffers;
+    const bool _dynamicJitterBuffers;
     bool _useStDevForJitterCalc;
     int _calculatedJitterBufferFramesUsingMaxGap;
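The one behavioral change in this file is _dynamicJitterBuffers becoming const: a const data member can only be set in the constructor's initializer list and cannot be reassigned afterwards, so the jitter-buffer mode is fixed for the stream's lifetime. A minimal illustration of that C++ rule follows; it is a sketch, not the real InboundAudioStream.

    // A const member must be initialized in the constructor initializer list.
    struct InboundAudioStreamSketch {
        InboundAudioStreamSketch(bool dynamicJitterBuffers)
            : _dynamicJitterBuffers(dynamicJitterBuffers) {}   // only place it can be set

        // void setDynamicJitterBuffers(bool on) { _dynamicJitterBuffers = on; }  // would not compile

        const bool _dynamicJitterBuffers;
    };

    int main() {
        InboundAudioStreamSketch stream(/*dynamicJitterBuffers=*/true);
        return stream._dynamicJitterBuffers ? 0 : 1;
    }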

View file

@@ -1,5 +1,5 @@
 //
-// InjectedAudioRingBuffer.cpp
+// InjectedAudioStream.cpp
 // libraries/audio/src
 //
 // Created by Stephen Birarda on 6/5/13.
@@ -17,10 +17,10 @@
 #include <PacketHeaders.h>
 #include <UUID.h>
-#include "InjectedAudioRingBuffer.h"
+#include "InjectedAudioStream.h"
-InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier, bool dynamicJitterBuffer) :
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Injector, false, dynamicJitterBuffer),
+InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer) :
+    PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer),
     _streamIdentifier(streamIdentifier),
     _radius(0.0f),
     _attenuationRatio(0)
@@ -30,7 +30,7 @@ InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier,
 const uchar MAX_INJECTOR_VOLUME = 255;
-int InjectedAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+int InjectedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
     // setup a data stream to read from this packet
     QDataStream packetStream(packetAfterSeqNum);
@@ -58,12 +58,12 @@ int InjectedAudioRingBuffer::parseStreamProperties(PacketType type, const QByteA
     return packetStream.device()->pos();
 }
-int InjectedAudioRingBuffer::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+int InjectedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
     return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
 }
-AudioStreamStats InjectedAudioRingBuffer::getAudioStreamStats() const {
-    AudioStreamStats streamStats = PositionalAudioRingBuffer::getAudioStreamStats();
+AudioStreamStats InjectedAudioStream::getAudioStreamStats() const {
+    AudioStreamStats streamStats = PositionalAudioStream::getAudioStreamStats();
     streamStats._streamIdentifier = _streamIdentifier;
     return streamStats;
 }

View file

@@ -1,5 +1,5 @@
 //
-// InjectedAudioRingBuffer.h
+// InjectedAudioStream.h
 // libraries/audio/src
 //
 // Created by Stephen Birarda on 6/5/13.
@@ -14,11 +14,11 @@
 #include <QtCore/QUuid>
-#include "PositionalAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
-class InjectedAudioRingBuffer : public PositionalAudioRingBuffer {
+class InjectedAudioStream : public PositionalAudioStream {
 public:
-    InjectedAudioRingBuffer(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false);
+    InjectedAudioStream(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false);
     float getRadius() const { return _radius; }
     float getAttenuationRatio() const { return _attenuationRatio; }
@@ -26,9 +26,9 @@ public:
     QUuid getStreamIdentifier() const { return _streamIdentifier; }
 private:
-    // disallow copying of InjectedAudioRingBuffer objects
-    InjectedAudioRingBuffer(const InjectedAudioRingBuffer&);
-    InjectedAudioRingBuffer& operator= (const InjectedAudioRingBuffer&);
+    // disallow copying of InjectedAudioStream objects
+    InjectedAudioStream(const InjectedAudioStream&);
+    InjectedAudioStream& operator= (const InjectedAudioStream&);
     AudioStreamStats getAudioStreamStats() const;
     int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);

View file

@@ -1,5 +1,5 @@
 //
-// PositionalAudioRingBuffer.cpp
+// PositionalAudioStream.cpp
 // libraries/audio/src
 //
 // Created by Stephen Birarda on 6/5/13.
@@ -9,7 +9,7 @@
 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
-#include "PositionalAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
 #include "SharedUtil.h"
 #include <cstring>
@@ -21,7 +21,7 @@
 #include <PacketHeaders.h>
 #include <UUID.h>
-PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo, bool dynamicJitterBuffers) :
+PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers) :
     InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
         AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers),
     _type(type),
@@ -34,13 +34,13 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::
 {
 }
-int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
+int PositionalAudioStream::parseData(const QByteArray& packet) {
     int bytesRead = InboundAudioStream::parseData(packet);
     updateNextOutputTrailingLoudness();
     return bytesRead;
 }
-void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() {
+void PositionalAudioStream::updateNextOutputTrailingLoudness() {
     float nextLoudness = _ringBuffer.getNextOutputFrameLoudness();
     const int TRAILING_AVERAGE_FRAMES = 100;
@@ -59,7 +59,7 @@ void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() {
     }
 }
-int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalByteArray) {
+int PositionalAudioStream::parsePositionalData(const QByteArray& positionalByteArray) {
     QDataStream packetStream(positionalByteArray);
     packetStream.readRawData(reinterpret_cast<char*>(&_position), sizeof(_position));
@@ -75,7 +75,7 @@ int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalB
     return packetStream.device()->pos();
 }
-AudioStreamStats PositionalAudioRingBuffer::getAudioStreamStats() const {
+AudioStreamStats PositionalAudioStream::getAudioStreamStats() const {
     AudioStreamStats streamStats = InboundAudioStream::getAudioStreamStats();
     streamStats._streamType = _type;
     return streamStats;

View file

@@ -1,5 +1,5 @@
 //
-// PositionalAudioRingBuffer.h
+// PositionalAudioStream.h
 // libraries/audio/src
 //
 // Created by Stephen Birarda on 6/5/13.
@@ -19,7 +19,7 @@
 const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
-class PositionalAudioRingBuffer : public InboundAudioStream {
+class PositionalAudioStream : public InboundAudioStream {
     Q_OBJECT
 public:
     enum Type {
@@ -27,7 +27,7 @@ public:
         Injector
     };
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);
+    PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);
     int parseData(const QByteArray& packet);
@@ -38,7 +38,7 @@ public:
     bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
     bool isStereo() const { return _isStereo; }
-    PositionalAudioRingBuffer::Type getType() const { return _type; }
+    PositionalAudioStream::Type getType() const { return _type; }
     const glm::vec3& getPosition() const { return _position; }
     const glm::quat& getOrientation() const { return _orientation; }
     AABox* getListenerUnattenuatedZone() const { return _listenerUnattenuatedZone; }
@@ -46,9 +46,9 @@ public:
     void setListenerUnattenuatedZone(AABox* listenerUnattenuatedZone) { _listenerUnattenuatedZone = listenerUnattenuatedZone; }
 protected:
-    // disallow copying of PositionalAudioRingBuffer objects
-    PositionalAudioRingBuffer(const PositionalAudioRingBuffer&);
-    PositionalAudioRingBuffer& operator= (const PositionalAudioRingBuffer&);
+    // disallow copying of PositionalAudioStream objects
+    PositionalAudioStream(const PositionalAudioStream&);
+    PositionalAudioStream& operator= (const PositionalAudioStream&);
     /// parses the info between the seq num and the audio data in the network packet and calculates
     /// how many audio samples this packet contains
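Taken together, the renames leave a hierarchy with InboundAudioStream at the base, PositionalAudioStream adding position/orientation and the Microphone/Injector type, and AvatarAudioStream and InjectedAudioStream as the two concrete stream kinds. The following is a condensed, hedged sketch of those relationships as they appear in this diff, not the actual headers: member lists are trimmed, the base class is reduced to a stub, and placeholder sample counts stand in for the NETWORK_BUFFER_LENGTH_* constants.

    // Condensed view of the renamed class hierarchy (simplified stand-in types).
    struct InboundAudioStream {
        InboundAudioStream(int /*numFrameSamples*/, int /*frameCapacity*/, bool /*dynamicJitterBuffers*/) {}
        virtual ~InboundAudioStream() {}
    };

    struct PositionalAudioStream : InboundAudioStream {
        enum Type { Microphone, Injector };
        PositionalAudioStream(Type type, bool isStereo, bool dynamicJitterBuffers)
            : InboundAudioStream(isStereo ? 2 * 512 : 512,  // placeholder frame sizes
                                 100,                        // AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY
                                 dynamicJitterBuffers),
              _type(type) {}
        Type getType() const { return _type; }
        Type _type;
    };

    // Mic audio from an avatar: type Microphone, keyed by the null stream id.
    struct AvatarAudioStream : PositionalAudioStream {
        AvatarAudioStream(bool isStereo = false, bool dynamicJitterBuffers = false)
            : PositionalAudioStream(Microphone, isStereo, dynamicJitterBuffers) {}
    };

    // Injector audio: type Injector, keyed by the injector's stream identifier.
    struct InjectedAudioStream : PositionalAudioStream {
        InjectedAudioStream(bool dynamicJitterBuffers = false)
            : PositionalAudioStream(Injector, false, dynamicJitterBuffers) {}
    };

    int main() {
        AvatarAudioStream mic;
        InjectedAudioStream injector;
        return (mic.getType() == PositionalAudioStream::Microphone &&
                injector.getType() == PositionalAudioStream::Injector) ? 0 : 1;
    }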