Mirror of https://github.com/lubosz/overte.git (synced 2025-04-23 01:04:06 +02:00)
renamed audioringbuffer classes to audiostream

commit 32dbc6cbdb (parent 9bbd055404)

12 changed files with 81 additions and 81 deletions
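For orientation, the renames in this commit map PositionalAudioRingBuffer to PositionalAudioStream, AvatarAudioRingBuffer to AvatarAudioStream, and InjectedAudioRingBuffer to InjectedAudioStream, with InboundAudioStream staying as the common base. The sketch below condenses the resulting hierarchy from the headers touched in this diff; it is illustrative only (declarations abbreviated, member lists trimmed), and InboundAudioStream, QUuid, and the include path are assumed to come from the existing libraries/audio sources rather than being defined here.

// Condensed, illustrative sketch of the hierarchy after the rename (not verbatim).
#include <QtCore/QUuid>
#include "InboundAudioStream.h"   // existing base class, unchanged by this commit

class PositionalAudioStream : public InboundAudioStream {  // was PositionalAudioRingBuffer
public:
    enum Type { Microphone, Injector };

    PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo = false,
                          bool dynamicJitterBuffers = false);

    PositionalAudioStream::Type getType() const { return _type; }

protected:
    Type _type;
};

class AvatarAudioStream : public PositionalAudioStream {   // was AvatarAudioRingBuffer
public:
    AvatarAudioStream(bool isStereo = false, bool dynamicJitterBuffer = false);
};

class InjectedAudioStream : public PositionalAudioStream { // was InjectedAudioRingBuffer
public:
    InjectedAudioStream(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false);

    float getRadius() const { return _radius; }
    float getAttenuationRatio() const { return _attenuationRatio; }

private:
    QUuid _streamIdentifier;
    float _radius;
    float _attenuationRatio;
};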
@@ -52,8 +52,8 @@
 #include "AudioRingBuffer.h"
 #include "AudioMixerClientData.h"
-#include "AvatarAudioRingBuffer.h"
-#include "InjectedAudioRingBuffer.h"
+#include "AvatarAudioStream.h"
+#include "InjectedAudioStream.h"
 
 #include "AudioMixer.h"
 

@@ -93,8 +93,8 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
 const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
 const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
 
-void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
-                                                          AvatarAudioRingBuffer* listeningNodeBuffer) {
+void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd,
+                                                          AvatarAudioStream* listeningNodeBuffer) {
     float bearingRelativeAngleToSource = 0.0f;
     float attenuationCoefficient = 1.0f;
     int numSamplesDelay = 0;

@@ -125,8 +125,8 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
        shouldAttenuate = !bufferToAdd->getListenerUnattenuatedZone()->contains(listeningNodeBuffer->getPosition());
    }
 
-   if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) {
-       attenuationCoefficient *= reinterpret_cast<InjectedAudioRingBuffer*>(bufferToAdd)->getAttenuationRatio();
+   if (bufferToAdd->getType() == PositionalAudioStream::Injector) {
+       attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(bufferToAdd)->getAttenuationRatio();
    }
 
    shouldAttenuate = shouldAttenuate && distanceBetween > ATTENUATION_EPSILON_DISTANCE;

@@ -137,8 +137,8 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
        float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
        float radius = 0.0f;
 
-       if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) {
-           radius = reinterpret_cast<InjectedAudioRingBuffer*>(bufferToAdd)->getRadius();
+       if (bufferToAdd->getType() == PositionalAudioStream::Injector) {
+           radius = reinterpret_cast<InjectedAudioStream*>(bufferToAdd)->getRadius();
        }
 
        if (radius == 0 || (distanceSquareToSource > radius * radius)) {

@@ -265,7 +265,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
 }
 
 void AudioMixer::prepareMixForListeningNode(Node* node) {
-    AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();
+    AvatarAudioStream* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();
 
     // zero out the client mix for this node
     memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);

@@ -278,10 +278,10 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
 
            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
 
-           const QHash<QUuid, PositionalAudioRingBuffer*>& otherNodeRingBuffers = otherNodeClientData->getRingBuffers();
-           QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator i;
+           const QHash<QUuid, PositionalAudioStream*>& otherNodeRingBuffers = otherNodeClientData->getRingBuffers();
+           QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
            for (i = otherNodeRingBuffers.begin(); i != otherNodeRingBuffers.constEnd(); i++) {
-               PositionalAudioRingBuffer* otherNodeBuffer = i.value();
+               PositionalAudioStream* otherNodeBuffer = i.value();
 
                if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode())
                    && otherNodeBuffer->lastPopSucceeded()

@@ -16,8 +16,8 @@
 #include <AudioRingBuffer.h>
 #include <ThreadedAssignment.h>
 
-class PositionalAudioRingBuffer;
-class AvatarAudioRingBuffer;
+class PositionalAudioStream;
+class AvatarAudioStream;
 
 const int SAMPLE_PHASE_DELAY_AT_90 = 20;
 

@@ -41,8 +41,8 @@ public slots:
 
 private:
     /// adds one buffer to the mix for a listening node
-    void addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
-                                                  AvatarAudioRingBuffer* listeningNodeBuffer);
+    void addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd,
+                                                  AvatarAudioStream* listeningNodeBuffer);
 
     /// prepares and sends a mix to one Node
     void prepareMixForListeningNode(Node* node);

@@ -14,7 +14,7 @@
 #include <PacketHeaders.h>
 #include <UUID.h>
 
-#include "InjectedAudioRingBuffer.h"
+#include "InjectedAudioStream.h"
 
 #include "AudioMixer.h"
 #include "AudioMixerClientData.h"

@@ -27,16 +27,16 @@ AudioMixerClientData::AudioMixerClientData() :
 }
 
 AudioMixerClientData::~AudioMixerClientData() {
-    QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator i;
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
        // delete this attached InboundAudioStream
        delete i.value();
     }
 }
 
-AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const {
+AvatarAudioStream* AudioMixerClientData::getAvatarAudioRingBuffer() const {
     if (_ringBuffers.contains(QUuid())) {
-        return (AvatarAudioRingBuffer*)_ringBuffers.value(QUuid());
+        return (AvatarAudioStream*)_ringBuffers.value(QUuid());
     }
     // no mic stream found - return NULL
     return NULL;

@@ -58,7 +58,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
        return dataAt - packet.data();
 
    } else {
-       PositionalAudioRingBuffer* matchingStream = NULL;
+       PositionalAudioStream* matchingStream = NULL;
 
        if (packetType == PacketTypeMicrophoneAudioWithEcho
            || packetType == PacketTypeMicrophoneAudioNoEcho

@@ -74,7 +74,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
                bool isStereo = channelFlag == 1;
 
                _ringBuffers.insert(nullUUID,
-                   matchingStream = new AvatarAudioRingBuffer(isStereo, AudioMixer::getUseDynamicJitterBuffers()));
+                   matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers()));
            } else {
                matchingStream = _ringBuffers.value(nullUUID);
            }

@@ -87,7 +87,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
 
            if (!_ringBuffers.contains(streamIdentifier)) {
                _ringBuffers.insert(streamIdentifier,
-                   matchingStream = new InjectedAudioRingBuffer(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers()));
+                   matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers()));
            } else {
                matchingStream = _ringBuffers.value(streamIdentifier);
            }

@@ -99,7 +99,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
 }
 
 void AudioMixerClientData::audioStreamsPopFrameForMixing() {
-    QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator i;
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
        i.value()->popFrames(1);
     }

@@ -113,10 +113,10 @@ void AudioMixerClientData::removeDeadInjectedStreams() {
     // never even reaches its desired size, which means it will never start.
     const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000;
 
-    QHash<QUuid, PositionalAudioRingBuffer*>::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end();
+    QHash<QUuid, PositionalAudioStream*>::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end();
     while (i != end) {
-        PositionalAudioRingBuffer* audioStream = i.value();
-        if (audioStream->getType() == PositionalAudioRingBuffer::Injector && audioStream->isStarved()) {
+        PositionalAudioStream* audioStream = i.value();
+        if (audioStream->getType() == PositionalAudioStream::Injector && audioStream->isStarved()) {
            int notMixedThreshold = audioStream->hasStarted() ? INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD
                                                              : INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD;
            if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) {

@@ -152,7 +152,7 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
 
     // pack and send stream stats packets until all ring buffers' stats are sent
     int numStreamStatsRemaining = _ringBuffers.size();
-    QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator ringBuffersIterator = _ringBuffers.constBegin();
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator ringBuffersIterator = _ringBuffers.constBegin();
     while (numStreamStatsRemaining > 0) {
 
        char* dataAt = headerEndAt;

@@ -201,7 +201,7 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
            + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
            + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
 
-    AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
+    AvatarAudioStream* avatarRingBuffer = getAvatarAudioRingBuffer();
     if (avatarRingBuffer) {
        AudioStreamStats streamStats = avatarRingBuffer->getAudioStreamStats();
        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)

@@ -224,9 +224,9 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
        result = "mic unknown";
     }
 
-    QHash<QUuid, PositionalAudioRingBuffer*>::ConstIterator i;
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
-        if (i.value()->getType() == PositionalAudioRingBuffer::Injector) {
+        if (i.value()->getType() == PositionalAudioStream::Injector) {
            AudioStreamStats streamStats = i.value()->getAudioStreamStats();
            result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
                + " desired_calc:" + QString::number(i.value()->getCalculatedJitterBufferFrames())

@@ -14,16 +14,16 @@
 
 #include <AABox.h>
 
-#include "PositionalAudioRingBuffer.h"
-#include "AvatarAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
+#include "AvatarAudioStream.h"
 
 class AudioMixerClientData : public NodeData {
 public:
     AudioMixerClientData();
     ~AudioMixerClientData();
 
-    const QHash<QUuid, PositionalAudioRingBuffer*>& getRingBuffers() const { return _ringBuffers; }
-    AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
+    const QHash<QUuid, PositionalAudioStream*>& getRingBuffers() const { return _ringBuffers; }
+    AvatarAudioStream* getAvatarAudioRingBuffer() const;
 
     int parseData(const QByteArray& packet);
 

@@ -39,7 +39,7 @@ public:
     quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }
 
 private:
-    QHash<QUuid, PositionalAudioRingBuffer*> _ringBuffers; // mic stream stored under key of null UUID
+    QHash<QUuid, PositionalAudioStream*> _ringBuffers; // mic stream stored under key of null UUID
 
     quint16 _outgoingMixedAudioSequenceNumber;
 

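As the comment on _ringBuffers above notes, the mic stream is stored under the null UUID while each injected stream is keyed by the identifier its injector supplies. A minimal usage sketch of that convention follows; the helper function and setup values are hypothetical, and only the constructors and getters declared elsewhere in this diff are assumed.

#include <QtCore/QHash>
#include <QtCore/QUuid>

#include "AvatarAudioStream.h"
#include "InjectedAudioStream.h"

// Hypothetical helper illustrating the _ringBuffers keying convention.
void ringBufferKeyingSketch() {
    QHash<QUuid, PositionalAudioStream*> ringBuffers;

    // The avatar's mic stream lives under the null UUID.
    ringBuffers.insert(QUuid(), new AvatarAudioStream(false /* isStereo */, false /* dynamicJitterBuffer */));

    // Each injected stream lives under its own stream identifier.
    QUuid streamIdentifier = QUuid::createUuid();  // hypothetical identifier
    ringBuffers.insert(streamIdentifier, new InjectedAudioStream(streamIdentifier, false /* dynamicJitterBuffer */));

    // Lookup mirrors getAvatarAudioRingBuffer(): null UUID -> mic stream, NULL if absent.
    AvatarAudioStream* micStream =
        ringBuffers.contains(QUuid()) ? (AvatarAudioStream*)ringBuffers.value(QUuid()) : NULL;
    Q_UNUSED(micStream);

    qDeleteAll(ringBuffers);  // the real destructor similarly deletes each attached stream
}
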
@@ -1,5 +1,5 @@
 //
-// AvatarAudioRingBuffer.cpp
+// AvatarAudioStream.cpp
 // assignment-client/src/audio
 //
 // Created by Stephen Birarda on 6/5/13.

@@ -11,14 +11,14 @@
 
 #include <PacketHeaders.h>
 
-#include "AvatarAudioRingBuffer.h"
+#include "AvatarAudioStream.h"
 
-AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBuffer) :
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo, dynamicJitterBuffer)
+AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer) :
+    PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer)
 {
 }
 
-int AvatarAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
 
     _shouldLoopbackForNode = (type == PacketTypeMicrophoneAudioWithEcho);
 

@@ -51,7 +51,7 @@ int AvatarAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArr
     return readBytes;
 }
 
-int AvatarAudioRingBuffer::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+int AvatarAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
     int readBytes = 0;
     if (type == PacketTypeSilentAudioFrame) {
        writeDroppableSilentSamples(numAudioSamples);

@@ -1,5 +1,5 @@
 //
-// AvatarAudioRingBuffer.h
+// AvatarAudioStream.h
 // assignment-client/src/audio
 //
 // Created by Stephen Birarda on 6/5/13.

@@ -14,16 +14,16 @@
 
 #include <QtCore/QUuid>
 
-#include "PositionalAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
 
-class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
+class AvatarAudioStream : public PositionalAudioStream {
 public:
-    AvatarAudioRingBuffer(bool isStereo = false, bool dynamicJitterBuffer = false);
+    AvatarAudioStream(bool isStereo = false, bool dynamicJitterBuffer = false);
 
 private:
-    // disallow copying of AvatarAudioRingBuffer objects
-    AvatarAudioRingBuffer(const AvatarAudioRingBuffer&);
-    AvatarAudioRingBuffer& operator= (const AvatarAudioRingBuffer&);
+    // disallow copying of AvatarAudioStream objects
+    AvatarAudioStream(const AvatarAudioStream&);
+    AvatarAudioStream& operator= (const AvatarAudioStream&);
 
     int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
     int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);

@@ -42,7 +42,7 @@
 #include "Audio.h"
 #include "Menu.h"
 #include "Util.h"
-#include "PositionalAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
 
 static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
 

@@ -746,7 +746,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
        memcpy(&streamStats, dataAt, sizeof(AudioStreamStats));
        dataAt += sizeof(AudioStreamStats);
 
-       if (streamStats._streamType == PositionalAudioRingBuffer::Microphone) {
+       if (streamStats._streamType == PositionalAudioStream::Microphone) {
            _audioMixerAvatarStreamAudioStats = streamStats;
        } else {
            _audioMixerInjectedStreamAudioStatsMap[streamStats._streamIdentifier] = streamStats;

@@ -120,7 +120,7 @@ protected:
     bool _lastPopSucceeded;
     AudioRingBuffer::ConstIterator _lastPopOutput;
 
-    bool _dynamicJitterBuffers;
+    const bool _dynamicJitterBuffers;
     bool _useStDevForJitterCalc;
 
     int _calculatedJitterBufferFramesUsingMaxGap;

@@ -1,5 +1,5 @@
 //
-// InjectedAudioRingBuffer.cpp
+// InjectedAudioStream.cpp
 // libraries/audio/src
 //
 // Created by Stephen Birarda on 6/5/13.

@@ -17,10 +17,10 @@
 #include <PacketHeaders.h>
 #include <UUID.h>
 
-#include "InjectedAudioRingBuffer.h"
+#include "InjectedAudioStream.h"
 
-InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier, bool dynamicJitterBuffer) :
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Injector, false, dynamicJitterBuffer),
+InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer) :
+    PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer),
     _streamIdentifier(streamIdentifier),
     _radius(0.0f),
     _attenuationRatio(0)

@@ -30,7 +30,7 @@ InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier,
 
 const uchar MAX_INJECTOR_VOLUME = 255;
 
-int InjectedAudioRingBuffer::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+int InjectedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
     // setup a data stream to read from this packet
     QDataStream packetStream(packetAfterSeqNum);
 

@@ -58,12 +58,12 @@ int InjectedAudioRingBuffer::parseStreamProperties(PacketType type, const QByteA
     return packetStream.device()->pos();
 }
 
-int InjectedAudioRingBuffer::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+int InjectedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
     return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
 }
 
-AudioStreamStats InjectedAudioRingBuffer::getAudioStreamStats() const {
-    AudioStreamStats streamStats = PositionalAudioRingBuffer::getAudioStreamStats();
+AudioStreamStats InjectedAudioStream::getAudioStreamStats() const {
+    AudioStreamStats streamStats = PositionalAudioStream::getAudioStreamStats();
     streamStats._streamIdentifier = _streamIdentifier;
     return streamStats;
 }

@@ -1,5 +1,5 @@
 //
-// InjectedAudioRingBuffer.h
+// InjectedAudioStream.h
 // libraries/audio/src
 //
 // Created by Stephen Birarda on 6/5/13.

@@ -14,11 +14,11 @@
 
 #include <QtCore/QUuid>
 
-#include "PositionalAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
 
-class InjectedAudioRingBuffer : public PositionalAudioRingBuffer {
+class InjectedAudioStream : public PositionalAudioStream {
 public:
-    InjectedAudioRingBuffer(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false);
+    InjectedAudioStream(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false);
 
     float getRadius() const { return _radius; }
     float getAttenuationRatio() const { return _attenuationRatio; }

@@ -26,9 +26,9 @@ public:
     QUuid getStreamIdentifier() const { return _streamIdentifier; }
 
 private:
-    // disallow copying of InjectedAudioRingBuffer objects
-    InjectedAudioRingBuffer(const InjectedAudioRingBuffer&);
-    InjectedAudioRingBuffer& operator= (const InjectedAudioRingBuffer&);
+    // disallow copying of InjectedAudioStream objects
+    InjectedAudioStream(const InjectedAudioStream&);
+    InjectedAudioStream& operator= (const InjectedAudioStream&);
 
     AudioStreamStats getAudioStreamStats() const;
     int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);

@@ -1,5 +1,5 @@
 //
-// PositionalAudioRingBuffer.cpp
+// PositionalAudioStream.cpp
 // libraries/audio/src
 //
 // Created by Stephen Birarda on 6/5/13.

@@ -9,7 +9,7 @@
 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
 
-#include "PositionalAudioRingBuffer.h"
+#include "PositionalAudioStream.h"
 #include "SharedUtil.h"
 
 #include <cstring>

@@ -21,7 +21,7 @@
 #include <PacketHeaders.h>
 #include <UUID.h>
 
-PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo, bool dynamicJitterBuffers) :
+PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers) :
     InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
        AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers),
     _type(type),

@@ -34,13 +34,13 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::
 {
 }
 
-int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
+int PositionalAudioStream::parseData(const QByteArray& packet) {
     int bytesRead = InboundAudioStream::parseData(packet);
     updateNextOutputTrailingLoudness();
     return bytesRead;
 }
 
-void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() {
+void PositionalAudioStream::updateNextOutputTrailingLoudness() {
     float nextLoudness = _ringBuffer.getNextOutputFrameLoudness();
 
     const int TRAILING_AVERAGE_FRAMES = 100;

@@ -59,7 +59,7 @@ void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() {
     }
 }
 
-int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalByteArray) {
+int PositionalAudioStream::parsePositionalData(const QByteArray& positionalByteArray) {
     QDataStream packetStream(positionalByteArray);
 
     packetStream.readRawData(reinterpret_cast<char*>(&_position), sizeof(_position));

@@ -75,7 +75,7 @@ int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalB
     return packetStream.device()->pos();
 }
 
-AudioStreamStats PositionalAudioRingBuffer::getAudioStreamStats() const {
+AudioStreamStats PositionalAudioStream::getAudioStreamStats() const {
     AudioStreamStats streamStats = InboundAudioStream::getAudioStreamStats();
     streamStats._streamType = _type;
     return streamStats;

@@ -1,5 +1,5 @@
 //
-// PositionalAudioRingBuffer.h
+// PositionalAudioStream.h
 // libraries/audio/src
 //
 // Created by Stephen Birarda on 6/5/13.

@@ -19,7 +19,7 @@
 
 const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
 
-class PositionalAudioRingBuffer : public InboundAudioStream {
+class PositionalAudioStream : public InboundAudioStream {
     Q_OBJECT
 public:
     enum Type {

@@ -27,7 +27,7 @@ public:
        Injector
     };
 
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);
+    PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);
 
     int parseData(const QByteArray& packet);
 

@@ -38,7 +38,7 @@ public:
 
     bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
     bool isStereo() const { return _isStereo; }
-    PositionalAudioRingBuffer::Type getType() const { return _type; }
+    PositionalAudioStream::Type getType() const { return _type; }
     const glm::vec3& getPosition() const { return _position; }
     const glm::quat& getOrientation() const { return _orientation; }
     AABox* getListenerUnattenuatedZone() const { return _listenerUnattenuatedZone; }

@@ -46,9 +46,9 @@ public:
     void setListenerUnattenuatedZone(AABox* listenerUnattenuatedZone) { _listenerUnattenuatedZone = listenerUnattenuatedZone; }
 
 protected:
-    // disallow copying of PositionalAudioRingBuffer objects
-    PositionalAudioRingBuffer(const PositionalAudioRingBuffer&);
-    PositionalAudioRingBuffer& operator= (const PositionalAudioRingBuffer&);
+    // disallow copying of PositionalAudioStream objects
+    PositionalAudioStream(const PositionalAudioStream&);
+    PositionalAudioStream& operator= (const PositionalAudioStream&);
 
     /// parses the info between the seq num and the audio data in the network packet and calculates
     /// how many audio samples this packet contains