renamed ringbuffer variables to audiostream

wangyix 2014-07-28 13:08:53 -07:00
parent 32dbc6cbdb
commit 6fc5c74c0b
19 changed files with 237 additions and 225 deletions

View file

@@ -36,7 +36,7 @@ Agent::Agent(const QByteArray& packet) :
     _voxelEditSender(),
     _particleEditSender(),
     _modelEditSender(),
-    _receivedAudioBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false),
+    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false),
     _avatarHashMap()
 {
     // be the parent of the script engine so it gets moved when we do
@@ -148,11 +148,11 @@ void Agent::readPendingDatagrams() {
     } else if (datagramPacketType == PacketTypeMixedAudio) {
-        _receivedAudioBuffer.parseData(receivedPacket);
+        _receivedAudioStream.parseData(receivedPacket);
-        _lastReceivedAudioLoudness = _receivedAudioBuffer.getNextOutputFrameLoudness();
+        _lastReceivedAudioLoudness = _receivedAudioStream.getNextOutputFrameLoudness();
-        _receivedAudioBuffer.clearBuffer();
+        _receivedAudioStream.clearBuffer();
         // let this continue through to the NodeList so it updates last heard timestamp
         // for the sending audio mixer
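
Note: the Agent never plays this audio; it parses each mixed packet only to expose a loudness value to scripts. A minimal sketch of that flow (hypothetical helper, assuming only the MixedAudioStream calls visible in the hunk above):

    // hypothetical helper, not part of the commit
    void handleMixedAudio(MixedAudioStream& receivedAudioStream,
                          float& lastReceivedAudioLoudness,
                          const QByteArray& receivedPacket) {
        receivedAudioStream.parseData(receivedPacket);            // buffer the new frame
        lastReceivedAudioLoudness = receivedAudioStream.getNextOutputFrameLoudness();
        receivedAudioStream.clearBuffer();                        // the samples are never played back
    }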

View file

@@ -30,7 +30,7 @@
 #include <VoxelEditPacketSender.h>
 #include <VoxelTreeHeadlessViewer.h>
-#include "InboundMixedAudioStream.h"
+#include "MixedAudioStream.h"
 class Agent : public ThreadedAssignment {
@@ -71,7 +71,7 @@ private:
     VoxelTreeHeadlessViewer _voxelViewer;
     ModelTreeHeadlessViewer _modelViewer;
-    InboundMixedAudioStream _receivedAudioBuffer;
+    MixedAudioStream _receivedAudioStream;
     float _lastReceivedAudioLoudness;
     AvatarHashMap _avatarHashMap;

View file

@@ -61,7 +61,7 @@ const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f;
 const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
-void attachNewBufferToNode(Node *newNode) {
+void attachNewNodeDataToNode(Node *newNode) {
     if (!newNode->getLinkedData()) {
         newNode->setLinkedData(new AudioMixerClientData());
     }
@@ -93,19 +93,19 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
 const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
 const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
-void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd,
-                                                          AvatarAudioStream* listeningNodeBuffer) {
+void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+                                                          AvatarAudioStream* listeningNodeStream) {
     float bearingRelativeAngleToSource = 0.0f;
     float attenuationCoefficient = 1.0f;
     int numSamplesDelay = 0;
     float weakChannelAmplitudeRatio = 1.0f;
-    bool shouldAttenuate = (bufferToAdd != listeningNodeBuffer);
+    bool shouldAttenuate = (streamToAdd != listeningNodeStream);
     if (shouldAttenuate) {
-        // if the two buffer pointers do not match then these are different buffers
-        glm::vec3 relativePosition = bufferToAdd->getPosition() - listeningNodeBuffer->getPosition();
+        // if the two stream pointers do not match then these are different streams
+        glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition();
         float distanceBetween = glm::length(relativePosition);
@@ -113,7 +113,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
         distanceBetween = EPSILON;
     }
-    if (bufferToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
+    if (streamToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
         // according to mixer performance we have decided this does not get to be mixed in
         // bail out
         return;
@@ -121,24 +121,24 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
     ++_sumMixes;
-    if (bufferToAdd->getListenerUnattenuatedZone()) {
-        shouldAttenuate = !bufferToAdd->getListenerUnattenuatedZone()->contains(listeningNodeBuffer->getPosition());
+    if (streamToAdd->getListenerUnattenuatedZone()) {
+        shouldAttenuate = !streamToAdd->getListenerUnattenuatedZone()->contains(listeningNodeStream->getPosition());
     }
-    if (bufferToAdd->getType() == PositionalAudioStream::Injector) {
-        attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(bufferToAdd)->getAttenuationRatio();
+    if (streamToAdd->getType() == PositionalAudioStream::Injector) {
+        attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getAttenuationRatio();
     }
     shouldAttenuate = shouldAttenuate && distanceBetween > ATTENUATION_EPSILON_DISTANCE;
     if (shouldAttenuate) {
-        glm::quat inverseOrientation = glm::inverse(listeningNodeBuffer->getOrientation());
+        glm::quat inverseOrientation = glm::inverse(listeningNodeStream->getOrientation());
         float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
         float radius = 0.0f;
-        if (bufferToAdd->getType() == PositionalAudioStream::Injector) {
-            radius = reinterpret_cast<InjectedAudioStream*>(bufferToAdd)->getRadius();
+        if (streamToAdd->getType() == PositionalAudioStream::Injector) {
+            radius = reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getRadius();
         }
         if (radius == 0 || (distanceSquareToSource > radius * radius)) {
@@ -154,7 +154,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
         } else {
             // calculate the angle delivery for off-axis attenuation
-            glm::vec3 rotatedListenerPosition = glm::inverse(bufferToAdd->getOrientation()) * relativePosition;
+            glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd->getOrientation()) * relativePosition;
             float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                                glm::normalize(rotatedListenerPosition));
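
Note: the two tests in the hunks above are worth isolating. A stream is skipped when its trailing loudness over distance falls under the mixer's audibility threshold, and off-axis attenuation keys off the angle between the source's forward axis (-z) and the listener direction in the source's local frame. A sketch under those assumptions (the helper names are hypothetical):

    #include <glm/glm.hpp>
    #include <glm/gtx/quaternion.hpp>
    #include <glm/gtx/vector_angle.hpp>

    bool isAudible(float trailingLoudness, float distanceBetween, float minAudibilityThreshold) {
        return trailingLoudness / distanceBetween > minAudibilityThreshold;
    }

    float angleOfDelivery(const glm::quat& sourceOrientation, const glm::vec3& relativePosition) {
        // rotate the listener's offset into the source's frame, then measure
        // its angle from the source's forward (-z) axis
        glm::vec3 rotatedListenerPosition = glm::inverse(sourceOrientation) * relativePosition;
        return glm::angle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedListenerPosition));
    }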
@@ -203,16 +203,16 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
         }
     }
-    AudioRingBuffer::ConstIterator bufferPopOutput = bufferToAdd->getLastPopOutput();
+    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();
-    if (!bufferToAdd->isStereo() && shouldAttenuate) {
-        // this is a mono buffer, which means it gets full attenuation and spatialization
+    if (!streamToAdd->isStereo() && shouldAttenuate) {
+        // this is a mono stream, which means it gets full attenuation and spatialization
         // if the bearing relative angle to source is > 0 then the delayed channel is the right one
         int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
         int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
-        int16_t correctBufferSample[2], delayBufferSample[2];
+        int16_t correctStreamSample[2], delayStreamSample[2];
         int delayedChannelIndex = 0;
         const int SINGLE_STEREO_OFFSET = 2;
@@ -220,52 +220,51 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream*
         for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
             // setup the int16_t variables for the two sample sets
-            correctBufferSample[0] = bufferPopOutput[s / 2] * attenuationCoefficient;
-            correctBufferSample[1] = bufferPopOutput[(s / 2) + 1] * attenuationCoefficient;
+            correctStreamSample[0] = streamPopOutput[s / 2] * attenuationCoefficient;
+            correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationCoefficient;
             delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
-            delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
-            delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
+            delayStreamSample[0] = correctStreamSample[0] * weakChannelAmplitudeRatio;
+            delayStreamSample[1] = correctStreamSample[1] * weakChannelAmplitudeRatio;
-            _clientSamples[s + goodChannelOffset] += correctBufferSample[0];
-            _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] += correctBufferSample[1];
-            _clientSamples[delayedChannelIndex] += delayBufferSample[0];
-            _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayBufferSample[1];
+            _clientSamples[s + goodChannelOffset] += correctStreamSample[0];
+            _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] += correctStreamSample[1];
+            _clientSamples[delayedChannelIndex] += delayStreamSample[0];
+            _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayStreamSample[1];
         }
         if (numSamplesDelay > 0) {
-            // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
+            // if there was a sample delay for this stream, we need to pull samples prior to the popped output
            // to stick at the beginning
            float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
-            AudioRingBuffer::ConstIterator delayBufferPopOutput = bufferPopOutput - numSamplesDelay;
+            AudioRingBuffer::ConstIterator delayStreamPopOutput = streamPopOutput - numSamplesDelay;
-            // TODO: delayBufferPopOutput may be inside the last frame written if the ringbuffer is completely full
+            // TODO: delayStreamPopOutput may be inside the last frame written if the ringbuffer is completely full
            // maybe make AudioRingBuffer have 1 extra frame in its buffer
            for (int i = 0; i < numSamplesDelay; i++) {
                int parentIndex = i * 2;
-                _clientSamples[parentIndex + delayedChannelOffset] += *delayBufferPopOutput * attenuationAndWeakChannelRatio;
-                ++delayBufferPopOutput;
+                _clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatio;
+                ++delayStreamPopOutput;
            }
        }
    } else {
-        int stereoDivider = bufferToAdd->isStereo() ? 1 : 2;
+        int stereoDivider = streamToAdd->isStereo() ? 1 : 2;
        if (!shouldAttenuate) {
            attenuationCoefficient = 1.0f;
        }
        for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) {
-            _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(bufferPopOutput[s / stereoDivider] * attenuationCoefficient),
+            _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationCoefficient),
                                            MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
        }
    }
 }
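
Note: the mono branch above implements a simple pan-plus-delay spatializer. Each mono sample is written at full attenuation into the near channel and, numSamplesDelay sample pairs later, at reduced amplitude into the far channel. Distilled to one sample pair (hypothetical free function; the real loop works in place on the packed stereo _clientSamples mix):

    #include <cstdint>

    void spatializeMonoSample(int16_t* stereoMix, int pairIndex, int16_t monoSample,
                              float attenuation, float weakChannelAmplitudeRatio,
                              int numSamplesDelay, bool delayRightChannel) {
        int delayedChannelOffset = delayRightChannel ? 1 : 0;   // bearing > 0 delays the right ear
        int goodChannelOffset = 1 - delayedChannelOffset;
        int16_t attenuated = monoSample * attenuation;
        stereoMix[2 * pairIndex + goodChannelOffset] += attenuated;
        stereoMix[2 * (pairIndex + numSamplesDelay) + delayedChannelOffset] +=
            attenuated * weakChannelAmplitudeRatio;
    }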
 void AudioMixer::prepareMixForListeningNode(Node* node) {
-    AvatarAudioStream* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();
+    AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
     // zero out the client mix for this node
     memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
@@ -278,16 +277,16 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
             // enumerate the ARBs attached to the otherNode and add all that should be added to mix
-            const QHash<QUuid, PositionalAudioStream*>& otherNodeRingBuffers = otherNodeClientData->getRingBuffers();
+            const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
             QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-            for (i = otherNodeRingBuffers.begin(); i != otherNodeRingBuffers.constEnd(); i++) {
-                PositionalAudioStream* otherNodeBuffer = i.value();
+            for (i = otherNodeAudioStreams.begin(); i != otherNodeAudioStreams.constEnd(); i++) {
+                PositionalAudioStream* otherNodeStream = i.value();
-                if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode())
-                    && otherNodeBuffer->lastPopSucceeded()
-                    && otherNodeBuffer->getNextOutputTrailingLoudness() > 0.0f) {
+                if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode())
+                    && otherNodeStream->lastPopSucceeded()
+                    && otherNodeStream->getNextOutputTrailingLoudness() > 0.0f) {
-                    addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
+                    addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
                 }
             }
         }
@@ -392,7 +391,7 @@ void AudioMixer::run() {
     nodeList->addNodeTypeToInterestSet(NodeType::Agent);
-    nodeList->linkedDataCreateCallback = attachNewBufferToNode;
+    nodeList->linkedDataCreateCallback = attachNewNodeDataToNode;
     // setup a NetworkAccessManager to ask the domain-server for our settings
     NetworkAccessManager& networkManager = NetworkAccessManager::getInstance();
@@ -554,7 +553,7 @@
             nodeData->audioStreamsPopFrameForMixing();
             if (node->getType() == NodeType::Agent
-                && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioRingBuffer()) {
+                && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioStream()) {
                 prepareMixForListeningNode(node.data());
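
Note: the eligibility test in prepareMixForListeningNode reads more clearly as a standalone predicate; a sketch (hypothetical helper):

    bool shouldMixStream(const PositionalAudioStream& stream, bool isListenersOwnStream) {
        // a listener's own stream is skipped unless it asks for loopback;
        // streams whose last frame pop failed, or that are currently silent, are skipped too
        return (!isListenersOwnStream || stream.shouldLoopbackForNode())
            && stream.lastPopSucceeded()
            && stream.getNextOutputTrailingLoudness() > 0.0f;
    }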

View file

@@ -40,9 +40,9 @@ public slots:
     static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; }
 private:
-    /// adds one buffer to the mix for a listening node
-    void addBufferToMixForListeningNodeWithBuffer(PositionalAudioStream* bufferToAdd,
-                                                  AvatarAudioStream* listeningNodeBuffer);
+    /// adds one stream to the mix for a listening node
+    void addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+                                                  AvatarAudioStream* listeningNodeStream);
     /// prepares and sends a mix to one Node
     void prepareMixForListeningNode(Node* node);

View file

@@ -21,22 +21,22 @@
 AudioMixerClientData::AudioMixerClientData() :
-    _ringBuffers(),
+    _audioStreams(),
     _outgoingMixedAudioSequenceNumber(0)
 {
 }
 AudioMixerClientData::~AudioMixerClientData() {
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-    for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
+    for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
         // delete this attached InboundAudioStream
         delete i.value();
     }
 }
-AvatarAudioStream* AudioMixerClientData::getAvatarAudioRingBuffer() const {
-    if (_ringBuffers.contains(QUuid())) {
-        return (AvatarAudioStream*)_ringBuffers.value(QUuid());
+AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() const {
+    if (_audioStreams.contains(QUuid())) {
+        return (AvatarAudioStream*)_audioStreams.value(QUuid());
     }
     // no mic stream found - return NULL
     return NULL;
@@ -65,7 +65,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         || packetType == PacketTypeSilentAudioFrame) {
         QUuid nullUUID = QUuid();
-        if (!_ringBuffers.contains(nullUUID)) {
+        if (!_audioStreams.contains(nullUUID)) {
             // we don't have a mic stream yet, so add it
             // read the channel flag to see if our stream is stereo or not
@@ -73,10 +73,10 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
             quint8 channelFlag = *(reinterpret_cast<const quint8*>(channelFlagAt));
             bool isStereo = channelFlag == 1;
-            _ringBuffers.insert(nullUUID,
+            _audioStreams.insert(nullUUID,
                 matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers()));
         } else {
-            matchingStream = _ringBuffers.value(nullUUID);
+            matchingStream = _audioStreams.value(nullUUID);
         }
     } else if (packetType == PacketTypeInjectAudio) {
         // this is injected audio
@@ -85,11 +85,11 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         int bytesBeforeStreamIdentifier = numBytesForPacketHeader(packet) + sizeof(quint16);
         QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(bytesBeforeStreamIdentifier, NUM_BYTES_RFC4122_UUID));
-        if (!_ringBuffers.contains(streamIdentifier)) {
-            _ringBuffers.insert(streamIdentifier,
+        if (!_audioStreams.contains(streamIdentifier)) {
+            _audioStreams.insert(streamIdentifier,
                 matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers()));
         } else {
-            matchingStream = _ringBuffers.value(streamIdentifier);
+            matchingStream = _audioStreams.value(streamIdentifier);
         }
     }
@@ -100,7 +100,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
 void AudioMixerClientData::audioStreamsPopFrameForMixing() {
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-    for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
+    for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
         i.value()->popFrames(1);
     }
 }
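
Note: as the hunks above show, every stream for a node now lives in a single _audioStreams hash. The avatar's mic stream is keyed by the null UUID; each injected stream is keyed by its stream identifier. A lookup sketch under that convention (hypothetical helper):

    #include <QHash>
    #include <QUuid>

    PositionalAudioStream* lookupStream(const QHash<QUuid, PositionalAudioStream*>& audioStreams,
                                        const QUuid& streamIdentifier = QUuid()) {
        // the default argument QUuid() is the null UUID, i.e. the mic stream
        return audioStreams.value(streamIdentifier, NULL);
    }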
@@ -109,11 +109,11 @@ void AudioMixerClientData::removeDeadInjectedStreams() {
     const int INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD = 100;
-    // we have this second threshold in case the injected audio is so short that the ringbuffer
+    // we have this second threshold in case the injected audio is so short that the injected stream
     // never even reaches its desired size, which means it will never start.
     const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000;
-    QHash<QUuid, PositionalAudioStream*>::Iterator i = _ringBuffers.begin(), end = _ringBuffers.end();
+    QHash<QUuid, PositionalAudioStream*>::Iterator i = _audioStreams.begin(), end = _audioStreams.end();
     while (i != end) {
         PositionalAudioStream* audioStream = i.value();
         if (audioStream->getType() == PositionalAudioStream::Injector && audioStream->isStarved()) {
@@ -121,7 +121,7 @@ void AudioMixerClientData::removeDeadInjectedStreams() {
                 : INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD;
             if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) {
                 delete audioStream;
-                i = _ringBuffers.erase(i);
+                i = _audioStreams.erase(i);
                 continue;
             }
         }
@@ -150,9 +150,9 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
     // calculate how many stream stat structs we can fit in each packet
     const int numStreamStatsRoomFor = (MAX_PACKET_SIZE - numBytesPacketHeader - sizeof(quint8) - sizeof(quint16)) / sizeof(AudioStreamStats);
-    // pack and send stream stats packets until all ring buffers' stats are sent
-    int numStreamStatsRemaining = _ringBuffers.size();
-    QHash<QUuid, PositionalAudioStream*>::ConstIterator ringBuffersIterator = _ringBuffers.constBegin();
+    // pack and send stream stats packets until all audio streams' stats are sent
+    int numStreamStatsRemaining = _audioStreams.size();
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator audioStreamsIterator = _audioStreams.constBegin();
     while (numStreamStatsRemaining > 0) {
         char* dataAt = headerEndAt;
@@ -169,11 +169,11 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
         // pack the calculated number of stream stats
         for (int i = 0; i < numStreamStatsToPack; i++) {
-            AudioStreamStats streamStats = ringBuffersIterator.value()->updateSeqHistoryAndGetAudioStreamStats();
+            AudioStreamStats streamStats = audioStreamsIterator.value()->updateSeqHistoryAndGetAudioStreamStats();
             memcpy(dataAt, &streamStats, sizeof(AudioStreamStats));
             dataAt += sizeof(AudioStreamStats);
-            ringBuffersIterator++;
+            audioStreamsIterator++;
         }
         numStreamStatsRemaining -= numStreamStatsToPack;
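
Note: the packing arithmetic above budgets each stats packet as the packet header plus one quint8 and one quint16 of per-packet fields, with the remaining space divided among fixed-size AudioStreamStats structs. As a standalone function (hypothetical; assumes the surrounding Qt types):

    int streamStatsPerPacket(int maxPacketSize, int numBytesPacketHeader) {
        // whatever fits after the header and the two small per-packet fields
        return (maxPacketSize - numBytesPacketHeader
                - (int)sizeof(quint8) - (int)sizeof(quint16)) / (int)sizeof(AudioStreamStats);
    }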
@@ -185,12 +185,12 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
 QString AudioMixerClientData::getAudioStreamStatsString() const {
     QString result;
     AudioStreamStats streamStats = _downstreamAudioStreamStats;
-    result += "DOWNSTREAM.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
-        + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage)
-        + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
-        + " starves:" + QString::number(streamStats._ringBufferStarveCount)
-        + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
-        + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
+    result += "DOWNSTREAM.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
+        + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
+        + " available:" + QString::number(streamStats._framesAvailable)
+        + " starves:" + QString::number(streamStats._starveCount)
+        + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
+        + " overflows:" + QString::number(streamStats._overflowCount)
         + " silents_dropped: ?"
         + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
         + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
@@ -201,17 +201,17 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
         + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
         + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
-    AvatarAudioStream* avatarRingBuffer = getAvatarAudioRingBuffer();
-    if (avatarRingBuffer) {
-        AudioStreamStats streamStats = avatarRingBuffer->getAudioStreamStats();
-        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
-            + " desired_calc:" + QString::number(avatarRingBuffer->getCalculatedJitterBufferFrames())
-            + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage)
-            + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
-            + " starves:" + QString::number(streamStats._ringBufferStarveCount)
-            + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
-            + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
-            + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
+    AvatarAudioStream* avatarAudioStream = getAvatarAudioStream();
+    if (avatarAudioStream) {
+        AudioStreamStats streamStats = avatarAudioStream->getAudioStreamStats();
+        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
+            + " desired_calc:" + QString::number(avatarAudioStream->getCalculatedJitterBufferFrames())
+            + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
+            + " available:" + QString::number(streamStats._framesAvailable)
+            + " starves:" + QString::number(streamStats._starveCount)
+            + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
+            + " overflows:" + QString::number(streamStats._overflowCount)
+            + " silents_dropped:" + QString::number(streamStats._silentFramesDropped)
             + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
             + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
             + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
@@ -225,17 +225,17 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
     }
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-    for (i = _ringBuffers.constBegin(); i != _ringBuffers.constEnd(); i++) {
+    for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
         if (i.value()->getType() == PositionalAudioStream::Injector) {
             AudioStreamStats streamStats = i.value()->getAudioStreamStats();
-            result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
+            result += " UPSTREAM.inj.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
                 + " desired_calc:" + QString::number(i.value()->getCalculatedJitterBufferFrames())
-                + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage)
-                + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
-                + " starves:" + QString::number(streamStats._ringBufferStarveCount)
-                + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
-                + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
-                + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
+                + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
+                + " available:" + QString::number(streamStats._framesAvailable)
+                + " starves:" + QString::number(streamStats._starveCount)
+                + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
+                + " overflows:" + QString::number(streamStats._overflowCount)
+                + " silents_dropped:" + QString::number(streamStats._silentFramesDropped)
                 + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
                 + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
                 + " min_gap:" + formatUsecTime(streamStats._timeGapMin)

View file

@@ -22,8 +22,8 @@ public:
     AudioMixerClientData();
     ~AudioMixerClientData();
-    const QHash<QUuid, PositionalAudioStream*>& getRingBuffers() const { return _ringBuffers; }
-    AvatarAudioStream* getAvatarAudioRingBuffer() const;
+    const QHash<QUuid, PositionalAudioStream*>& getAudioStreams() const { return _audioStreams; }
+    AvatarAudioStream* getAvatarAudioStream() const;
     int parseData(const QByteArray& packet);
@@ -39,7 +39,7 @@ public:
     quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }
 private:
-    QHash<QUuid, PositionalAudioStream*> _ringBuffers; // mic stream stored under key of null UUID
+    QHash<QUuid, PositionalAudioStream*> _audioStreams; // mic stream stored under key of null UUID
     quint16 _outgoingMixedAudioSequenceNumber;

View file

@@ -9,8 +9,8 @@
 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
-#ifndef hifi_AvatarAudioRingBuffer_h
-#define hifi_AvatarAudioRingBuffer_h
+#ifndef hifi_AvatarAudioStream_h
+#define hifi_AvatarAudioStream_h
 #include <QtCore/QUuid>
@@ -29,4 +29,4 @@ private:
     int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
 };
-#endif // hifi_AvatarAudioRingBuffer_h
+#endif // hifi_AvatarAudioStream_h

View file

@@ -103,10 +103,6 @@ const int IDLE_SIMULATE_MSECS = 16; // How often should call simulate
                                     // in the idle loop? (60 FPS is default)
 static QTimer* idleTimer = NULL;
-const int STARTUP_JITTER_SAMPLES = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / 2;
-// Startup optimistically with small jitter buffer that
-// will start playback on the second received audio packet.
 const QString CHECK_VERSION_URL = "https://highfidelity.io/latestVersion.xml";
 const QString SKIP_FILENAME = QStandardPaths::writableLocation(QStandardPaths::DataLocation) + "/hifi.skipversion";
@@ -162,7 +158,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
     _touchAvgY(0.0f),
     _isTouchPressed(false),
     _mousePressed(false),
-    _audio(STARTUP_JITTER_SAMPLES),
+    _audio(),
     _enableProcessVoxelsThread(true),
     _octreeProcessor(),
     _voxelHideShowThread(&_voxels),
@@ -1712,8 +1708,8 @@ void Application::init() {
     _lastTimeUpdated.start();
     Menu::getInstance()->loadSettings();
-    if (Menu::getInstance()->getAudioJitterBufferSamples() != 0) {
-        _audio.setJitterBufferSamples(Menu::getInstance()->getAudioJitterBufferSamples());
+    if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
+        _audio.overrideDesiredJitterBufferFramesTo(Menu::getInstance()->getAudioJitterBufferFrames());
     }
     qDebug("Loaded settings");

View file

@@ -54,7 +54,7 @@ static const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10;
 static const int MUTE_ICON_SIZE = 24;
-Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
+Audio::Audio(QObject* parent) :
     AbstractAudioInterface(parent),
     _audioInput(NULL),
     _desiredInputFormat(),
@@ -76,14 +76,12 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     // this delay will slowly add up, and the longer someone runs, the more delayed their audio will be.
     _inputRingBuffer(0),
 #ifdef _WIN32
-    _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, true),
+    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, true),
 #else
-    _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!!
+    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 10, true, true), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!!
 #endif
     _isStereoInput(false),
     _averagedLatency(0.0),
-    _measuredJitter(0),
-    _jitterBufferSamples(initialJitterBufferSamples),
     _lastInputLoudness(0),
     _timeSinceLastClip(-1.0),
     _dcOffset(0),
@@ -132,13 +130,13 @@ void Audio::init(QGLWidget *parent) {
 }
 void Audio::reset() {
-    _ringBuffer.reset();
+    _receivedAudioStream.reset();
     resetStats();
 }
 void Audio::resetStats() {
-    _ringBuffer.resetStats();
+    _receivedAudioStream.resetStats();
     _audioMixerAvatarStreamAudioStats = AudioStreamStats();
     _audioMixerInjectedStreamAudioStatsMap.clear();
@@ -715,7 +713,7 @@ void Audio::handleAudioInput() {
     }
 }
-void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
+void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
     if (_audioOutput) {
         // Audio output must exist and be correctly set up if we're going to process received audio
         processReceivedAudio(audioByteArray);
@@ -755,7 +753,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
 }
 AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
-    return _ringBuffer.getAudioStreamStats();
+    return _receivedAudioStream.getAudioStreamStats();
 }
 void Audio::sendDownstreamAudioStatsPacket() {
@@ -783,7 +781,7 @@ void Audio::sendDownstreamAudioStatsPacket() {
     dataAt += sizeof(quint16);
     // pack downstream audio stream stats
-    AudioStreamStats stats = _ringBuffer.updateSeqHistoryAndGetAudioStreamStats();
+    AudioStreamStats stats = _receivedAudioStream.updateSeqHistoryAndGetAudioStreamStats();
     memcpy(dataAt, &stats, sizeof(AudioStreamStats));
     dataAt += sizeof(AudioStreamStats);
@@ -894,7 +892,7 @@ void Audio::toggleStereoInput() {
 void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
     // parse audio data
-    _ringBuffer.parseData(audioByteArray);
+    _receivedAudioStream.parseData(audioByteArray);
     pushAudioToOutput();
 }
@@ -904,7 +902,7 @@
     if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
         // the audio output has no samples to play. set the downstream audio to starved so that it
         // refills to its desired size before pushing frames
-        _ringBuffer.setToStarved();
+        _receivedAudioStream.setToStarved();
     }
     float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
@@ -912,16 +910,16 @@
     int numFramesToPush;
     if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) {
-        numFramesToPush = _ringBuffer.getFramesAvailable();
+        numFramesToPush = _receivedAudioStream.getFramesAvailable();
     } else {
         // make sure to push a whole number of frames to the audio output
-        int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _ringBuffer.getNumFrameSamples();
-        numFramesToPush = std::min(_ringBuffer.getFramesAvailable(), numFramesAudioOutputRoomFor);
+        int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _receivedAudioStream.getNumFrameSamples();
+        numFramesToPush = std::min(_receivedAudioStream.getFramesAvailable(), numFramesAudioOutputRoomFor);
     }
-    // if there is data in the ring buffer and room in the audio output, decide what to do
+    // if there is data in the received stream and room in the audio output, decide what to do
-    if (numFramesToPush > 0 && _ringBuffer.popFrames(numFramesToPush, false)) {
+    if (numFramesToPush > 0 && _receivedAudioStream.popFrames(numFramesToPush, false)) {
         int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
         int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
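
Note: the computation above converts free device-buffer bytes into whole network-rate frames before popping. A sketch (hypothetical helper; the channel-count factor of networkOutputToOutputRatio sits outside this hunk):

    #include <algorithm>
    #include <cstdint>

    int framesToPush(int bytesFree, float networkOutputToOutputRatio,
                     int numFrameSamples, int framesAvailable) {
        // device bytes -> device samples -> network-rate samples -> whole frames
        int roomFor = (int)(bytesFree / sizeof(int16_t) * networkOutputToOutputRatio) / numFrameSamples;
        return std::min(framesAvailable, roomFor);
    }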
@@ -929,15 +927,15 @@
         QByteArray outputBuffer;
         outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
-        AudioRingBuffer::ConstIterator ringBufferPopOutput = _ringBuffer.getLastPopOutput();
+        AudioRingBuffer::ConstIterator receivedAudioStreamPopOutput = _receivedAudioStream.getLastPopOutput();
-        int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
+        int16_t* receivedSamples = new int16_t[numNetworkOutputSamples];
         if (_processSpatialAudio) {
             unsigned int sampleTime = _spatialAudioStart;
             QByteArray buffer;
             buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
-            ringBufferPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
+            receivedAudioStreamPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
             // Accumulate direct transmission of audio from sender to receiver
             if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
@@ -950,18 +948,18 @@
             // copy the samples we'll resample from the spatial audio ring buffer - this also
             // pushes the read pointer of the spatial audio ring buffer forwards
-            _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
+            _spatialAudioRingBuffer.readSamples(receivedSamples, numNetworkOutputSamples);
             // Advance the start point for the next packet of audio to arrive
             _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
         } else {
             // copy the samples we'll resample from the ring buffer - this also
             // pushes the read pointer of the ring buffer forwards
-            ringBufferPopOutput.readSamples(ringBufferSamples, numNetworkOutputSamples);
+            receivedAudioStreamPopOutput.readSamples(receivedSamples, numNetworkOutputSamples);
         }
         // copy the packet from the RB to the output
-        linearResampling(ringBufferSamples,
+        linearResampling(receivedSamples,
                          (int16_t*)outputBuffer.data(),
                          numNetworkOutputSamples,
                          numDeviceOutputSamples,
@@ -973,7 +971,7 @@
         if (_scopeEnabled && !_scopeEnabledPause) {
             unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
-            int16_t* samples = ringBufferSamples;
+            int16_t* samples = receivedSamples;
             for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
                 unsigned int audioChannel = 0;
@@ -994,7 +992,7 @@
             }
         }
-        delete[] ringBufferSamples;
+        delete[] receivedSamples;
     }
 }
@@ -1332,14 +1330,14 @@ void Audio::renderStats(const float* color, int width, int height) {
     float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f;
-    AudioStreamStats downstreamAudioStreamStats = _ringBuffer.getAudioStreamStats();
+    AudioStreamStats downstreamAudioStreamStats = _receivedAudioStream.getAudioStreamStats();
     SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer);
     if (!audioMixerNodePointer.isNull()) {
         audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage();
         inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable();
         networkRoundtripLatency = audioMixerNodePointer->getPingMs();
-        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
-        outputRingBufferLatency = downstreamAudioStreamStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
+        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
+        outputRingBufferLatency = downstreamAudioStreamStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
         audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage();
     }
     float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency;
@@ -1427,26 +1425,26 @@ void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int hori
     const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
         sprintf(stringBuffer, "     Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d",
-            streamStats._ringBufferDesiredJitterBufferFrames,
-            streamStats._ringBufferFramesAvailableAverage,
+            streamStats._desiredJitterBufferFrames,
+            streamStats._framesAvailableAverage,
             (int)(getAudioOutputAverageMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS),
-            streamStats._ringBufferFramesAvailable,
+            streamStats._framesAvailable,
             (int)(getAudioOutputMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS));
     } else {
         sprintf(stringBuffer, "     Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u",
-            streamStats._ringBufferDesiredJitterBufferFrames,
-            streamStats._ringBufferFramesAvailableAverage,
-            streamStats._ringBufferFramesAvailable);
+            streamStats._desiredJitterBufferFrames,
+            streamStats._framesAvailableAverage,
+            streamStats._framesAvailable);
     }
     verticalOffset += STATS_HEIGHT_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
     sprintf(stringBuffer, "     Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u",
-        streamStats._ringBufferStarveCount,
-        streamStats._ringBufferConsecutiveNotMixedCount,
-        streamStats._ringBufferSilentFramesDropped,
-        streamStats._ringBufferOverflowCount);
+        streamStats._starveCount,
+        streamStats._consecutiveNotMixedCount,
+        streamStats._silentFramesDropped,
+        streamStats._overflowCount);
     verticalOffset += STATS_HEIGHT_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
@@ -1662,8 +1660,8 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
     // setup our general output device for audio-mixer audio
     _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
-    _audioOutput->setBufferSize(_ringBuffer.getFrameCapacity() * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS));
-    qDebug() << "Ring Buffer capacity in frames: " << _ringBuffer.getFrameCapacity();
+    _audioOutput->setBufferSize(_receivedAudioStream.getFrameCapacity() * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS));
+    qDebug() << "Ring Buffer capacity in frames: " << _receivedAudioStream.getFrameCapacity();
     _outputDevice = _audioOutput->start();
     // setup a loopback audio output device

View file

@@ -33,7 +33,7 @@
 #include <AbstractAudioInterface.h>
 #include <StdDev.h>
-#include "InboundMixedAudioStream.h"
+#include "MixedAudioStream.h"
 static const int NUM_AUDIO_CHANNELS = 2;
@@ -46,19 +46,19 @@ class Audio : public AbstractAudioInterface {
     Q_OBJECT
 public:
     // setup for audio I/O
-    Audio(int16_t initialJitterBufferSamples, QObject* parent = 0);
+    Audio(QObject* parent = 0);
     float getLastInputLoudness() const { return glm::max(_lastInputLoudness - _noiseGateMeasuredFloor, 0.f); }
     float getTimeSinceLastClip() const { return _timeSinceLastClip; }
     float getAudioAverageInputLoudness() const { return _lastInputLoudness; }
     void setNoiseGateEnabled(bool noiseGateEnabled) { _noiseGateEnabled = noiseGateEnabled; }
-    void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; }
-    int getJitterBufferSamples() { return _jitterBufferSamples; }
     virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
     virtual void startDrumSound(float volume, float frequency, float duration, float decay);
+    void overrideDesiredJitterBufferFramesTo(int desired) { _receivedAudioStream.overrideDesiredJitterBufferFramesTo(desired); }
+    int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); }
     float getCollisionSoundMagnitude() { return _collisionSoundMagnitude; }
@@ -87,7 +87,7 @@ public:
 public slots:
     void start();
     void stop();
-    void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
+    void addReceivedAudioToStream(const QByteArray& audioByteArray);
     void parseAudioStreamStatsPacket(const QByteArray& packet);
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
     void handleAudioInput();
@@ -120,8 +120,6 @@ public slots:
     float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
     void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }
-    int getDesiredJitterBufferFrames() const { return _jitterBufferSamples / _ringBuffer.getNumFrameSamples(); }
     const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
     const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }
@@ -151,7 +149,7 @@ private:
     QAudioOutput* _proceduralAudioOutput;
     QIODevice* _proceduralOutputDevice;
     AudioRingBuffer _inputRingBuffer;
-    InboundMixedAudioStream _ringBuffer;
+    MixedAudioStream _receivedAudioStream;
     bool _isStereoInput;
     QString _inputAudioDeviceName;
@@ -160,8 +158,6 @@ private:
     StDev _stdev;
     QElapsedTimer _timeSinceLastReceived;
     float _averagedLatency;
-    float _measuredJitter;
-    int16_t _jitterBufferSamples;
     float _lastInputLoudness;
     float _timeSinceLastClip;
     float _dcOffset;

View file

@@ -25,13 +25,13 @@ public:
         _timeGapWindowMin(0),
         _timeGapWindowMax(0),
         _timeGapWindowAverage(0.0f),
-        _ringBufferFramesAvailable(0),
-        _ringBufferFramesAvailableAverage(0),
-        _ringBufferDesiredJitterBufferFrames(0),
-        _ringBufferStarveCount(0),
-        _ringBufferConsecutiveNotMixedCount(0),
-        _ringBufferOverflowCount(0),
-        _ringBufferSilentFramesDropped(0),
+        _framesAvailable(0),
+        _framesAvailableAverage(0),
+        _desiredJitterBufferFrames(0),
+        _starveCount(0),
+        _consecutiveNotMixedCount(0),
+        _overflowCount(0),
+        _silentFramesDropped(0),
         _packetStreamStats(),
         _packetStreamWindowStats()
     {}
@@ -46,13 +46,13 @@ public:
     quint64 _timeGapWindowMax;
     float _timeGapWindowAverage;
-    quint32 _ringBufferFramesAvailable;
-    quint16 _ringBufferFramesAvailableAverage;
-    quint16 _ringBufferDesiredJitterBufferFrames;
-    quint32 _ringBufferStarveCount;
-    quint32 _ringBufferConsecutiveNotMixedCount;
-    quint32 _ringBufferOverflowCount;
-    quint32 _ringBufferSilentFramesDropped;
+    quint32 _framesAvailable;
+    quint16 _framesAvailableAverage;
+    quint16 _desiredJitterBufferFrames;
+    quint32 _starveCount;
+    quint32 _consecutiveNotMixedCount;
+    quint32 _overflowCount;
+    quint32 _silentFramesDropped;
     PacketStreamStats _packetStreamStats;
     PacketStreamStats _packetStreamWindowStats;
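
Note: a small sketch of the renamed fields in use, mirroring the renderStats() hunk earlier in this commit. Each buffered frame spans one send interval, so average frames available converts directly to milliseconds of buffering latency:

    float bufferLatencyMsecs(const AudioStreamStats& stats, float bufferSendIntervalMsecs) {
        // hypothetical helper: frames * (msecs per frame) = msecs of buffered audio
        return stats._framesAvailableAverage * bufferSendIntervalMsecs;
    }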

View file

@@ -146,6 +146,11 @@ void InboundAudioStream::starved() {
     _starveCount++;
 }
+void InboundAudioStream::overrideDesiredJitterBufferFramesTo(int desired) {
+    _dynamicJitterBuffers = false;
+    _desiredJitterBufferFrames = desired;
+}
 int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
     const int MIN_FRAMES_DESIRED = 0;
     const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity();
@@ -244,13 +249,13 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
     streamStats._timeGapWindowMax = _interframeTimeGapStatsForStatsPacket.getWindowMax();
     streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage();
-    streamStats._ringBufferFramesAvailable = _ringBuffer.framesAvailable();
-    streamStats._ringBufferFramesAvailableAverage = _framesAvailableStats.getWindowAverage();
-    streamStats._ringBufferDesiredJitterBufferFrames = _desiredJitterBufferFrames;
-    streamStats._ringBufferStarveCount = _starveCount;
-    streamStats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount;
-    streamStats._ringBufferOverflowCount = _ringBuffer.getOverflowCount();
-    streamStats._ringBufferSilentFramesDropped = _silentFramesDropped;
+    streamStats._framesAvailable = _ringBuffer.framesAvailable();
+    streamStats._framesAvailableAverage = _framesAvailableStats.getWindowAverage();
+    streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames;
+    streamStats._starveCount = _starveCount;
+    streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount;
+    streamStats._overflowCount = _ringBuffer.getOverflowCount();
+    streamStats._silentFramesDropped = _silentFramesDropped;
     streamStats._packetStreamStats = _incomingSequenceNumberStats.getStats();
     streamStats._packetStreamWindowStats = _incomingSequenceNumberStats.getStatsForHistoryWindow();
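
Note the interaction between the new override and the existing clamp: the dynamic path clamps the desired frame count to the ring buffer's capacity, while overrideDesiredJitterBufferFramesTo() pins the value and disables the dynamic path outright. The clamp, restated as a pure function (hypothetical sketch):

    int clampDesiredJitterBufferFrames(int desired, int ringBufferFrameCapacity) {
        const int MIN_FRAMES_DESIRED = 0;
        if (desired < MIN_FRAMES_DESIRED) return MIN_FRAMES_DESIRED;
        if (desired > ringBufferFrameCapacity) return ringBufferFrameCapacity;
        return desired;
    }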

View file

@@ -62,6 +62,8 @@ public:
     void setToStarved();
+    /// turns off dynamic jitter buffers and sets the desired jitter buffer frames to the specified value
+    void overrideDesiredJitterBufferFramesTo(int desired);
     /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds
     AudioStreamStats updateSeqHistoryAndGetAudioStreamStats();
@@ -95,8 +97,11 @@ public:
 private:
     void starved();
+    SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID);
     int clampDesiredJitterBufferFramesValue(int desired) const;
+    int writeSamplesForDroppedPackets(int numSamples);
 protected:
     // disallow copying of InboundAudioStream objects
     InboundAudioStream(const InboundAudioStream&);
@@ -110,9 +115,7 @@ protected:
     virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0;
     int writeDroppableSilentSamples(int numSilentSamples);
-    int writeSamplesForDroppedPackets(int numSamples);
-    SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID);
 protected:
     AudioRingBuffer _ringBuffer;
@@ -120,7 +123,7 @@ protected:
     bool _lastPopSucceeded;
     AudioRingBuffer::ConstIterator _lastPopOutput;
-    const bool _dynamicJitterBuffers;
+    bool _dynamicJitterBuffers;
     bool _useStDevForJitterCalc;
     int _calculatedJitterBufferFramesUsingMaxGap;

View file

@@ -1,17 +0,0 @@
-#include "InboundMixedAudioStream.h"
-
-InboundMixedAudioStream::InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc)
-    : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, useStDevForJitterCalc)
-{
-}
-
-int InboundMixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
-    // mixed audio packets do not have any info between the seq num and the audio data.
-    numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
-    return 0;
-}
-
-int InboundMixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
-    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
-}

View file

@@ -1,14 +0,0 @@
-#include "InboundAudioStream.h"
-#include "PacketHeaders.h"
-
-class InboundMixedAudioStream : public InboundAudioStream {
-public:
-    InboundMixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false);
-
-    float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
-
-protected:
-    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
-    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
-};

View file

@@ -9,8 +9,8 @@
 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
-#ifndef hifi_InjectedAudioRingBuffer_h
-#define hifi_InjectedAudioRingBuffer_h
+#ifndef hifi_InjectedAudioStream_h
+#define hifi_InjectedAudioStream_h
 #include <QtCore/QUuid>
@@ -39,4 +39,4 @@ private:
     float _attenuationRatio;
 };
-#endif // hifi_InjectedAudioRingBuffer_h
+#endif // hifi_InjectedAudioStream_h

View file

@@ -0,0 +1,17 @@
+#include "MixedAudioStream.h"
+
+MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc)
+    : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, useStDevForJitterCalc)
+{
+}
+
+int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+    // mixed audio packets do not have any info between the seq num and the audio data.
+    numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
+    return 0;
+}
+
+int MixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
+}
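
Note: usage mirrors the call sites elsewhere in this commit. The client's Audio class constructs its received stream with a 10-frame capacity and dynamic jitter buffers enabled (100 frames under _WIN32); the Agent uses a 1-frame, non-dynamic stream. For example:

    MixedAudioStream receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO,
                                         10,     // numFramesCapacity
                                         true,   // dynamicJitterBuffers
                                         true);  // useStDevForJitterCalc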

View file

@@ -0,0 +1,29 @@
+//
+//  MixedAudioStream.h
+//  libraries/audio/src
+//
+//  Created by Stephen Birarda on 6/5/13.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+#ifndef hifi_MixedAudioStream_h
+#define hifi_MixedAudioStream_h
+
+#include "InboundAudioStream.h"
+#include "PacketHeaders.h"
+
+class MixedAudioStream : public InboundAudioStream {
+public:
+    MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, bool useStDevForJitterCalc = false);
+
+    float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
+
+protected:
+    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
+    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
+};
+
+#endif // hifi_MixedAudioStream_h

View file

@@ -9,8 +9,8 @@
 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
-#ifndef hifi_PositionalAudioRingBuffer_h
-#define hifi_PositionalAudioRingBuffer_h
+#ifndef hifi_PositionalAudioStream_h
+#define hifi_PositionalAudioStream_h
 #include <glm/gtx/quaternion.hpp>
 #include <AABox.h>
@@ -71,4 +71,4 @@ protected:
     AABox* _listenerUnattenuatedZone;
 };
-#endif // hifi_PositionalAudioRingBuffer_h
+#endif // hifi_PositionalAudioStream_h