Merge branch 'master' of https://github.com/highfidelity/hifi into domain-server-auth

commit 453c37f4b9
46 changed files with 2604 additions and 1404 deletions
@@ -38,7 +38,7 @@ Agent::Agent(const QByteArray& packet) :
     _voxelEditSender(),
     _particleEditSender(),
     _modelEditSender(),
-    _receivedAudioBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO),
+    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false, 1, 0, false),
    _avatarHashMap()
 {
     // be the parent of the script engine so it gets moved when we do
@@ -150,20 +150,11 @@ void Agent::readPendingDatagrams() {
 
         } else if (datagramPacketType == PacketTypeMixedAudio) {
 
-            QUuid senderUUID = uuidFromPacketHeader(receivedPacket);
-
-            // parse sequence number for this packet
-            int numBytesPacketHeader = numBytesForPacketHeader(receivedPacket);
-            const char* sequenceAt = receivedPacket.constData() + numBytesPacketHeader;
-            quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));
-            _incomingMixedAudioSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID);
-
-            // parse the data and grab the average loudness
-            _receivedAudioBuffer.parseData(receivedPacket);
-
-            // pretend like we have read the samples from this buffer so it does not fill
-            static int16_t garbageAudioBuffer[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO];
-            _receivedAudioBuffer.readSamples(garbageAudioBuffer, NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);
+            _receivedAudioStream.parseData(receivedPacket);
+
+            _lastReceivedAudioLoudness = _receivedAudioStream.getNextOutputFrameLoudness();
+            _receivedAudioStream.clearBuffer();
 
             // let this continue through to the NodeList so it updates last heard timestamp
             // for the sending audio mixer
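For context on the deleted lines: before this change the Agent pulled the sequence number out of the datagram by hand. A minimal sketch of that wire read, assuming only the numBytesForPacketHeader() helper visible above; memcpy is used here instead of the original reinterpret_cast because the quint16 is not guaranteed to be 2-byte aligned inside the packet:

    // sketch: wire layout is [variable-length header][quint16 sequence][audio payload]
    int headerBytes = numBytesForPacketHeader(receivedPacket);
    quint16 sequence;
    memcpy(&sequence, receivedPacket.constData() + headerBytes, sizeof(sequence));

After this commit the Agent simply hands the whole packet to _receivedAudioStream.parseData() and reads the frame loudness back from the stream.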
@@ -19,7 +19,6 @@
 #include <QtCore/QUrl>
 
 #include <AvatarHashMap.h>
-#include <MixedAudioRingBuffer.h>
 #include <ModelEditPacketSender.h>
 #include <ModelTree.h>
 #include <ModelTreeHeadlessViewer.h>
@@ -31,6 +30,8 @@
 #include <VoxelEditPacketSender.h>
 #include <VoxelTreeHeadlessViewer.h>
 
+#include "MixedAudioStream.h"
+
 
 class Agent : public ThreadedAssignment {
     Q_OBJECT
@@ -51,7 +52,7 @@ public:
     void setIsListeningToAudioStream(bool isListeningToAudioStream)
         { _scriptEngine.setIsListeningToAudioStream(isListeningToAudioStream); }
 
-    float getLastReceivedAudioLoudness() const { return _receivedAudioBuffer.getLastReadFrameAverageLoudness(); }
+    float getLastReceivedAudioLoudness() const { return _lastReceivedAudioLoudness; }
 
     virtual void aboutToFinish();
 
@@ -70,8 +71,8 @@ private:
     VoxelTreeHeadlessViewer _voxelViewer;
     ModelTreeHeadlessViewer _modelViewer;
 
-    MixedAudioRingBuffer _receivedAudioBuffer;
-    SequenceNumberStats _incomingMixedAudioSequenceNumberStats;
+    MixedAudioStream _receivedAudioStream;
+    float _lastReceivedAudioLoudness;
 
     AvatarHashMap _avatarHashMap;
 };
@@ -52,8 +52,8 @@
 
 #include "AudioRingBuffer.h"
 #include "AudioMixerClientData.h"
-#include "AvatarAudioRingBuffer.h"
-#include "InjectedAudioRingBuffer.h"
+#include "AvatarAudioStream.h"
+#include "InjectedAudioStream.h"
 
 #include "AudioMixer.h"
 
@@ -61,13 +61,15 @@ const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f;
 
 const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
 
-void attachNewBufferToNode(Node *newNode) {
+void attachNewNodeDataToNode(Node *newNode) {
     if (!newNode->getLinkedData()) {
         newNode->setLinkedData(new AudioMixerClientData());
     }
 }
 
 bool AudioMixer::_useDynamicJitterBuffers = false;
+int AudioMixer::_staticDesiredJitterBufferFrames = 0;
+int AudioMixer::_maxFramesOverDesired = 0;
 
 AudioMixer::AudioMixer(const QByteArray& packet) :
     ThreadedAssignment(packet),
@@ -93,19 +95,19 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
 const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
 const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
 
-void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
-                                                          AvatarAudioRingBuffer* listeningNodeBuffer) {
+void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+                                                          AvatarAudioStream* listeningNodeStream) {
     float bearingRelativeAngleToSource = 0.0f;
     float attenuationCoefficient = 1.0f;
     int numSamplesDelay = 0;
     float weakChannelAmplitudeRatio = 1.0f;
 
-    bool shouldAttenuate = (bufferToAdd != listeningNodeBuffer);
+    bool shouldAttenuate = (streamToAdd != listeningNodeStream);
 
     if (shouldAttenuate) {
 
-        // if the two buffer pointers do not match then these are different buffers
-        glm::vec3 relativePosition = bufferToAdd->getPosition() - listeningNodeBuffer->getPosition();
+        // if the two stream pointers do not match then these are different streams
+        glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition();
 
         float distanceBetween = glm::length(relativePosition);
 
@@ -113,7 +115,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuf
             distanceBetween = EPSILON;
         }
 
-        if (bufferToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
+        if (streamToAdd->getLastPopOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
             // according to mixer performance we have decided this does not get to be mixed in
             // bail out
             return;
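A quick worked example of this audibility cull (numbers illustrative, not from the codebase): with a trailing loudness of 60 and a listener 30 units away, the ratio is 60 / 30 = 2.0; if _minAudibilityThreshold were 2.5, the stream would be rejected here, before any per-sample mixing work is spent on it.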
@@ -121,24 +123,24 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuf
 
         ++_sumMixes;
 
-        if (bufferToAdd->getListenerUnattenuatedZone()) {
-            shouldAttenuate = !bufferToAdd->getListenerUnattenuatedZone()->contains(listeningNodeBuffer->getPosition());
+        if (streamToAdd->getListenerUnattenuatedZone()) {
+            shouldAttenuate = !streamToAdd->getListenerUnattenuatedZone()->contains(listeningNodeStream->getPosition());
         }
 
-        if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) {
-            attenuationCoefficient *= reinterpret_cast<InjectedAudioRingBuffer*>(bufferToAdd)->getAttenuationRatio();
+        if (streamToAdd->getType() == PositionalAudioStream::Injector) {
+            attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getAttenuationRatio();
         }
 
         shouldAttenuate = shouldAttenuate && distanceBetween > ATTENUATION_EPSILON_DISTANCE;
 
         if (shouldAttenuate) {
-            glm::quat inverseOrientation = glm::inverse(listeningNodeBuffer->getOrientation());
+            glm::quat inverseOrientation = glm::inverse(listeningNodeStream->getOrientation());
 
             float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
             float radius = 0.0f;
 
-            if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) {
-                radius = reinterpret_cast<InjectedAudioRingBuffer*>(bufferToAdd)->getRadius();
+            if (streamToAdd->getType() == PositionalAudioStream::Injector) {
+                radius = reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getRadius();
             }
 
             if (radius == 0 || (distanceSquareToSource > radius * radius)) {
@@ -154,7 +156,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuf
 
             } else {
                 // calculate the angle delivery for off-axis attenuation
-                glm::vec3 rotatedListenerPosition = glm::inverse(bufferToAdd->getOrientation()) * relativePosition;
+                glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd->getOrientation()) * relativePosition;
 
                 float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                                    glm::normalize(rotatedListenerPosition));
@@ -203,19 +205,16 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuf
         }
     }
 
-    const int16_t* nextOutputStart = bufferToAdd->getNextOutput();
+    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();
 
-    if (!bufferToAdd->isStereo() && shouldAttenuate) {
-        // this is a mono buffer, which means it gets full attenuation and spatialization
+    if (!streamToAdd->isStereo() && shouldAttenuate) {
+        // this is a mono stream, which means it gets full attenuation and spatialization
 
         // if the bearing relative angle to source is > 0 then the delayed channel is the right one
         int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
         int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
 
-        const int16_t* bufferStart = bufferToAdd->getBuffer();
-        int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
-
-        int16_t correctBufferSample[2], delayBufferSample[2];
+        int16_t correctStreamSample[2], delayStreamSample[2];
         int delayedChannelIndex = 0;
 
         const int SINGLE_STEREO_OFFSET = 2;
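The indexing in the mixing loop that follows is easier to read with the sample layout spelled out. A small illustrative sketch (indices hypothetical, not repo code) of the stereo-interleaved layout _clientSamples uses:

    // _clientSamples is interleaved stereo: L0 R0 L1 R1 L2 R2 ...
    // so sample n of channel c lives at index 2 * n + c
    int frame = 5;
    int leftIndex  = 2 * frame;      // 10
    int rightIndex = 2 * frame + 1;  // 11

In the loop below, s advances by 4 (two stereo frames per iteration) while s / 2 indexes the corresponding mono source sample; goodChannelOffset selects the near ear, and delayedChannelOffset selects the far ear, which receives the same samples delayed by numSamplesDelay and scaled by weakChannelAmplitudeRatio.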
@@ -223,65 +222,51 @@
         for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
 
             // setup the int16_t variables for the two sample sets
-            correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
-            correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
+            correctStreamSample[0] = streamPopOutput[s / 2] * attenuationCoefficient;
+            correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationCoefficient;
 
             delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
 
-            delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
-            delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
+            delayStreamSample[0] = correctStreamSample[0] * weakChannelAmplitudeRatio;
+            delayStreamSample[1] = correctStreamSample[1] * weakChannelAmplitudeRatio;
 
-            _clientSamples[s + goodChannelOffset] += correctBufferSample[0];
-            _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] += correctBufferSample[1];
-            _clientSamples[delayedChannelIndex] += delayBufferSample[0];
-            _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayBufferSample[1];
+            _clientSamples[s + goodChannelOffset] += correctStreamSample[0];
+            _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] += correctStreamSample[1];
+            _clientSamples[delayedChannelIndex] += delayStreamSample[0];
+            _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayStreamSample[1];
         }
 
         if (numSamplesDelay > 0) {
-            // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
+            // if there was a sample delay for this stream, we need to pull samples prior to the popped output
             // to stick at the beginning
             float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
-            const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
-            if (delayNextOutputStart < bufferStart) {
-                delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
-            }
+            AudioRingBuffer::ConstIterator delayStreamPopOutput = streamPopOutput - numSamplesDelay;
+
+            // TODO: delayStreamPopOutput may be inside the last frame written if the ringbuffer is completely full
+            // maybe make AudioRingBuffer have 1 extra frame in its buffer
 
             for (int i = 0; i < numSamplesDelay; i++) {
                 int parentIndex = i * 2;
-                _clientSamples[parentIndex + delayedChannelOffset] += delayNextOutputStart[i] * attenuationAndWeakChannelRatio;
+                _clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatio;
+                ++delayStreamPopOutput;
             }
         }
     } else {
-        // this is a stereo buffer or an unattenuated buffer, don't perform spatialization
-        for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
-
-            int stereoDivider = bufferToAdd->isStereo() ? 1 : 2;
-
-            if (!shouldAttenuate) {
-                attenuationCoefficient = 1.0f;
-            }
-
-            _clientSamples[s] = glm::clamp(_clientSamples[s]
-                + (int) (nextOutputStart[(s / stereoDivider)] * attenuationCoefficient),
-                MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-            _clientSamples[s + 1] = glm::clamp(_clientSamples[s + 1]
-                + (int) (nextOutputStart[(s / stereoDivider) + (1 / stereoDivider)] * attenuationCoefficient),
-                MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-            _clientSamples[s + 2] = glm::clamp(_clientSamples[s + 2]
-                + (int) (nextOutputStart[(s / stereoDivider) + (2 / stereoDivider)] * attenuationCoefficient),
-                MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-            _clientSamples[s + 3] = glm::clamp(_clientSamples[s + 3]
-                + (int) (nextOutputStart[(s / stereoDivider) + (3 / stereoDivider)] * attenuationCoefficient),
-                MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
+        int stereoDivider = streamToAdd->isStereo() ? 1 : 2;
+
+        if (!shouldAttenuate) {
+            attenuationCoefficient = 1.0f;
+        }
+
+        for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) {
+            _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationCoefficient),
+                                           MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
         }
     }
 }
 
 void AudioMixer::prepareMixForListeningNode(Node* node) {
-    AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();
+    AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
 
     // zero out the client mix for this node
     memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
@@ -293,21 +278,23 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
             AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
 
             // enumerate the ARBs attached to the otherNode and add all that should be added to mix
-            for (int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
-                PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i];
-
-                if ((*otherNode != *node
-                     || otherNodeBuffer->shouldLoopbackForNode())
-                    && otherNodeBuffer->willBeAddedToMix()
-                    && otherNodeBuffer->getNextOutputTrailingLoudness() > 0.0f) {
-                    addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
+            const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
+            QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
+            for (i = otherNodeAudioStreams.begin(); i != otherNodeAudioStreams.constEnd(); i++) {
+                PositionalAudioStream* otherNodeStream = i.value();
+
+                if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode())
+                    && otherNodeStream->lastPopSucceeded()
+                    && otherNodeStream->getLastPopOutputTrailingLoudness() > 0.0f) {
+
+                    addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
                 }
             }
         }
     }
 }
 
 void AudioMixer::readPendingDatagrams() {
     QByteArray receivedPacket;
     HifiSockAddr senderSockAddr;
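Worth noting in the rewritten loop above: the old willBeAddedToMix() flag is gone, replaced by lastPopSucceeded(). A stream now only reaches the mix when checkBuffersBeforeFrameSend actually popped a frame from it this interval, so a starved stream fails the second condition and a popped-but-silent frame fails the trailing-loudness test; the old two-pass flag-then-push bookkeeping becomes unnecessary.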
@@ -406,7 +393,7 @@ void AudioMixer::run() {
 
     nodeList->addNodeTypeToInterestSet(NodeType::Agent);
 
-    nodeList->linkedDataCreateCallback = attachNewBufferToNode;
+    nodeList->linkedDataCreateCallback = attachNewNodeDataToNode;
 
     // wait until we have the domain-server settings, otherwise we bail
     DomainHandler& domainHandler = nodeList->getDomainHandler();
@@ -434,40 +421,58 @@ void AudioMixer::run() {
         if (settingsObject.contains(AUDIO_GROUP_KEY)) {
             QJsonObject audioGroupObject = settingsObject[AUDIO_GROUP_KEY].toObject();
 
-            const QString UNATTENUATED_ZONE_KEY = "unattenuated-zone";
-
-            QString unattenuatedZoneString = audioGroupObject[UNATTENUATED_ZONE_KEY].toString();
-            if (!unattenuatedZoneString.isEmpty()) {
-                QStringList zoneStringList = unattenuatedZoneString.split(',');
-
-                glm::vec3 sourceCorner(zoneStringList[0].toFloat(), zoneStringList[1].toFloat(), zoneStringList[2].toFloat());
-                glm::vec3 sourceDimensions(zoneStringList[3].toFloat(), zoneStringList[4].toFloat(), zoneStringList[5].toFloat());
-
-                glm::vec3 listenerCorner(zoneStringList[6].toFloat(), zoneStringList[7].toFloat(), zoneStringList[8].toFloat());
-                glm::vec3 listenerDimensions(zoneStringList[9].toFloat(), zoneStringList[10].toFloat(), zoneStringList[11].toFloat());
-
-                _sourceUnattenuatedZone = new AABox(sourceCorner, sourceDimensions);
-                _listenerUnattenuatedZone = new AABox(listenerCorner, listenerDimensions);
-
-                glm::vec3 sourceCenter = _sourceUnattenuatedZone->calcCenter();
-                glm::vec3 destinationCenter = _listenerUnattenuatedZone->calcCenter();
-
-                qDebug() << "There is an unattenuated zone with source center at"
-                    << QString("%1, %2, %3").arg(sourceCenter.x).arg(sourceCenter.y).arg(sourceCenter.z);
-                qDebug() << "Buffers inside this zone will not be attenuated inside a box with center at"
-                    << QString("%1, %2, %3").arg(destinationCenter.x).arg(destinationCenter.y).arg(destinationCenter.z);
-            }
-
             // check the payload to see if we have asked for dynamicJitterBuffer support
-            const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "dynamic-jitter-buffer";
+            const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "A-dynamic-jitter-buffer";
             bool shouldUseDynamicJitterBuffers = audioGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
             if (shouldUseDynamicJitterBuffers) {
                 qDebug() << "Enable dynamic jitter buffers.";
                 _useDynamicJitterBuffers = true;
             } else {
-                qDebug() << "Dynamic jitter buffers disabled, using old behavior.";
+                qDebug() << "Dynamic jitter buffers disabled.";
                 _useDynamicJitterBuffers = false;
             }
 
+            bool ok;
+
+            const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "B-desired-jitter-buffer-frames";
+            _staticDesiredJitterBufferFrames = audioGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
+            if (!ok) {
+                _staticDesiredJitterBufferFrames = DEFAULT_DESIRED_JITTER_BUFFER_FRAMES;
+            }
+            qDebug() << "Static desired jitter buffer frames:" << _staticDesiredJitterBufferFrames;
+
+            const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "C-max-frames-over-desired";
+            _maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
+            if (!ok) {
+                _maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
+            }
+            qDebug() << "Max frames over desired:" << _maxFramesOverDesired;
+
+            const QString UNATTENUATED_ZONE_KEY = "D-unattenuated-zone";
+
+            QString unattenuatedZoneString = audioGroupObject[UNATTENUATED_ZONE_KEY].toString();
+            if (!unattenuatedZoneString.isEmpty()) {
+                QStringList zoneStringList = unattenuatedZoneString.split(',');
+
+                glm::vec3 sourceCorner(zoneStringList[0].toFloat(), zoneStringList[1].toFloat(), zoneStringList[2].toFloat());
+                glm::vec3 sourceDimensions(zoneStringList[3].toFloat(), zoneStringList[4].toFloat(), zoneStringList[5].toFloat());
+
+                glm::vec3 listenerCorner(zoneStringList[6].toFloat(), zoneStringList[7].toFloat(), zoneStringList[8].toFloat());
+                glm::vec3 listenerDimensions(zoneStringList[9].toFloat(), zoneStringList[10].toFloat(), zoneStringList[11].toFloat());
+
+                _sourceUnattenuatedZone = new AABox(sourceCorner, sourceDimensions);
+                _listenerUnattenuatedZone = new AABox(listenerCorner, listenerDimensions);
+
+                glm::vec3 sourceCenter = _sourceUnattenuatedZone->calcCenter();
+                glm::vec3 destinationCenter = _listenerUnattenuatedZone->calcCenter();
+
+                qDebug() << "There is an unattenuated zone with source center at"
+                    << QString("%1, %2, %3").arg(sourceCenter.x).arg(sourceCenter.y).arg(sourceCenter.z);
+                qDebug() << "Buffers inside this zone will not be attenuated inside a box with center at"
+                    << QString("%1, %2, %3").arg(destinationCenter.x).arg(destinationCenter.y).arg(destinationCenter.z);
+            }
         }
 
         int nextFrame = 0;
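The new keys are read with QString::toInt(bool* ok), so a missing or malformed setting falls back to a compile-time default. A self-contained sketch of the same pattern (the helper name is hypothetical, not from the repo):

    #include <QJsonObject>
    #include <QString>

    // sketch: integer setting with a default fallback, mirroring the hunk above
    int intSettingOr(const QJsonObject& group, const QString& key, int defaultValue) {
        bool ok = false;
        int value = group[key].toString().toInt(&ok); // ok stays false for missing or non-numeric values
        return ok ? value : defaultValue;
    }

The "A-"/"B-"/"C-"/"D-" prefixes on the key names presumably control how the fields sort in the domain-server settings page.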
@@ -483,14 +488,6 @@ void AudioMixer::run() {
     int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;
 
     while (!_isFinished) {
 
-        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
-            if (node->getLinkedData()) {
-                ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(_sourceUnattenuatedZone,
-                                                                                             _listenerUnattenuatedZone);
-            }
-        }
-
         const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
         const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;
@@ -551,43 +548,43 @@ void AudioMixer::run() {
         }
 
         foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
-            if (node->getType() == NodeType::Agent && node->getActiveSocket() && node->getLinkedData()
-                && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
-
+            if (node->getLinkedData()) {
                 AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
 
-                prepareMixForListeningNode(node.data());
-
-                // pack header
-                int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
-                char* dataAt = clientMixBuffer + numBytesPacketHeader;
-
-                // pack sequence number
-                quint16 sequence = nodeData->getOutgoingSequenceNumber();
-                memcpy(dataAt, &sequence, sizeof(quint16));
-                dataAt += sizeof(quint16);
-
-                // pack mixed audio samples
-                memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
-                dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
-
-                // send mixed audio packet
-                nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node);
-                nodeData->incrementOutgoingMixedAudioSequenceNumber();
-
-                // send an audio stream stats packet if it's time
-                if (sendAudioStreamStats) {
-                    nodeData->sendAudioStreamStatsPackets(node);
-                }
-
-                ++_sumListeners;
-            }
-        }
-
-        // push forward the next output pointers for any audio buffers we used
-        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
-            if (node->getLinkedData()) {
-                ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
+                // this function will attempt to pop a frame from each audio stream.
+                // a pointer to the popped data is stored as a member in InboundAudioStream.
+                // That's how the popped audio data will be read for mixing (but only if the pop was successful)
+                nodeData->checkBuffersBeforeFrameSend(_sourceUnattenuatedZone, _listenerUnattenuatedZone);
+
+                if (node->getType() == NodeType::Agent && node->getActiveSocket()
+                    && nodeData->getAvatarAudioStream()) {
+
+                    prepareMixForListeningNode(node.data());
+
+                    // pack header
+                    int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
+                    char* dataAt = clientMixBuffer + numBytesPacketHeader;
+
+                    // pack sequence number
+                    quint16 sequence = nodeData->getOutgoingSequenceNumber();
+                    memcpy(dataAt, &sequence, sizeof(quint16));
+                    dataAt += sizeof(quint16);
+
+                    // pack mixed audio samples
+                    memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
+                    dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
+
+                    // send mixed audio packet
+                    nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node);
+                    nodeData->incrementOutgoingMixedAudioSequenceNumber();
+
+                    // send an audio stream stats packet if it's time
+                    if (sendAudioStreamStats) {
+                        nodeData->sendAudioStreamStatsPackets(node);
+                    }
+
+                    ++_sumListeners;
+                }
             }
         }
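The mixed-audio datagram assembled above has a fixed layout. A compact restatement of the serialization (names as in the hunk; the constants resolve to whatever the build defines):

    // [packet header][quint16 sequence][NETWORK_BUFFER_LENGTH_BYTES_STEREO of samples]
    char* dataAt = clientMixBuffer + numBytesPacketHeader;
    memcpy(dataAt, &sequence, sizeof(quint16));
    dataAt += sizeof(quint16);
    memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
    dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
    // total datagram length = dataAt - clientMixBuffer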
@@ -16,8 +16,8 @@
 #include <AudioRingBuffer.h>
 #include <ThreadedAssignment.h>
 
-class PositionalAudioRingBuffer;
-class AvatarAudioRingBuffer;
+class PositionalAudioStream;
+class AvatarAudioStream;
 
 const int SAMPLE_PHASE_DELAY_AT_90 = 20;
 
|
@ -38,11 +38,13 @@ public slots:
|
||||||
void sendStatsPacket();
|
void sendStatsPacket();
|
||||||
|
|
||||||
static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; }
|
static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; }
|
||||||
|
static int getStaticDesiredJitterBufferFrames() { return _staticDesiredJitterBufferFrames; }
|
||||||
|
static int getMaxFramesOverDesired() { return _maxFramesOverDesired; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
/// adds one buffer to the mix for a listening node
|
/// adds one stream to the mix for a listening node
|
||||||
void addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
|
void addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
|
||||||
AvatarAudioRingBuffer* listeningNodeBuffer);
|
AvatarAudioStream* listeningNodeStream);
|
||||||
|
|
||||||
/// prepares and sends a mix to one Node
|
/// prepares and sends a mix to one Node
|
||||||
void prepareMixForListeningNode(Node* node);
|
void prepareMixForListeningNode(Node* node);
|
||||||
|
@@ -59,7 +61,10 @@ private:
     int _sumMixes;
     AABox* _sourceUnattenuatedZone;
     AABox* _listenerUnattenuatedZone;
 
     static bool _useDynamicJitterBuffers;
+    static int _staticDesiredJitterBufferFrames;
+    static int _maxFramesOverDesired;
+
     quint64 _lastSendAudioStreamStatsTime;
 };
@@ -14,244 +14,140 @@
 #include <PacketHeaders.h>
 #include <UUID.h>
 
-#include "InjectedAudioRingBuffer.h"
+#include "InjectedAudioStream.h"
 
 #include "AudioMixer.h"
 #include "AudioMixerClientData.h"
-#include "MovingMinMaxAvg.h"
-
-const int INCOMING_SEQ_STATS_HISTORY_LENGTH = INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS /
-        (TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS / USECS_PER_SECOND);
 
 AudioMixerClientData::AudioMixerClientData() :
-    _ringBuffers(),
-    _outgoingMixedAudioSequenceNumber(0),
-    _incomingAvatarAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH)
+    _audioStreams(),
+    _outgoingMixedAudioSequenceNumber(0)
 {
 
 }
 
 AudioMixerClientData::~AudioMixerClientData() {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
-        // delete this attached PositionalAudioRingBuffer
-        delete _ringBuffers[i];
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
+    for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
+        // delete this attached InboundAudioStream
+        delete i.value();
     }
 }
 
-AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
-        if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Microphone) {
-            return (AvatarAudioRingBuffer*) _ringBuffers[i];
-        }
+AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() const {
+    if (_audioStreams.contains(QUuid())) {
+        return (AvatarAudioStream*)_audioStreams.value(QUuid());
     }
-
-    // no AvatarAudioRingBuffer found - return NULL
+    // no mic stream found - return NULL
     return NULL;
 }
 
 int AudioMixerClientData::parseData(const QByteArray& packet) {
-
-    // parse sequence number for this packet
-    int numBytesPacketHeader = numBytesForPacketHeader(packet);
-    const char* sequenceAt = packet.constData() + numBytesPacketHeader;
-    quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));
-
     PacketType packetType = packetTypeForPacket(packet);
-    if (packetType == PacketTypeMicrophoneAudioWithEcho
-        || packetType == PacketTypeMicrophoneAudioNoEcho
-        || packetType == PacketTypeSilentAudioFrame) {
-
-        SequenceNumberStats::ArrivalInfo packetArrivalInfo = _incomingAvatarAudioSequenceNumberStats.sequenceNumberReceived(sequence);
-
-        // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
-        AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
-
-        // read the first byte after the header to see if this is a stereo or mono buffer
-        quint8 channelFlag = packet.at(numBytesForPacketHeader(packet) + sizeof(quint16));
-        bool isStereo = channelFlag == 1;
-
-        if (avatarRingBuffer && avatarRingBuffer->isStereo() != isStereo) {
-            // there's a mismatch in the buffer channels for the incoming and current buffer
-            // so delete our current buffer and create a new one
-            _ringBuffers.removeOne(avatarRingBuffer);
-            avatarRingBuffer->deleteLater();
-            avatarRingBuffer = NULL;
-        }
-
-        if (!avatarRingBuffer) {
-            // we don't have an AvatarAudioRingBuffer yet, so add it
-            avatarRingBuffer = new AvatarAudioRingBuffer(isStereo, AudioMixer::getUseDynamicJitterBuffers());
-            _ringBuffers.push_back(avatarRingBuffer);
-        }
-
-        // for now, late packets are simply discarded. In the future, it may be good to insert them into their correct place
-        // in the ring buffer (if that frame hasn't been mixed yet)
-        switch (packetArrivalInfo._status) {
-            case SequenceNumberStats::Early: {
-                int packetsLost = packetArrivalInfo._seqDiffFromExpected;
-                avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost);
-                break;
-            }
-            case SequenceNumberStats::OnTime: {
-                // ask the AvatarAudioRingBuffer instance to parse the data
-                avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, 0);
-                break;
-            }
-            default: {
-                break;
-            }
-        }
-    } else if (packetType == PacketTypeInjectAudio) {
-        // this is injected audio
-
-        // grab the stream identifier for this injected audio
-        QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(numBytesForPacketHeader(packet) + sizeof(quint16), NUM_BYTES_RFC4122_UUID));
-
-        if (!_incomingInjectedAudioSequenceNumberStatsMap.contains(streamIdentifier)) {
-            _incomingInjectedAudioSequenceNumberStatsMap.insert(streamIdentifier, SequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH));
-        }
-        SequenceNumberStats::ArrivalInfo packetArrivalInfo =
-            _incomingInjectedAudioSequenceNumberStatsMap[streamIdentifier].sequenceNumberReceived(sequence);
-
-        InjectedAudioRingBuffer* matchingInjectedRingBuffer = NULL;
-
-        for (int i = 0; i < _ringBuffers.size(); i++) {
-            if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector
-                && ((InjectedAudioRingBuffer*) _ringBuffers[i])->getStreamIdentifier() == streamIdentifier) {
-                matchingInjectedRingBuffer = (InjectedAudioRingBuffer*) _ringBuffers[i];
-            }
-        }
-
-        if (!matchingInjectedRingBuffer) {
-            // we don't have a matching injected audio ring buffer, so add it
-            matchingInjectedRingBuffer = new InjectedAudioRingBuffer(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers());
-            _ringBuffers.push_back(matchingInjectedRingBuffer);
-        }
-
-        // for now, late packets are simply discarded. In the future, it may be good to insert them into their correct place
-        // in the ring buffer (if that frame hasn't been mixed yet)
-        switch (packetArrivalInfo._status) {
-            case SequenceNumberStats::Early: {
-                int packetsLost = packetArrivalInfo._seqDiffFromExpected;
-                matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost);
-                break;
-            }
-            case SequenceNumberStats::OnTime: {
-                // ask the AvatarAudioRingBuffer instance to parse the data
-                matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, 0);
-                break;
-            }
-            default: {
-                break;
-            }
-        }
-    } else if (packetType == PacketTypeAudioStreamStats) {
-
+    if (packetType == PacketTypeAudioStreamStats) {
         const char* dataAt = packet.data();
 
         // skip over header, appendFlag, and num stats packed
-        dataAt += (numBytesPacketHeader + sizeof(quint8) + sizeof(quint16));
+        dataAt += (numBytesForPacketHeader(packet) + sizeof(quint8) + sizeof(quint16));
 
         // read the downstream audio stream stats
         memcpy(&_downstreamAudioStreamStats, dataAt, sizeof(AudioStreamStats));
-    }
+        dataAt += sizeof(AudioStreamStats);
+
+        return dataAt - packet.data();
+
+    } else {
+        PositionalAudioStream* matchingStream = NULL;
+
+        if (packetType == PacketTypeMicrophoneAudioWithEcho
+            || packetType == PacketTypeMicrophoneAudioNoEcho
+            || packetType == PacketTypeSilentAudioFrame) {
+
+            QUuid nullUUID = QUuid();
+            if (!_audioStreams.contains(nullUUID)) {
+                // we don't have a mic stream yet, so add it
+
+                // read the channel flag to see if our stream is stereo or not
+                const char* channelFlagAt = packet.constData() + numBytesForPacketHeader(packet) + sizeof(quint16);
+                quint8 channelFlag = *(reinterpret_cast<const quint8*>(channelFlagAt));
+                bool isStereo = channelFlag == 1;
+
+                _audioStreams.insert(nullUUID,
+                    matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers(),
+                    AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
+            } else {
+                matchingStream = _audioStreams.value(nullUUID);
+            }
+        } else if (packetType == PacketTypeInjectAudio) {
+            // this is injected audio
+
+            // grab the stream identifier for this injected audio
+            int bytesBeforeStreamIdentifier = numBytesForPacketHeader(packet) + sizeof(quint16);
+            QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(bytesBeforeStreamIdentifier, NUM_BYTES_RFC4122_UUID));
+
+            if (!_audioStreams.contains(streamIdentifier)) {
+                _audioStreams.insert(streamIdentifier,
+                    matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(),
+                    AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
+            } else {
+                matchingStream = _audioStreams.value(streamIdentifier);
+            }
+        }
+
+        return matchingStream->parseData(packet);
+    }
     return 0;
 }
 
 void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone) {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
-        if (_ringBuffers[i]->shouldBeAddedToMix()) {
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
+    for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
+        PositionalAudioStream* stream = i.value();
+        if (stream->popFrames(1)) {
             // this is a ring buffer that is ready to go
-            // set its flag so we know to push its buffer when all is said and done
-            _ringBuffers[i]->setWillBeAddedToMix(true);
 
-            // calculate the average loudness for the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
+            // calculate the trailing avg loudness for the next frame
             // that would be mixed in
-            _ringBuffers[i]->updateNextOutputTrailingLoudness();
+            stream->updateLastPopOutputTrailingLoudness();
 
-            if (checkSourceZone && checkSourceZone->contains(_ringBuffers[i]->getPosition())) {
-                _ringBuffers[i]->setListenerUnattenuatedZone(listenerZone);
+            if (checkSourceZone && checkSourceZone->contains(stream->getPosition())) {
+                stream->setListenerUnattenuatedZone(listenerZone);
             } else {
-                _ringBuffers[i]->setListenerUnattenuatedZone(NULL);
+                stream->setListenerUnattenuatedZone(NULL);
             }
         }
     }
 }
 
-void AudioMixerClientData::pushBuffersAfterFrameSend() {
-
-    QList<PositionalAudioRingBuffer*>::iterator i = _ringBuffers.begin();
-    while (i != _ringBuffers.end()) {
-        // this was a used buffer, push the output pointer forwards
-        PositionalAudioRingBuffer* audioBuffer = *i;
-
-        const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 100;
-
-        if (audioBuffer->willBeAddedToMix()) {
-            audioBuffer->shiftReadPosition(audioBuffer->getSamplesPerFrame());
-            audioBuffer->setWillBeAddedToMix(false);
-        } else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
-            && audioBuffer->hasStarted() && audioBuffer->isStarved()
-            && audioBuffer->getConsecutiveNotMixedCount() > INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD) {
-            // this is an empty audio buffer that has starved, safe to delete
-            // also delete its sequence number stats
-            QUuid streamIdentifier = ((InjectedAudioRingBuffer*)audioBuffer)->getStreamIdentifier();
-            _incomingInjectedAudioSequenceNumberStatsMap.remove(streamIdentifier);
-            delete audioBuffer;
-            i = _ringBuffers.erase(i);
-            continue;
-        }
-        i++;
-    }
-}
-
-AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const {
-
-    AudioStreamStats streamStats;
-
-    streamStats._streamType = ringBuffer->getType();
-    if (streamStats._streamType == PositionalAudioRingBuffer::Injector) {
-        streamStats._streamIdentifier = ((InjectedAudioRingBuffer*)ringBuffer)->getStreamIdentifier();
-        const SequenceNumberStats& sequenceNumberStats = _incomingInjectedAudioSequenceNumberStatsMap[streamStats._streamIdentifier];
-        streamStats._packetStreamStats = sequenceNumberStats.getStats();
-        streamStats._packetStreamWindowStats = sequenceNumberStats.getStatsForHistoryWindow();
-    } else {
-        streamStats._packetStreamStats = _incomingAvatarAudioSequenceNumberStats.getStats();
-        streamStats._packetStreamWindowStats = _incomingAvatarAudioSequenceNumberStats.getStatsForHistoryWindow();
-    }
-
-    const MovingMinMaxAvg<quint64>& timeGapStats = ringBuffer->getInterframeTimeGapStatsForStatsPacket();
-    streamStats._timeGapMin = timeGapStats.getMin();
-    streamStats._timeGapMax = timeGapStats.getMax();
-    streamStats._timeGapAverage = timeGapStats.getAverage();
-    streamStats._timeGapWindowMin = timeGapStats.getWindowMin();
-    streamStats._timeGapWindowMax = timeGapStats.getWindowMax();
-    streamStats._timeGapWindowAverage = timeGapStats.getWindowAverage();
-
-    streamStats._ringBufferFramesAvailable = ringBuffer->framesAvailable();
-    streamStats._ringBufferFramesAvailableAverage = ringBuffer->getFramesAvailableAverage();
-    streamStats._ringBufferDesiredJitterBufferFrames = ringBuffer->getDesiredJitterBufferFrames();
-    streamStats._ringBufferStarveCount = ringBuffer->getStarveCount();
-    streamStats._ringBufferConsecutiveNotMixedCount = ringBuffer->getConsecutiveNotMixedCount();
-    streamStats._ringBufferOverflowCount = ringBuffer->getOverflowCount();
-    streamStats._ringBufferSilentFramesDropped = ringBuffer->getSilentFramesDropped();
-
-    return streamStats;
-}
+void AudioMixerClientData::removeDeadInjectedStreams() {
+
+    const int INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD = 100;
+
+    // we have this second threshold in case the injected audio is so short that the injected stream
+    // never even reaches its desired size, which means it will never start.
+    const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000;
+
+    QHash<QUuid, PositionalAudioStream*>::Iterator i = _audioStreams.begin(), end = _audioStreams.end();
+    while (i != end) {
+        PositionalAudioStream* audioStream = i.value();
+        if (audioStream->getType() == PositionalAudioStream::Injector && audioStream->isStarved()) {
+            int notMixedThreshold = audioStream->hasStarted() ? INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD
+                                                              : INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD;
+            if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) {
+                delete audioStream;
+                i = _audioStreams.erase(i);
+                continue;
+            }
+        }
+        ++i;
+    }
+}
 
 void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) {
 
-    // have all the seq number stats of each audio stream push their current stats into their history,
-    // which moves that history window 1 second forward (since that's how long since the last stats were pushed into history)
-    _incomingAvatarAudioSequenceNumberStats.pushStatsToHistory();
-    QHash<QUuid, SequenceNumberStats>::Iterator i = _incomingInjectedAudioSequenceNumberStatsMap.begin();
-    QHash<QUuid, SequenceNumberStats>::Iterator end = _incomingInjectedAudioSequenceNumberStatsMap.end();
-    while (i != end) {
-        i.value().pushStatsToHistory();
-        i++;
-    }
-
+    // since audio stream stats packets are sent periodically, this is a good place to remove our dead injected streams.
+    removeDeadInjectedStreams();
+
     char packet[MAX_PACKET_SIZE];
     NodeList* nodeList = NodeList::getInstance();
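removeDeadInjectedStreams uses the standard Qt idiom for erasing from a QHash during iteration: QHash::erase() returns the iterator to the next element, and the loop only advances manually when nothing was erased. A minimal standalone sketch (value type simplified to int):

    #include <QHash>
    #include <QUuid>

    // sketch: erase-while-iterating, mirroring removeDeadInjectedStreams above
    void removeNegative(QHash<QUuid, int>& hash) {
        QHash<QUuid, int>::Iterator i = hash.begin();
        while (i != hash.end()) {
            if (i.value() < 0) {     // stand-in for the dead-stream test
                i = hash.erase(i);   // erase() hands back the next valid iterator
                continue;            // skip the ++i so no element is skipped
            }
            ++i;
        }
    }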
@@ -269,9 +165,9 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
     // calculate how many stream stat structs we can fit in each packet
     const int numStreamStatsRoomFor = (MAX_PACKET_SIZE - numBytesPacketHeader - sizeof(quint8) - sizeof(quint16)) / sizeof(AudioStreamStats);
 
-    // pack and send stream stats packets until all ring buffers' stats are sent
-    int numStreamStatsRemaining = _ringBuffers.size();
-    QList<PositionalAudioRingBuffer*>::ConstIterator ringBuffersIterator = _ringBuffers.constBegin();
+    // pack and send stream stats packets until all audio streams' stats are sent
+    int numStreamStatsRemaining = _audioStreams.size();
+    QHash<QUuid, PositionalAudioStream*>::ConstIterator audioStreamsIterator = _audioStreams.constBegin();
     while (numStreamStatsRemaining > 0) {
 
         char* dataAt = headerEndAt;
@@ -288,11 +184,11 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
 
         // pack the calculated number of stream stats
         for (int i = 0; i < numStreamStatsToPack; i++) {
-            AudioStreamStats streamStats = getAudioStreamStatsOfStream(*ringBuffersIterator);
+            AudioStreamStats streamStats = audioStreamsIterator.value()->updateSeqHistoryAndGetAudioStreamStats();
             memcpy(dataAt, &streamStats, sizeof(AudioStreamStats));
             dataAt += sizeof(AudioStreamStats);
 
-            ringBuffersIterator++;
+            audioStreamsIterator++;
         }
         numStreamStatsRemaining -= numStreamStatsToPack;
 
@@ -304,12 +200,12 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
 QString AudioMixerClientData::getAudioStreamStatsString() const {
     QString result;
     AudioStreamStats streamStats = _downstreamAudioStreamStats;
-    result += "DOWNSTREAM.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
-        + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage)
-        + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
-        + " starves:" + QString::number(streamStats._ringBufferStarveCount)
-        + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
-        + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
+    result += "DOWNSTREAM.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
+        + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
+        + " available:" + QString::number(streamStats._framesAvailable)
+        + " starves:" + QString::number(streamStats._starveCount)
+        + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
+        + " overflows:" + QString::number(streamStats._overflowCount)
         + " silents_dropped: ?"
         + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
         + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
@@ -320,17 +216,17 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
         + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
         + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
 
-    AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
-    if (avatarRingBuffer) {
-        AudioStreamStats streamStats = getAudioStreamStatsOfStream(avatarRingBuffer);
-        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
-            + " desired_calc:" + QString::number(avatarRingBuffer->getCalculatedDesiredJitterBufferFrames())
-            + " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage)
-            + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
-            + " starves:" + QString::number(streamStats._ringBufferStarveCount)
-            + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
-            + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
-            + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
+    AvatarAudioStream* avatarAudioStream = getAvatarAudioStream();
+    if (avatarAudioStream) {
+        AudioStreamStats streamStats = avatarAudioStream->getAudioStreamStats();
+        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
+            + " desired_calc:" + QString::number(avatarAudioStream->getCalculatedJitterBufferFrames())
+            + " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
+            + " available:" + QString::number(streamStats._framesAvailable)
+            + " starves:" + QString::number(streamStats._starveCount)
+            + " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
+            + " overflows:" + QString::number(streamStats._overflowCount)
+            + " silents_dropped:" + QString::number(streamStats._framesDropped)
             + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
             + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
             + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
@ -343,17 +239,18 @@ QString AudioMixerClientData::getAudioStreamStatsString() const {
|
||||||
result = "mic unknown";
|
result = "mic unknown";
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int i = 0; i < _ringBuffers.size(); i++) {
|
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
|
||||||
if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector) {
|
for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
|
||||||
AudioStreamStats streamStats = getAudioStreamStatsOfStream(_ringBuffers[i]);
|
if (i.value()->getType() == PositionalAudioStream::Injector) {
|
||||||
result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
|
AudioStreamStats streamStats = i.value()->getAudioStreamStats();
|
||||||
+ " desired_calc:" + QString::number(_ringBuffers[i]->getCalculatedDesiredJitterBufferFrames())
|
result += " UPSTREAM.inj.desired:" + QString::number(streamStats._desiredJitterBufferFrames)
|
||||||
+ " available_avg_10s:" + QString::number(streamStats._ringBufferFramesAvailableAverage)
|
+ " desired_calc:" + QString::number(i.value()->getCalculatedJitterBufferFrames())
|
||||||
+ " available:" + QString::number(streamStats._ringBufferFramesAvailable)
|
+ " available_avg_10s:" + QString::number(streamStats._framesAvailableAverage)
|
||||||
+ " starves:" + QString::number(streamStats._ringBufferStarveCount)
|
+ " available:" + QString::number(streamStats._framesAvailable)
|
||||||
+ " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
|
+ " starves:" + QString::number(streamStats._starveCount)
|
||||||
+ " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
|
+ " not_mixed:" + QString::number(streamStats._consecutiveNotMixedCount)
|
||||||
+ " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
|
+ " overflows:" + QString::number(streamStats._overflowCount)
|
||||||
|
+ " silents_dropped:" + QString::number(streamStats._framesDropped)
|
||||||
+ " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
|
+ " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
|
||||||
+ " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
|
+ " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
|
||||||
+ " min_gap:" + formatUsecTime(streamStats._timeGapMin)
|
+ " min_gap:" + formatUsecTime(streamStats._timeGapMin)
|
||||||
|
|
|
@ -13,29 +13,24 @@
|
||||||
#define hifi_AudioMixerClientData_h
|
#define hifi_AudioMixerClientData_h
|
||||||
|
|
||||||
#include <AABox.h>
|
#include <AABox.h>
|
||||||
#include <NodeData.h>
|
|
||||||
#include <PositionalAudioRingBuffer.h>
|
|
||||||
|
|
||||||
#include "AvatarAudioRingBuffer.h"
|
#include "PositionalAudioStream.h"
|
||||||
#include "AudioStreamStats.h"
|
#include "AvatarAudioStream.h"
|
||||||
#include "SequenceNumberStats.h"
|
|
||||||
|
|
||||||
|
|
||||||
const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
|
|
||||||
|
|
||||||
class AudioMixerClientData : public NodeData {
|
class AudioMixerClientData : public NodeData {
|
||||||
public:
|
public:
|
||||||
AudioMixerClientData();
|
AudioMixerClientData();
|
||||||
~AudioMixerClientData();
|
~AudioMixerClientData();
|
||||||
|
|
||||||
const QList<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
|
const QHash<QUuid, PositionalAudioStream*>& getAudioStreams() const { return _audioStreams; }
|
||||||
AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
|
AvatarAudioStream* getAvatarAudioStream() const;
|
||||||
|
|
||||||
int parseData(const QByteArray& packet);
|
int parseData(const QByteArray& packet);
|
||||||
void checkBuffersBeforeFrameSend(AABox* checkSourceZone = NULL, AABox* listenerZone = NULL);
|
|
||||||
void pushBuffersAfterFrameSend();
|
|
||||||
|
|
||||||
AudioStreamStats getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const;
|
void checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone);
|
||||||
|
|
||||||
|
void removeDeadInjectedStreams();
|
||||||
|
|
||||||
QString getAudioStreamStatsString() const;
|
QString getAudioStreamStatsString() const;
|
||||||
|
|
||||||
void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode);
|
void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode);
|
||||||
|
@ -44,11 +39,9 @@ public:
|
||||||
quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }
|
quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
QList<PositionalAudioRingBuffer*> _ringBuffers;
|
QHash<QUuid, PositionalAudioStream*> _audioStreams; // mic stream stored under key of null UUID
|
||||||
|
|
||||||
quint16 _outgoingMixedAudioSequenceNumber;
|
quint16 _outgoingMixedAudioSequenceNumber;
|
||||||
SequenceNumberStats _incomingAvatarAudioSequenceNumberStats;
|
|
||||||
QHash<QUuid, SequenceNumberStats> _incomingInjectedAudioSequenceNumberStatsMap;
|
|
||||||
|
|
||||||
AudioStreamStats _downstreamAudioStreamStats;
|
AudioStreamStats _downstreamAudioStreamStats;
|
||||||
};
|
};
|
||||||
|
|
|
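Note: the refactor above keys every incoming stream in a single QHash, with the microphone stream filed under a null QUuid (see the comment on _audioStreams). A minimal sketch of the lookup that getAvatarAudioStream() presumably performs; the forward declaration is a stand-in and this is a guess at the shape, not the committed implementation:

#include <QtCore/QHash>
#include <QtCore/QUuid>

class PositionalAudioStream;   // stand-in forward declaration

PositionalAudioStream* avatarStreamFrom(const QHash<QUuid, PositionalAudioStream*>& streams) {
    // QHash::value() returns a default-constructed value (NULL pointer) when
    // the key is absent, so callers can simply test the result
    return streams.value(QUuid());
}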
assignment-client/src/audio/AvatarAudioRingBuffer.cpp (deleted file, 70 lines)
@@ -1,70 +0,0 @@
-//
-//  AvatarAudioRingBuffer.cpp
-//  assignment-client/src/audio
-//
-//  Created by Stephen Birarda on 6/5/13.
-//  Copyright 2013 High Fidelity, Inc.
-//
-//  Distributed under the Apache License, Version 2.0.
-//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
-//
-
-#include <PacketHeaders.h>
-
-#include "AvatarAudioRingBuffer.h"
-
-AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBuffer) :
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo, dynamicJitterBuffer) {
-
-}
-
-int AvatarAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) {
-    frameReceivedUpdateTimingStats();
-
-    _shouldLoopbackForNode = (packetTypeForPacket(packet) == PacketTypeMicrophoneAudioWithEcho);
-
-    // skip the packet header (includes the source UUID)
-    int readBytes = numBytesForPacketHeader(packet);
-
-    // skip the sequence number
-    readBytes += sizeof(quint16);
-
-    // hop over the channel flag that has already been read in AudioMixerClientData
-    readBytes += sizeof(quint8);
-    // read the positional data
-    readBytes += parsePositionalData(packet.mid(readBytes));
-
-    if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {
-        // this source had no audio to send us, but this counts as a packet
-        // write silence equivalent to the number of silent samples they just sent us
-        int16_t numSilentSamples;
-
-        memcpy(&numSilentSamples, packet.data() + readBytes, sizeof(int16_t));
-        readBytes += sizeof(int16_t);
-
-        // add silent samples for the dropped packets as well.
-        // ASSUME that each dropped packet had same number of silent samples as this one
-        numSilentSamples *= (packetsSkipped + 1);
-
-        // NOTE: fixes a bug in old clients that would send garbage for their number of silentSamples
-        // CAN'T DO THIS because ScriptEngine.cpp sends frames of different size due to having a different sending interval
-        // (every 16.667ms) than Audio.cpp (every 10.667ms)
-        //numSilentSamples = getSamplesPerFrame();
-
-        addDroppableSilentSamples(numSilentSamples);
-
-    } else {
-        int numAudioBytes = packet.size() - readBytes;
-        int numAudioSamples = numAudioBytes / sizeof(int16_t);
-
-        // add silent samples for the dropped packets.
-        // ASSUME that each dropped packet had same number of samples as this one
-        if (packetsSkipped > 0) {
-            addDroppableSilentSamples(packetsSkipped * numAudioSamples);
-        }
-
-        // there is audio data to read
-        readBytes += writeData(packet.data() + readBytes, numAudioBytes);
-    }
-    return readBytes;
-}
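Note: the deleted parser above backfills lost packets with silence, on the stated assumption that each skipped packet carried the same number of samples as the one that arrived (numSilentSamples * (packetsSkipped + 1)). A self-contained sketch of that recovery rule, using a plain vector as a stand-in for the ring buffer:

#include <cstdint>
#include <vector>

void receiveFrame(std::vector<int16_t>& ringBuffer, const std::vector<int16_t>& samples,
                  uint16_t sequence, uint16_t& lastSequence) {
    // 16-bit arithmetic wraps, so the subtraction also handles sequence rollover
    int packetsSkipped = static_cast<uint16_t>(sequence - lastSequence) - 1;
    lastSequence = sequence;
    if (packetsSkipped > 0) {
        // one packet's worth of silence per skipped packet keeps playback aligned
        ringBuffer.insert(ringBuffer.end(), packetsSkipped * samples.size(), 0);
    }
    ringBuffer.insert(ringBuffer.end(), samples.begin(), samples.end());
}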
assignment-client/src/audio/AvatarAudioRingBuffer.h (deleted file, 30 lines)
@@ -1,30 +0,0 @@
-//
-//  AvatarAudioRingBuffer.h
-//  assignment-client/src/audio
-//
-//  Created by Stephen Birarda on 6/5/13.
-//  Copyright 2013 High Fidelity, Inc.
-//
-//  Distributed under the Apache License, Version 2.0.
-//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
-//
-
-#ifndef hifi_AvatarAudioRingBuffer_h
-#define hifi_AvatarAudioRingBuffer_h
-
-#include <QtCore/QUuid>
-
-#include "PositionalAudioRingBuffer.h"
-
-class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
-public:
-    AvatarAudioRingBuffer(bool isStereo = false, bool dynamicJitterBuffer = false);
-
-    int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped);
-private:
-    // disallow copying of AvatarAudioRingBuffer objects
-    AvatarAudioRingBuffer(const AvatarAudioRingBuffer&);
-    AvatarAudioRingBuffer& operator= (const AvatarAudioRingBuffer&);
-};
-
-#endif // hifi_AvatarAudioRingBuffer_h
assignment-client/src/audio/AvatarAudioStream.cpp (new file, 63 lines)
@@ -0,0 +1,63 @@
+//
+//  AvatarAudioStream.cpp
+//  assignment-client/src/audio
+//
+//  Created by Stephen Birarda on 6/5/13.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include <PacketHeaders.h>
+
+#include "AvatarAudioStream.h"
+
+AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
+    PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired)
+{
+}
+
+int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+
+    _shouldLoopbackForNode = (type == PacketTypeMicrophoneAudioWithEcho);
+
+    int readBytes = 0;
+
+    // read the channel flag
+    quint8 channelFlag = packetAfterSeqNum.at(readBytes);
+    bool isStereo = channelFlag == 1;
+    readBytes += sizeof(quint8);
+
+    // if isStereo value has changed, restart the ring buffer with new frame size
+    if (isStereo != _isStereo) {
+        _ringBuffer.resizeForFrameSize(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+        _isStereo = isStereo;
+    }
+
+    // read the positional data
+    readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));
+
+    if (type == PacketTypeSilentAudioFrame) {
+        int16_t numSilentSamples;
+        memcpy(&numSilentSamples, packetAfterSeqNum.data() + readBytes, sizeof(int16_t));
+        readBytes += sizeof(int16_t);
+
+        numAudioSamples = numSilentSamples;
+    } else {
+        int numAudioBytes = packetAfterSeqNum.size() - readBytes;
+        numAudioSamples = numAudioBytes / sizeof(int16_t);
+    }
+    return readBytes;
+}
+
+int AvatarAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+    int readBytes = 0;
+    if (type == PacketTypeSilentAudioFrame) {
+        writeDroppableSilentSamples(numAudioSamples);
+    } else {
+        // there is audio data to read
+        readBytes += _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
+    }
+    return readBytes;
+}
assignment-client/src/audio/AvatarAudioStream.h (new file, 32 lines)
@@ -0,0 +1,32 @@
+//
+//  AvatarAudioStream.h
+//  assignment-client/src/audio
+//
+//  Created by Stephen Birarda on 6/5/13.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_AvatarAudioStream_h
+#define hifi_AvatarAudioStream_h
+
+#include <QtCore/QUuid>
+
+#include "PositionalAudioStream.h"
+
+class AvatarAudioStream : public PositionalAudioStream {
+public:
+    AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
+
+private:
+    // disallow copying of AvatarAudioStream objects
+    AvatarAudioStream(const AvatarAudioStream&);
+    AvatarAudioStream& operator= (const AvatarAudioStream&);
+
+    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
+    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
+};
+
+#endif // hifi_AvatarAudioStream_h
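Note: the new stream class splits packet parsing into two hooks: parseStreamProperties() reads the fixed header fields and reports how many samples follow, then parseAudioData() consumes exactly that many. The base-class driver is not shown in this diff; the sketch below is a guess at its shape with stand-in types, not hifi API. The split plausibly lets the base class own sequence-number and jitter bookkeeping while each subclass only describes its payload layout.

#include <cstdint>
#include <cstring>
#include <vector>

enum PacketType { MicrophoneAudio, MicrophoneAudioWithEcho, SilentAudioFrame };

struct Frame { bool loopback = false; bool stereo = false; int numSamples = 0; };

// phase 1: channel flag, then either a silent-sample count or an implied PCM length
// (positional data is skipped here for brevity)
int parseStreamProperties(PacketType type, const std::vector<uint8_t>& payload, Frame& frame) {
    int readBytes = 0;
    frame.loopback = (type == MicrophoneAudioWithEcho);
    frame.stereo = (payload.at(readBytes) == 1);
    readBytes += 1;
    if (type == SilentAudioFrame) {
        int16_t numSilentSamples = 0;
        std::memcpy(&numSilentSamples, payload.data() + readBytes, sizeof(int16_t));
        readBytes += sizeof(int16_t);
        frame.numSamples = numSilentSamples;
    } else {
        frame.numSamples = static_cast<int>((payload.size() - readBytes) / sizeof(int16_t));
    }
    return readBytes;
}

// phase 2: either synthesize silence or copy PCM into the jitter buffer
void parseAudioData(PacketType type, const uint8_t* audioBytes, const Frame& frame,
                    std::vector<int16_t>& jitterBuffer) {
    if (type == SilentAudioFrame) {
        jitterBuffer.insert(jitterBuffer.end(), frame.numSamples, 0);
    } else {
        const int16_t* samples = reinterpret_cast<const int16_t*>(audioBytes);
        jitterBuffer.insert(jitterBuffer.end(), samples, samples + frame.numSamples);
    }
}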
|
@ -3,17 +3,29 @@
|
||||||
"label": "Audio",
|
"label": "Audio",
|
||||||
"assignment-types": [0],
|
"assignment-types": [0],
|
||||||
"settings": {
|
"settings": {
|
||||||
"unattenuated-zone": {
|
"A-dynamic-jitter-buffer": {
|
||||||
"label": "Unattenuated Zone",
|
|
||||||
"help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)",
|
|
||||||
"placeholder": "no zone",
|
|
||||||
"default": ""
|
|
||||||
},
|
|
||||||
"dynamic-jitter-buffer": {
|
|
||||||
"type": "checkbox",
|
"type": "checkbox",
|
||||||
"label": "Dynamic Jitter Buffers",
|
"label": "Dynamic Jitter Buffers",
|
||||||
"help": "Dynamically buffer client audio based on perceived jitter in packet receipt timing",
|
"help": "Dynamically buffer client audio based on perceived jitter in packet receipt timing",
|
||||||
"default": false
|
"default": false
|
||||||
|
},
|
||||||
|
"B-desired-jitter-buffer-frames": {
|
||||||
|
"label": "Desired Jitter Buffer Frames",
|
||||||
|
"help": "If dynamic jitter buffers is disabled, this determines the target number of frames maintained by the AudioMixer's jitter buffers",
|
||||||
|
"placeholder": "1",
|
||||||
|
"default": "1"
|
||||||
|
},
|
||||||
|
"C-max-frames-over-desired": {
|
||||||
|
"label": "Max Frames Over Desired",
|
||||||
|
"help": "The highest number of frames an AudioMixer's ringbuffer can exceed the desired jitter buffer frames by",
|
||||||
|
"placeholder": "10",
|
||||||
|
"default": "10"
|
||||||
|
},
|
||||||
|
"D-unattenuated-zone": {
|
||||||
|
"label": "Unattenuated Zone",
|
||||||
|
"help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)",
|
||||||
|
"placeholder": "no zone",
|
||||||
|
"default": ""
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
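Note: the three new settings express one policy: pick a desired jitter-buffer depth (fixed, or measured when the dynamic checkbox is on) and cap how far the buffer may grow past it. A sketch of that policy with illustrative names, not the AudioMixer's actual code:

#include <algorithm>

struct JitterBufferSettings {
    bool dynamicJitterBuffer = false;   // "A-dynamic-jitter-buffer"
    int desiredFrames = 1;              // "B-desired-jitter-buffer-frames"
    int maxFramesOverDesired = 10;      // "C-max-frames-over-desired"
};

// On each incoming frame: if the buffer has grown past desired + slack, report
// how many of the oldest frames to drop so latency cannot creep up unbounded.
int framesToDrop(const JitterBufferSettings& s, int framesBuffered, int dynamicDesired) {
    int desired = s.dynamicJitterBuffer ? dynamicDesired : s.desiredFrames;
    int ceiling = desired + s.maxFramesOverDesired;
    return std::max(0, framesBuffered - ceiling);
}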
examples/bot_procedural.js (new file, 674 lines)
@@ -0,0 +1,674 @@
+//
+// bot_procedural.js
+// hifi
+//
+// Created by Ben Arnold on 7/29/2013
+//
+// Copyright (c) 2014 HighFidelity, Inc. All rights reserved.
+//
+// This is an example script that demonstrates an NPC avatar.
+//
+//
+
+//For procedural walk animation
+Script.include("http://s3-us-west-1.amazonaws.com/highfidelity-public/scripts/proceduralAnimationAPI.js");
+
+var procAnimAPI = new ProcAnimAPI();
+
+function getRandomFloat(min, max) {
+    return Math.random() * (max - min) + min;
+}
+
+function getRandomInt (min, max) {
+    return Math.floor(Math.random() * (max - min + 1)) + min;
+}
+
+function printVector(string, vector) {
+    print(string + " " + vector.x + ", " + vector.y + ", " + vector.z);
+}
+
+var CHANCE_OF_MOVING = 0.005;
+var CHANCE_OF_SOUND = 0.005;
+var CHANCE_OF_HEAD_TURNING = 0.01;
+var CHANCE_OF_BIG_MOVE = 1.0;
+
+var isMoving = false;
+var isTurningHead = false;
+var isPlayingAudio = false;
+
+var X_MIN = 0.50;
+var X_MAX = 15.60;
+var Z_MIN = 0.50;
+var Z_MAX = 15.10;
+var Y_FEET = 0.0;
+var AVATAR_PELVIS_HEIGHT = 0.84;
+var Y_PELVIS = Y_FEET + AVATAR_PELVIS_HEIGHT;
+var MAX_PELVIS_DELTA = 2.5;
+
+var MOVE_RANGE_SMALL = 3.0;
+var MOVE_RANGE_BIG = 10.0;
+var TURN_RANGE = 70.0;
+var STOP_TOLERANCE = 0.05;
+var MOVE_RATE = 0.05;
+var TURN_RATE = 0.2;
+var HEAD_TURN_RATE = 0.05;
+var PITCH_RANGE = 15.0;
+var YAW_RANGE = 35.0;
+
+var firstPosition = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) };
+var targetPosition = { x: 0, y: 0, z: 0 };
+var targetOrientation = { x: 0, y: 0, z: 0, w: 0 };
+var currentOrientation = { x: 0, y: 0, z: 0, w: 0 };
+var targetHeadPitch = 0.0;
+var targetHeadYaw = 0.0;
+
+var basePelvisHeight = 0.0;
+var pelvisOscillatorPosition = 0.0;
+var pelvisOscillatorVelocity = 0.0;
+
+function clamp(val, min, max){
+    return Math.max(min, Math.min(max, val))
+}
+
+//Array of all valid bot numbers
+var validBotNumbers = [];
+
+// right now we only use bot 63, since many other bots have messed up skeletons and LOD issues
+var botNumber = 63;//getRandomInt(0, 99);
+
+var newFaceFilePrefix = "ron";
+
+var newBodyFilePrefix = "bot" + botNumber;
+
+// set the face model fst using the bot number
+// there is no need to change the body model - we're using the default
+Avatar.faceModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newFaceFilePrefix + ".fst";
+Avatar.skeletonModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newBodyFilePrefix + "_a.fst";
+Avatar.billboardURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/billboards/bot" + botNumber + ".png";
+
+Agent.isAvatar = true;
+Agent.isListeningToAudioStream = true;
+
+// change the avatar's position to the random one
+Avatar.position = firstPosition;
+basePelvisHeight = firstPosition.y;
+printVector("New dancer, position = ", Avatar.position);
+
+function loadSounds() {
+    var sound_filenames = ["AB1.raw", "Anchorman2.raw", "B1.raw", "B1.raw", "Bale1.raw", "Bandcamp.raw",
+        "Big1.raw", "Big2.raw", "Brian1.raw", "Buster1.raw", "CES1.raw", "CES2.raw", "CES3.raw", "CES4.raw",
+        "Carrie1.raw", "Carrie3.raw", "Charlotte1.raw", "EN1.raw", "EN2.raw", "EN3.raw", "Eugene1.raw", "Francesco1.raw",
+        "Italian1.raw", "Japanese1.raw", "Leigh1.raw", "Lucille1.raw", "Lucille2.raw", "MeanGirls.raw", "Murray2.raw",
+        "Nigel1.raw", "PennyLane.raw", "Pitt1.raw", "Ricardo.raw", "SN.raw", "Sake1.raw", "Samantha1.raw", "Samantha2.raw",
+        "Spicoli1.raw", "Supernatural.raw", "Swearengen1.raw", "TheDude.raw", "Tony.raw", "Triumph1.raw", "Uma1.raw",
+        "Walken1.raw", "Walken2.raw", "Z1.raw", "Z2.raw"
+    ];
+
+    var footstep_filenames = ["FootstepW2Left-12db.wav", "FootstepW2Right-12db.wav", "FootstepW3Left-12db.wav", "FootstepW3Right-12db.wav",
+        "FootstepW5Left-12db.wav", "FootstepW5Right-12db.wav"];
+
+    var SOUND_BASE_URL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Cocktail+Party+Snippets/Raws/";
+
+    var FOOTSTEP_BASE_URL = "http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/Footsteps/";
+
+    for (var i = 0; i < sound_filenames.length; i++) {
+        sounds.push(new Sound(SOUND_BASE_URL + sound_filenames[i]));
+    }
+
+    for (var i = 0; i < footstep_filenames.length; i++) {
+        footstepSounds.push(new Sound(FOOTSTEP_BASE_URL + footstep_filenames[i]));
+    }
+}
+
+var sounds = [];
+var footstepSounds = [];
+loadSounds();
+
+
+function playRandomSound() {
+    if (!Agent.isPlayingAvatarSound) {
+        var whichSound = Math.floor((Math.random() * sounds.length));
+        Agent.playAvatarSound(sounds[whichSound]);
+    }
+}
+
+function playRandomFootstepSound() {
+
+    var whichSound = Math.floor((Math.random() * footstepSounds.length));
+    var options = new AudioInjectionOptions();
+    options.position = Avatar.position;
+    options.volume = 1.0;
+    Audio.playSound(footstepSounds[whichSound], options);
+
+}
+
+// ************************************ Facial Animation **********************************
+var allBlendShapes = [];
+var targetBlendCoefficient = [];
+var currentBlendCoefficient = [];
+
+//Blendshape constructor
+function addBlendshapeToPose(pose, shapeIndex, val) {
+    var index = pose.blendShapes.length;
+    pose.blendShapes[index] = {shapeIndex: shapeIndex, val: val };
+}
+//The mood of the avatar, determines face. 0 = happy, 1 = angry, 2 = sad.
+
+//Randomly pick avatar mood. 80% happy, 10% mad 10% sad
+var randMood = Math.floor(Math.random() * 11);
+var avatarMood;
+if (randMood == 0) {
+    avatarMood = 1;
+} else if (randMood == 2) {
+    avatarMood = 2;
+} else {
+    avatarMood = 0;
+}
+
+var currentExpression = -1;
+//Face pose constructor
+var happyPoses = [];
+
+happyPoses[0] = {blendShapes: []};
+addBlendshapeToPose(happyPoses[0], 28, 0.7); //MouthSmile_L
+addBlendshapeToPose(happyPoses[0], 29, 0.7); //MouthSmile_R
+
+happyPoses[1] = {blendShapes: []};
+addBlendshapeToPose(happyPoses[1], 28, 1.0); //MouthSmile_L
+addBlendshapeToPose(happyPoses[1], 29, 1.0); //MouthSmile_R
+addBlendshapeToPose(happyPoses[1], 21, 0.2); //JawOpen
+
+happyPoses[2] = {blendShapes: []};
+addBlendshapeToPose(happyPoses[2], 28, 1.0); //MouthSmile_L
+addBlendshapeToPose(happyPoses[2], 29, 1.0); //MouthSmile_R
+addBlendshapeToPose(happyPoses[2], 21, 0.5); //JawOpen
+addBlendshapeToPose(happyPoses[2], 46, 1.0); //CheekSquint_L
+addBlendshapeToPose(happyPoses[2], 47, 1.0); //CheekSquint_R
+addBlendshapeToPose(happyPoses[2], 17, 1.0); //BrowsU_L
+addBlendshapeToPose(happyPoses[2], 18, 1.0); //BrowsU_R
+
+var angryPoses = [];
+
+angryPoses[0] = {blendShapes: []};
+addBlendshapeToPose(angryPoses[0], 26, 0.6); //MouthFrown_L
+addBlendshapeToPose(angryPoses[0], 27, 0.6); //MouthFrown_R
+addBlendshapeToPose(angryPoses[0], 14, 0.6); //BrowsD_L
+addBlendshapeToPose(angryPoses[0], 15, 0.6); //BrowsD_R
+
+angryPoses[1] = {blendShapes: []};
+addBlendshapeToPose(angryPoses[1], 26, 0.9); //MouthFrown_L
+addBlendshapeToPose(angryPoses[1], 27, 0.9); //MouthFrown_R
+addBlendshapeToPose(angryPoses[1], 14, 0.9); //BrowsD_L
+addBlendshapeToPose(angryPoses[1], 15, 0.9); //BrowsD_R
+
+angryPoses[2] = {blendShapes: []};
+addBlendshapeToPose(angryPoses[2], 26, 1.0); //MouthFrown_L
+addBlendshapeToPose(angryPoses[2], 27, 1.0); //MouthFrown_R
+addBlendshapeToPose(angryPoses[2], 14, 1.0); //BrowsD_L
+addBlendshapeToPose(angryPoses[2], 15, 1.0); //BrowsD_R
+addBlendshapeToPose(angryPoses[2], 21, 0.5); //JawOpen
+addBlendshapeToPose(angryPoses[2], 46, 1.0); //CheekSquint_L
+addBlendshapeToPose(angryPoses[2], 47, 1.0); //CheekSquint_R
+
+var sadPoses = [];
+
+sadPoses[0] = {blendShapes: []};
+addBlendshapeToPose(sadPoses[0], 26, 0.6); //MouthFrown_L
+addBlendshapeToPose(sadPoses[0], 27, 0.6); //MouthFrown_R
+addBlendshapeToPose(sadPoses[0], 16, 0.2); //BrowsU_C
+addBlendshapeToPose(sadPoses[0], 2, 0.6); //EyeSquint_L
+addBlendshapeToPose(sadPoses[0], 3, 0.6); //EyeSquint_R
+
+sadPoses[1] = {blendShapes: []};
+addBlendshapeToPose(sadPoses[1], 26, 0.9); //MouthFrown_L
+addBlendshapeToPose(sadPoses[1], 27, 0.9); //MouthFrown_R
+addBlendshapeToPose(sadPoses[1], 16, 0.6); //BrowsU_C
+addBlendshapeToPose(sadPoses[1], 2, 0.9); //EyeSquint_L
+addBlendshapeToPose(sadPoses[1], 3, 0.9); //EyeSquint_R
+
+sadPoses[2] = {blendShapes: []};
+addBlendshapeToPose(sadPoses[2], 26, 1.0); //MouthFrown_L
+addBlendshapeToPose(sadPoses[2], 27, 1.0); //MouthFrown_R
+addBlendshapeToPose(sadPoses[2], 16, 0.1); //BrowsU_C
+addBlendshapeToPose(sadPoses[2], 2, 1.0); //EyeSquint_L
+addBlendshapeToPose(sadPoses[2], 3, 1.0); //EyeSquint_R
+addBlendshapeToPose(sadPoses[2], 21, 0.3); //JawOpen
+
+var facePoses = [];
+facePoses[0] = happyPoses;
+facePoses[1] = angryPoses;
+facePoses[2] = sadPoses;
+
+
+function addBlendShape(s) {
+    allBlendShapes[allBlendShapes.length] = s;
+}
+
+//It is imperative that the following blendshapes are all present and are in the correct order
+addBlendShape("EyeBlink_L"); //0
+addBlendShape("EyeBlink_R"); //1
+addBlendShape("EyeSquint_L"); //2
+addBlendShape("EyeSquint_R"); //3
+addBlendShape("EyeDown_L"); //4
+addBlendShape("EyeDown_R"); //5
+addBlendShape("EyeIn_L"); //6
+addBlendShape("EyeIn_R"); //7
+addBlendShape("EyeOpen_L"); //8
+addBlendShape("EyeOpen_R"); //9
+addBlendShape("EyeOut_L"); //10
+addBlendShape("EyeOut_R"); //11
+addBlendShape("EyeUp_L"); //12
+addBlendShape("EyeUp_R"); //13
+addBlendShape("BrowsD_L"); //14
+addBlendShape("BrowsD_R"); //15
+addBlendShape("BrowsU_C"); //16
+addBlendShape("BrowsU_L"); //17
+addBlendShape("BrowsU_R"); //18
+addBlendShape("JawFwd"); //19
+addBlendShape("JawLeft"); //20
+addBlendShape("JawOpen"); //21
+addBlendShape("JawChew"); //22
+addBlendShape("JawRight"); //23
+addBlendShape("MouthLeft"); //24
+addBlendShape("MouthRight"); //25
+addBlendShape("MouthFrown_L"); //26
+addBlendShape("MouthFrown_R"); //27
+addBlendShape("MouthSmile_L"); //28
+addBlendShape("MouthSmile_R"); //29
+addBlendShape("MouthDimple_L"); //30
+addBlendShape("MouthDimple_R"); //31
+addBlendShape("LipsStretch_L"); //32
+addBlendShape("LipsStretch_R"); //33
+addBlendShape("LipsUpperClose"); //34
+addBlendShape("LipsLowerClose"); //35
+addBlendShape("LipsUpperUp"); //36
+addBlendShape("LipsLowerDown"); //37
+addBlendShape("LipsUpperOpen"); //38
+addBlendShape("LipsLowerOpen"); //39
+addBlendShape("LipsFunnel"); //40
+addBlendShape("LipsPucker"); //41
+addBlendShape("ChinLowerRaise"); //42
+addBlendShape("ChinUpperRaise"); //43
+addBlendShape("Sneer"); //44
+addBlendShape("Puff"); //45
+addBlendShape("CheekSquint_L"); //46
+addBlendShape("CheekSquint_R"); //47
+
+for (var i = 0; i < allBlendShapes.length; i++) {
+    targetBlendCoefficient[i] = 0;
+    currentBlendCoefficient[i] = 0;
+}
+
+function setRandomExpression() {
+
+    //Clear all expression data for current expression
+    if (currentExpression != -1) {
+        var expression = facePoses[avatarMood][currentExpression];
+        for (var i = 0; i < expression.blendShapes.length; i++) {
+            targetBlendCoefficient[expression.blendShapes[i].shapeIndex] = 0.0;
+        }
+    }
+    //Get a new current expression
+    currentExpression = Math.floor(Math.random() * facePoses[avatarMood].length);
+    var expression = facePoses[avatarMood][currentExpression];
+    for (var i = 0; i < expression.blendShapes.length; i++) {
+        targetBlendCoefficient[expression.blendShapes[i].shapeIndex] = expression.blendShapes[i].val;
+    }
+}
+
+var expressionChangeSpeed = 0.1;
+function updateBlendShapes(deltaTime) {
+
+    for (var i = 0; i < allBlendShapes.length; i++) {
+        currentBlendCoefficient[i] += (targetBlendCoefficient[i] - currentBlendCoefficient[i]) * expressionChangeSpeed;
+        Avatar.setBlendshape(allBlendShapes[i], currentBlendCoefficient[i]);
+    }
+}
+
+var BLINK_SPEED = 0.15;
+var CHANCE_TO_BLINK = 0.0025;
+var MAX_BLINK = 0.85;
+var blink = 0.0;
+var isBlinking = false;
+function updateBlinking(deltaTime) {
+    if (isBlinking == false) {
+        if (Math.random() < CHANCE_TO_BLINK) {
+            isBlinking = true;
+        } else {
+            blink -= BLINK_SPEED;
+            if (blink < 0.0) blink = 0.0;
+        }
+    } else {
+        blink += BLINK_SPEED;
+        if (blink > MAX_BLINK) {
+            blink = MAX_BLINK;
+            isBlinking = false;
+        }
+    }
+
+    currentBlendCoefficient[0] = blink;
+    currentBlendCoefficient[1] = blink;
+    targetBlendCoefficient[0] = blink;
+    targetBlendCoefficient[1] = blink;
+}
+
+// *************************************************************************************
+
+//Procedural walk animation using two keyframes
+//We use a separate array for front and back joints
+//Pitch, yaw, and roll for the joints
+var rightAngles = [];
+var leftAngles = [];
+//for non mirrored joints such as the spine
+var middleAngles = [];
+
+//Actual joint mappings
+var SHOULDER_JOINT_NUMBER = 15;
+var ELBOW_JOINT_NUMBER = 16;
+var JOINT_R_HIP = 1;
+var JOINT_R_KNEE = 2;
+var JOINT_L_HIP = 6;
+var JOINT_L_KNEE = 7;
+var JOINT_R_ARM = 15;
+var JOINT_R_FOREARM = 16;
+var JOINT_L_ARM = 39;
+var JOINT_L_FOREARM = 40;
+var JOINT_SPINE = 11;
+var JOINT_R_FOOT = 3;
+var JOINT_L_FOOT = 8;
+var JOINT_R_TOE = 4;
+var JOINT_L_TOE = 9;
+
+// ******************************* Animation Is Defined Below *************************************
+
+var NUM_FRAMES = 2;
+for (var i = 0; i < NUM_FRAMES; i++) {
+    rightAngles[i] = [];
+    leftAngles[i] = [];
+    middleAngles[i] = [];
+}
+//Joint order for actual joint mappings, should be interleaved R,L,R,L,...S,S,S for R = right, L = left, S = single
+var JOINT_ORDER = [];
+//*** right / left joints ***
+var HIP = 0;
+JOINT_ORDER.push(JOINT_R_HIP);
+JOINT_ORDER.push(JOINT_L_HIP);
+var KNEE = 1;
+JOINT_ORDER.push(JOINT_R_KNEE);
+JOINT_ORDER.push(JOINT_L_KNEE);
+var ARM = 2;
+JOINT_ORDER.push(JOINT_R_ARM);
+JOINT_ORDER.push(JOINT_L_ARM);
+var FOREARM = 3;
+JOINT_ORDER.push(JOINT_R_FOREARM);
+JOINT_ORDER.push(JOINT_L_FOREARM);
+var FOOT = 4;
+JOINT_ORDER.push(JOINT_R_FOOT);
+JOINT_ORDER.push(JOINT_L_FOOT);
+var TOE = 5;
+JOINT_ORDER.push(JOINT_R_TOE);
+JOINT_ORDER.push(JOINT_L_TOE);
+//*** middle joints ***
+var SPINE = 0;
+JOINT_ORDER.push(JOINT_SPINE);
+
+//We have to store the angles so we can invert yaw and roll when making the animation
+//symmetrical
+
+//Front refers to leg, not arm.
+//Legs Extending
+rightAngles[0][HIP] = [30.0, 0.0, 8.0];
+rightAngles[0][KNEE] = [-15.0, 0.0, 0.0];
+rightAngles[0][ARM] = [85.0, -25.0, 0.0];
+rightAngles[0][FOREARM] = [0.0, 0.0, -15.0];
+rightAngles[0][FOOT] = [0.0, 0.0, 0.0];
+rightAngles[0][TOE] = [0.0, 0.0, 0.0];
+
+leftAngles[0][HIP] = [-15, 0.0, 8.0];
+leftAngles[0][KNEE] = [-26, 0.0, 0.0];
+leftAngles[0][ARM] = [85.0, 20.0, 0.0];
+leftAngles[0][FOREARM] = [10.0, 0.0, -25.0];
+leftAngles[0][FOOT] = [-13.0, 0.0, 0.0];
+leftAngles[0][TOE] = [34.0, 0.0, 0.0];
+
+middleAngles[0][SPINE] = [0.0, -15.0, 5.0];
+
+//Legs Passing
+rightAngles[1][HIP] = [6.0, 0.0, 8.0];
+rightAngles[1][KNEE] = [-12.0, 0.0, 0.0];
+rightAngles[1][ARM] = [85.0, 0.0, 0.0];
+rightAngles[1][FOREARM] = [0.0, 0.0, -15.0];
+rightAngles[1][FOOT] = [6.0, -8.0, 0.0];
+rightAngles[1][TOE] = [0.0, 0.0, 0.0];
+
+leftAngles[1][HIP] = [10.0, 0.0, 8.0];
+leftAngles[1][KNEE] = [-60.0, 0.0, 0.0];
+leftAngles[1][ARM] = [85.0, 0.0, 0.0];
+leftAngles[1][FOREARM] = [0.0, 0.0, -15.0];
+leftAngles[1][FOOT] = [0.0, 0.0, 0.0];
+leftAngles[1][TOE] = [0.0, 0.0, 0.0];
+
+middleAngles[1][SPINE] = [0.0, 0.0, 0.0];
+
+//Actual keyframes for the animation
+var walkKeyFrames = procAnimAPI.generateKeyframes(rightAngles, leftAngles, middleAngles, NUM_FRAMES);
+
+// ******************************* Animation Is Defined Above *************************************
+
+// ********************************** Standing Key Frame ******************************************
+//We don't have to do any mirroring or anything, since this is just a single pose.
+var rightQuats = [];
+var leftQuats = [];
+var middleQuats = [];
+
+rightQuats[HIP] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 7.0);
+rightQuats[KNEE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0);
+rightQuats[ARM] = Quat.fromPitchYawRollDegrees(85.0, 0.0, 0.0);
+rightQuats[FOREARM] = Quat.fromPitchYawRollDegrees(0.0, 0.0, -10.0);
+rightQuats[FOOT] = Quat.fromPitchYawRollDegrees(0.0, -8.0, 0.0);
+rightQuats[TOE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0);
+
+leftQuats[HIP] = Quat.fromPitchYawRollDegrees(0, 0.0, -7.0);
+leftQuats[KNEE] = Quat.fromPitchYawRollDegrees(0, 0.0, 0.0);
+leftQuats[ARM] = Quat.fromPitchYawRollDegrees(85.0, 0.0, 0.0);
+leftQuats[FOREARM] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 10.0);
+leftQuats[FOOT] = Quat.fromPitchYawRollDegrees(0.0, 8.0, 0.0);
+leftQuats[TOE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0);
+
+middleQuats[SPINE] = Quat.fromPitchYawRollDegrees(0.0, 0.0, 0.0);
+
+var standingKeyFrame = new procAnimAPI.KeyFrame(rightQuats, leftQuats, middleQuats);
+
+// ************************************************************************************************
+
+
+var currentFrame = 0;
+
+var walkTime = 0.0;
+
+var walkWheelRadius = 0.5;
+var walkWheelRate = 2.0 * 3.141592 * walkWheelRadius / 8.0;
+
+var avatarAcceleration = 0.75;
+var avatarVelocity = 0.0;
+var avatarMaxVelocity = 1.4;
+
+function handleAnimation(deltaTime) {
+
+    updateBlinking(deltaTime);
+    updateBlendShapes(deltaTime);
+
+    if (Math.random() < 0.01) {
+        setRandomExpression();
+    }
+
+    if (avatarVelocity == 0.0) {
+        walkTime = 0.0;
+        currentFrame = 0;
+    } else {
+        walkTime += avatarVelocity * deltaTime;
+        if (walkTime > walkWheelRate) {
+            walkTime = 0.0;
+            currentFrame++;
+            if (currentFrame % 2 == 1) {
+                playRandomFootstepSound();
+            }
+            if (currentFrame > 3) {
+                currentFrame = 0;
+            }
+        }
+    }
+
+    var frame = walkKeyFrames[currentFrame];
+
+    var walkInterp = walkTime / walkWheelRate;
+    var animInterp = avatarVelocity / (avatarMaxVelocity / 1.3);
+    if (animInterp > 1.0) animInterp = 1.0;
+
+    for (var i = 0; i < JOINT_ORDER.length; i++) {
+        var walkJoint = procAnimAPI.deCasteljau(frame.rotations[i], frame.nextFrame.rotations[i], frame.controlPoints[i][0], frame.controlPoints[i][1], walkInterp);
+        var standJoint = standingKeyFrame.rotations[i];
+        var finalJoint = Quat.mix(standJoint, walkJoint, animInterp);
+        Avatar.setJointData(JOINT_ORDER[i], finalJoint);
+    }
+}
+
+function jumpWithLoudness(deltaTime) {
+    // potentially change pelvis height depending on trailing average loudness
+
+    pelvisOscillatorVelocity += deltaTime * Agent.lastReceivedAudioLoudness * 700.0 ;
+
+    pelvisOscillatorVelocity -= pelvisOscillatorPosition * 0.75;
+    pelvisOscillatorVelocity *= 0.97;
+    pelvisOscillatorPosition += deltaTime * pelvisOscillatorVelocity;
+    Avatar.headPitch = pelvisOscillatorPosition * 60.0;
+
+    var pelvisPosition = Avatar.position;
+    pelvisPosition.y = (Y_PELVIS - 0.35) + pelvisOscillatorPosition;
+
+    if (pelvisPosition.y < Y_PELVIS) {
+        pelvisPosition.y = Y_PELVIS;
+    } else if (pelvisPosition.y > Y_PELVIS + 1.0) {
+        pelvisPosition.y = Y_PELVIS + 1.0;
+    }
+
+    Avatar.position = pelvisPosition;
+}
+
+var forcedMove = false;
+
+var wasMovingLastFrame = false;
+
+function handleHeadTurn() {
+    if (!isTurningHead && (Math.random() < CHANCE_OF_HEAD_TURNING)) {
+        targetHeadPitch = getRandomFloat(-PITCH_RANGE, PITCH_RANGE);
+        targetHeadYaw = getRandomFloat(-YAW_RANGE, YAW_RANGE);
+        isTurningHead = true;
+    } else {
+        Avatar.headPitch = Avatar.headPitch + (targetHeadPitch - Avatar.headPitch) * HEAD_TURN_RATE;
+        Avatar.headYaw = Avatar.headYaw + (targetHeadYaw - Avatar.headYaw) * HEAD_TURN_RATE;
+        if (Math.abs(Avatar.headPitch - targetHeadPitch) < STOP_TOLERANCE &&
+            Math.abs(Avatar.headYaw - targetHeadYaw) < STOP_TOLERANCE) {
+            isTurningHead = false;
+        }
+    }
+}
+
+function stopWalking() {
+    avatarVelocity = 0.0;
+    isMoving = false;
+}
+
+var MAX_ATTEMPTS = 40;
+function handleWalking(deltaTime) {
+
+    if (forcedMove || (!isMoving && Math.random() < CHANCE_OF_MOVING)) {
+        // Set new target location
+
+        var moveRange;
+        if (Math.random() < CHANCE_OF_BIG_MOVE) {
+            moveRange = MOVE_RANGE_BIG;
+        } else {
+            moveRange = MOVE_RANGE_SMALL;
+        }
+
+        //Keep trying new orientations if the desired target location is out of bounds
+        var attempts = 0;
+        do {
+            targetOrientation = Quat.multiply(Avatar.orientation, Quat.angleAxis(getRandomFloat(-TURN_RANGE, TURN_RANGE), { x:0, y:1, z:0 }));
+            var front = Quat.getFront(targetOrientation);
+
+            targetPosition = Vec3.sum(Avatar.position, Vec3.multiply(front, getRandomFloat(0.0, moveRange)));
+        }
+        while ((targetPosition.x < X_MIN || targetPosition.x > X_MAX || targetPosition.z < Z_MIN || targetPosition.z > Z_MAX)
+            && attempts < MAX_ATTEMPTS);
+
+        targetPosition.x = clamp(targetPosition.x, X_MIN, X_MAX);
+        targetPosition.z = clamp(targetPosition.z, Z_MIN, Z_MAX);
+        targetPosition.y = Y_PELVIS;
+
+        wasMovingLastFrame = true;
+        isMoving = true;
+        forcedMove = false;
+    } else if (isMoving) {
+
+        var targetVector = Vec3.subtract(targetPosition, Avatar.position);
+        var distance = Vec3.length(targetVector);
+        if (distance <= avatarVelocity * deltaTime) {
+            Avatar.position = targetPosition;
+            stopWalking();
+        } else {
+            var direction = Vec3.normalize(targetVector);
+            //Figure out if we should be slowing down
+            var t = avatarVelocity / avatarAcceleration;
+            var d = (avatarVelocity / 2.0) * t;
+            if (distance < d) {
+                avatarVelocity -= avatarAcceleration * deltaTime;
+                if (avatarVelocity <= 0) {
+                    stopWalking();
+                }
+            } else {
+                avatarVelocity += avatarAcceleration * deltaTime;
+                if (avatarVelocity > avatarMaxVelocity) avatarVelocity = avatarMaxVelocity;
+            }
+            Avatar.position = Vec3.sum(Avatar.position, Vec3.multiply(direction, avatarVelocity * deltaTime));
+            Avatar.orientation = Quat.mix(Avatar.orientation, targetOrientation, TURN_RATE);
+
+            wasMovingLastFrame = true;
+
+        }
+    }
+}
+
+function handleTalking() {
+    if (Math.random() < CHANCE_OF_SOUND) {
+        playRandomSound();
+    }
+}
+
+function changePelvisHeight(newHeight) {
+    var newPosition = Avatar.position;
+    newPosition.y = newHeight;
+    Avatar.position = newPosition;
+}
+
+function updateBehavior(deltaTime) {
+
+    if (AvatarList.containsAvatarWithDisplayName("mrdj")) {
+        if (wasMovingLastFrame) {
+            isMoving = false;
+        }
+
+        // we have a DJ, shouldn't we be dancing?
+        jumpWithLoudness(deltaTime);
+    } else {
+
+        // no DJ, let's just chill on the dancefloor - randomly walking and talking
+        handleHeadTurn();
+        handleAnimation(deltaTime);
+        handleWalking(deltaTime);
+        handleTalking();
+    }
+}
+
+Script.update.connect(updateBehavior);
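Note: handleWalking() above decides when to brake using the classic stopping-distance formula: with t = v/a, the distance covered while decelerating is d = (v/2)t = v^2/(2a), so braking begins once the remaining distance drops below d. The same rule as a standalone C++ sketch (names are illustrative, not engine API):

#include <algorithm>

float stepVelocity(float velocity, float acceleration, float maxVelocity,
                   float distanceRemaining, float dt) {
    // distance we would cover if we started braking right now
    float stoppingDistance = (velocity * velocity) / (2.0f * acceleration);
    if (distanceRemaining < stoppingDistance) {
        velocity -= acceleration * dt;   // brake so we arrive at rest
    } else {
        velocity = std::min(velocity + acceleration * dt, maxVelocity);
    }
    return std::max(velocity, 0.0f);
}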
examples/proceduralAnimationAPI.js (new file, 144 lines)
@@ -0,0 +1,144 @@
+//
+//  proceduralAnimation.js
+//  hifi
+//
+//  Created by Ben Arnold on 7/29/14.
+//  Copyright (c) 2014 HighFidelity, Inc. All rights reserved.
+//
+//  This is a Procedural Animation API for creating procedural animations in JS.
+//  To include it in your JS files, simply use the following line at the top:
+//  Script.include("proceduralAnimation.js");
+
+//  You can see a usage example in proceduralBot.js
+//  The current implementation is quite simple. If you would like a feature
+//  to be added or expanded, you can contact Ben at brb555@vols.utk.edu
+
+ProcAnimAPI = function() {
+
+    // generateKeyFrames(rightAngles, leftAngles, middleAngles, numFrames)
+    //
+    // Parameters:
+    // rightAngles - An array of tuples. The angles in degrees for the joints
+    //               on the right side of the body
+    // leftAngles - An array of tuples. The angles in degrees for the joints
+    //              on the left side of the body
+    // middleAngles - An array of tuples. The angles in degrees for the joints
+    //                in the middle of the body
+    // numFrames - The number of frames in the animation, before mirroring.
+    //             for a 4 frame walk animation, simply supply 2 frames
+    //             and generateKeyFrames will return 4 frames.
+    //
+    // Return Value:
+    // Returns an array of KeyFrames. Each KeyFrame has an array of quaternions
+    // for each of the joints, generated from the input angles. They will be ordered
+    // R,L,R,L,...M,M,M,M where R ~ rightAngles, L ~ leftAngles, M ~ middleAngles.
+    // The size of the returned array will be numFrames * 2
+    this.generateKeyframes = function(rightAngles, leftAngles, middleAngles, numFrames) {
+
+        if (rightAngles.length != leftAngles.length) {
+            print("ERROR: generateKeyFrames(...) rightAngles and leftAngles must have equal length.");
+        }
+
+        //for mirrored joints, such as the arms or legs
+        var rightQuats = [];
+        var leftQuats = [];
+        //for non mirrored joints such as the spine
+        var middleQuats = [];
+
+        for (var i = 0; i < numFrames; i++) {
+            rightQuats[i] = [];
+            leftQuats[i] = [];
+            middleQuats[i] = [];
+        }
+
+        var finalKeyFrames = [];
+        //Generate quaternions
+        for (var i = 0; i < rightAngles.length; i++) {
+            for (var j = 0; j < rightAngles[i].length; j++) {
+                rightQuats[i][j] = Quat.fromPitchYawRollDegrees(rightAngles[i][j][0], rightAngles[i][j][1], rightAngles[i][j][2]);
+                leftQuats[i][j] = Quat.fromPitchYawRollDegrees(leftAngles[i][j][0], -leftAngles[i][j][1], -leftAngles[i][j][2]);
+            }
+        }
+        for (var i = 0; i < middleAngles.length; i++) {
+            for (var j = 0; j < middleAngles[i].length; j++) {
+                middleQuats[i][j] = Quat.fromPitchYawRollDegrees(middleAngles[i][j][0], middleAngles[i][j][1], middleAngles[i][j][2]);
+            }
+        }
+
+        for (var i = 0; i < numFrames; i++) {
+            finalKeyFrames[i] = new this.KeyFrame(rightQuats[i], leftQuats[i], middleQuats[i]);
+        }
+
+        //Generate mirrored quaternions for the other half of the animation
+        for (var i = 0; i < rightAngles.length; i++) {
+            for (var j = 0; j < rightAngles[i].length; j++) {
+                rightQuats[i][j] = Quat.fromPitchYawRollDegrees(rightAngles[i][j][0], -rightAngles[i][j][1], -rightAngles[i][j][2]);
+                leftQuats[i][j] = Quat.fromPitchYawRollDegrees(leftAngles[i][j][0], leftAngles[i][j][1], leftAngles[i][j][2]);
+            }
+        }
+        for (var i = 0; i < middleAngles.length; i++) {
+            for (var j = 0; j < middleAngles[i].length; j++) {
+                middleQuats[i][j] = Quat.fromPitchYawRollDegrees(-middleAngles[i][j][0], -middleAngles[i][j][1], -middleAngles[i][j][2]);
+            }
+        }
+        for (var i = 0; i < numFrames; i++) {
+            finalKeyFrames[numFrames + i] = new this.KeyFrame(leftQuats[i], rightQuats[i], middleQuats[i]);
+        }
+
+        //Generate control points
+        this.computeBezierControlPoints(finalKeyFrames);
+
+        return finalKeyFrames;
+    };
+
+    //Computes 2 controlPoints to each keyframe to be used in the bezier evaluation.
+    //Technique is described at: //https://www.cs.tcd.ie/publications/tech-reports/reports.94/TCD-CS-94-18.pdf
+    this.computeBezierControlPoints = function(keyFrames) {
+        //Hook up pointers to the next keyframe
+        for (var i = 0; i < keyFrames.length - 1; i++) {
+            keyFrames[i].nextFrame = keyFrames[i+1];
+        }
+        keyFrames[keyFrames.length-1].nextFrame = keyFrames[0];
+
+        //Set up all C1
+        for (var i = 0; i < keyFrames.length; i++) {
+            keyFrames[i].nextFrame.controlPoints = [];
+            for (var j = 0; j < keyFrames[i].rotations.length; j++) {
+                keyFrames[i].nextFrame.controlPoints[j] = [];
+                var R = Quat.slerp(keyFrames[i].rotations[j], keyFrames[i].nextFrame.rotations[j], 2.0);
+                var T = Quat.slerp(R, keyFrames[i].nextFrame.nextFrame.rotations[j], 0.5);
+                keyFrames[i].nextFrame.controlPoints[j][0] = Quat.slerp(keyFrames[i].nextFrame.rotations[j], T, 0.33333);
+            }
+        }
+        //Set up all C2
+        for (var i = 0; i < keyFrames.length; i++) {
+            for (var j = 0; j < keyFrames[i].rotations.length; j++) {
+                keyFrames[i].controlPoints[j][1] = Quat.slerp(keyFrames[i].nextFrame.rotations[j], keyFrames[i].nextFrame.controlPoints[j][0], -1.0);
+            }
+        }
+    };
+
+    // Animation KeyFrame constructor. rightJoints and leftJoints must be the same size
+    this.KeyFrame = function(rightJoints, leftJoints, middleJoints) {
+        this.rotations = [];
+
+        for (var i = 0; i < rightJoints.length; i++) {
+            this.rotations[this.rotations.length] = rightJoints[i];
+            this.rotations[this.rotations.length] = leftJoints[i];
+        }
+        for (var i = 0; i < middleJoints.length; i++) {
+            this.rotations[this.rotations.length] = middleJoints[i];
+        }
+    };
+
+    // DeCasteljau evaluation to evaluate the bezier curve.
+    // This is a very natural looking interpolation
+    this.deCasteljau = function(k1, k2, c1, c2, f) {
+        var a = Quat.slerp(k1, c1, f);
+        var b = Quat.slerp(c1, c2, f);
+        var c = Quat.slerp(c2, k2, f);
+        var d = Quat.slerp(a, b, f);
+        var e = Quat.slerp(b, c, f);
+        return Quat.slerp(d, e, f);
+    };
+}
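Note: deCasteljau() above evaluates a cubic Bezier on quaternions with six slerps: three across the four control rotations, two to collapse those results, and one final blend. A C++ rendering of the same evaluation against glm, which this codebase already links; illustrative, not part of the commit:

#include <glm/gtc/quaternion.hpp>

glm::quat deCasteljau(const glm::quat& k1, const glm::quat& k2,
                      const glm::quat& c1, const glm::quat& c2, float f) {
    // first level: slerp between adjacent control rotations
    glm::quat a = glm::slerp(k1, c1, f);
    glm::quat b = glm::slerp(c1, c2, f);
    glm::quat c = glm::slerp(c2, k2, f);
    // second and third levels collapse to the point on the curve
    glm::quat d = glm::slerp(a, b, f);
    glm::quat e = glm::slerp(b, c, f);
    return glm::slerp(d, e, f);
}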
@ -104,10 +104,6 @@ const int IDLE_SIMULATE_MSECS = 16; // How often should call simul
|
||||||
// in the idle loop? (60 FPS is default)
|
// in the idle loop? (60 FPS is default)
|
||||||
static QTimer* idleTimer = NULL;
|
static QTimer* idleTimer = NULL;
|
||||||
|
|
||||||
const int STARTUP_JITTER_SAMPLES = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / 2;
|
|
||||||
// Startup optimistically with small jitter buffer that
|
|
||||||
// will start playback on the second received audio packet.
|
|
||||||
|
|
||||||
const QString CHECK_VERSION_URL = "https://highfidelity.io/latestVersion.xml";
|
const QString CHECK_VERSION_URL = "https://highfidelity.io/latestVersion.xml";
|
||||||
const QString SKIP_FILENAME = QStandardPaths::writableLocation(QStandardPaths::DataLocation) + "/hifi.skipversion";
|
const QString SKIP_FILENAME = QStandardPaths::writableLocation(QStandardPaths::DataLocation) + "/hifi.skipversion";
|
||||||
|
|
||||||
|
@ -163,7 +159,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
|
||||||
_touchAvgY(0.0f),
|
_touchAvgY(0.0f),
|
||||||
_isTouchPressed(false),
|
_isTouchPressed(false),
|
||||||
_mousePressed(false),
|
_mousePressed(false),
|
||||||
_audio(STARTUP_JITTER_SAMPLES),
|
_audio(),
|
||||||
_enableProcessVoxelsThread(true),
|
_enableProcessVoxelsThread(true),
|
||||||
_octreeProcessor(),
|
_octreeProcessor(),
|
||||||
_voxelHideShowThread(&_voxels),
|
_voxelHideShowThread(&_voxels),
|
||||||
|
@ -1723,9 +1719,15 @@ void Application::init() {
|
||||||
_lastTimeUpdated.start();
|
_lastTimeUpdated.start();
|
||||||
|
|
||||||
Menu::getInstance()->loadSettings();
|
Menu::getInstance()->loadSettings();
|
||||||
if (Menu::getInstance()->getAudioJitterBufferSamples() != 0) {
|
if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
|
||||||
_audio.setJitterBufferSamples(Menu::getInstance()->getAudioJitterBufferSamples());
|
_audio.setDynamicJitterBuffers(false);
|
||||||
|
_audio.setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
|
||||||
|
} else {
|
||||||
|
_audio.setDynamicJitterBuffers(true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
_audio.setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired());
|
||||||
|
|
||||||
qDebug("Loaded settings");
|
qDebug("Loaded settings");
|
||||||
|
|
||||||
// initialize our face trackers after loading the menu settings
|
// initialize our face trackers after loading the menu settings
|
||||||
|
@@ -2124,21 +2126,6 @@ void Application::update(float deltaTime) {
         // let external parties know we're updating
         emit simulating(deltaTime);
     }
-}
-
-void Application::updateMyAvatar(float deltaTime) {
-    bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
-    PerformanceWarning warn(showWarnings, "Application::updateMyAvatar()");
-
-    _myAvatar->update(deltaTime);
-
-    {
-        // send head/hand data to the avatar mixer and voxel server
-        PerformanceTimer perfTimer("send");
-        QByteArray packet = byteArrayWithPopulatedHeader(PacketTypeAvatarData);
-        packet.append(_myAvatar->toByteArray());
-        controlledBroadcastToNodes(packet, NodeSet() << NodeType::AvatarMixer);
-    }
-
     // Update _viewFrustum with latest camera and view frustum data...
     // NOTE: we get this from the view frustum, to make it simpler, since the

@@ -2181,16 +2168,32 @@ void Application::updateMyAvatar(float deltaTime) {
         }
     }
 
+    // send packet containing downstream audio stats to the AudioMixer
     {
         quint64 sinceLastNack = now - _lastSendDownstreamAudioStats;
         if (sinceLastNack > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) {
             _lastSendDownstreamAudioStats = now;
 
             QMetaObject::invokeMethod(&_audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
         }
     }
 }
 
+void Application::updateMyAvatar(float deltaTime) {
+    bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
+    PerformanceWarning warn(showWarnings, "Application::updateMyAvatar()");
+
+    _myAvatar->update(deltaTime);
+
+    {
+        // send head/hand data to the avatar mixer and voxel server
+        PerformanceTimer perfTimer("send");
+        QByteArray packet = byteArrayWithPopulatedHeader(PacketTypeAvatarData);
+        packet.append(_myAvatar->toByteArray());
+        controlledBroadcastToNodes(packet, NodeSet() << NodeType::AvatarMixer);
+    }
+}
+
 int Application::sendNackPackets() {
 
     if (Menu::getInstance()->isOptionChecked(MenuOption::DisableNackPackets)) {
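The downstream-stats send added to updateMyAvatar() above is throttled on wall-clock time, so it fires at a steady rate regardless of frame rate. A self-contained sketch of that throttle pattern, with a ~1 s interval standing in for TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS (an assumption — its value is defined elsewhere in the tree):

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    static uint64_t usecsNow() {
        using namespace std::chrono;
        return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
    }

    // Fires at most once per interval, however often the caller runs.
    static void maybeSendStats(uint64_t& lastSendUsecs) {
        const uint64_t SEND_INTERVAL_USECS = 1000000; // assumed ~1 s
        uint64_t now = usecsNow();
        if (now - lastSendUsecs > SEND_INTERVAL_USECS) {
            lastSendUsecs = now;
            printf("would send downstream audio stats packet\n");
        }
    }

    int main() {
        uint64_t lastSend = 0;
        for (int i = 0; i < 3; ++i) {
            maybeSendStats(lastSend); // only the first call in the window fires
        }
        return 0;
    }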
@@ -39,33 +39,22 @@
 #include <UUID.h>
 #include <glm/glm.hpp>
 
-#include "Application.h"
 #include "Audio.h"
 #include "Menu.h"
 #include "Util.h"
-#include "AudioRingBuffer.h"
+#include "PositionalAudioStream.h"
 
 static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
 
 static const int NUMBER_OF_NOISE_SAMPLE_FRAMES = 300;
 
-// audio frames time gap stats (min/max/avg) for last ~30 seconds are recalculated every ~1 second
-static const int TIME_GAPS_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
-static const int TIME_GAP_STATS_WINDOW_INTERVALS = 30;
-
-// incoming sequence number stats history will cover last 30s
-static const int INCOMING_SEQ_STATS_HISTORY_LENGTH = INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS /
-    (TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS / USECS_PER_SECOND);
-
-// the stats for the total frames available in the ring buffer and the audio output buffer
-// will sample every second, update every second, and have a moving window covering 10 seconds
 static const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10;
 
 // Mute icon configration
 static const int MUTE_ICON_SIZE = 24;
 
-Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
+Audio::Audio(QObject* parent) :
     AbstractAudioInterface(parent),
     _audioInput(NULL),
     _desiredInputFormat(),

@@ -86,15 +75,9 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     // slower than real time (or at least the desired sample rate). If you increase the size of the ring buffer, then it
     // this delay will slowly add up and the longer someone runs, they more delayed their audio will be.
     _inputRingBuffer(0),
-#ifdef _WIN32
-    _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, false, 100),
-#else
-    _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO), // DO NOT CHANGE THIS UNLESS YOU SOLVE THE AUDIO DEVICE DRIFT PROBLEM!!!
-#endif
+    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, 0, 0, true),
     _isStereoInput(false),
     _averagedLatency(0.0),
-    _measuredJitter(0),
-    _jitterBufferSamples(initialJitterBufferSamples),
     _lastInputLoudness(0),
     _timeSinceLastClip(-1.0),
     _dcOffset(0),

@@ -104,14 +87,12 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _noiseGateEnabled(true),
     _toneInjectionEnabled(false),
     _noiseGateFramesToClose(0),
-    _totalPacketsReceived(0),
     _totalInputAudioSamples(0),
     _collisionSoundMagnitude(0.0f),
     _collisionSoundFrequency(0.0f),
     _collisionSoundNoise(0.0f),
     _collisionSoundDuration(0.0f),
     _proceduralEffectSample(0),
-    _numFramesDisplayStarve(0),
     _muted(false),
     _processSpatialAudio(false),
     _spatialAudioStart(0),

@@ -127,14 +108,10 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _scopeOutputLeft(0),
     _scopeOutputRight(0),
     _statsEnabled(false),
-    _starveCount(0),
-    _consecutiveNotMixedCount(0),
+    _statsShowInjectedStreams(false),
     _outgoingAvatarAudioSequenceNumber(0),
-    _incomingMixedAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH),
-    _interframeTimeGapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS),
     _audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AUDIO_CALLBACK_MSECS * CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
-    _outputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS)
 {
     // clear the array of locally injected samples

@@ -150,29 +127,20 @@ void Audio::init(QGLWidget *parent) {
 }
 
 void Audio::reset() {
-    _ringBuffer.reset();
+    _receivedAudioStream.reset();
 
-    // we don't want to reset seq numbers when space-bar reset occurs.
-    //_outgoingAvatarAudioSequenceNumber = 0;
-
     resetStats();
 }
 
 void Audio::resetStats() {
-    _starveCount = 0;
-    _consecutiveNotMixedCount = 0;
+    _receivedAudioStream.resetStats();
 
     _audioMixerAvatarStreamAudioStats = AudioStreamStats();
     _audioMixerInjectedStreamAudioStatsMap.clear();
 
-    _incomingMixedAudioSequenceNumberStats.reset();
-
-    _interframeTimeGapStats.reset();
-
     _audioInputMsecsReadStats.reset();
     _inputRingBufferMsecsAvailableStats.reset();
 
-    _outputRingBufferFramesAvailableStats.reset();
     _audioOutputMsecsUnplayedStats.reset();
 }
 

@@ -740,36 +708,13 @@ void Audio::handleAudioInput() {
         }
         delete[] inputAudioSamples;
     }
 
+    if (_receivedAudioStream.getPacketReceived() > 0) {
+        pushAudioToOutput();
+    }
 }
 
-void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
-    const int NUM_INITIAL_PACKETS_DISCARD = 3;
-    const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;
-
-    _totalPacketsReceived++;
-
-    double timeDiff = (double)_timeSinceLastReceived.nsecsElapsed() / 1000.0; // ns to us
-    _interframeTimeGapStats.update((quint64)timeDiff);
-    timeDiff /= USECS_PER_MSEC; // us to ms
-    _timeSinceLastReceived.start();
-
-    // Discard first few received packets for computing jitter (often they pile up on start)
-    if (_totalPacketsReceived > NUM_INITIAL_PACKETS_DISCARD) {
-        _stdev.addValue(timeDiff);
-    }
-
-    if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
-        _measuredJitter = _stdev.getStDev();
-        _stdev.reset();
-        // Set jitter buffer to be a multiple of the measured standard deviation
-        const int MAX_JITTER_BUFFER_SAMPLES = _ringBuffer.getSampleCapacity() / 2;
-        const float NUM_STANDARD_DEVIATIONS = 3.0f;
-        if (Menu::getInstance()->getAudioJitterBufferSamples() == 0) {
-            float newJitterBufferSamples = (NUM_STANDARD_DEVIATIONS * _measuredJitter) / 1000.0f * SAMPLE_RATE;
-            setJitterBufferSamples(glm::clamp((int)newJitterBufferSamples, 0, MAX_JITTER_BUFFER_SAMPLES));
-        }
-    }
-
+void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
     if (_audioOutput) {
         // Audio output must exist and be correctly set up if we're going to process received audio
         processReceivedAudio(audioByteArray);
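The shape of the change above: network packets now only feed _receivedAudioStream, and frames are pulled out on the audio-input callback (handleAudioInput -> pushAudioToOutput), which runs at a steady cadence instead of at jittery packet-arrival times. A stand-alone sketch of that producer/consumer split — ReceivedStream is a simplified stand-in for MixedAudioStream, not its real interface:

    #include <cstdio>
    #include <deque>
    #include <vector>

    struct ReceivedStream {
        std::deque<std::vector<short>> frames;
        // called per network packet (jittery arrivals)
        void parseData(std::vector<short> frame) { frames.push_back(std::move(frame)); }
        // called from the steady audio callback
        bool popFrame(std::vector<short>& out) {
            if (frames.empty()) { return false; } // starved: let the buffer refill
            out = std::move(frames.front());
            frames.pop_front();
            return true;
        }
    };

    int main() {
        ReceivedStream stream;
        stream.parseData(std::vector<short>(512, 0)); // network side
        std::vector<short> frame;
        while (stream.popFrame(frame)) {              // callback side, as in pushAudioToOutput()
            printf("pushed %zu samples to the output device\n", frame.size());
        }
        return 0;
    }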
@@ -800,7 +745,7 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
     memcpy(&streamStats, dataAt, sizeof(AudioStreamStats));
     dataAt += sizeof(AudioStreamStats);
 
-    if (streamStats._streamType == PositionalAudioRingBuffer::Microphone) {
+    if (streamStats._streamType == PositionalAudioStream::Microphone) {
         _audioMixerAvatarStreamAudioStats = streamStats;
     } else {
         _audioMixerInjectedStreamAudioStatsMap[streamStats._streamIdentifier] = streamStats;

@@ -808,45 +753,14 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
     }
 }
 
-AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
-
-    AudioStreamStats stats;
-    stats._streamType = PositionalAudioRingBuffer::Microphone;
-
-    stats._timeGapMin = _interframeTimeGapStats.getMin();
-    stats._timeGapMax = _interframeTimeGapStats.getMax();
-    stats._timeGapAverage = _interframeTimeGapStats.getAverage();
-    stats._timeGapWindowMin = _interframeTimeGapStats.getWindowMin();
-    stats._timeGapWindowMax = _interframeTimeGapStats.getWindowMax();
-    stats._timeGapWindowAverage = _interframeTimeGapStats.getWindowAverage();
-
-    stats._ringBufferFramesAvailable = _ringBuffer.framesAvailable();
-    stats._ringBufferFramesAvailableAverage = _outputRingBufferFramesAvailableStats.getWindowAverage();
-    stats._ringBufferDesiredJitterBufferFrames = getDesiredJitterBufferFrames();
-    stats._ringBufferStarveCount = _starveCount;
-    stats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount;
-    stats._ringBufferOverflowCount = _ringBuffer.getOverflowCount();
-    stats._ringBufferSilentFramesDropped = 0;
-
-    stats._packetStreamStats = _incomingMixedAudioSequenceNumberStats.getStats();
-    stats._packetStreamWindowStats = _incomingMixedAudioSequenceNumberStats.getStatsForHistoryWindow();
-
-    return stats;
-}
-
 void Audio::sendDownstreamAudioStatsPacket() {
 
     // since this function is called every second, we'll sample some of our stats here
 
     _inputRingBufferMsecsAvailableStats.update(getInputRingBufferMsecsAvailable());
 
-    _outputRingBufferFramesAvailableStats.update(_ringBuffer.framesAvailable());
     _audioOutputMsecsUnplayedStats.update(getAudioOutputMsecsUnplayed());
 
-    // push the current seq number stats into history, which moves the history window forward 1s
-    // (since that's how often pushStatsToHistory() is called)
-    _incomingMixedAudioSequenceNumberStats.pushStatsToHistory();
-
     char packet[MAX_PACKET_SIZE];
 
     // pack header

@@ -864,7 +778,7 @@ void Audio::sendDownstreamAudioStatsPacket() {
     dataAt += sizeof(quint16);
 
     // pack downstream audio stream stats
-    AudioStreamStats stats = getDownstreamAudioStreamStats();
+    AudioStreamStats stats = _receivedAudioStream.updateSeqHistoryAndGetAudioStreamStats();
     memcpy(dataAt, &stats, sizeof(AudioStreamStats));
     dataAt += sizeof(AudioStreamStats);
 

@@ -974,125 +888,114 @@ void Audio::toggleStereoInput() {
 
 void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
 
-    QUuid senderUUID = uuidFromPacketHeader(audioByteArray);
-
-    // parse sequence number for this packet
-    int numBytesPacketHeader = numBytesForPacketHeader(audioByteArray);
-    const char* sequenceAt = audioByteArray.constData() + numBytesPacketHeader;
-    quint16 sequence = *((quint16*)sequenceAt);
-    _incomingMixedAudioSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID);
-
     // parse audio data
-    _ringBuffer.parseData(audioByteArray);
+    _receivedAudioStream.parseData(audioByteArray);
 
-    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
-        * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
-
-    if (!_ringBuffer.isStarved() && _audioOutput && _audioOutput->bytesFree() == _audioOutput->bufferSize()) {
-        // we don't have any audio data left in the output buffer
-        // we just starved
-        //qDebug() << "Audio output just starved.";
-        _ringBuffer.setIsStarved(true);
-        _numFramesDisplayStarve = 10;
-
-        _starveCount++;
-        _consecutiveNotMixedCount = 0;
-    }
+    // This call has been moved to handleAudioInput. handleAudioInput is called at a much more regular interval
+    // than processReceivedAudio since handleAudioInput does not experience network-related jitter.
+    // This way, we reduce the jitter of the frames being pushed to the audio output, allowing us to use a reduced
+    // buffer size for it, which reduces latency.
 
-    int numNetworkOutputSamples;
+    //pushAudioToOutput();
+}
+
+void Audio::pushAudioToOutput() {
+
+    if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
+        // the audio output has no samples to play. set the downstream audio to starved so that it
+        // refills to its desired size before pushing frames
+        _receivedAudioStream.setToStarved();
+    }
+
+    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
+        * (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount());
+
+    int numFramesToPush;
     if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) {
-        numNetworkOutputSamples = _ringBuffer.samplesAvailable();
+        numFramesToPush = _receivedAudioStream.getFramesAvailable();
     } else {
         // make sure to push a whole number of frames to the audio output
-        int numFramesAudioOutputRoomFor = _audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio / _ringBuffer.getNumFrameSamples();
-        numNetworkOutputSamples = std::min(_ringBuffer.samplesAvailable(), numFramesAudioOutputRoomFor * _ringBuffer.getNumFrameSamples());
+        int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _receivedAudioStream.getNumFrameSamples();
+        numFramesToPush = std::min(_receivedAudioStream.getFramesAvailable(), numFramesAudioOutputRoomFor);
     }
 
-    // if there is data in the ring buffer and room in the audio output, decide what to do
-    if (numNetworkOutputSamples > 0) {
-        int numSamplesNeededToStartPlayback = std::min(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2),
-                                                       _ringBuffer.getSampleCapacity());
-
-        if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) {
-            // We are still waiting for enough samples to begin playback
-            // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback;
-            _consecutiveNotMixedCount++;
-        } else {
-            int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
-
-            QByteArray outputBuffer;
-            outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
-
-            // We are either already playing back, or we have enough audio to start playing back.
-            //qDebug() << "pushing " << numNetworkOutputSamples;
-            _ringBuffer.setIsStarved(false);
-
-            int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
-            if (_processSpatialAudio) {
-                unsigned int sampleTime = _spatialAudioStart;
-                QByteArray buffer;
-                buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
-
-                _ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
-
-                // Accumulate direct transmission of audio from sender to receiver
-                if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
-                    emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
-                    addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
-                }
-
-                // Send audio off for spatial processing
-                emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
-
-                // copy the samples we'll resample from the spatial audio ring buffer - this also
-                // pushes the read pointer of the spatial audio ring buffer forwards
-                _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
-
-                // Advance the start point for the next packet of audio to arrive
-                _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
-            } else {
-                // copy the samples we'll resample from the ring buffer - this also
-                // pushes the read pointer of the ring buffer forwards
-                _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
-            }
-
-            // copy the packet from the RB to the output
-            linearResampling(ringBufferSamples,
-                             (int16_t*) outputBuffer.data(),
-                             numNetworkOutputSamples,
-                             numDeviceOutputSamples,
-                             _desiredOutputFormat, _outputFormat);
-
-            if (_outputDevice) {
-                _outputDevice->write(outputBuffer);
-            }
-
-            if (_scopeEnabled && !_scopeEnabledPause) {
-                unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
-                int16_t* samples = ringBufferSamples;
-                for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
-
-                    unsigned int audioChannel = 0;
-                    addBufferToScope(
-                        _scopeOutputLeft,
-                        _scopeOutputOffset,
-                        samples, audioChannel, numAudioChannels);
-
-                    audioChannel = 1;
-                    addBufferToScope(
-                        _scopeOutputRight,
-                        _scopeOutputOffset,
-                        samples, audioChannel, numAudioChannels);
-
-                    _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
-                    _scopeOutputOffset %= _samplesPerScope;
-                    samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
-                }
-            }
-
-            delete[] ringBufferSamples;
-        }
+    // if there is data in the received stream and room in the audio output, decide what to do
+    if (numFramesToPush > 0 && _receivedAudioStream.popFrames(numFramesToPush, false)) {
+
+        int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
+        int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
+
+        QByteArray outputBuffer;
+        outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
+
+        AudioRingBuffer::ConstIterator receivedAudioStreamPopOutput = _receivedAudioStream.getLastPopOutput();
+
+        int16_t* receivedSamples = new int16_t[numNetworkOutputSamples];
+        if (_processSpatialAudio) {
+            unsigned int sampleTime = _spatialAudioStart;
+            QByteArray buffer;
+            buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
+
+            receivedAudioStreamPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
+
+            // Accumulate direct transmission of audio from sender to receiver
+            if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
+                emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+                addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
+            }
+
+            // Send audio off for spatial processing
+            emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+
+            // copy the samples we'll resample from the spatial audio ring buffer - this also
+            // pushes the read pointer of the spatial audio ring buffer forwards
+            _spatialAudioRingBuffer.readSamples(receivedSamples, numNetworkOutputSamples);
+
+            // Advance the start point for the next packet of audio to arrive
+            _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
+        } else {
+            // copy the samples we'll resample from the ring buffer - this also
+            // pushes the read pointer of the ring buffer forwards
+            receivedAudioStreamPopOutput.readSamples(receivedSamples, numNetworkOutputSamples);
+        }
+
+        // copy the packet from the RB to the output
+        linearResampling(receivedSamples,
+                         (int16_t*)outputBuffer.data(),
+                         numNetworkOutputSamples,
+                         numDeviceOutputSamples,
+                         _desiredOutputFormat, _outputFormat);
+
+        if (_outputDevice) {
+            _outputDevice->write(outputBuffer);
+        }
+
+        if (_scopeEnabled && !_scopeEnabledPause) {
+            unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
+            int16_t* samples = receivedSamples;
+            for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
+
+                unsigned int audioChannel = 0;
+                addBufferToScope(
+                    _scopeOutputLeft,
+                    _scopeOutputOffset,
+                    samples, audioChannel, numAudioChannels);
+
+                audioChannel = 1;
+                addBufferToScope(
+                    _scopeOutputRight,
+                    _scopeOutputOffset,
+                    samples, audioChannel, numAudioChannels);
+
+                _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
+                _scopeOutputOffset %= _samplesPerScope;
+                samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
+            }
+        }
+
+        delete[] receivedSamples;
     }
 }
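Worked example of the sizing arithmetic in pushAudioToOutput() above, with illustrative numbers (a 48 kHz stereo device fed by a 24 kHz stereo network stream and 1024-sample stereo frames — none of these values come from this diff):

    #include <algorithm>
    #include <cstdio>

    int main() {
        // network rate / device rate * network channels / device channels
        const float networkOutputToOutputRatio = (24000.0f / 48000.0f) * (2.0f / 2.0f); // 0.5
        const int numFrameSamples = 1024; // samples per stereo network frame
        const int bytesFree = 32768;      // room left in the device buffer
        const int framesAvailable = 7;    // frames buffered in the received stream

        // device samples that fit -> network-rate samples -> whole frames
        int roomFor = (int)(bytesFree / sizeof(short) * networkOutputToOutputRatio) / numFrameSamples;
        int numFramesToPush = std::min(framesAvailable, roomFor);
        printf("room for %d frames, pushing %d\n", roomFor, numFramesToPush); // room for 8, pushing 7
        return 0;
    }

Pushing only whole frames keeps the device buffer from overflowing while never splitting a network frame across writes.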
@@ -1316,6 +1219,10 @@ void Audio::toggleStats() {
     _statsEnabled = !_statsEnabled;
 }
 
+void Audio::toggleStatsShowInjectedStreams() {
+    _statsShowInjectedStreams = !_statsShowInjectedStreams;
+}
+
 void Audio::selectAudioScopeFiveFrames() {
     if (Menu::getInstance()->isOptionChecked(MenuOption::AudioScopeFiveFrames)) {
         reallocateScope(5);

@@ -1400,10 +1307,10 @@ void Audio::renderStats(const float* color, int width, int height) {
         return;
     }
 
-    const int LINES_WHEN_CENTERED = 30;
-    const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * LINES_WHEN_CENTERED;
+    const int linesWhenCentered = _statsShowInjectedStreams ? 30 : 23;
+    const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * linesWhenCentered;
 
-    int lines = _audioMixerInjectedStreamAudioStatsMap.size() * 7 + 23;
+    int lines = _statsShowInjectedStreams ? _audioMixerInjectedStreamAudioStatsMap.size() * 7 + 23 : 23;
     int statsHeight = STATS_HEIGHT_PER_LINE * lines;
 
 

@@ -1430,13 +1337,14 @@ void Audio::renderStats(const float* color, int width, int height) {
 
     float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f;
 
+    AudioStreamStats downstreamAudioStreamStats = _receivedAudioStream.getAudioStreamStats();
     SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer);
     if (!audioMixerNodePointer.isNull()) {
         audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage();
         inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable();
         networkRoundtripLatency = audioMixerNodePointer->getPingMs();
-        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
-        outputRingBufferLatency = _outputRingBufferFramesAvailableStats.getWindowAverage() * BUFFER_SEND_INTERVAL_MSECS;
+        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
+        outputRingBufferLatency = downstreamAudioStreamStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
         audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage();
     }
     float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency;

@@ -1474,16 +1382,6 @@ void Audio::renderStats(const float* color, int width, int height) {
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
 
 
-    verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
-
-
-    char downstreamLabelString[] = "Downstream mixed audio stats:";
-    verticalOffset += STATS_HEIGHT_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
-
-    renderAudioStreamStats(getDownstreamAudioStreamStats(), horizontalOffset, verticalOffset, scale, rotation, font, color, true);
-
-
     verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
 
     char upstreamMicLabelString[] = "Upstream mic audio stats:";

@@ -1493,17 +1391,29 @@ void Audio::renderStats(const float* color, int width, int height) {
     renderAudioStreamStats(_audioMixerAvatarStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
 
 
-    foreach(const AudioStreamStats& injectedStreamAudioStats, _audioMixerInjectedStreamAudioStatsMap) {
-
-        verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
-
-        char upstreamInjectedLabelString[512];
-        sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s",
-                injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data());
-        verticalOffset += STATS_HEIGHT_PER_LINE;
-        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color);
-
-        renderAudioStreamStats(injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
+    verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
+
+    char downstreamLabelString[] = "Downstream mixed audio stats:";
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
+
+    renderAudioStreamStats(downstreamAudioStreamStats, horizontalOffset, verticalOffset, scale, rotation, font, color, true);
+
+    if (_statsShowInjectedStreams) {
+
+        foreach(const AudioStreamStats& injectedStreamAudioStats, _audioMixerInjectedStreamAudioStatsMap) {
+
+            verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
+
+            char upstreamInjectedLabelString[512];
+            sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s",
+                    injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data());
+            verticalOffset += STATS_HEIGHT_PER_LINE;
+            drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color);
+
+            renderAudioStreamStats(injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
+        }
     }
 }
 

@@ -1524,26 +1434,26 @@ void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int hori
 
     const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
     sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d",
-        streamStats._ringBufferDesiredJitterBufferFrames,
-        streamStats._ringBufferFramesAvailableAverage,
+        streamStats._desiredJitterBufferFrames,
+        streamStats._framesAvailableAverage,
         (int)(getAudioOutputAverageMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS),
-        streamStats._ringBufferFramesAvailable,
+        streamStats._framesAvailable,
         (int)(getAudioOutputMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS));
     } else {
         sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u",
-            streamStats._ringBufferDesiredJitterBufferFrames,
-            streamStats._ringBufferFramesAvailableAverage,
-            streamStats._ringBufferFramesAvailable);
+            streamStats._desiredJitterBufferFrames,
+            streamStats._framesAvailableAverage,
+            streamStats._framesAvailable);
     }
 
     verticalOffset += STATS_HEIGHT_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
 
     sprintf(stringBuffer, " Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u",
-        streamStats._ringBufferStarveCount,
-        streamStats._ringBufferConsecutiveNotMixedCount,
-        streamStats._ringBufferSilentFramesDropped,
-        streamStats._ringBufferOverflowCount);
+        streamStats._starveCount,
+        streamStats._consecutiveNotMixedCount,
+        streamStats._framesDropped,
+        streamStats._overflowCount);
     verticalOffset += STATS_HEIGHT_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
 

@@ -1756,11 +1666,13 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
 
     if (adjustedFormatForAudioDevice(outputDeviceInfo, _desiredOutputFormat, _outputFormat)) {
         qDebug() << "The format to be used for audio output is" << _outputFormat;
 
+        const int AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 10;
+
         // setup our general output device for audio-mixer audio
         _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
-        _audioOutput->setBufferSize(_ringBuffer.getSampleCapacity() * sizeof(int16_t));
-        qDebug() << "Ring Buffer capacity in samples: " << _ringBuffer.getSampleCapacity();
+        _audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS));
+        qDebug() << "Ring Buffer capacity in frames: " << AUDIO_OUTPUT_BUFFER_SIZE_FRAMES;
         _outputDevice = _audioOutput->start();
 
         // setup a loopback audio output device
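The fixed AUDIO_OUTPUT_BUFFER_SIZE_FRAMES above caps device-side buffering at ten frame intervals rather than tying it to ring-buffer capacity. Roughly what that means in bytes and milliseconds, mirroring QAudioFormat::bytesForDuration() by hand for an assumed 48 kHz, 16-bit stereo output format:

    #include <cstdio>

    int main() {
        const int bytesPerSecond = 48000 * 2 * 2; // rate * channels * bytes per sample (assumed format)
        const long long frameUsecs = 21333;       // assumed BUFFER_SEND_INTERVAL_USECS (~21.3 ms)
        const int bytesPerFrame = (int)(bytesPerSecond * frameUsecs / 1000000);
        const int AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 10;
        printf("output buffer: %d bytes (~%lld ms)\n",
               AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * bytesPerFrame,
               AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * frameUsecs / 1000);
        return 0;
    }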
@@ -31,12 +31,12 @@
 #include <QByteArray>
 
 #include <AbstractAudioInterface.h>
-#include <AudioRingBuffer.h>
 #include <StdDev.h>
 
+#include "MixedAudioStream.h"
+
 static const int NUM_AUDIO_CHANNELS = 2;
 
-static const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
-
 class QAudioInput;
 class QAudioOutput;

@@ -46,19 +46,23 @@ class Audio : public AbstractAudioInterface {
     Q_OBJECT
 public:
     // setup for audio I/O
-    Audio(int16_t initialJitterBufferSamples, QObject* parent = 0);
+    Audio(QObject* parent = 0);
 
     float getLastInputLoudness() const { return glm::max(_lastInputLoudness - _noiseGateMeasuredFloor, 0.f); }
     float getTimeSinceLastClip() const { return _timeSinceLastClip; }
     float getAudioAverageInputLoudness() const { return _lastInputLoudness; }
 
     void setNoiseGateEnabled(bool noiseGateEnabled) { _noiseGateEnabled = noiseGateEnabled; }
 
-    void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; }
-    int getJitterBufferSamples() { return _jitterBufferSamples; }
-
     virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
     virtual void startDrumSound(float volume, float frequency, float duration, float decay);
 
+    void setDynamicJitterBuffers(bool dynamicJitterBuffers) { _receivedAudioStream.setDynamicJitterBuffers(dynamicJitterBuffers); }
+    void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { _receivedAudioStream.setStaticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames); }
+
+    void setMaxFramesOverDesired(int maxFramesOverDesired) { _receivedAudioStream.setMaxFramesOverDesired(maxFramesOverDesired); }
+
+    int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); }
+
     float getCollisionSoundMagnitude() { return _collisionSoundMagnitude; }
 
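The new setters above are thin forwards into the received stream, so jitter-buffer policy has a single owner. A simplified sketch of that facade shape (these types are stand-ins for illustration, not the real MixedAudioStream interface):

    // Sketch: Audio delegates jitter-buffer policy to its stream member.
    struct MixedAudioStreamLike {
        bool dynamic = true;
        int staticDesiredFrames = 1;
        int maxFramesOverDesired = 10;
    };

    struct AudioFacade {
        MixedAudioStreamLike receivedStream;
        void setDynamicJitterBuffers(bool d) { receivedStream.dynamic = d; }
        void setStaticDesiredJitterBufferFrames(int f) { receivedStream.staticDesiredFrames = f; }
        void setMaxFramesOverDesired(int m) { receivedStream.maxFramesOverDesired = m; }
    };

    int main() {
        AudioFacade audio;
        int configuredFrames = 3; // e.g. the preferences spin box value
        if (configuredFrames != 0) {
            audio.setDynamicJitterBuffers(false);
            audio.setStaticDesiredJitterBufferFrames(configuredFrames);
        } else {
            audio.setDynamicJitterBuffers(true);
        }
        return 0;
    }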
@@ -77,8 +81,6 @@ public:
     int getNetworkBufferLengthSamplesPerChannel() { return NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }
 
     bool getProcessSpatialAudio() const { return _processSpatialAudio; }
 
-    const SequenceNumberStats& getIncomingMixedAudioSequenceNumberStats() const { return _incomingMixedAudioSequenceNumberStats; }
-
     float getInputRingBufferMsecsAvailable() const;
     float getInputRingBufferAverageMsecsAvailable() const { return (float)_inputRingBufferMsecsAvailableStats.getWindowAverage(); }

@@ -89,7 +91,7 @@ public:
 public slots:
     void start();
     void stop();
-    void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
+    void addReceivedAudioToStream(const QByteArray& audioByteArray);
     void parseAudioStreamStatsPacket(const QByteArray& packet);
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
     void handleAudioInput();

@@ -102,6 +104,7 @@ public slots:
     void toggleScope();
     void toggleScopePause();
     void toggleStats();
+    void toggleStatsShowInjectedStreams();
     void toggleAudioSpatialProcessing();
     void toggleStereoInput();
     void selectAudioScopeFiveFrames();

@@ -110,7 +113,6 @@ public slots:
 
     virtual void handleAudioByteArray(const QByteArray& audioByteArray);
 
-    AudioStreamStats getDownstreamAudioStreamStats() const;
     void sendDownstreamAudioStatsPacket();
 
     bool switchInputToAudioDevice(const QString& inputDeviceName);

@@ -123,16 +125,8 @@ public slots:
     float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
     void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }
 
-    const AudioRingBuffer& getDownstreamRingBuffer() const { return _ringBuffer; }
-
-    int getDesiredJitterBufferFrames() const { return _jitterBufferSamples / _ringBuffer.getNumFrameSamples(); }
-
-    int getStarveCount() const { return _starveCount; }
-    int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; }
-
     const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
     const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }
-    const MovingMinMaxAvg<quint64>& getInterframeTimeGapStats() const { return _interframeTimeGapStats; }
 
 signals:
     bool muteToggled();

@@ -159,7 +153,7 @@ private:
     QAudioOutput* _proceduralAudioOutput;
     QIODevice* _proceduralOutputDevice;
     AudioRingBuffer _inputRingBuffer;
-    AudioRingBuffer _ringBuffer;
+    MixedAudioStream _receivedAudioStream;
     bool _isStereoInput;
 
     QString _inputAudioDeviceName;

@@ -168,8 +162,6 @@ private:
     StDev _stdev;
     QElapsedTimer _timeSinceLastReceived;
     float _averagedLatency;
-    float _measuredJitter;
-    int16_t _jitterBufferSamples;
     float _lastInputLoudness;
     float _timeSinceLastClip;
     float _dcOffset;

@@ -180,7 +172,6 @@ private:
     bool _noiseGateEnabled;
     bool _toneInjectionEnabled;
     int _noiseGateFramesToClose;
-    int _totalPacketsReceived;
     int _totalInputAudioSamples;
 
     float _collisionSoundMagnitude;

@@ -197,7 +188,6 @@ private:
     int _drumSoundSample;
 
     int _proceduralEffectSample;
-    int _numFramesDisplayStarve;
     bool _muted;
     bool _localEcho;
     GLuint _micTextureId;

@@ -225,6 +215,9 @@ private:
     // Process received audio
     void processReceivedAudio(const QByteArray& audioByteArray);
 
+    // Pushes frames from the output ringbuffer to the audio output device
+    void pushAudioToOutput();
+
     bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo);
     bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo);
 

@@ -275,22 +268,16 @@ private:
 #endif
     static const unsigned int STATS_HEIGHT_PER_LINE = 20;
     bool _statsEnabled;
-    int _starveCount;
-    int _consecutiveNotMixedCount;
+    bool _statsShowInjectedStreams;
 
     AudioStreamStats _audioMixerAvatarStreamAudioStats;
     QHash<QUuid, AudioStreamStats> _audioMixerInjectedStreamAudioStatsMap;
 
     quint16 _outgoingAvatarAudioSequenceNumber;
-    SequenceNumberStats _incomingMixedAudioSequenceNumberStats;
-
-    MovingMinMaxAvg<quint64> _interframeTimeGapStats;
-
     MovingMinMaxAvg<float> _audioInputMsecsReadStats;
     MovingMinMaxAvg<float> _inputRingBufferMsecsAvailableStats;
 
-    MovingMinMaxAvg<int> _outputRingBufferFramesAvailableStats;
     MovingMinMaxAvg<float> _audioOutputMsecsUnplayedStats;
 };
 
@ -48,7 +48,7 @@ void DatagramProcessor::processDatagrams() {
|
||||||
// only process this packet if we have a match on the packet version
|
// only process this packet if we have a match on the packet version
|
||||||
switch (packetTypeForPacket(incomingPacket)) {
|
switch (packetTypeForPacket(incomingPacket)) {
|
||||||
case PacketTypeMixedAudio:
|
case PacketTypeMixedAudio:
|
||||||
QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToBuffer", Qt::QueuedConnection,
|
QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToStream", Qt::QueuedConnection,
|
||||||
Q_ARG(QByteArray, incomingPacket));
|
Q_ARG(QByteArray, incomingPacket));
|
||||||
break;
|
break;
|
||||||
case PacketTypeAudioStreamStats:
|
case PacketTypeAudioStreamStats:
|
||||||
|
|
|
@ -82,7 +82,8 @@ const int CONSOLE_HEIGHT = 200;
|
||||||
|
|
||||||
Menu::Menu() :
|
Menu::Menu() :
|
||||||
_actionHash(),
|
_actionHash(),
|
||||||
_audioJitterBufferSamples(0),
|
_audioJitterBufferFrames(0),
|
||||||
|
_maxFramesOverDesired(0),
|
||||||
_bandwidthDialog(NULL),
|
_bandwidthDialog(NULL),
|
||||||
_fieldOfView(DEFAULT_FIELD_OF_VIEW_DEGREES),
|
_fieldOfView(DEFAULT_FIELD_OF_VIEW_DEGREES),
|
||||||
_realWorldFieldOfView(DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES),
|
_realWorldFieldOfView(DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES),
|
||||||
|
@ -594,12 +595,18 @@ Menu::Menu() :
|
||||||
false);
|
false);
|
||||||
|
|
||||||
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioStats,
|
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioStats,
|
||||||
0,
|
Qt::CTRL | Qt::Key_A,
|
||||||
false,
|
false,
|
||||||
appInstance->getAudio(),
|
appInstance->getAudio(),
|
||||||
SLOT(toggleStats()));
|
SLOT(toggleStats()));
|
||||||
|
|
||||||
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, true);
|
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioStatsShowInjectedStreams,
|
||||||
|
0,
|
||||||
|
false,
|
||||||
|
appInstance->getAudio(),
|
||||||
|
SLOT(toggleStatsShowInjectedStreams()));
|
||||||
|
|
||||||
|
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, false);
|
||||||
|
|
||||||
addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
|
addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
|
||||||
Qt::CTRL | Qt::SHIFT | Qt::Key_V,
|
Qt::CTRL | Qt::SHIFT | Qt::Key_V,
|
||||||
|
@ -627,7 +634,8 @@ void Menu::loadSettings(QSettings* settings) {
|
||||||
lockedSettings = true;
|
lockedSettings = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
_audioJitterBufferSamples = loadSetting(settings, "audioJitterBufferSamples", 0);
|
_audioJitterBufferFrames = loadSetting(settings, "audioJitterBufferFrames", 0);
|
||||||
|
_maxFramesOverDesired = loadSetting(settings, "maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED);
|
||||||
_fieldOfView = loadSetting(settings, "fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES);
|
_fieldOfView = loadSetting(settings, "fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES);
|
||||||
_realWorldFieldOfView = loadSetting(settings, "realWorldFieldOfView", DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES);
|
_realWorldFieldOfView = loadSetting(settings, "realWorldFieldOfView", DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES);
|
||||||
_faceshiftEyeDeflection = loadSetting(settings, "faceshiftEyeDeflection", DEFAULT_FACESHIFT_EYE_DEFLECTION);
|
_faceshiftEyeDeflection = loadSetting(settings, "faceshiftEyeDeflection", DEFAULT_FACESHIFT_EYE_DEFLECTION);
|
||||||
|
@ -677,7 +685,8 @@ void Menu::saveSettings(QSettings* settings) {
|
||||||
lockedSettings = true;
|
lockedSettings = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
settings->setValue("audioJitterBufferSamples", _audioJitterBufferSamples);
|
settings->setValue("audioJitterBufferFrames", _audioJitterBufferFrames);
|
||||||
|
settings->setValue("maxFramesOverDesired", _maxFramesOverDesired);
|
||||||
settings->setValue("fieldOfView", _fieldOfView);
|
settings->setValue("fieldOfView", _fieldOfView);
|
||||||
settings->setValue("faceshiftEyeDeflection", _faceshiftEyeDeflection);
|
settings->setValue("faceshiftEyeDeflection", _faceshiftEyeDeflection);
|
||||||
settings->setValue("maxVoxels", _maxVoxels);
|
settings->setValue("maxVoxels", _maxVoxels);
|
||||||
|
|
|
@ -85,8 +85,10 @@ public:
|
||||||
void triggerOption(const QString& menuOption);
|
void triggerOption(const QString& menuOption);
|
||||||
QAction* getActionForOption(const QString& menuOption);
|
QAction* getActionForOption(const QString& menuOption);
|
||||||
|
|
||||||
float getAudioJitterBufferSamples() const { return _audioJitterBufferSamples; }
|
float getAudioJitterBufferFrames() const { return _audioJitterBufferFrames; }
|
||||||
void setAudioJitterBufferSamples(float audioJitterBufferSamples) { _audioJitterBufferSamples = audioJitterBufferSamples; }
|
void setAudioJitterBufferFrames(float audioJitterBufferSamples) { _audioJitterBufferFrames = audioJitterBufferSamples; }
|
||||||
|
int getMaxFramesOverDesired() const { return _maxFramesOverDesired; }
|
||||||
|
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
|
||||||
float getFieldOfView() const { return _fieldOfView; }
|
float getFieldOfView() const { return _fieldOfView; }
|
||||||
void setFieldOfView(float fieldOfView) { _fieldOfView = fieldOfView; }
|
void setFieldOfView(float fieldOfView) { _fieldOfView = fieldOfView; }
|
||||||
float getRealWorldFieldOfView() const { return _realWorldFieldOfView; }
|
float getRealWorldFieldOfView() const { return _realWorldFieldOfView; }
|
||||||
|
@ -257,7 +259,8 @@ private:
|
||||||
|
|
||||||
|
|
||||||
QHash<QString, QAction*> _actionHash;
|
QHash<QString, QAction*> _actionHash;
|
||||||
int _audioJitterBufferSamples; /// number of extra samples to wait before starting audio playback
|
int _audioJitterBufferFrames; /// number of extra samples to wait before starting audio playback
|
||||||
|
int _maxFramesOverDesired;
|
||||||
BandwidthDialog* _bandwidthDialog;
|
BandwidthDialog* _bandwidthDialog;
|
||||||
float _fieldOfView; /// in Degrees, doesn't apply to HMD like Oculus
|
float _fieldOfView; /// in Degrees, doesn't apply to HMD like Oculus
|
||||||
float _realWorldFieldOfView; // The actual FOV set by the user's monitor size and view distance
|
float _realWorldFieldOfView; // The actual FOV set by the user's monitor size and view distance
|
||||||
|
@ -316,6 +319,7 @@ namespace MenuOption {
|
||||||
const QString AudioScopePause = "Pause Audio Scope";
|
const QString AudioScopePause = "Pause Audio Scope";
|
||||||
const QString AudioScopeTwentyFrames = "Twenty";
|
const QString AudioScopeTwentyFrames = "Twenty";
|
||||||
const QString AudioStats = "Audio Stats";
|
const QString AudioStats = "Audio Stats";
|
||||||
|
const QString AudioStatsShowInjectedStreams = "Audio Stats Show Injected Streams";
|
||||||
const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation";
|
const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation";
|
||||||
const QString AudioSpatialProcessing = "Audio Spatial Processing";
|
const QString AudioSpatialProcessing = "Audio Spatial Processing";
|
||||||
const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation";
|
const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation";
|
||||||
|
@ -349,7 +353,7 @@ namespace MenuOption {
|
||||||
const QString DisableActivityLogger = "Disable Activity Logger";
|
const QString DisableActivityLogger = "Disable Activity Logger";
|
||||||
const QString DisableAutoAdjustLOD = "Disable Automatically Adjusting LOD";
|
const QString DisableAutoAdjustLOD = "Disable Automatically Adjusting LOD";
|
||||||
const QString DisableNackPackets = "Disable NACK Packets";
|
const QString DisableNackPackets = "Disable NACK Packets";
|
||||||
const QString DisableQAudioOutputOverflowCheck = "Disable Audio Output Overflow Check";
|
const QString DisableQAudioOutputOverflowCheck = "Disable Audio Output Device Overflow Check";
|
||||||
const QString DisplayFrustum = "Display Frustum";
|
const QString DisplayFrustum = "Display Frustum";
|
||||||
const QString DisplayHands = "Display Hands";
|
const QString DisplayHands = "Display Hands";
|
||||||
const QString DisplayHandTargets = "Display Hand Targets";
|
const QString DisplayHandTargets = "Display Hand Targets";
|
||||||
|
|
|
@@ -149,7 +149,9 @@ void PreferencesDialog::loadPreferences() {
     ui.faceshiftEyeDeflectionSider->setValue(menuInstance->getFaceshiftEyeDeflection() *
                                              ui.faceshiftEyeDeflectionSider->maximum());
 
-    ui.audioJitterSpin->setValue(menuInstance->getAudioJitterBufferSamples());
+    ui.audioJitterSpin->setValue(menuInstance->getAudioJitterBufferFrames());
+
+    ui.maxFramesOverDesiredSpin->setValue(menuInstance->getMaxFramesOverDesired());
 
     ui.realWorldFieldOfViewSpin->setValue(menuInstance->getRealWorldFieldOfView());
@@ -239,8 +241,16 @@ void PreferencesDialog::savePreferences() {
 
     Menu::getInstance()->setInvertSixenseButtons(ui.invertSixenseButtonsCheckBox->isChecked());
 
-    Menu::getInstance()->setAudioJitterBufferSamples(ui.audioJitterSpin->value());
-    Application::getInstance()->getAudio()->setJitterBufferSamples(ui.audioJitterSpin->value());
+    Menu::getInstance()->setAudioJitterBufferFrames(ui.audioJitterSpin->value());
+    if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
+        Application::getInstance()->getAudio()->setDynamicJitterBuffers(false);
+        Application::getInstance()->getAudio()->setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
+    } else {
+        Application::getInstance()->getAudio()->setDynamicJitterBuffers(true);
+    }
+
+    Menu::getInstance()->setMaxFramesOverDesired(ui.maxFramesOverDesiredSpin->value());
+    Application::getInstance()->getAudio()->setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired());
 
     Application::getInstance()->resizeGL(Application::getInstance()->getGLWidget()->width(),
                                          Application::getInstance()->getGLWidget()->height());
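The hunk above encodes a simple policy: a jitter-buffer spin-box value of 0 selects dynamic jitter buffers, while any nonzero value pins a static desired length, and the new spin box feeds the max-frames-over-desired limit. A minimal standalone sketch of that policy, with illustrative names rather than the application's API:

// A minimal sketch of the selection rule the preferences dialog now applies:
// 0 means "automatic" (dynamic jitter buffers); any other value pins a static
// desired length. Names here are illustrative, not the application's API.
#include <iostream>

struct JitterBufferConfig {
    bool dynamic;       // recompute desired length from observed jitter
    int staticFrames;   // used only when dynamic == false
};

JitterBufferConfig configFromSpinBox(int spinBoxValue) {
    if (spinBoxValue != 0) {
        return { false, spinBoxValue };   // user pinned a static length
    }
    return { true, 0 };                   // 0 selects dynamic sizing
}

int main() {
    JitterBufferConfig c = configFromSpinBox(0);
    std::cout << (c.dynamic ? "dynamic" : "static") << "\n";   // prints "dynamic"
}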
@@ -291,9 +291,8 @@ void Stats::display(
 
     char audioJitter[30];
     sprintf(audioJitter,
             "Buffer msecs %.1f",
-            (float) (audio->getNetworkBufferLengthSamplesPerChannel() + (float) audio->getJitterBufferSamples()) /
-            (float) audio->getNetworkSampleRate() * 1000.f);
+            audio->getDesiredJitterBufferFrames() * BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC);
     drawText(30, glWidget->height() - 22, scale, rotation, font, audioJitter, color);
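The new readout is a straight conversion: desired jitter frames times the network frame interval, in milliseconds, replacing the old samples-based formula. A worked sketch of the arithmetic; the ~21.3 ms frame interval below is an assumption for illustration (512 samples per channel at 24 kHz), where the real code uses BUFFER_SEND_INTERVAL_USECS:

// Worked example of the "Buffer msecs" stat: displayed latency is simply
// desired frames times the per-frame send interval. The frame interval is
// an assumed value for illustration only.
#include <cstdio>

int main() {
    const float USECS_PER_MSEC = 1000.0f;
    const float assumedFrameIntervalUsecs = 512.0f / 24000.0f * 1e6f;   // ~21333 us, assumed
    int desiredJitterBufferFrames = 3;
    std::printf("Buffer msecs %.1f\n",
                desiredJitterBufferFrames * assumedFrameIntervalUsecs / USECS_PER_MSEC);   // ~64.0
}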
@@ -1489,7 +1489,7 @@ padding: 10px;margin-top:10px</string>
        <string notr="true">color: rgb(51, 51, 51)</string>
       </property>
       <property name="text">
-       <string>Audio Jitter Buffer Samples (0 for automatic)</string>
+       <string>Audio Jitter Buffer Frames (0 for automatic)</string>
       </property>
       <property name="indent">
        <number>15</number>
@@ -1543,7 +1543,7 @@ padding: 10px;margin-top:10px</string>
       </font>
      </property>
      <property name="minimum">
-      <number>-10000</number>
+      <number>0</number>
     </property>
     <property name="maximum">
      <number>10000</number>
@@ -1555,6 +1555,99 @@ padding: 10px;margin-top:10px</string>
      </item>
     </layout>
    </item>
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_13">
+     <property name="spacing">
+      <number>0</number>
+     </property>
+     <property name="topMargin">
+      <number>10</number>
+     </property>
+     <property name="rightMargin">
+      <number>0</number>
+     </property>
+     <property name="bottomMargin">
+      <number>10</number>
+     </property>
+     <item alignment="Qt::AlignLeft">
+      <widget class="QLabel" name="label_10">
+       <property name="font">
+        <font>
+         <family>Arial</family>
+        </font>
+       </property>
+       <property name="styleSheet">
+        <string notr="true">color: rgb(51, 51, 51)</string>
+       </property>
+       <property name="text">
+        <string>Max Frames Over Desired</string>
+       </property>
+       <property name="indent">
+        <number>15</number>
+       </property>
+       <property name="buddy">
+        <cstring>maxFramesOverDesiredSpin</cstring>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_12">
+       <property name="font">
+        <font>
+         <family>Arial</family>
+        </font>
+       </property>
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QSpinBox" name="maxFramesOverDesiredSpin">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="minimumSize">
+        <size>
+         <width>95</width>
+         <height>36</height>
+        </size>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>70</width>
+         <height>16777215</height>
+        </size>
+       </property>
+       <property name="font">
+        <font>
+         <family>Arial</family>
+        </font>
+       </property>
+       <property name="minimum">
+        <number>0</number>
+       </property>
+       <property name="maximum">
+        <number>10000</number>
+       </property>
+       <property name="value">
+        <number>1</number>
+       </property>
+      </widget>
+     </item>
+    </layout>
+   </item>
    <item>
    <layout class="QHBoxLayout" name="horizontalLayout_6">
     <property name="spacing">
@@ -20,15 +20,12 @@
 
 AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode, int numFramesCapacity) :
-    NodeData(),
-    _overflowCount(0),
     _frameCapacity(numFramesCapacity),
     _sampleCapacity(numFrameSamples * numFramesCapacity),
     _isFull(false),
     _numFrameSamples(numFrameSamples),
-    _isStarved(true),
-    _hasStarted(false),
-    _randomAccessMode(randomAccessMode)
+    _randomAccessMode(randomAccessMode),
+    _overflowCount(0)
 {
     if (numFrameSamples) {
         _buffer = new int16_t[_sampleCapacity];
@@ -49,11 +46,8 @@ AudioRingBuffer::~AudioRingBuffer() {
 }
 
 void AudioRingBuffer::reset() {
+    clear();
     _overflowCount = 0;
-    _isFull = false;
-    _endOfLastWrite = _buffer;
-    _nextOutput = _buffer;
-    _isStarved = true;
 }
 
 void AudioRingBuffer::resizeForFrameSize(int numFrameSamples) {
@@ -67,10 +61,10 @@ void AudioRingBuffer::resizeForFrameSize(int numFrameSamples) {
         reset();
 }
 
-int AudioRingBuffer::parseData(const QByteArray& packet) {
-    // skip packet header and sequence number
-    int numBytesBeforeAudioData = numBytesForPacketHeader(packet) + sizeof(quint16);
-    return writeData(packet.data() + numBytesBeforeAudioData, packet.size() - numBytesBeforeAudioData);
+void AudioRingBuffer::clear() {
+    _isFull = false;
+    _endOfLastWrite = _buffer;
+    _nextOutput = _buffer;
 }
 
 int AudioRingBuffer::readSamples(int16_t* destination, int maxSamples) {
@@ -211,14 +205,6 @@ int AudioRingBuffer::addSilentFrame(int numSilentSamples) {
     return numSilentSamples * sizeof(int16_t);
 }
 
-bool AudioRingBuffer::isNotStarvedOrHasMinimumSamples(int numRequiredSamples) const {
-    if (!_isStarved) {
-        return true;
-    } else {
-        return samplesAvailable() >= numRequiredSamples;
-    }
-}
-
 int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const {
 
     if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + _sampleCapacity) {
@@ -231,3 +217,27 @@ int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int
         return position + numSamplesShift;
     }
 }
+
+float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
+    float loudness = 0.0f;
+    const int16_t* sampleAt = frameStart;
+    const int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
+
+    for (int i = 0; i < _numFrameSamples; ++i) {
+        loudness += fabsf(*sampleAt);
+        sampleAt = sampleAt == _bufferLastAt ? _buffer : sampleAt + 1;
+    }
+    loudness /= _numFrameSamples;
+    loudness /= MAX_SAMPLE_VALUE;
+
+    return loudness;
+}
+
+float AudioRingBuffer::getFrameLoudness(ConstIterator frameStart) const {
+    return getFrameLoudness(&(*frameStart));
+}
+
+float AudioRingBuffer::getNextOutputFrameLoudness() const {
+    return getFrameLoudness(_nextOutput);
+}
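For reference, the loudness added above is the mean absolute sample value of one frame, normalized by MAX_SAMPLE_VALUE so silence reads 0.0 and a full-scale signal approaches 1.0. A standalone sketch of the same computation over a flat (non-wrapping) buffer, assuming the usual 16-bit full-scale constant:

// Standalone sketch of the frame-loudness computation introduced above:
// mean absolute sample value, normalized to 0..1. The ring buffer's
// wrap-around handling is omitted; MAX_SAMPLE_VALUE is assumed to be
// the int16_t maximum, as in the library.
#include <cmath>
#include <cstdint>
#include <cstdio>

float frameLoudness(const int16_t* samples, int numSamples) {
    const float MAX_SAMPLE_VALUE = 32767.0f;
    float loudness = 0.0f;
    for (int i = 0; i < numSamples; ++i) {
        loudness += fabsf(samples[i]);
    }
    return loudness / numSamples / MAX_SAMPLE_VALUE;
}

int main() {
    int16_t frame[4] = { 16384, -16384, 16384, -16384 };   // half-scale square wave
    printf("%.2f\n", frameLoudness(frame, 4));             // prints 0.50
}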
@@ -37,8 +37,7 @@ const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
 
 const int DEFAULT_RING_BUFFER_FRAME_CAPACITY = 10;
 
-class AudioRingBuffer : public NodeData {
-    Q_OBJECT
+class AudioRingBuffer {
 public:
     AudioRingBuffer(int numFrameSamples, bool randomAccessMode = false, int numFramesCapacity = DEFAULT_RING_BUFFER_FRAME_CAPACITY);
     ~AudioRingBuffer();
@@ -46,14 +45,11 @@ public:
     void reset();
     void resizeForFrameSize(int numFrameSamples);
 
-    int getSampleCapacity() const { return _sampleCapacity; }
-
-    int parseData(const QByteArray& packet);
-
-    // assume callers using this will never wrap around the end
-    const int16_t* getNextOutput() const { return _nextOutput; }
-    const int16_t* getBuffer() const { return _buffer; }
+    void clear();
+
+    int getSampleCapacity() const { return _sampleCapacity; }
+    int getFrameCapacity() const { return _frameCapacity; }
 
     int readSamples(int16_t* destination, int maxSamples);
     int writeSamples(const int16_t* source, int maxSamples);
@@ -64,21 +60,21 @@ public:
     const int16_t& operator[] (const int index) const;
 
     void shiftReadPosition(unsigned int numSamples);
 
+    float getNextOutputFrameLoudness() const;
+
     int samplesAvailable() const;
     int framesAvailable() const { return samplesAvailable() / _numFrameSamples; }
 
     int getNumFrameSamples() const { return _numFrameSamples; }
 
-    bool isNotStarvedOrHasMinimumSamples(int numRequiredSamples) const;
-
-    bool isStarved() const { return _isStarved; }
-    void setIsStarved(bool isStarved) { _isStarved = isStarved; }
-
     int getOverflowCount() const { return _overflowCount; } /// how many times the ring buffer has overwritten old data
-    bool hasStarted() const { return _hasStarted; }
 
     int addSilentFrame(int numSilentSamples);
 
+private:
+    float getFrameLoudness(const int16_t* frameStart) const;
+
 protected:
     // disallow copying of AudioRingBuffer objects
     AudioRingBuffer(const AudioRingBuffer&);
@@ -86,8 +82,6 @@ protected:
 
     int16_t* shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const;
 
-    int _overflowCount; /// how many times the ring buffer has overwritten old data
-
     int _frameCapacity;
     int _sampleCapacity;
     bool _isFull;
@@ -95,9 +89,98 @@ protected:
     int16_t* _nextOutput;
     int16_t* _endOfLastWrite;
     int16_t* _buffer;
-    bool _isStarved;
-    bool _hasStarted;
     bool _randomAccessMode; /// will this ringbuffer be used for random access? if so, do some special processing
 
+    int _overflowCount; /// how many times the ring buffer has overwritten old data
+
+public:
+    class ConstIterator { //public std::iterator < std::forward_iterator_tag, int16_t > {
+    public:
+        ConstIterator()
+            : _capacity(0),
+            _bufferFirst(NULL),
+            _bufferLast(NULL),
+            _at(NULL) {}
+
+        ConstIterator(int16_t* bufferFirst, int capacity, int16_t* at)
+            : _capacity(capacity),
+            _bufferFirst(bufferFirst),
+            _bufferLast(bufferFirst + capacity - 1),
+            _at(at) {}
+
+        bool operator==(const ConstIterator& rhs) { return _at == rhs._at; }
+        bool operator!=(const ConstIterator& rhs) { return _at != rhs._at; }
+        const int16_t& operator*() { return *_at; }
+
+        ConstIterator& operator=(const ConstIterator& rhs) {
+            _capacity = rhs._capacity;
+            _bufferFirst = rhs._bufferFirst;
+            _bufferLast = rhs._bufferLast;
+            _at = rhs._at;
+            return *this;
+        }
+
+        ConstIterator& operator++() {
+            _at = (_at == _bufferLast) ? _bufferFirst : _at + 1;
+            return *this;
+        }
+
+        ConstIterator operator++(int) {
+            ConstIterator tmp(*this);
+            ++(*this);
+            return tmp;
+        }
+
+        ConstIterator& operator--() {
+            _at = (_at == _bufferFirst) ? _bufferLast : _at - 1;
+            return *this;
+        }
+
+        ConstIterator operator--(int) {
+            ConstIterator tmp(*this);
+            --(*this);
+            return tmp;
+        }
+
+        const int16_t& operator[] (int i) {
+            return *atShiftedBy(i);
+        }
+
+        ConstIterator operator+(int i) {
+            return ConstIterator(_bufferFirst, _capacity, atShiftedBy(i));
+        }
+
+        ConstIterator operator-(int i) {
+            return ConstIterator(_bufferFirst, _capacity, atShiftedBy(-i));
+        }
+
+        void readSamples(int16_t* dest, int numSamples) {
+            for (int i = 0; i < numSamples; i++) {
+                *dest = *(*this);
+                ++dest;
+                ++(*this);
+            }
+        }
+
+    private:
+        int16_t* atShiftedBy(int i) {
+            i = (_at - _bufferFirst + i) % _capacity;
+            if (i < 0) {
+                i += _capacity;
+            }
+            return _bufferFirst + i;
+        }
+
+    private:
+        int _capacity;
+        int16_t* _bufferFirst;
+        int16_t* _bufferLast;
+        int16_t* _at;
+    };
+
+    ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); }
+
+    float getFrameLoudness(ConstIterator frameStart) const;
 };
 
 #endif // hifi_AudioRingBuffer_h
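The ConstIterator introduced above gives readers a pointer-like view that silently wraps from the last slot of the backing array to the first. A toy demonstration of that wrap rule (the buffer and starting offset here are hypothetical):

// Toy demonstration of the wrap-around rule the new ConstIterator encodes:
// advancing past the last slot continues at the first. This re-implements
// just the increment logic; the real class also supports +/-, [], and readSamples.
#include <cstdint>
#include <cstdio>

int main() {
    int16_t ring[4] = { 10, 20, 30, 40 };
    int16_t* first = ring;
    int16_t* last = ring + 3;
    int16_t* at = ring + 2;                   // start two slots from the end

    for (int i = 0; i < 5; i++) {
        printf("%d ", *at);                   // prints: 30 40 10 20 30
        at = (at == last) ? first : at + 1;   // the ConstIterator++ rule
    }
    printf("\n");
}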
@@ -12,13 +12,12 @@
 #ifndef hifi_AudioStreamStats_h
 #define hifi_AudioStreamStats_h
 
-#include "PositionalAudioRingBuffer.h"
 #include "SequenceNumberStats.h"
 
 class AudioStreamStats {
 public:
     AudioStreamStats()
-        : _streamType(PositionalAudioRingBuffer::Microphone),
+        : _streamType(-1),
         _streamIdentifier(),
         _timeGapMin(0),
         _timeGapMax(0),
@@ -26,18 +25,18 @@ public:
         _timeGapWindowMin(0),
         _timeGapWindowMax(0),
         _timeGapWindowAverage(0.0f),
-        _ringBufferFramesAvailable(0),
-        _ringBufferFramesAvailableAverage(0),
-        _ringBufferDesiredJitterBufferFrames(0),
-        _ringBufferStarveCount(0),
-        _ringBufferConsecutiveNotMixedCount(0),
-        _ringBufferOverflowCount(0),
-        _ringBufferSilentFramesDropped(0),
+        _framesAvailable(0),
+        _framesAvailableAverage(0),
+        _desiredJitterBufferFrames(0),
+        _starveCount(0),
+        _consecutiveNotMixedCount(0),
+        _overflowCount(0),
+        _framesDropped(0),
         _packetStreamStats(),
         _packetStreamWindowStats()
     {}
 
-    PositionalAudioRingBuffer::Type _streamType;
+    qint32 _streamType;
     QUuid _streamIdentifier;
 
     quint64 _timeGapMin;
@@ -47,13 +46,13 @@ public:
     quint64 _timeGapWindowMax;
     float _timeGapWindowAverage;
 
-    quint32 _ringBufferFramesAvailable;
-    quint16 _ringBufferFramesAvailableAverage;
-    quint16 _ringBufferDesiredJitterBufferFrames;
-    quint32 _ringBufferStarveCount;
-    quint32 _ringBufferConsecutiveNotMixedCount;
-    quint32 _ringBufferOverflowCount;
-    quint32 _ringBufferSilentFramesDropped;
+    quint32 _framesAvailable;
+    quint16 _framesAvailableAverage;
+    quint16 _desiredJitterBufferFrames;
+    quint32 _starveCount;
+    quint32 _consecutiveNotMixedCount;
+    quint32 _overflowCount;
+    quint32 _framesDropped;
 
     PacketStreamStats _packetStreamStats;
     PacketStreamStats _packetStreamWindowStats;
libraries/audio/src/InboundAudioStream.cpp (new file, 307 lines)
@@ -0,0 +1,307 @@
+//
+//  InboundAudioStream.cpp
+//  libraries/audio/src
+//
+//  Created by Yixin Wang on 7/17/2014
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "InboundAudioStream.h"
+#include "PacketHeaders.h"
+
+InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity,
+    bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc) :
+    _ringBuffer(numFrameSamples, false, numFramesCapacity),
+    _lastPopSucceeded(false),
+    _lastPopOutput(),
+    _dynamicJitterBuffers(dynamicJitterBuffers),
+    _staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
+    _useStDevForJitterCalc(useStDevForJitterCalc),
+    _calculatedJitterBufferFramesUsingMaxGap(0),
+    _calculatedJitterBufferFramesUsingStDev(0),
+    _desiredJitterBufferFrames(dynamicJitterBuffers ? 1 : staticDesiredJitterBufferFrames),
+    _maxFramesOverDesired(maxFramesOverDesired),
+    _isStarved(true),
+    _hasStarted(false),
+    _consecutiveNotMixedCount(0),
+    _starveCount(0),
+    _silentFramesDropped(0),
+    _oldFramesDropped(0),
+    _incomingSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS),
+    _lastFrameReceivedTime(0),
+    _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS),
+    _interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS),
+    _framesAvailableStat(),
+    _currentJitterBufferFrames(0)
+{
+}
+
+void InboundAudioStream::reset() {
+    _ringBuffer.reset();
+    _lastPopSucceeded = false;
+    _lastPopOutput = AudioRingBuffer::ConstIterator();
+    _isStarved = true;
+    _hasStarted = false;
+    resetStats();
+}
+
+void InboundAudioStream::resetStats() {
+    if (_dynamicJitterBuffers) {
+        _desiredJitterBufferFrames = 1;
+    }
+    _consecutiveNotMixedCount = 0;
+    _starveCount = 0;
+    _silentFramesDropped = 0;
+    _oldFramesDropped = 0;
+    _incomingSequenceNumberStats.reset();
+    _lastFrameReceivedTime = 0;
+    _interframeTimeGapStatsForJitterCalc.reset();
+    _interframeTimeGapStatsForStatsPacket.reset();
+    _framesAvailableStat.reset();
+    _currentJitterBufferFrames = 0;
+}
+
+void InboundAudioStream::clearBuffer() {
+    _ringBuffer.clear();
+    _framesAvailableStat.reset();
+    _currentJitterBufferFrames = 0;
+}
+
+int InboundAudioStream::parseData(const QByteArray& packet) {
+    PacketType packetType = packetTypeForPacket(packet);
+    QUuid senderUUID = uuidFromPacketHeader(packet);
+
+    // parse header
+    int numBytesHeader = numBytesForPacketHeader(packet);
+    const char* sequenceAt = packet.constData() + numBytesHeader;
+    int readBytes = numBytesHeader;
+
+    // parse sequence number and track it
+    quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));
+    readBytes += sizeof(quint16);
+    SequenceNumberStats::ArrivalInfo arrivalInfo = frameReceivedUpdateNetworkStats(sequence, senderUUID);
+
+    // TODO: handle generalized silent packet here?????
+
+    // parse the info after the seq number and before the audio data (the stream properties)
+    int numAudioSamples;
+    readBytes += parseStreamProperties(packetType, packet.mid(readBytes), numAudioSamples);
+
+    // handle this packet based on its arrival status.
+    // For now, late packets are ignored. It may be good in the future to insert the late audio frame
+    // into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
+    switch (arrivalInfo._status) {
+        case SequenceNumberStats::Early: {
+            int packetsDropped = arrivalInfo._seqDiffFromExpected;
+            writeSamplesForDroppedPackets(packetsDropped * numAudioSamples);
+            // fall through to OnTime case
+        }
+        case SequenceNumberStats::OnTime: {
+            readBytes += parseAudioData(packetType, packet.mid(readBytes), numAudioSamples);
+            break;
+        }
+        default: {
+            break;
+        }
+    }
+
+    int framesAvailable = _ringBuffer.framesAvailable();
+    // if this stream was starved, check if we're still starved.
+    if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
+        _isStarved = false;
+    }
+    // if the ringbuffer exceeds the desired size by more than the threshold specified,
+    // drop the oldest frames so the ringbuffer is down to the desired size.
+    if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) {
+        int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
+        _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples());
+
+        _framesAvailableStat.reset();
+        _currentJitterBufferFrames = 0;
+
+        _oldFramesDropped += framesToDrop;
+    }
+
+    framesAvailableChanged();
+
+    return readBytes;
+}
+
+bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) {
+    int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples();
+    if (_isStarved) {
+        // we're still refilling; don't pop
+        _consecutiveNotMixedCount++;
+        _lastPopSucceeded = false;
+    } else {
+        if (_ringBuffer.samplesAvailable() >= numSamplesRequested) {
+            // we have enough samples to pop, so we're good to mix
+            _lastPopOutput = _ringBuffer.nextOutput();
+            _ringBuffer.shiftReadPosition(numSamplesRequested);
+            framesAvailableChanged();
+
+            _hasStarted = true;
+            _lastPopSucceeded = true;
+        } else {
+            // we don't have enough samples, so set this stream to starve
+            // if starveOnFail is true
+            if (starveOnFail) {
+                starved();
+                _consecutiveNotMixedCount++;
+            }
+            _lastPopSucceeded = false;
+        }
+    }
+    return _lastPopSucceeded;
+}
+
+void InboundAudioStream::framesAvailableChanged() {
+    _framesAvailableStat.updateWithSample(_ringBuffer.framesAvailable());
+
+    if (_framesAvailableStat.getElapsedUsecs() >= FRAMES_AVAILABLE_STAT_WINDOW_USECS) {
+        _currentJitterBufferFrames = (int)ceil(_framesAvailableStat.getAverage());
+        _framesAvailableStat.reset();
+    }
+}
+
+void InboundAudioStream::setToStarved() {
+    starved();
+    if (_ringBuffer.framesAvailable() >= _desiredJitterBufferFrames) {
+        _isStarved = false;
+    }
+}
+
+void InboundAudioStream::starved() {
+    _isStarved = true;
+    _consecutiveNotMixedCount = 0;
+    _starveCount++;
+}
+
+void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
+    if (!dynamicJitterBuffers) {
+        _desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
+    } else {
+        if (!_dynamicJitterBuffers) {
+            _desiredJitterBufferFrames = 1;
+        }
+    }
+    _dynamicJitterBuffers = dynamicJitterBuffers;
+}
+
+void InboundAudioStream::setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) {
+    _staticDesiredJitterBufferFrames = staticDesiredJitterBufferFrames;
+    if (!_dynamicJitterBuffers) {
+        _desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
+    }
+}
+
+int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
+    const int MIN_FRAMES_DESIRED = 0;
+    const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity();
+    return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED);
+}
+
+SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID) {
+    // track the sequence number we received
+    SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequenceNumber, senderUUID);
+
+    // update our timegap stats and desired jitter buffer frames if necessary
+    // discard the first few packets we receive since they usually have gaps that aren't representative of normal jitter
+    const int NUM_INITIAL_PACKETS_DISCARD = 3;
+    quint64 now = usecTimestampNow();
+    if (_incomingSequenceNumberStats.getNumReceived() > NUM_INITIAL_PACKETS_DISCARD) {
+        quint64 gap = now - _lastFrameReceivedTime;
+        _interframeTimeGapStatsForStatsPacket.update(gap);
+
+        const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;
+
+        // update stats for Freddy's method of jitter calc
+        _interframeTimeGapStatsForJitterCalc.update(gap);
+        if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) {
+            _calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
+            _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag();
+
+            if (_dynamicJitterBuffers && !_useStDevForJitterCalc) {
+                _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingMaxGap);
+            }
+        }
+
+        // update stats for Philip's method of jitter calc
+        _stdev.addValue(gap);
+        const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;
+        if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
+            const float NUM_STANDARD_DEVIATIONS = 3.0f;
+            _calculatedJitterBufferFramesUsingStDev = (int)ceilf(NUM_STANDARD_DEVIATIONS * _stdev.getStDev() / USECS_PER_FRAME);
+            _stdev.reset();
+
+            if (_dynamicJitterBuffers && _useStDevForJitterCalc) {
+                _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingStDev);
+            }
+        }
+    }
+    _lastFrameReceivedTime = now;
+
+    return arrivalInfo;
+}
+
+int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) {
+
+    // calculate how many silent frames we should drop.
+    int samplesPerFrame = _ringBuffer.getNumFrameSamples();
+    int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
+    int numSilentFramesToDrop = 0;
+
+    if (numSilentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
+
+        // our avg jitter buffer size exceeds its desired value, so ignore some silent
+        // frames to get that size as close to desired as possible
+        int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
+        int numSilentFramesReceived = numSilentSamples / samplesPerFrame;
+        numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);
+
+        // don't reset _currentJitterBufferFrames here; we want to be able to drop further silent frames
+        // without waiting for _framesAvailableStat to fill up to 10s of samples.
+        _currentJitterBufferFrames -= numSilentFramesToDrop;
+        _silentFramesDropped += numSilentFramesToDrop;
+
+        _framesAvailableStat.reset();
+    }
+
+    return _ringBuffer.addSilentFrame(numSilentSamples - numSilentFramesToDrop * samplesPerFrame);
+}
+
+int InboundAudioStream::writeSamplesForDroppedPackets(int numSamples) {
+    return writeDroppableSilentSamples(numSamples);
+}
+
+AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
+    AudioStreamStats streamStats;
+
+    streamStats._timeGapMin = _interframeTimeGapStatsForStatsPacket.getMin();
+    streamStats._timeGapMax = _interframeTimeGapStatsForStatsPacket.getMax();
+    streamStats._timeGapAverage = _interframeTimeGapStatsForStatsPacket.getAverage();
+    streamStats._timeGapWindowMin = _interframeTimeGapStatsForStatsPacket.getWindowMin();
+    streamStats._timeGapWindowMax = _interframeTimeGapStatsForStatsPacket.getWindowMax();
+    streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage();
+
+    streamStats._framesAvailable = _ringBuffer.framesAvailable();
+    streamStats._framesAvailableAverage = _framesAvailableStat.getAverage();
+    streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames;
+    streamStats._starveCount = _starveCount;
+    streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount;
+    streamStats._overflowCount = _ringBuffer.getOverflowCount();
+    streamStats._framesDropped = _silentFramesDropped + _oldFramesDropped; // TODO: add separate stat for old frames dropped
+
+    streamStats._packetStreamStats = _incomingSequenceNumberStats.getStats();
+    streamStats._packetStreamWindowStats = _incomingSequenceNumberStats.getStatsForHistoryWindow();
+
+    return streamStats;
+}
+
+AudioStreamStats InboundAudioStream::updateSeqHistoryAndGetAudioStreamStats() {
+    _incomingSequenceNumberStats.pushStatsToHistory();
+    return getAudioStreamStats();
+}
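The max-gap sizing above ("Freddy's method") reduces to: desired frames = ceil(worst observed inter-arrival gap / frame interval). A worked sketch under an assumed ~21.3 ms frame interval (512 samples per channel at 24 kHz); the real constants come from the shared audio headers:

// Sketch of the max-gap jitter calculation in frameReceivedUpdateNetworkStats:
// desired frames = ceil(max inter-arrival gap / frame interval). The frame
// interval below is an assumption for illustration only.
#include <cmath>
#include <cstdio>

int main() {
    const float usecsPerFrame = 512.0f / 24000.0f * 1e6f;   // ~21333 us, assumed
    float maxGapUsecs = 65000.0f;                           // worst gap seen in the window

    int desiredJitterBufferFrames = (int)ceilf(maxGapUsecs / usecsPerFrame);
    printf("%d\n", desiredJitterBufferFrames);              // prints 4
}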
libraries/audio/src/InboundAudioStream.h (new file, 183 lines)
@@ -0,0 +1,183 @@
+//
+//  InboundAudioStream.h
+//  libraries/audio/src
+//
+//  Created by Yixin Wang on 7/17/2014.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_InboundAudioStream_h
+#define hifi_InboundAudioStream_h
+
+#include "NodeData.h"
+#include "AudioRingBuffer.h"
+#include "MovingMinMaxAvg.h"
+#include "SequenceNumberStats.h"
+#include "AudioStreamStats.h"
+#include "PacketHeaders.h"
+#include "StdDev.h"
+#include "TimeWeightedAvg.h"
+
+// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames.
+// The larger this value is, the less aggressive we are about reducing the jitter buffer length.
+// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames long when dropping frames,
+// which could lead to a starve soon after.
+const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
+
+// the time gaps stats for _desiredJitterBufferFrames calculation
+// will recalculate the max for the past 5000 samples every 500 samples
+const int TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES = 500;
+const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10;
+
+// the time gap stats for constructing AudioStreamStats will
+// recalculate min/max/avg every ~1 second for the past ~30 seconds of time gap data
+const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
+const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30;
+
+// this controls the window size of the time-weighted avg of frames available. Every time the window fills up,
+// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
+const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 2 * USECS_PER_SECOND;
+
+// the internal history buffer of the incoming seq stats will cover 30s to calculate
+// packet loss % over last 30s
+const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
+
+const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
+
+const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10;
+const int DEFAULT_DESIRED_JITTER_BUFFER_FRAMES = 1;
+
+class InboundAudioStream : public NodeData {
+    Q_OBJECT
+public:
+    InboundAudioStream(int numFrameSamples, int numFramesCapacity,
+        bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired,
+        bool useStDevForJitterCalc = false);
+
+    void reset();
+    void resetStats();
+    void clearBuffer();
+
+    virtual int parseData(const QByteArray& packet);
+
+    bool popFrames(int numFrames, bool starveOnFail = true);
+
+    bool lastPopSucceeded() const { return _lastPopSucceeded; };
+    const AudioRingBuffer::ConstIterator& getLastPopOutput() const { return _lastPopOutput; }
+
+    void setToStarved();
+
+    void setDynamicJitterBuffers(bool dynamicJitterBuffers);
+    void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames);
+
+    /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds
+    AudioStreamStats updateSeqHistoryAndGetAudioStreamStats();
+
+    void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
+
+    virtual AudioStreamStats getAudioStreamStats() const;
+
+    /// returns the desired number of jitter buffer frames under the dynamic jitter buffers scheme
+    int getCalculatedJitterBufferFrames() const { return _useStDevForJitterCalc ?
+        _calculatedJitterBufferFramesUsingStDev : _calculatedJitterBufferFramesUsingMaxGap; };
+
+    /// returns the desired number of jitter buffer frames using Philip's method
+    int getCalculatedJitterBufferFramesUsingStDev() const { return _calculatedJitterBufferFramesUsingStDev; }
+
+    /// returns the desired number of jitter buffer frames using Freddy's method
+    int getCalculatedJitterBufferFramesUsingMaxGap() const { return _calculatedJitterBufferFramesUsingMaxGap; }
+
+    int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; }
+    int getMaxFramesOverDesired() const { return _maxFramesOverDesired; }
+    int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); }
+    int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); }
+    int getFramesAvailable() const { return _ringBuffer.framesAvailable(); }
+    double getFramesAvailableAverage() const { return _framesAvailableStat.getAverage(); }
+
+    bool isStarved() const { return _isStarved; }
+    bool hasStarted() const { return _hasStarted; }
+
+    int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; }
+    int getStarveCount() const { return _starveCount; }
+    int getSilentFramesDropped() const { return _silentFramesDropped; }
+    int getOverflowCount() const { return _ringBuffer.getOverflowCount(); }
+
+    int getPacketReceived() const { return _incomingSequenceNumberStats.getNumReceived(); }
+
+private:
+    void starved();
+
+    SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID);
+    int clampDesiredJitterBufferFramesValue(int desired) const;
+
+    int writeSamplesForDroppedPackets(int numSamples);
+
+    void framesAvailableChanged();
+
+protected:
+    // disallow copying of InboundAudioStream objects
+    InboundAudioStream(const InboundAudioStream&);
+    InboundAudioStream& operator= (const InboundAudioStream&);
+
+    /// parses the info between the seq num and the audio data in the network packet and calculates
+    /// how many audio samples this packet contains
+    virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
+
+    /// parses the audio data in the network packet
+    virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0;
+
+    int writeDroppableSilentSamples(int numSilentSamples);
+
+protected:
+    AudioRingBuffer _ringBuffer;
+
+    bool _lastPopSucceeded;
+    AudioRingBuffer::ConstIterator _lastPopOutput;
+
+    bool _dynamicJitterBuffers; // if false, _desiredJitterBufferFrames is locked at 1 (old behavior)
+    int _staticDesiredJitterBufferFrames;
+
+    // if jitter buffer is dynamic, this determines what method of calculating _desiredJitterBufferFrames is used:
+    // if true, Philip's timegap std dev calculation is used. Otherwise, Freddy's max timegap calculation is used
+    bool _useStDevForJitterCalc;
+    int _calculatedJitterBufferFramesUsingMaxGap;
+    int _calculatedJitterBufferFramesUsingStDev;
+
+    int _desiredJitterBufferFrames;
+
+    // if there are more than _desiredJitterBufferFrames + _maxFramesOverDesired frames, old ringbuffer frames
+    // will be dropped to keep audio delay from building up
+    int _maxFramesOverDesired;
+
+    bool _isStarved;
+    bool _hasStarted;
+
+    // stats
+
+    int _consecutiveNotMixedCount;
+    int _starveCount;
+    int _silentFramesDropped;
+    int _oldFramesDropped;
+
+    SequenceNumberStats _incomingSequenceNumberStats;
+
+    quint64 _lastFrameReceivedTime;
+    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForJitterCalc;
+    StDev _stdev;
+    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForStatsPacket;
+
+    TimeWeightedAvg<int> _framesAvailableStat;
+
+    // this value is based on the time-weighted avg from _framesAvailableStat. it is only used for
+    // dropping silent frames right now.
+    int _currentJitterBufferFrames;
+};
+
+#endif // hifi_InboundAudioStream_h
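A typical consumer of this interface pops a frame per mixing tick and reads through the returned ConstIterator only when the pop succeeded. The following is a hypothetical sketch of that calling pattern against the API declared above, not code from this commit:

// Hypothetical consumer of the InboundAudioStream API above: pop one frame per
// tick; on success, copy samples out through the wrap-aware iterator.
// NETWORK_FRAME_SAMPLES is an assumed frame size for illustration.
#include <cstdint>
#include "InboundAudioStream.h"

void mixOneFrame(InboundAudioStream& stream) {
    const int NETWORK_FRAME_SAMPLES = 512;                 // assumed frame size
    int16_t scratch[NETWORK_FRAME_SAMPLES];

    if (stream.popFrames(1)) {                             // may starve the stream on failure
        AudioRingBuffer::ConstIterator out = stream.getLastPopOutput();
        out.readSamples(scratch, NETWORK_FRAME_SAMPLES);   // copies across the ring's wrap point
        // ... mix `scratch` into the output here ...
    }
    // on failure the stream refills until it reaches its desired depth again
}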
@@ -1,71 +0,0 @@
-//
-//  InjectedAudioRingBuffer.cpp
-//  libraries/audio/src
-//
-//  Created by Stephen Birarda on 6/5/13.
-//  Copyright 2013 High Fidelity, Inc.
-//
-//  Distributed under the Apache License, Version 2.0.
-//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
-//
-
-#include <cstring>
-
-#include <QtCore/QDataStream>
-#include <QtCore/qdebug.h>
-
-#include <PacketHeaders.h>
-#include <UUID.h>
-
-#include "InjectedAudioRingBuffer.h"
-
-InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier, bool dynamicJitterBuffer) :
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Injector, false, dynamicJitterBuffer),
-    _streamIdentifier(streamIdentifier),
-    _radius(0.0f),
-    _attenuationRatio(0)
-{
-
-}
-
-const uchar MAX_INJECTOR_VOLUME = 255;
-
-int InjectedAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) {
-    frameReceivedUpdateTimingStats();
-
-    // setup a data stream to read from this packet
-    QDataStream packetStream(packet);
-    packetStream.skipRawData(numBytesForPacketHeader(packet));
-
-    // push past the sequence number
-    packetStream.skipRawData(sizeof(quint16));
-
-    // push past the stream identifier
-    packetStream.skipRawData(NUM_BYTES_RFC4122_UUID);
-
-    // pull the loopback flag and set our boolean
-    uchar shouldLoopback;
-    packetStream >> shouldLoopback;
-    _shouldLoopbackForNode = (shouldLoopback == 1);
-
-    // use parsePositionalData in parent PositionalAudioRingBuffer class to pull common positional data
-    packetStream.skipRawData(parsePositionalData(packet.mid(packetStream.device()->pos())));
-
-    // pull out the radius for this injected source - if it's zero this is a point source
-    packetStream >> _radius;
-
-    quint8 attenuationByte = 0;
-    packetStream >> attenuationByte;
-    _attenuationRatio = attenuationByte / (float) MAX_INJECTOR_VOLUME;
-
-    int numAudioBytes = packet.size() - packetStream.device()->pos();
-    int numAudioSamples = numAudioBytes / sizeof(int16_t);
-
-    // add silent samples for the dropped packets.
-    // ASSUME that each dropped packet had same number of samples as this one
-    addDroppableSilentSamples(numAudioSamples * packetsSkipped);
-
-    packetStream.skipRawData(writeData(packet.data() + packetStream.device()->pos(), numAudioBytes));
-
-    return packetStream.device()->pos();
-}
@@ -1,38 +0,0 @@
-//
-//  InjectedAudioRingBuffer.h
-//  libraries/audio/src
-//
-//  Created by Stephen Birarda on 6/5/13.
-//  Copyright 2013 High Fidelity, Inc.
-//
-//  Distributed under the Apache License, Version 2.0.
-//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
-//
-
-#ifndef hifi_InjectedAudioRingBuffer_h
-#define hifi_InjectedAudioRingBuffer_h
-
-#include <QtCore/QUuid>
-
-#include "PositionalAudioRingBuffer.h"
-
-class InjectedAudioRingBuffer : public PositionalAudioRingBuffer {
-public:
-    InjectedAudioRingBuffer(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false);
-
-    int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped);
-
-    const QUuid& getStreamIdentifier() const { return _streamIdentifier; }
-    float getRadius() const { return _radius; }
-    float getAttenuationRatio() const { return _attenuationRatio; }
-private:
-    // disallow copying of InjectedAudioRingBuffer objects
-    InjectedAudioRingBuffer(const InjectedAudioRingBuffer&);
-    InjectedAudioRingBuffer& operator= (const InjectedAudioRingBuffer&);
-
-    QUuid _streamIdentifier;
-    float _radius;
-    float _attenuationRatio;
-};
-
-#endif // hifi_InjectedAudioRingBuffer_h
libraries/audio/src/InjectedAudioStream.cpp (new file, 69 lines)
@@ -0,0 +1,69 @@
+//
+//  InjectedAudioStream.cpp
+//  libraries/audio/src
+//
+//  Created by Stephen Birarda on 6/5/13.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include <cstring>
+
+#include <QtCore/QDataStream>
+#include <QtCore/qdebug.h>
+
+#include <PacketHeaders.h>
+#include <UUID.h>
+
+#include "InjectedAudioStream.h"
+
+InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
+    PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired),
+    _streamIdentifier(streamIdentifier),
+    _radius(0.0f),
+    _attenuationRatio(0)
+{
+
+}
+
+const uchar MAX_INJECTOR_VOLUME = 255;
+
+int InjectedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+    // setup a data stream to read from this packet
+    QDataStream packetStream(packetAfterSeqNum);
+
+    // skip the stream identifier
+    packetStream.skipRawData(NUM_BYTES_RFC4122_UUID);
+
+    // pull the loopback flag and set our boolean
+    uchar shouldLoopback;
+    packetStream >> shouldLoopback;
+    _shouldLoopbackForNode = (shouldLoopback == 1);
+
+    // use parsePositionalData in the parent PositionalAudioStream class to pull common positional data
+    packetStream.skipRawData(parsePositionalData(packetAfterSeqNum.mid(packetStream.device()->pos())));
+
+    // pull out the radius for this injected source - if it's zero this is a point source
+    packetStream >> _radius;
+
+    quint8 attenuationByte = 0;
+    packetStream >> attenuationByte;
+    _attenuationRatio = attenuationByte / (float)MAX_INJECTOR_VOLUME;
+
+    int numAudioBytes = packetAfterSeqNum.size() - packetStream.device()->pos();
+    numAudioSamples = numAudioBytes / sizeof(int16_t);
+
+    return packetStream.device()->pos();
+}
+
+int InjectedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
+}
+
+AudioStreamStats InjectedAudioStream::getAudioStreamStats() const {
+    AudioStreamStats streamStats = PositionalAudioStream::getAudioStreamStats();
+    streamStats._streamIdentifier = _streamIdentifier;
+    return streamStats;
+}
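Worth noting: InboundAudioStream::parseData is a template method, and concrete streams such as the one above override only parseStreamProperties (the bytes between the sequence number and the audio payload) and parseAudioData. A minimal sketch of that shape with illustrative types, not the library's:

// Minimal sketch of the template-method split the stream classes use: the base
// class owns the packet walk; subclasses parse only their own fields.
#include <cstdio>

class StreamBase {
public:
    int parse(const char* packet, int size) {
        int read = parseProperties(packet, size);          // subclass hook #1
        read += parseAudio(packet + read, size - read);    // subclass hook #2
        return read;
    }
    virtual ~StreamBase() {}
protected:
    virtual int parseProperties(const char* data, int size) = 0;
    virtual int parseAudio(const char* data, int size) = 0;
};

class MixedStream : public StreamBase {
protected:
    int parseProperties(const char*, int) { return 0; }    // nothing between seq num and audio
    int parseAudio(const char*, int size) { return size; } // whole remainder is audio
};

int main() {
    MixedStream s;
    printf("%d\n", s.parse("abcd", 4));                    // prints 4
}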
libraries/audio/src/InjectedAudioStream.h (new file, 42 lines)
@@ -0,0 +1,42 @@
+//
+//  InjectedAudioStream.h
+//  libraries/audio/src
+//
+//  Created by Stephen Birarda on 6/5/13.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_InjectedAudioStream_h
+#define hifi_InjectedAudioStream_h
+
+#include <QtCore/QUuid>
+
+#include "PositionalAudioStream.h"
+
+class InjectedAudioStream : public PositionalAudioStream {
+public:
+    InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
+
+    float getRadius() const { return _radius; }
+    float getAttenuationRatio() const { return _attenuationRatio; }
+
+    QUuid getStreamIdentifier() const { return _streamIdentifier; }
+
+private:
+    // disallow copying of InjectedAudioStream objects
+    InjectedAudioStream(const InjectedAudioStream&);
+    InjectedAudioStream& operator= (const InjectedAudioStream&);
+
+    AudioStreamStats getAudioStreamStats() const;
+    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
+    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
+
+    const QUuid _streamIdentifier;
+    float _radius;
+    float _attenuationRatio;
+};
+
+#endif // hifi_InjectedAudioStream_h
@ -1,52 +0,0 @@
//
//  MixedAudioRingBuffer.cpp
//  libraries/audio/src
//
//  Created by Stephen Birarda on 2014.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "MixedAudioRingBuffer.h"

MixedAudioRingBuffer::MixedAudioRingBuffer(int numFrameSamples) :
    AudioRingBuffer(numFrameSamples),
    _lastReadFrameAverageLoudness(0.0f)
{

}

qint64 MixedAudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) {
    // calculate the average loudness for the frame about to go out

    // read from _nextOutput either _numFrameSamples or to the end of the buffer
    int samplesFromNextOutput = _buffer + _sampleCapacity - _nextOutput;
    if (samplesFromNextOutput > _numFrameSamples) {
        samplesFromNextOutput = _numFrameSamples;
    }

    float averageLoudness = 0.0f;

    for (int s = 0; s < samplesFromNextOutput; s++) {
        averageLoudness += fabsf(_nextOutput[s]);
    }

    // read samples from the beginning of the buffer, if any
    int samplesFromBeginning = _numFrameSamples - samplesFromNextOutput;

    if (samplesFromBeginning > 0) {
        for (int b = 0; b < samplesFromBeginning; b++) {
            averageLoudness += fabsf(_buffer[b]);
        }
    }

    // divide by the number of samples and the MAX_SAMPLE_VALUE to get a float from 0 - 1
    averageLoudness /= (float) _numFrameSamples;
    averageLoudness /= (float) MAX_SAMPLE_VALUE;

    _lastReadFrameAverageLoudness = averageLoudness;

    return AudioRingBuffer::readSamples(destination, maxSamples);
}
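
The removed readSamples() computed loudness as the mean absolute sample value scaled into 0 - 1 before handing off to the base class. As a standalone illustration (assuming MAX_SAMPLE_VALUE is the int16_t full scale, 32767):

// hypothetical standalone version of the same normalization
#include <cmath>
#include <cstdint>

float frameAverageLoudness(const int16_t* samples, int numSamples) {
    const float MAX_SAMPLE_VALUE = 32767.0f;  // assumed int16_t full scale
    float sum = 0.0f;
    for (int i = 0; i < numSamples; i++) {
        sum += fabsf(samples[i]);
    }
    // mean |sample| divided by full scale gives a loudness in 0 - 1
    return (sum / numSamples) / MAX_SAMPLE_VALUE;
}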
@ -1,29 +0,0 @@
//
//  MixedAudioRingBuffer.h
//  libraries/audio/src
//
//  Created by Stephen Birarda on 2014.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_MixedAudioRingBuffer_h
#define hifi_MixedAudioRingBuffer_h

#include "AudioRingBuffer.h"

class MixedAudioRingBuffer : public AudioRingBuffer {
    Q_OBJECT
public:
    MixedAudioRingBuffer(int numFrameSamples);

    float getLastReadFrameAverageLoudness() const { return _lastReadFrameAverageLoudness; }

    qint64 readSamples(int16_t* destination, qint64 maxSamples);
private:
    float _lastReadFrameAverageLoudness;
};

#endif // hifi_MixedAudioRingBuffer_h
17  libraries/audio/src/MixedAudioStream.cpp  Normal file
@ -0,0 +1,17 @@

#include "MixedAudioStream.h"

MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
    : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
{
}

int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
    // mixed audio packets do not have any info between the seq num and the audio data.
    numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
    return 0;
}

int MixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}
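
Since a mixed-audio payload is raw int16_t PCM with nothing between the sequence number and the samples, the sample count falls directly out of the byte count. A small illustration (the payload size is made up):

// illustrative only: the byte-to-sample arithmetic used above
#include <QtCore/QByteArray>
#include <cstdint>

int sampleCountFor(const QByteArray& packetAfterSeqNum) {
    // e.g. a 960-byte payload -> 960 / 2 = 480 samples
    return (int)(packetAfterSeqNum.size() / sizeof(int16_t));
}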
29  libraries/audio/src/MixedAudioStream.h  Normal file
@ -0,0 +1,29 @@
//
//  MixedAudioStream.h
//  libraries/audio/src
//
//  Created by Stephen Birarda on 6/5/13.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_MixedAudioStream_h
#define hifi_MixedAudioStream_h

#include "InboundAudioStream.h"
#include "PacketHeaders.h"

class MixedAudioStream : public InboundAudioStream {
public:
    MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);

    float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }

protected:
    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
};

#endif // hifi_MixedAudioStream_h
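
A minimal sketch of the intended consumption pattern, assuming InboundAudioStream exposes parseData(const QByteArray&) as the packet entry point (that signature is an assumption here, not shown in this hunk):

// hypothetical sketch of feeding a mixed-audio packet into the stream
#include <QtCore/QByteArray>

#include "MixedAudioStream.h"

void handleMixedAudioPacket(MixedAudioStream& stream, const QByteArray& packet) {
    stream.parseData(packet);                              // assumed base-class entry point
    float loudness = stream.getNextOutputFrameLoudness();  // 0 - 1 loudness of the next frame
    (void)loudness;  // e.g. drive an avatar mouth or a VU meter
}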
@ -1,208 +0,0 @@
//
//  PositionalAudioRingBuffer.cpp
//  libraries/audio/src
//
//  Created by Stephen Birarda on 6/5/13.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <cstring>

#include <glm/detail/func_common.hpp>
#include <QtCore/QDataStream>

#include <Node.h>
#include <PacketHeaders.h>
#include <UUID.h>

#include "PositionalAudioRingBuffer.h"
#include "SharedUtil.h"

PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo, bool dynamicJitterBuffers) :
    AudioRingBuffer(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
                    false, AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY),
    _type(type),
    _position(0.0f, 0.0f, 0.0f),
    _orientation(0.0f, 0.0f, 0.0f, 0.0f),
    _willBeAddedToMix(false),
    _shouldLoopbackForNode(false),
    _shouldOutputStarveDebug(true),
    _isStereo(isStereo),
    _nextOutputTrailingLoudness(0.0f),
    _listenerUnattenuatedZone(NULL),
    _lastFrameReceivedTime(0),
    _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS),
    _interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS),
    _framesAvailableStats(FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES, FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS),
    _desiredJitterBufferFrames(1),
    _dynamicJitterBuffers(dynamicJitterBuffers),
    _consecutiveNotMixedCount(0),
    _starveCount(0),
    _silentFramesDropped(0)
{
}

int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalByteArray) {
    QDataStream packetStream(positionalByteArray);

    packetStream.readRawData(reinterpret_cast<char*>(&_position), sizeof(_position));
    packetStream.readRawData(reinterpret_cast<char*>(&_orientation), sizeof(_orientation));

    // if this node sent us a NaN for first float in orientation then don't consider this good audio and bail
    if (glm::isnan(_orientation.x)) {
        reset();
        return 0;
    }

    return packetStream.device()->pos();
}

void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() {
    // ForBoundarySamples means that we expect the number of samples not to roll off the end of the ring buffer
    float nextLoudness = 0;

    if (samplesAvailable() >= _numFrameSamples) {
        for (int i = 0; i < _numFrameSamples; ++i) {
            nextLoudness += fabsf(_nextOutput[i]);
        }
        nextLoudness /= _numFrameSamples;
        nextLoudness /= MAX_SAMPLE_VALUE;
    }

    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
    const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
    const float LOUDNESS_EPSILON = 0.000001f;

    if (nextLoudness >= _nextOutputTrailingLoudness) {
        _nextOutputTrailingLoudness = nextLoudness;
    } else {
        _nextOutputTrailingLoudness = (_nextOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * nextLoudness);

        if (_nextOutputTrailingLoudness < LOUDNESS_EPSILON) {
            _nextOutputTrailingLoudness = 0;
        }
    }
}

bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
    int desiredJitterBufferSamples = _desiredJitterBufferFrames * _numFrameSamples;

    if (!isNotStarvedOrHasMinimumSamples(_numFrameSamples + desiredJitterBufferSamples)) {
        // if the buffer was starved, allow it to accrue at least the desired number of
        // jitter buffer frames before we start taking frames from it for mixing

        if (_shouldOutputStarveDebug) {
            _shouldOutputStarveDebug = false;
        }

        _consecutiveNotMixedCount++;
        return false;
    } else if (samplesAvailable() < _numFrameSamples) {
        // if the buffer doesn't have a full frame of samples to take for mixing, it is starved
        _isStarved = true;
        _starveCount++;

        _framesAvailableStats.reset();

        // reset our _shouldOutputStarveDebug to true so the next starve is printed
        _shouldOutputStarveDebug = true;

        _consecutiveNotMixedCount = 1;
        return false;
    }

    // good buffer, add this to the mix

    // if we just finished refilling after a starve, we have a new jitter buffer length.
    // reset the frames available stats.

    _isStarved = false;

    _framesAvailableStats.update(framesAvailable());

    // since we've read data from ring buffer at least once - we've started
    _hasStarted = true;

    return true;
}

int PositionalAudioRingBuffer::getCalculatedDesiredJitterBufferFrames() const {
    const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;

    int calculatedDesiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
    if (calculatedDesiredJitterBufferFrames < 1) {
        calculatedDesiredJitterBufferFrames = 1;
    }
    return calculatedDesiredJitterBufferFrames;
}

void PositionalAudioRingBuffer::frameReceivedUpdateTimingStats() {
    // update the two time gap stats we're keeping
    quint64 now = usecTimestampNow();
    if (_lastFrameReceivedTime != 0) {
        quint64 gap = now - _lastFrameReceivedTime;
        _interframeTimeGapStatsForJitterCalc.update(gap);
        _interframeTimeGapStatsForStatsPacket.update(gap);
    }
    _lastFrameReceivedTime = now;

    // recalculate the _desiredJitterBufferFrames if _interframeTimeGapStatsForJitterCalc has updated stats for us
    if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) {
        if (!_dynamicJitterBuffers) {
            _desiredJitterBufferFrames = 1; // HACK to see if this fixes the audio silence
        } else {
            const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;

            _desiredJitterBufferFrames = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
            if (_desiredJitterBufferFrames < 1) {
                _desiredJitterBufferFrames = 1;
            }
            const int maxDesired = _frameCapacity - 1;
            if (_desiredJitterBufferFrames > maxDesired) {
                _desiredJitterBufferFrames = maxDesired;
            }
        }
        _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag();
    }
}

void PositionalAudioRingBuffer::addDroppableSilentSamples(int numSilentSamples) {

    // This adds some number of frames to the desired jitter buffer frames target we use.
    // The larger this value is, the less aggressive we are about reducing the jitter buffer length.
    // Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames long,
    // which could lead immediately to a starve.
    const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;

    // calculate how many silent frames we should drop. We only drop silent frames if
    // the running avg num frames available has stabilized and it's more than
    // our desired number of frames by the margin defined above.
    int numSilentFramesToDrop = 0;
    if (_framesAvailableStats.getNewStatsAvailableFlag() && _framesAvailableStats.isWindowFilled()
        && numSilentSamples >= _numFrameSamples) {
        _framesAvailableStats.clearNewStatsAvailableFlag();
        int averageJitterBufferFrames = (int)_framesAvailableStats.getWindowAverage();
        int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;

        if (averageJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
            // our avg jitter buffer size exceeds its desired value, so ignore some silent
            // frames to get that size as close to desired as possible
            int numSilentFramesToDropDesired = averageJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
            int numSilentFramesReceived = numSilentSamples / _numFrameSamples;
            numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);

            // since we now have a new jitter buffer length, reset the frames available stats.
            _framesAvailableStats.reset();

            _silentFramesDropped += numSilentFramesToDrop;
        }
    }

    addSilentFrame(numSilentSamples - numSilentFramesToDrop * _numFrameSamples);
}
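
The dynamic jitter-buffer sizing above reduces to one ceiling division clamped to at least one frame: frames = max(1, ceil(windowMaxGap / usecsPerFrame)). A worked example with assumed numbers (a ~21.3 ms frame duration; the codebase's real constants may differ):

// illustrative arithmetic for the jitter calculation above
#include <cmath>

int desiredJitterFrames(float windowMaxGapUsecs, float usecsPerFrame) {
    int frames = (int)ceilf(windowMaxGapUsecs / usecsPerFrame);
    return frames < 1 ? 1 : frames;  // never go below a single frame
}
// e.g. desiredJitterFrames(50000.0f, 21333.0f) == ceil(2.34...) == 3 frames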
@ -1,117 +0,0 @@
//
//  PositionalAudioRingBuffer.h
//  libraries/audio/src
//
//  Created by Stephen Birarda on 6/5/13.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_PositionalAudioRingBuffer_h
#define hifi_PositionalAudioRingBuffer_h

#include <glm/gtx/quaternion.hpp>

#include <AABox.h>

#include "AudioRingBuffer.h"
#include "MovingMinMaxAvg.h"

// the time gaps stats for _desiredJitterBufferFrames calculation
// will recalculate the max for the past 5000 samples every 500 samples
const int TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES = 500;
const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10;

// the time gap stats for constructing AudioStreamStats will
// recalculate min/max/avg every ~1 second for the past ~30 seconds of time gap data
const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30;

// the stats for calculating the average frames available will recalculate every ~1 second
// and will include data for the past ~10 seconds
const int FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
const int FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS = 10;

const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;

class PositionalAudioRingBuffer : public AudioRingBuffer {
public:
    enum Type {
        Microphone,
        Injector
    };

    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);

    virtual int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) = 0;

    int parsePositionalData(const QByteArray& positionalByteArray);
    int parseListenModeData(const QByteArray& listenModeByteArray);

    void updateNextOutputTrailingLoudness();
    float getNextOutputTrailingLoudness() const { return _nextOutputTrailingLoudness; }

    bool shouldBeAddedToMix();

    bool willBeAddedToMix() const { return _willBeAddedToMix; }
    void setWillBeAddedToMix(bool willBeAddedToMix) { _willBeAddedToMix = willBeAddedToMix; }

    bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }

    bool isStereo() const { return _isStereo; }

    PositionalAudioRingBuffer::Type getType() const { return _type; }
    const glm::vec3& getPosition() const { return _position; }
    const glm::quat& getOrientation() const { return _orientation; }

    AABox* getListenerUnattenuatedZone() const { return _listenerUnattenuatedZone; }
    void setListenerUnattenuatedZone(AABox* listenerUnattenuatedZone) { _listenerUnattenuatedZone = listenerUnattenuatedZone; }

    int getSamplesPerFrame() const { return _isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }

    const MovingMinMaxAvg<quint64>& getInterframeTimeGapStatsForStatsPacket() const { return _interframeTimeGapStatsForStatsPacket; }

    int getCalculatedDesiredJitterBufferFrames() const; /// returns what we would calculate our desired as if asked
    int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; }
    double getFramesAvailableAverage() const { return _framesAvailableStats.getWindowAverage(); }

    int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; }
    int getStarveCount() const { return _starveCount; }
    int getSilentFramesDropped() const { return _silentFramesDropped; }

protected:
    // disallow copying of PositionalAudioRingBuffer objects
    PositionalAudioRingBuffer(const PositionalAudioRingBuffer&);
    PositionalAudioRingBuffer& operator= (const PositionalAudioRingBuffer&);

    void frameReceivedUpdateTimingStats();
    void addDroppableSilentSamples(int numSilentSamples);

    PositionalAudioRingBuffer::Type _type;
    glm::vec3 _position;
    glm::quat _orientation;
    bool _willBeAddedToMix;
    bool _shouldLoopbackForNode;
    bool _shouldOutputStarveDebug;
    bool _isStereo;

    float _nextOutputTrailingLoudness;
    AABox* _listenerUnattenuatedZone;

    quint64 _lastFrameReceivedTime;
    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForJitterCalc;
    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForStatsPacket;
    MovingMinMaxAvg<int> _framesAvailableStats;

    int _desiredJitterBufferFrames;
    bool _dynamicJitterBuffers;

    // extra stats
    int _consecutiveNotMixedCount;
    int _starveCount;
    int _silentFramesDropped;
};

#endif // hifi_PositionalAudioRingBuffer_h
77  libraries/audio/src/PositionalAudioStream.cpp  Normal file
@ -0,0 +1,77 @@
//
//  PositionalAudioStream.cpp
//  libraries/audio/src
//
//  Created by Stephen Birarda on 6/5/13.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "PositionalAudioStream.h"
#include "SharedUtil.h"

#include <cstring>

#include <glm/detail/func_common.hpp>
#include <QtCore/QDataStream>

#include <Node.h>
#include <PacketHeaders.h>
#include <UUID.h>

PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers,
        int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
    InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
        AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired),
    _type(type),
    _position(0.0f, 0.0f, 0.0f),
    _orientation(0.0f, 0.0f, 0.0f, 0.0f),
    _shouldLoopbackForNode(false),
    _isStereo(isStereo),
    _lastPopOutputTrailingLoudness(0.0f),
    _listenerUnattenuatedZone(NULL)
{
}

void PositionalAudioStream::updateLastPopOutputTrailingLoudness() {
    float lastPopLoudness = _ringBuffer.getFrameLoudness(_lastPopOutput);

    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
    const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
    const float LOUDNESS_EPSILON = 0.000001f;

    if (lastPopLoudness >= _lastPopOutputTrailingLoudness) {
        _lastPopOutputTrailingLoudness = lastPopLoudness;
    } else {
        _lastPopOutputTrailingLoudness = (_lastPopOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * lastPopLoudness);

        if (_lastPopOutputTrailingLoudness < LOUDNESS_EPSILON) {
            _lastPopOutputTrailingLoudness = 0;
        }
    }
}

int PositionalAudioStream::parsePositionalData(const QByteArray& positionalByteArray) {
    QDataStream packetStream(positionalByteArray);

    packetStream.readRawData(reinterpret_cast<char*>(&_position), sizeof(_position));
    packetStream.readRawData(reinterpret_cast<char*>(&_orientation), sizeof(_orientation));

    // if this node sent us a NaN for first float in orientation then don't consider this good audio and bail
    if (glm::isnan(_orientation.x)) {
        // NOTE: why would we reset the ring buffer here?
        _ringBuffer.reset();
        return 0;
    }

    return packetStream.device()->pos();
}

AudioStreamStats PositionalAudioStream::getAudioStreamStats() const {
    AudioStreamStats streamStats = InboundAudioStream::getAudioStreamStats();
    streamStats._streamType = _type;
    return streamStats;
}
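
updateLastPopOutputTrailingLoudness() is an asymmetric exponential moving average: it attacks instantly on louder frames, decays with a 1/100 blend otherwise, and snaps to zero below an epsilon. The same smoothing restated as a standalone function, for clarity:

// standalone restatement of the smoothing above (pure function, no member state)
float smoothTrailingLoudness(float trailing, float current) {
    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
    const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
    const float LOUDNESS_EPSILON = 0.000001f;

    if (current >= trailing) {
        return current;  // rise immediately with louder frames
    }
    trailing = trailing * PREVIOUS_FRAMES_RATIO + current * CURRENT_FRAME_RATIO;
    return (trailing < LOUDNESS_EPSILON) ? 0.0f : trailing;  // decay slowly, snap to silence
}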
66  libraries/audio/src/PositionalAudioStream.h  Normal file
@ -0,0 +1,66 @@
//
//  PositionalAudioStream.h
//  libraries/audio/src
//
//  Created by Stephen Birarda on 6/5/13.
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_PositionalAudioStream_h
#define hifi_PositionalAudioStream_h

#include <glm/gtx/quaternion.hpp>
#include <AABox.h>

#include "InboundAudioStream.h"

const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;

class PositionalAudioStream : public InboundAudioStream {
    Q_OBJECT
public:
    enum Type {
        Microphone,
        Injector
    };

    PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
        int maxFramesOverDesired);

    virtual AudioStreamStats getAudioStreamStats() const;

    void updateLastPopOutputTrailingLoudness();
    float getLastPopOutputTrailingLoudness() const { return _lastPopOutputTrailingLoudness; }

    bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
    bool isStereo() const { return _isStereo; }
    PositionalAudioStream::Type getType() const { return _type; }
    const glm::vec3& getPosition() const { return _position; }
    const glm::quat& getOrientation() const { return _orientation; }
    AABox* getListenerUnattenuatedZone() const { return _listenerUnattenuatedZone; }

    void setListenerUnattenuatedZone(AABox* listenerUnattenuatedZone) { _listenerUnattenuatedZone = listenerUnattenuatedZone; }

protected:
    // disallow copying of PositionalAudioStream objects
    PositionalAudioStream(const PositionalAudioStream&);
    PositionalAudioStream& operator= (const PositionalAudioStream&);

    int parsePositionalData(const QByteArray& positionalByteArray);

protected:
    Type _type;
    glm::vec3 _position;
    glm::quat _orientation;

    bool _shouldLoopbackForNode;
    bool _isStereo;

    float _lastPopOutputTrailingLoudness;
    AABox* _listenerUnattenuatedZone;
};

#endif // hifi_PositionalAudioStream_h
@ -120,6 +120,8 @@ class AvatarData : public QObject {
     Q_PROPERTY(glm::quat orientation READ getOrientation WRITE setOrientation)
     Q_PROPERTY(glm::quat headOrientation READ getHeadOrientation WRITE setHeadOrientation)
     Q_PROPERTY(float headPitch READ getHeadPitch WRITE setHeadPitch)
+    Q_PROPERTY(float headYaw READ getHeadYaw WRITE setHeadYaw)
+    Q_PROPERTY(float headRoll READ getHeadRoll WRITE setHeadRoll)

     Q_PROPERTY(float audioLoudness READ getAudioLoudness WRITE setAudioLoudness)
     Q_PROPERTY(float audioAverageLoudness READ getAudioAverageLoudness WRITE setAudioAverageLoudness)

@ -171,7 +173,13 @@ public:
     // access to Head().set/getMousePitch (degrees)
     float getHeadPitch() const { return _headData->getBasePitch(); }
-    void setHeadPitch(float value) { _headData->setBasePitch(value); };
+    void setHeadPitch(float value) { _headData->setBasePitch(value); }
+
+    float getHeadYaw() const { return _headData->getBaseYaw(); }
+    void setHeadYaw(float value) { _headData->setBaseYaw(value); }
+
+    float getHeadRoll() const { return _headData->getBaseRoll(); }
+    void setHeadRoll(float value) { _headData->setBaseRoll(value); }

     // access to Head().set/getAverageLoudness
     float getAudioLoudness() const { return _headData->getAudioLoudness(); }
@ -36,9 +36,11 @@ SequenceNumberStats::ArrivalInfo SequenceNumberStats::sequenceNumberReceived(qui

     // if the sender node has changed, reset all stats
     if (senderUUID != _lastSenderUUID) {
-        qDebug() << "sequence number stats was reset due to new sender node";
-        qDebug() << "previous:" << _lastSenderUUID << "current:" << senderUUID;
-        reset();
+        if (_stats._numReceived > 0) {
+            qDebug() << "sequence number stats was reset due to new sender node";
+            qDebug() << "previous:" << _lastSenderUUID << "current:" << senderUUID;
+            reset();
+        }
         _lastSenderUUID = senderUUID;
     }
@ -66,6 +66,20 @@ glm::quat Quat::mix(const glm::quat& q1, const glm::quat& q2, float alpha) {
     return safeMix(q1, q2, alpha);
 }

+/// Spherical Linear Interpolation
+glm::quat Quat::slerp(const glm::quat& q1, const glm::quat& q2, float alpha) {
+    return glm::slerp(q1, q2, alpha);
+}
+
+// Spherical Quadratic Interpolation
+glm::quat Quat::squad(const glm::quat& q1, const glm::quat& q2, const glm::quat& s1, const glm::quat& s2, float h) {
+    return glm::squad(q1, q2, s1, s2, h);
+}
+
+float Quat::dot(const glm::quat& q1, const glm::quat& q2) {
+    return glm::dot(q1, q2);
+}
+
 void Quat::print(const QString& lable, const glm::quat& q) {
     qDebug() << qPrintable(lable) << q.x << "," << q.y << "," << q.z << "," << q.w;
 }
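
These additions are thin pass-throughs to GLM. A quick illustration of what slerp yields (GLM header locations vary by version, so the includes below are an assumption):

#include <glm/gtc/constants.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/quaternion.hpp>

// halfway between identity and a 90-degree yaw is a 45-degree yaw
glm::quat halfYaw() {
    glm::quat identity(1.0f, 0.0f, 0.0f, 0.0f);  // (w, x, y, z)
    glm::quat yaw90 = glm::angleAxis(glm::half_pi<float>(), glm::vec3(0.0f, 1.0f, 0.0f));
    return glm::slerp(identity, yaw90, 0.5f);
}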
@ -36,6 +36,9 @@ public slots:
     glm::vec3 safeEulerAngles(const glm::quat& orientation); // degrees
     glm::quat angleAxis(float angle, const glm::vec3& v); // degrees
     glm::quat mix(const glm::quat& q1, const glm::quat& q2, float alpha);
+    glm::quat slerp(const glm::quat& q1, const glm::quat& q2, float alpha);
+    glm::quat squad(const glm::quat& q1, const glm::quat& q2, const glm::quat& s1, const glm::quat& s2, float h);
+    float dot(const glm::quat& q1, const glm::quat& q2);
     void print(const QString& lable, const glm::quat& q);
 };
@ -29,7 +29,7 @@ void StDev::addValue(float v) {
     if (sampleCount == MAX_STDEV_SAMPLES) sampleCount = 0;
 }

-float StDev::getAverage() {
+float StDev::getAverage() const {
     float average = 0;
     for (int i = 0; i < sampleCount; i++) {
         average += data[i];

@ -49,7 +49,7 @@ float StDev::getMax() {
     else return 0;
 }*/

-float StDev::getStDev() {
+float StDev::getStDev() const {
     float average = getAverage();
     float stdev = 0;
     for (int i = 0; i < sampleCount; i++) {

@ -17,8 +17,8 @@ class StDev {
     StDev();
     void reset();
     void addValue(float v);
-    float getAverage();
+    float getAverage() const;
-    float getStDev();
+    float getStDev() const;
     int getSamples() const { return sampleCount; }
 private:
     float * data;
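
The added const qualifiers let these accessors be called through a const reference or pointer. A minimal sketch of what now compiles (the report helper is hypothetical):

#include <QtCore/QDebug>

#include "StDev.h"

void report(const StDev& stats) {
    // both getters are now callable on a const StDev
    qDebug() << "avg:" << stats.getAverage() << "stdev:" << stats.getStDev()
             << "samples:" << stats.getSamples();
}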
80  libraries/shared/src/TimeWeightedAvg.h  Normal file
@ -0,0 +1,80 @@
//
//  TimeWeightedAvg.h
//  libraries/shared/src
//
//  Created by Yixin Wang on 7/29/2014
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_TimeWeightedAvg_h
#define hifi_TimeWeightedAvg_h

#include "SharedUtil.h"

template <typename T>
class TimeWeightedAvg {
public:
    TimeWeightedAvg()
        : _firstSampleTime(0),
        _lastSample(),
        _lastSampleTime(0),
        _weightedSampleSumExcludingLastSample(0.0)
    {}

    void reset() {
        _firstSampleTime = 0;
        _lastSampleTime = 0;
        _weightedSampleSumExcludingLastSample = 0.0;
    }

    void updateWithSample(T sample) {
        quint64 now = usecTimestampNow();

        if (_firstSampleTime == 0) {
            _firstSampleTime = now;
        } else {
            _weightedSampleSumExcludingLastSample = getWeightedSampleSum(now);
        }

        _lastSample = sample;
        _lastSampleTime = now;
    }

    double getAverage() const {
        if (_firstSampleTime == 0) {
            return 0.0;
        }
        quint64 now = usecTimestampNow();
        quint64 elapsed = now - _firstSampleTime;
        return getWeightedSampleSum(now) / (double)elapsed;
    }

    quint64 getElapsedUsecs() const {
        if (_firstSampleTime == 0) {
            return 0;
        }
        return usecTimestampNow() - _firstSampleTime;
    }

private:
    // if no sample has been collected yet, the return value is undefined
    double getWeightedSampleSum(quint64 now) const {
        quint64 lastSampleLasted = now - _lastSampleTime;
        return _weightedSampleSumExcludingLastSample + (double)_lastSample * (double)lastSampleLasted;
    }

private:
    quint64 _firstSampleTime;

    T _lastSample;
    quint64 _lastSampleTime;

    double _weightedSampleSumExcludingLastSample;
};

#endif // hifi_TimeWeightedAvg_h
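
TimeWeightedAvg weights each sample by how long it remained current, so irregularly timed samples do not skew the mean: a value held for 900 ms counts nine times as much as one held for 100 ms. A usage sketch (the frames-available scenario is illustrative):

#include "TimeWeightedAvg.h"

// illustrative: track average ring-buffer occupancy weighted by wall-clock time
TimeWeightedAvg<int> framesAvailableAvg;

void onFramesAvailableChanged(int framesAvailable) {
    framesAvailableAvg.updateWithSample(framesAvailable);  // timestamped internally
}

double averagedOccupancy() {
    return framesAvailableAvg.getAverage();  // time-weighted mean since the first sample
}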