merge HEAD with PR-3281
commit e941e58626
34 changed files with 1894 additions and 637 deletions
@@ -33,12 +33,17 @@
 
 #include "Agent.h"
 
+static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 10;
+
 Agent::Agent(const QByteArray& packet) :
     ThreadedAssignment(packet),
     _voxelEditSender(),
     _particleEditSender(),
     _modelEditSender(),
-    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false, 1, 0, false),
+    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES,
+        InboundAudioStream::Settings(0, false, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, false,
+            DEFAULT_WINDOW_STARVE_THRESHOLD, DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES,
+            DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION, false)),
     _avatarHashMap()
 {
     // be the parent of the script engine so it gets moved when we do
@@ -148,7 +153,7 @@ void Agent::readPendingDatagrams() {
             _voxelViewer.processDatagram(mutablePacket, sourceNode);
         }
 
-    } else if (datagramPacketType == PacketTypeMixedAudio) {
+    } else if (datagramPacketType == PacketTypeMixedAudio || datagramPacketType == PacketTypeSilentAudioFrame) {
 
         _receivedAudioStream.parseData(receivedPacket);
 
@@ -69,9 +69,7 @@ void attachNewNodeDataToNode(Node *newNode) {
     }
 }
 
-bool AudioMixer::_useDynamicJitterBuffers = false;
-int AudioMixer::_staticDesiredJitterBufferFrames = 0;
-int AudioMixer::_maxFramesOverDesired = 0;
+InboundAudioStream::Settings AudioMixer::_streamSettings;
 
 bool AudioMixer::_printStreamStats = false;
 
@@ -87,7 +85,12 @@ AudioMixer::AudioMixer(const QByteArray& packet) :
     _sumMixes(0),
     _sourceUnattenuatedZone(NULL),
     _listenerUnattenuatedZone(NULL),
-    _lastSendAudioStreamStatsTime(usecTimestampNow())
+    _lastPerSecondCallbackTime(usecTimestampNow()),
+    _sendAudioStreamStats(false),
+    _datagramsReadPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
+    _timeSpentPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
+    _timeSpentPerHashMatchCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
+    _readPendingCallsPerSecondStats(1, READ_DATAGRAMS_STATS_WINDOW_SECONDS)
 {
 
 }
@@ -101,8 +104,37 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
 const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
 const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
 
-void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
                                                           AvatarAudioStream* listeningNodeStream) {
+    // If repetition with fade is enabled:
+    // If streamToAdd could not provide a frame (it was starved), then we'll mix its previously-mixed frame.
+    // This is preferable to not mixing it at all since that's equivalent to inserting silence.
+    // Basically, we'll repeat that last frame until it has a frame to mix. Depending on how many times
+    // we've repeated that frame in a row, we'll gradually fade that repeated frame into silence.
+    // This improves the perceived quality of the audio slightly.
+
+    float repeatedFrameFadeFactor = 1.0f;
+
+    if (!streamToAdd->lastPopSucceeded()) {
+        if (_streamSettings._repetitionWithFade && !streamToAdd->getLastPopOutput().isNull()) {
+            // repetition with fade is enabled, and we do have a valid previous frame to repeat.
+            // calculate its fade factor, which depends on how many times it's already been repeated.
+            repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd->getConsecutiveNotMixedCount() - 1);
+            if (repeatedFrameFadeFactor == 0.0f) {
+                return 0;
+            }
+        } else {
+            return 0;
+        }
+    }
+
+    // at this point, we know streamToAdd's last pop output is valid
+
+    // if the frame we're about to mix is silent, bail
+    if (streamToAdd->getLastPopOutputLoudness() == 0.0f) {
+        return 0;
+    }
+
     float bearingRelativeAngleToSource = 0.0f;
     float attenuationCoefficient = 1.0f;
     int numSamplesDelay = 0;
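The hunk above calls calculateRepeatedFrameFadeFactor(), whose body is not part of this diff. As a rough sketch of the kind of curve the comments describe — a repeated frame fading to silence over successive repeats — assuming a simple linear ramp and an invented MAX_REPEATS cutoff (both are illustrative assumptions, not code from this PR):

    // Hypothetical sketch only; the real calculateRepeatedFrameFadeFactor() is defined elsewhere.
    // A linear fade: the first repeat plays at (nearly) full volume, and the gain reaches
    // zero after MAX_REPEATS consecutive repeats of the same frame.
    float calculateRepeatedFrameFadeFactor(int indexOfRepeat) {
        const int MAX_REPEATS = 10; // assumed cutoff
        float fade = 1.0f - (float)indexOfRepeat / (float)MAX_REPEATS;
        return fade > 0.0f ? fade : 0.0f; // 0.0f makes the caller above stop mixing the stream
    }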
@@ -124,7 +156,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
     if (streamToAdd->getLastPopOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
         // according to mixer performance we have decided this does not get to be mixed in
         // bail out
-        return;
+        return 0;
     }
 
     ++_sumMixes;
@@ -224,12 +256,13 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
         int delayedChannelIndex = 0;
 
         const int SINGLE_STEREO_OFFSET = 2;
+        float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
 
         for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
 
             // setup the int16_t variables for the two sample sets
-            correctStreamSample[0] = streamPopOutput[s / 2] * attenuationCoefficient;
-            correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationCoefficient;
+            correctStreamSample[0] = streamPopOutput[s / 2] * attenuationAndFade;
+            correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationAndFade;
 
             delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
 
@@ -245,7 +278,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
         if (numSamplesDelay > 0) {
             // if there was a sample delay for this stream, we need to pull samples prior to the popped output
             // to stick at the beginning
-            float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
+            float attenuationAndWeakChannelRatioAndFade = attenuationCoefficient * weakChannelAmplitudeRatio * repeatedFrameFadeFactor;
             AudioRingBuffer::ConstIterator delayStreamPopOutput = streamPopOutput - numSamplesDelay;
 
             // TODO: delayStreamPopOutput may be inside the last frame written if the ringbuffer is completely full
@@ -253,7 +286,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
 
             for (int i = 0; i < numSamplesDelay; i++) {
                 int parentIndex = i * 2;
-                _clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatio;
+                _clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatioAndFade;
                 ++delayStreamPopOutput;
             }
         }
@@ -264,8 +297,10 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
             attenuationCoefficient = 1.0f;
         }
 
+        float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
+
         for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) {
-            _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationCoefficient),
+            _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationAndFade),
                                            MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
         }
     }
@@ -309,36 +344,36 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
             penumbraFilter.render(_clientSamples, _clientSamples, NETWORK_BUFFER_LENGTH_SAMPLES_STEREO / 2);
         }
     }
 
+    return 1;
 }
 
-void AudioMixer::prepareMixForListeningNode(Node* node) {
+int AudioMixer::prepareMixForListeningNode(Node* node) {
     AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
 
     // zero out the client mix for this node
     memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
 
     // loop through all other nodes that have sufficient audio to mix
+    int streamsMixed = 0;
     foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
         if (otherNode->getLinkedData()) {
 
             AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
 
             // enumerate the ARBs attached to the otherNode and add all that should be added to mix
 
             const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
             QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
-            for (i = otherNodeAudioStreams.begin(); i != otherNodeAudioStreams.constEnd(); i++) {
+            for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
                 PositionalAudioStream* otherNodeStream = i.value();
 
-                if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode())
-                    && otherNodeStream->lastPopSucceeded()
-                    && otherNodeStream->getLastPopOutputTrailingLoudness() > 0.0f) {
-
-                    addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
+                if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
+                    streamsMixed += addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
                 }
             }
         }
     }
+    return streamsMixed;
 }
 
 void AudioMixer::readPendingDatagram(const QByteArray& receivedPacket, const HifiSockAddr& senderSockAddr) {
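Read together with the later hunks in this commit, the new int return values form a small contract between the mixing layers (a summary of code shown in this diff, not new behavior):

    // addStreamToMixForListeningNodeWithStream(): returns 1 if the stream was mixed into
    //     _clientSamples, 0 if it was skipped (starved with nothing to repeat, fully faded,
    //     silent, or below the audibility threshold).
    // prepareMixForListeningNode(): sums those results and returns how many streams were
    //     mixed for this listener.
    // run(): uses streamsMixed > 0 to choose between sending PacketTypeMixedAudio and the
    //     much smaller PacketTypeSilentAudioFrame.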
@@ -374,7 +409,7 @@ void AudioMixer::readPendingDatagram(const QByteArray& receivedPacket, const Hif
 void AudioMixer::sendStatsPacket() {
     static QJsonObject statsObject;
 
-    statsObject["useDynamicJitterBuffers"] = _useDynamicJitterBuffers;
+    statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
     statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
     statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;
 
@@ -400,9 +435,42 @@ void AudioMixer::sendStatsPacket() {
     int sizeOfStats = 0;
     int TOO_BIG_FOR_MTU = 1200; // some extra space for JSONification
 
+    QString property = "readPendingDatagram_calls_stats";
+    QString value = getReadPendingDatagramsCallsPerSecondsStatsString();
+    statsObject2[qPrintable(property)] = value;
+    somethingToSend = true;
+    sizeOfStats += property.size() + value.size();
+
+    property = "readPendingDatagram_packets_per_call_stats";
+    value = getReadPendingDatagramsPacketsPerCallStatsString();
+    statsObject2[qPrintable(property)] = value;
+    somethingToSend = true;
+    sizeOfStats += property.size() + value.size();
+
+    property = "readPendingDatagram_packets_time_per_call_stats";
+    value = getReadPendingDatagramsTimeStatsString();
+    statsObject2[qPrintable(property)] = value;
+    somethingToSend = true;
+    sizeOfStats += property.size() + value.size();
+
+    property = "readPendingDatagram_hashmatch_time_per_call_stats";
+    value = getReadPendingDatagramsHashMatchTimeStatsString();
+    statsObject2[qPrintable(property)] = value;
+    somethingToSend = true;
+    sizeOfStats += property.size() + value.size();
+
     NodeList* nodeList = NodeList::getInstance();
     int clientNumber = 0;
     foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
 
+        // if we're too large, send the packet
+        if (sizeOfStats > TOO_BIG_FOR_MTU) {
+            nodeList->sendStatsToDomainServer(statsObject2);
+            sizeOfStats = 0;
+            statsObject2 = QJsonObject(); // clear it
+            somethingToSend = false;
+        }
+
         clientNumber++;
         AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
         if (clientData) {
@@ -412,14 +480,6 @@ void AudioMixer::sendStatsPacket() {
             somethingToSend = true;
             sizeOfStats += property.size() + value.size();
         }
-
-        // if we're too large, send the packet
-        if (sizeOfStats > TOO_BIG_FOR_MTU) {
-            nodeList->sendStatsToDomainServer(statsObject2);
-            sizeOfStats = 0;
-            statsObject2 = QJsonObject(); // clear it
-            somethingToSend = false;
-        }
     }
 
     if (somethingToSend) {
@@ -493,44 +553,78 @@ void AudioMixer::run() {
 
     // check the payload to see if we have asked for dynamicJitterBuffer support
     const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "A-dynamic-jitter-buffer";
-    bool shouldUseDynamicJitterBuffers = audioGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
-    if (shouldUseDynamicJitterBuffers) {
+    _streamSettings._dynamicJitterBuffers = audioGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
+    if (_streamSettings._dynamicJitterBuffers) {
         qDebug() << "Enable dynamic jitter buffers.";
-        _useDynamicJitterBuffers = true;
     } else {
         qDebug() << "Dynamic jitter buffers disabled.";
-        _useDynamicJitterBuffers = false;
     }
 
     bool ok;
-    const QString FILTER_KEY = "E-enable-filter";
-    _enableFilter = audioGroupObject[FILTER_KEY].toBool();
-    if (_enableFilter) {
-        qDebug() << "Filter enabled";
-    }
-
-    const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "B-desired-jitter-buffer-frames";
-    _staticDesiredJitterBufferFrames = audioGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
+    const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "B-static-desired-jitter-buffer-frames";
+    _streamSettings._staticDesiredJitterBufferFrames = audioGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
     if (!ok) {
-        _staticDesiredJitterBufferFrames = DEFAULT_DESIRED_JITTER_BUFFER_FRAMES;
+        _streamSettings._staticDesiredJitterBufferFrames = DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES;
     }
-    qDebug() << "Static desired jitter buffer frames:" << _staticDesiredJitterBufferFrames;
+    qDebug() << "Static desired jitter buffer frames:" << _streamSettings._staticDesiredJitterBufferFrames;
 
     const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "C-max-frames-over-desired";
-    _maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
+    _streamSettings._maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
     if (!ok) {
-        _maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
+        _streamSettings._maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
     }
-    qDebug() << "Max frames over desired:" << _maxFramesOverDesired;
+    qDebug() << "Max frames over desired:" << _streamSettings._maxFramesOverDesired;
 
-    const QString PRINT_STREAM_STATS_JSON_KEY = "H-print-stream-stats";
+    const QString USE_STDEV_FOR_DESIRED_CALC_JSON_KEY = "D-use-stdev-for-desired-calc";
+    _streamSettings._useStDevForJitterCalc = audioGroupObject[USE_STDEV_FOR_DESIRED_CALC_JSON_KEY].toBool();
+    if (_streamSettings._useStDevForJitterCalc) {
+        qDebug() << "Using Philip's stdev method for jitter calc if dynamic jitter buffers enabled";
+    } else {
+        qDebug() << "Using Fred's max-gap method for jitter calc if dynamic jitter buffers enabled";
+    }
+
+    const QString WINDOW_STARVE_THRESHOLD_JSON_KEY = "E-window-starve-threshold";
+    _streamSettings._windowStarveThreshold = audioGroupObject[WINDOW_STARVE_THRESHOLD_JSON_KEY].toString().toInt(&ok);
+    if (!ok) {
+        _streamSettings._windowStarveThreshold = DEFAULT_WINDOW_STARVE_THRESHOLD;
+    }
+    qDebug() << "Window A starve threshold:" << _streamSettings._windowStarveThreshold;
+
+    const QString WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY = "F-window-seconds-for-desired-calc-on-too-many-starves";
+    _streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = audioGroupObject[WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY].toString().toInt(&ok);
+    if (!ok) {
+        _streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES;
+    }
+    qDebug() << "Window A length:" << _streamSettings._windowSecondsForDesiredCalcOnTooManyStarves << "seconds";
+
+    const QString WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY = "G-window-seconds-for-desired-reduction";
+    _streamSettings._windowSecondsForDesiredReduction = audioGroupObject[WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY].toString().toInt(&ok);
+    if (!ok) {
+        _streamSettings._windowSecondsForDesiredReduction = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION;
+    }
+    qDebug() << "Window B length:" << _streamSettings._windowSecondsForDesiredReduction << "seconds";
+
+    const QString REPETITION_WITH_FADE_JSON_KEY = "H-repetition-with-fade";
+    _streamSettings._repetitionWithFade = audioGroupObject[REPETITION_WITH_FADE_JSON_KEY].toBool();
+    if (_streamSettings._repetitionWithFade) {
+        qDebug() << "Repetition with fade enabled";
+    } else {
+        qDebug() << "Repetition with fade disabled";
+    }
+
+    const QString PRINT_STREAM_STATS_JSON_KEY = "I-print-stream-stats";
     _printStreamStats = audioGroupObject[PRINT_STREAM_STATS_JSON_KEY].toBool();
     if (_printStreamStats) {
         qDebug() << "Stream stats will be printed to stdout";
     }
 
-    const QString UNATTENUATED_ZONE_KEY = "D-unattenuated-zone";
+    const QString FILTER_KEY = "J-enable-filter";
+    _enableFilter = audioGroupObject[FILTER_KEY].toBool();
+    if (_enableFilter) {
+        qDebug() << "Filter enabled";
+    }
+
+    const QString UNATTENUATED_ZONE_KEY = "Z-unattenuated-zone";
 
     QString unattenuatedZoneString = audioGroupObject[UNATTENUATED_ZONE_KEY].toString();
     if (!unattenuatedZoneString.isEmpty()) {
@@ -559,8 +653,7 @@ void AudioMixer::run() {
     QElapsedTimer timer;
     timer.start();
 
-    char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO + sizeof(quint16)
-                                     + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];
+    char clientMixBuffer[MAX_PACKET_SIZE];
 
     int usecToSleep = BUFFER_SEND_INTERVAL_USECS;
 
@@ -620,14 +713,12 @@ void AudioMixer::run() {
             ++framesSinceCutoffEvent;
         }
 
-        bool sendAudioStreamStats = false;
         quint64 now = usecTimestampNow();
-        if (now - _lastSendAudioStreamStatsTime > TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS) {
-            _lastSendAudioStreamStatsTime = now;
-            sendAudioStreamStats = true;
+        if (now - _lastPerSecondCallbackTime > USECS_PER_SECOND) {
+            perSecondActions();
+            _lastPerSecondCallbackTime = now;
         }
 
-        bool streamStatsPrinted = false;
         foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
             if (node->getLinkedData()) {
                 AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
@@ -640,11 +731,13 @@ void AudioMixer::run() {
                 if (node->getType() == NodeType::Agent && node->getActiveSocket()
                     && nodeData->getAvatarAudioStream()) {
 
-                    prepareMixForListeningNode(node.data());
+                    int streamsMixed = prepareMixForListeningNode(node.data());
 
+                    char* dataAt;
+                    if (streamsMixed > 0) {
                     // pack header
                     int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
-                    char* dataAt = clientMixBuffer + numBytesPacketHeader;
+                    dataAt = clientMixBuffer + numBytesPacketHeader;
 
                     // pack sequence number
                     quint16 sequence = nodeData->getOutgoingSequenceNumber();
@@ -654,29 +747,36 @@ void AudioMixer::run() {
                     // pack mixed audio samples
                     memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
                     dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
+                    } else {
+                        // pack header
+                        int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeSilentAudioFrame);
+                        dataAt = clientMixBuffer + numBytesPacketHeader;
+
+                        // pack sequence number
+                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
+                        memcpy(dataAt, &sequence, sizeof(quint16));
+                        dataAt += sizeof(quint16);
+
+                        // pack number of silent audio samples
+                        quint16 numSilentSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
+                        memcpy(dataAt, &numSilentSamples, sizeof(quint16));
+                        dataAt += sizeof(quint16);
+                    }
 
                     // send mixed audio packet
                     nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node);
                     nodeData->incrementOutgoingMixedAudioSequenceNumber();
 
                     // send an audio stream stats packet if it's time
-                    if (sendAudioStreamStats) {
+                    if (_sendAudioStreamStats) {
                         nodeData->sendAudioStreamStatsPackets(node);
-
-                        if (_printStreamStats) {
-                            printf("\nStats for agent %s:\n", node->getUUID().toString().toLatin1().data());
-                            nodeData->printUpstreamDownstreamStats();
-                            streamStatsPrinted = true;
-                        }
+                        _sendAudioStreamStats = false;
                     }
 
                     ++_sumListeners;
                 }
             }
         }
-        if (streamStatsPrinted) {
-            printf("\n----------------------------------------------------------------\n");
-        }
 
         ++_numStatFrames;
 
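The two datagram shapes the mixer can now send differ only after the sequence number; summarizing the layouts packed above:

    // PacketTypeMixedAudio:
    //   [ packet header ][ quint16 sequence ][ NETWORK_BUFFER_LENGTH_BYTES_STEREO of int16_t samples ]
    //
    // PacketTypeSilentAudioFrame:
    //   [ packet header ][ quint16 sequence ][ quint16 numSilentSamples ]
    //
    // so a fully silent mix costs two bytes of payload instead of a whole stereo frame.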
@@ -692,6 +792,90 @@ void AudioMixer::run() {
             usleep(usecToSleep);
         }
     }
-
-    delete[] clientMixBuffer;
+}
+
+void AudioMixer::perSecondActions() {
+    _sendAudioStreamStats = true;
+
+    int callsLastSecond = _datagramsReadPerCallStats.getCurrentIntervalSamples();
+    _readPendingCallsPerSecondStats.update(callsLastSecond);
+
+    if (_printStreamStats) {
+
+        printf("\n================================================================================\n\n");
+
+        printf(" readPendingDatagram() calls per second | avg: %.2f, avg_30s: %.2f, last_second: %d\n",
+            _readPendingCallsPerSecondStats.getAverage(),
+            _readPendingCallsPerSecondStats.getWindowAverage(),
+            callsLastSecond);
+
+        printf(" Datagrams read per call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
+            _datagramsReadPerCallStats.getAverage(),
+            _datagramsReadPerCallStats.getWindowAverage(),
+            _datagramsReadPerCallStats.getCurrentIntervalAverage());
+
+        printf(" Usecs spent per readPendingDatagram() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
+            _timeSpentPerCallStats.getAverage(),
+            _timeSpentPerCallStats.getWindowAverage(),
+            _timeSpentPerCallStats.getCurrentIntervalAverage());
+
+        printf(" Usecs spent per packetVersionAndHashMatch() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
+            _timeSpentPerHashMatchCallStats.getAverage(),
+            _timeSpentPerHashMatchCallStats.getWindowAverage(),
+            _timeSpentPerHashMatchCallStats.getCurrentIntervalAverage());
+
+        double WINDOW_LENGTH_USECS = READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND;
+
+        printf(" %% time spent in readPendingDatagram() calls | avg_30s: %.6f%%, last_second: %.6f%%\n",
+            _timeSpentPerCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
+            _timeSpentPerCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);
+
+        printf("%% time spent in packetVersionAndHashMatch() calls: | avg_30s: %.6f%%, last_second: %.6f%%\n",
+            _timeSpentPerHashMatchCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
+            _timeSpentPerHashMatchCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);
+
+        foreach(const SharedNodePointer& node, NodeList::getInstance()->getNodeHash()) {
+            if (node->getLinkedData()) {
+                AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
+
+                if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
+                    printf("\nStats for agent %s --------------------------------\n",
+                        node->getUUID().toString().toLatin1().data());
+                    nodeData->printUpstreamDownstreamStats();
+                }
+            }
+        }
+    }
+
+    _datagramsReadPerCallStats.currentIntervalComplete();
+    _timeSpentPerCallStats.currentIntervalComplete();
+    _timeSpentPerHashMatchCallStats.currentIntervalComplete();
+}
+
+QString AudioMixer::getReadPendingDatagramsCallsPerSecondsStatsString() const {
+    QString result = "calls_per_sec_avg_30s: " + QString::number(_readPendingCallsPerSecondStats.getWindowAverage(), 'f', 2)
+        + " calls_last_sec: " + QString::number(_readPendingCallsPerSecondStats.getLastCompleteIntervalStats().getSum() + 0.5, 'f', 0);
+    return result;
+}
+
+QString AudioMixer::getReadPendingDatagramsPacketsPerCallStatsString() const {
+    QString result = "pkts_per_call_avg_30s: " + QString::number(_datagramsReadPerCallStats.getWindowAverage(), 'f', 2)
+        + " pkts_per_call_avg_1s: " + QString::number(_datagramsReadPerCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2);
+    return result;
+}
+
+QString AudioMixer::getReadPendingDatagramsTimeStatsString() const {
+    QString result = "usecs_per_call_avg_30s: " + QString::number(_timeSpentPerCallStats.getWindowAverage(), 'f', 2)
+        + " usecs_per_call_avg_1s: " + QString::number(_timeSpentPerCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2)
+        + " prct_time_in_call_30s: " + QString::number(_timeSpentPerCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0, 'f', 6) + "%"
+        + " prct_time_in_call_1s: " + QString::number(_timeSpentPerCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0, 'f', 6) + "%";
+    return result;
+}
+
+QString AudioMixer::getReadPendingDatagramsHashMatchTimeStatsString() const {
+    QString result = "usecs_per_hashmatch_avg_30s: " + QString::number(_timeSpentPerHashMatchCallStats.getWindowAverage(), 'f', 2)
+        + " usecs_per_hashmatch_avg_1s: " + QString::number(_timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2)
+        + " prct_time_in_hashmatch_30s: " + QString::number(_timeSpentPerHashMatchCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0, 'f', 6) + "%"
+        + " prct_time_in_hashmatch_1s: " + QString::number(_timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0, 'f', 6) + "%";
+    return result;
 }
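The per-second bookkeeping above leans on MovingMinMaxAvg's interval API; a condensed view of the call pattern, using only the member names and methods that appear in this diff (the constructor-argument semantics are inferred from the surrounding comments, not documented here):

    MovingMinMaxAvg<int> _datagramsReadPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS);

    // on every readPendingDatagrams() call:
    _datagramsReadPerCallStats.update(numDatagramsRead);

    // once per second, from perSecondActions():
    double allTime = _datagramsReadPerCallStats.getAverage();       // since startup
    double last30s = _datagramsReadPerCallStats.getWindowAverage(); // over the 30 one-second intervals
    _datagramsReadPerCallStats.currentIntervalComplete();           // roll the window forward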
@@ -21,7 +21,8 @@ class AvatarAudioStream;
 
 const int SAMPLE_PHASE_DELAY_AT_90 = 20;
 
-const quint64 TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;
+const int READ_DATAGRAMS_STATS_WINDOW_SECONDS = 30;
+
 
 /// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
 class AudioMixer : public ThreadedAssignment {
@@ -38,22 +39,38 @@ public slots:
 
     void sendStatsPacket();
 
+#if 0
+<<<<<<< HEAD
     static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; }
     static int getStaticDesiredJitterBufferFrames() { return _staticDesiredJitterBufferFrames; }
     static int getMaxFramesOverDesired() { return _maxFramesOverDesired; }
 
+=======
+#endif
+    static const InboundAudioStream::Settings& getStreamSettings() { return _streamSettings; }
+#if 0
+>>>>>>> 7a8a8684d6f8c9956ca7e4f81eb8064b8dece58e
+#endif
+
 private:
     /// adds one stream to the mix for a listening node
-    void addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
+    int addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
                                                   AvatarAudioStream* listeningNodeStream);
 
     /// prepares and sends a mix to one Node
-    void prepareMixForListeningNode(Node* node);
+    int prepareMixForListeningNode(Node* node);
 
     // client samples capacity is larger than what will be sent to optimize mixing
     // we are MMX adding 4 samples at a time so we need client samples to have an extra 4
     int16_t _clientSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)];
 
+    void perSecondActions();
+
+    QString getReadPendingDatagramsCallsPerSecondsStatsString() const;
+    QString getReadPendingDatagramsPacketsPerCallStatsString() const;
+    QString getReadPendingDatagramsTimeStatsString() const;
+    QString getReadPendingDatagramsHashMatchTimeStatsString() const;
+
     float _trailingSleepRatio;
     float _minAudibilityThreshold;
     float _performanceThrottlingRatio;
@@ -63,14 +80,23 @@ private:
     AABox* _sourceUnattenuatedZone;
     AABox* _listenerUnattenuatedZone;
 
-    static bool _useDynamicJitterBuffers;
-    static int _staticDesiredJitterBufferFrames;
-    static int _maxFramesOverDesired;
+    static InboundAudioStream::Settings _streamSettings;
 
     static bool _printStreamStats;
 
     static bool _enableFilter;
 
     quint64 _lastSendAudioStreamStatsTime;
+    quint64 _lastPerSecondCallbackTime;
+
+    bool _sendAudioStreamStats;
+
+    // stats
+    MovingMinMaxAvg<int> _datagramsReadPerCallStats; // update with # of datagrams read for each readPendingDatagrams call
+    MovingMinMaxAvg<quint64> _timeSpentPerCallStats; // update with usecs spent inside each readPendingDatagrams call
+    MovingMinMaxAvg<quint64> _timeSpentPerHashMatchCallStats; // update with usecs spent inside each packetVersionAndHashMatch call
+
+    MovingMinMaxAvg<int> _readPendingCallsPerSecondStats; // update with # of readPendingDatagrams calls in the last second
 };
 
 #endif // hifi_AudioMixer_h
@@ -74,9 +74,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
             quint8 channelFlag = *(reinterpret_cast<const quint8*>(channelFlagAt));
             bool isStereo = channelFlag == 1;
 
-            _audioStreams.insert(nullUUID,
-                matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers(),
-                    AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
+            _audioStreams.insert(nullUUID, matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getStreamSettings()));
         } else {
             matchingStream = _audioStreams.value(nullUUID);
         }
@@ -88,9 +86,8 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(bytesBeforeStreamIdentifier, NUM_BYTES_RFC4122_UUID));
 
         if (!_audioStreams.contains(streamIdentifier)) {
-            _audioStreams.insert(streamIdentifier,
-                matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(),
-                    AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
+            // we don't have this injected stream yet, so add it
+            _audioStreams.insert(streamIdentifier, matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getStreamSettings()));
         } else {
             matchingStream = _audioStreams.value(streamIdentifier);
         }
@@ -105,12 +102,10 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, A
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
         PositionalAudioStream* stream = i.value();
-        if (stream->popFrames(1, true) > 0) {
-            // this is a ring buffer that is ready to go
 
-            // calculate the trailing avg loudness for the next frame
-            // that would be mixed in
-            stream->updateLastPopOutputTrailingLoudness();
+        if (stream->popFrames(1, true) > 0) {
+            stream->updateLastPopOutputLoudnessAndTrailingLoudness();
+        }
 
         if (checkSourceZone && checkSourceZone->contains(stream->getPosition())) {
             stream->setListenerUnattenuatedZone(listenerZone);
@@ -119,7 +114,6 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, A
             }
         }
     }
-}
 
 void AudioMixerClientData::removeDeadInjectedStreams() {
 
@@ -185,7 +179,9 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
 
     // pack the calculated number of stream stats
     for (int i = 0; i < numStreamStatsToPack; i++) {
-        AudioStreamStats streamStats = audioStreamsIterator.value()->updateSeqHistoryAndGetAudioStreamStats();
+        PositionalAudioStream* stream = audioStreamsIterator.value();
+        stream->perSecondCallbackForUpdatingStats();
+        AudioStreamStats streamStats = stream->getAudioStreamStats();
         memcpy(dataAt, &streamStats, sizeof(AudioStreamStats));
         dataAt += sizeof(AudioStreamStats);
 
@@ -13,8 +13,8 @@
 
 #include "AvatarAudioStream.h"
 
-AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
-    PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired)
+AvatarAudioStream::AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings) :
+    PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, settings)
 {
 }
 
@@ -38,26 +38,9 @@ int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray&
     // read the positional data
     readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));
 
-    if (type == PacketTypeSilentAudioFrame) {
-        int16_t numSilentSamples;
-        memcpy(&numSilentSamples, packetAfterSeqNum.data() + readBytes, sizeof(int16_t));
-        readBytes += sizeof(int16_t);
-
-        numAudioSamples = numSilentSamples;
-    } else {
-        int numAudioBytes = packetAfterSeqNum.size() - readBytes;
-        numAudioSamples = numAudioBytes / sizeof(int16_t);
-    }
-    return readBytes;
-}
-
-int AvatarAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
-    int readBytes = 0;
-    if (type == PacketTypeSilentAudioFrame) {
-        writeDroppableSilentSamples(numAudioSamples);
-    } else {
-        // there is audio data to read
-        readBytes += _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
-    }
+    // calculate how many samples are in this packet
+    int numAudioBytes = packetAfterSeqNum.size() - readBytes;
+    numAudioSamples = numAudioBytes / sizeof(int16_t);
+
     return readBytes;
 }
@@ -18,7 +18,7 @@
 
 class AvatarAudioStream : public PositionalAudioStream {
 public:
-    AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
+    AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings);
 
 private:
     // disallow copying of AvatarAudioStream objects
@@ -26,7 +26,6 @@ private:
     AvatarAudioStream& operator= (const AvatarAudioStream&);
 
     int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
-    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
 };
 
 #endif // hifi_AvatarAudioStream_h
@@ -9,8 +9,8 @@
       "help": "Dynamically buffer client audio based on perceived jitter in packet receipt timing",
       "default": false
     },
-    "B-desired-jitter-buffer-frames": {
-      "label": "Desired Jitter Buffer Frames",
+    "B-static-desired-jitter-buffer-frames": {
+      "label": "Static Desired Jitter Buffer Frames",
       "help": "If dynamic jitter buffers is disabled, this determines the target number of frames maintained by the AudioMixer's jitter buffers",
       "placeholder": "1",
       "default": "1"
@@ -21,19 +21,49 @@
       "placeholder": "10",
       "default": "10"
     },
-    "H-print-stream-stats": {
+    "D-use-stdev-for-desired-calc": {
+      "type": "checkbox",
+      "label": "Use Stdev for Desired Jitter Frames Calc:",
+      "help": "If checked, Philip's method (stdev of timegaps) is used to calculate desired jitter frames. Otherwise, Fred's method (max timegap) is used",
+      "default": false
+    },
+    "E-window-starve-threshold": {
+      "label": "Window Starve Threshold",
+      "help": "If this many starves occur in an N-second window (N is the number in the next field), then the desired jitter frames will be re-evaluated using Window A.",
+      "placeholder": "3",
+      "default": "3"
+    },
+    "F-window-seconds-for-desired-calc-on-too-many-starves": {
+      "label": "Timegaps Window (A) Seconds:",
+      "help": "Window A contains a history of timegaps. Its max timegap is used to re-evaluate the desired jitter frames when too many starves occur within it.",
+      "placeholder": "50",
+      "default": "50"
+    },
+    "G-window-seconds-for-desired-reduction": {
+      "label": "Timegaps Window (B) Seconds:",
+      "help": "Window B contains a history of timegaps. Its max timegap is used as a ceiling for the desired jitter frames value.",
+      "placeholder": "10",
+      "default": "10"
+    },
+    "H-repetition-with-fade": {
+      "type": "checkbox",
+      "label": "Repetition with Fade:",
+      "help": "If enabled, dropped frames and mixing during starves will repeat the last frame, eventually fading to silence",
+      "default": false
+    },
+    "I-print-stream-stats": {
       "type": "checkbox",
       "label": "Print Stream Stats:",
       "help": "If enabled, audio upstream and downstream stats of each agent will be printed each second to stdout",
       "default": false
     },
-    "D-unattenuated-zone": {
+    "Z-unattenuated-zone": {
       "label": "Unattenuated Zone",
       "help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)",
       "placeholder": "no zone",
       "default": ""
     },
-    "E-enable-filter": {
+    "J-enable-filter": {
       "type": "checkbox",
       "label": "Enable Positional Filter",
       "help": "If enabled, positional audio stream uses lowpass filter",
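The help strings above describe the dynamic jitter-buffer heuristic only in words. As a rough sketch of the arithmetic they imply — converting the worst inter-packet time gap seen in a window into a frame count — where the frame duration and the round-up are assumptions, not spelled out in this settings file:

    // Illustrative only; not code from this PR. A network audio frame covers a fixed
    // span of time, so the worst gap observed in Window A (or B) can be converted into
    // the number of buffered frames needed to ride that gap out without starving.
    int desiredJitterBufferFrames(quint64 maxTimeGapUsecs, quint64 usecsPerFrame) {
        // e.g. a worst gap of 45ms with 10ms frames suggests keeping ~5 frames buffered
        return (int)((maxTimeGapUsecs + usecsPerFrame - 1) / usecsPerFrame); // round up
    }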
@@ -1772,14 +1772,7 @@ void Application::init() {
     _lastTimeUpdated.start();
 
     Menu::getInstance()->loadSettings();
-    if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
-        _audio.setDynamicJitterBuffers(false);
-        _audio.setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
-    } else {
-        _audio.setDynamicJitterBuffers(true);
-    }
-
-    _audio.setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired());
+    _audio.setReceivedAudioStreamSettings(Menu::getInstance()->getReceivedAudioStreamSettings());
 
     qDebug("Loaded settings");
 
@@ -72,7 +72,7 @@ Audio::Audio(QObject* parent) :
     _proceduralAudioOutput(NULL),
     _proceduralOutputDevice(NULL),
     _inputRingBuffer(0),
-    _receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, true, 0, 0, true),
+    _receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, InboundAudioStream::Settings()),
     _isStereoInput(false),
     _averagedLatency(0.0),
     _lastInputLoudness(0),
@@ -105,6 +105,7 @@ Audio::Audio(QObject* parent) :
     _scopeInput(0),
     _scopeOutputLeft(0),
     _scopeOutputRight(0),
+    _scopeLastFrame(),
     _statsEnabled(false),
     _statsShowInjectedStreams(false),
     _outgoingAvatarAudioSequenceNumber(0),
@@ -113,14 +114,17 @@ Audio::Audio(QObject* parent) :
     _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _lastSentAudioPacket(0),
     _packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS),
-    _audioOutputIODevice(*this)
+    _audioOutputIODevice(_receivedAudioStream)
 {
     // clear the array of locally injected samples
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
     // Create the noise sample array
     _noiseSampleFrames = new float[NUMBER_OF_NOISE_SAMPLE_FRAMES];
 
-    connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedAudioStreamSamples, Qt::DirectConnection);
+    connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedSilence, this, &Audio::addStereoSilenceToScope, Qt::DirectConnection);
+    connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedLastFrameRepeatedWithFade, this, &Audio::addLastFrameRepeatedWithFadeToScope, Qt::DirectConnection);
+    connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedStereoSamples, this, &Audio::addStereoSamplesToScope, Qt::DirectConnection);
+    connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedSamples, Qt::DirectConnection);
 }
 
 void Audio::init(QGLWidget *parent) {
@@ -460,8 +464,11 @@ void Audio::handleAudioInput() {
     static char audioDataPacket[MAX_PACKET_SIZE];
 
     static int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMicrophoneAudioNoEcho);
-    static int leadingBytes = numBytesPacketHeader + sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
+
+    // NOTE: we assume PacketTypeMicrophoneAudioWithEcho has same size headers as
+    // PacketTypeMicrophoneAudioNoEcho. If not, then networkAudioSamples will be pointing to the wrong place for writing
+    // audio samples with echo.
+    static int leadingBytes = numBytesPacketHeader + sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
     static int16_t* networkAudioSamples = (int16_t*)(audioDataPacket + leadingBytes);
 
     float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
@@ -668,9 +675,7 @@ void Audio::handleAudioInput() {
         if (!_isStereoInput && _scopeEnabled && !_scopeEnabledPause) {
             unsigned int numMonoAudioChannels = 1;
             unsigned int monoAudioChannel = 0;
-            addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, monoAudioChannel, numMonoAudioChannels);
-            _scopeInputOffset += NETWORK_SAMPLES_PER_FRAME;
-            _scopeInputOffset %= _samplesPerScope;
+            _scopeInputOffset = addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, NETWORK_SAMPLES_PER_FRAME, monoAudioChannel, numMonoAudioChannels);
         }
 
         NodeList* nodeList = NodeList::getInstance();
@ -687,18 +692,10 @@ void Audio::handleAudioInput() {
|
||||||
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientationInWorldFrame();
|
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientationInWorldFrame();
|
||||||
quint8 isStereo = _isStereoInput ? 1 : 0;
|
quint8 isStereo = _isStereoInput ? 1 : 0;
|
||||||
|
|
||||||
int numAudioBytes = 0;
|
|
||||||
|
|
||||||
PacketType packetType;
|
PacketType packetType;
|
||||||
if (_lastInputLoudness == 0) {
|
if (_lastInputLoudness == 0) {
|
||||||
packetType = PacketTypeSilentAudioFrame;
|
packetType = PacketTypeSilentAudioFrame;
|
||||||
|
|
||||||
// we need to indicate how many silent samples this is to the audio mixer
|
|
||||||
networkAudioSamples[0] = numNetworkSamples;
|
|
||||||
numAudioBytes = sizeof(int16_t);
|
|
||||||
} else {
|
} else {
|
||||||
numAudioBytes = numNetworkBytes;
|
|
||||||
|
|
||||||
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
|
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
|
||||||
packetType = PacketTypeMicrophoneAudioWithEcho;
|
packetType = PacketTypeMicrophoneAudioWithEcho;
|
||||||
} else {
|
} else {
|

@@ -712,6 +709,12 @@ void Audio::handleAudioInput() {
     memcpy(currentPacketPtr, &_outgoingAvatarAudioSequenceNumber, sizeof(quint16));
     currentPacketPtr += sizeof(quint16);

+    if (packetType == PacketTypeSilentAudioFrame) {
+        // pack num silent samples
+        quint16 numSilentSamples = numNetworkSamples;
+        memcpy(currentPacketPtr, &numSilentSamples, sizeof(quint16));
+        currentPacketPtr += sizeof(quint16);
+    } else {
     // set the mono/stereo byte
     *currentPacketPtr++ = isStereo;

@@ -723,6 +726,10 @@ void Audio::handleAudioInput() {
     memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
     currentPacketPtr += sizeof(headOrientation);

+        // audio samples have already been packed (written to networkAudioSamples)
+        currentPacketPtr += numNetworkBytes;
+    }
+
     // first time this is 0
     if (_lastSentAudioPacket == 0) {
         _lastSentAudioPacket = usecTimestampNow();
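
Reviewer note: after this hunk the two on-wire layouts diverge right after the sequence number, and the mixer learns the silent-frame length from an explicit quint16 instead of a sample smuggled into the audio payload. A hypothetical illustration of the resulting sizes, assuming the elided context still packs position before orientation as the leadingBytes arithmetic implies:

    // illustration only -- field order inferred from the hunks above
    // silent frame:   [header][quint16 seq][quint16 numSilentSamples]
    // audible frame:  [header][quint16 seq][quint8 isStereo][glm::vec3 position]
    //                 [glm::quat orientation][int16_t samples ...]
    int silentPacketBytes  = numBytesPacketHeader + sizeof(quint16) + sizeof(quint16);
    int audiblePacketBytes = numBytesPacketHeader + sizeof(quint16) + sizeof(quint8)
                           + sizeof(glm::vec3) + sizeof(glm::quat) + numNetworkBytes;

Sending currentPacketPtr - audioDataPacket bytes (see the next hunk) keeps the write length correct for both layouts without tracking a separate numAudioBytes counter.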

@@ -734,17 +741,57 @@ void Audio::handleAudioInput() {
         _lastSentAudioPacket = now;
     }

-    nodeList->writeDatagram(audioDataPacket, numAudioBytes + leadingBytes, audioMixer);
+    int packetBytes = currentPacketPtr - audioDataPacket;
+    nodeList->writeDatagram(audioDataPacket, packetBytes, audioMixer);
     _outgoingAvatarAudioSequenceNumber++;

     Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
-        .updateValue(numAudioBytes + leadingBytes);
+        .updateValue(packetBytes);
     }
     delete[] inputAudioSamples;
     }
 }

-void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
+const int STEREO_FACTOR = 2;
+
+void Audio::addStereoSilenceToScope(int silentSamplesPerChannel) {
+    if (!_scopeEnabled || _scopeEnabledPause) {
+        return;
+    }
+    addSilenceToScope(_scopeOutputLeft, _scopeOutputOffset, silentSamplesPerChannel);
+    _scopeOutputOffset = addSilenceToScope(_scopeOutputRight, _scopeOutputOffset, silentSamplesPerChannel);
+}
+
+void Audio::addStereoSamplesToScope(const QByteArray& samples) {
+    if (!_scopeEnabled || _scopeEnabledPause) {
+        return;
+    }
+    const int16_t* samplesData = reinterpret_cast<const int16_t*>(samples.data());
+    int samplesPerChannel = samples.size() / sizeof(int16_t) / STEREO_FACTOR;
+
+    addBufferToScope(_scopeOutputLeft, _scopeOutputOffset, samplesData, samplesPerChannel, 0, STEREO_FACTOR);
+    _scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset, samplesData, samplesPerChannel, 1, STEREO_FACTOR);
+
+    _scopeLastFrame = samples.right(NETWORK_BUFFER_LENGTH_BYTES_STEREO);
+}
+
+void Audio::addLastFrameRepeatedWithFadeToScope(int samplesPerChannel) {
+    const int16_t* lastFrameData = reinterpret_cast<const int16_t*>(_scopeLastFrame.data());
+
+    int samplesRemaining = samplesPerChannel;
+    int indexOfRepeat = 0;
+    do {
+        int samplesToWriteThisIteration = std::min(samplesRemaining, (int)NETWORK_SAMPLES_PER_FRAME);
+        float fade = calculateRepeatedFrameFadeFactor(indexOfRepeat);
+        addBufferToScope(_scopeOutputLeft, _scopeOutputOffset, lastFrameData, samplesToWriteThisIteration, 0, STEREO_FACTOR, fade);
+        _scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset, lastFrameData, samplesToWriteThisIteration, 1, STEREO_FACTOR, fade);
+
+        samplesRemaining -= samplesToWriteThisIteration;
+        indexOfRepeat++;
+    } while (samplesRemaining > 0);
+}
+
+void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
     const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t);
     const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
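
Reviewer note: the repeat loop above leans on calculateRepeatedFrameFadeFactor(indexOfRepeat) to quiet each successive replay of the last good frame; the real curve is defined elsewhere in this PR and is not shown in this excerpt. A hypothetical stand-in that captures the intent (full volume on the first repeat, decaying toward silence) would be:

    #include <algorithm>

    // hypothetical stand-in, NOT the shipped calculateRepeatedFrameFadeFactor()
    float repeatedFrameFadeFactorSketch(int indexOfRepeat) {
        const int ASSUMED_REPEATS_BEFORE_SILENCE = 5;   // assumed cutoff, for illustration
        float fade = 1.0f - (float)indexOfRepeat / ASSUMED_REPEATS_BEFORE_SILENCE;
        return std::max(fade, 0.0f);                    // clamp into [0, 1]
    }

Any factor in (0, 1] keeps the scope's log-scaled plot well defined, since addBufferToScope folds the fade in as logf(fade) (see the fadeOffset hunk below); a factor of exactly 0 is presumably clamped or skipped upstream.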

@@ -789,30 +836,6 @@ void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
         numNetworkOutputSamples,
         numDeviceOutputSamples,
         _desiredOutputFormat, _outputFormat);
-
-    if (_scopeEnabled && !_scopeEnabledPause) {
-        unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
-        const int16_t* samples = receivedSamples;
-        for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
-
-            unsigned int audioChannel = 0;
-            addBufferToScope(
-                _scopeOutputLeft,
-                _scopeOutputOffset,
-                samples, audioChannel, numAudioChannels);
-
-            audioChannel = 1;
-            addBufferToScope(
-                _scopeOutputRight,
-                _scopeOutputOffset,
-                samples, audioChannel, numAudioChannels);
-
-            _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
-            _scopeOutputOffset %= _samplesPerScope;
-            samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
-        }
-    }
 }

 void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {

@@ -825,9 +848,6 @@ void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
     Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
 }
-
-
-
 void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {

     int numBytesPacketHeader = numBytesForPacketHeader(packet);

@@ -860,12 +880,13 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {

 void Audio::sendDownstreamAudioStatsPacket() {
-    // since this function is called every second, we'll sample some of our stats here
+    // since this function is called every second, we'll sample for some of our stats here
     _inputRingBufferMsecsAvailableStats.update(getInputRingBufferMsecsAvailable());
     _audioOutputMsecsUnplayedStats.update(getAudioOutputMsecsUnplayed());

+    // also, call _receivedAudioStream's per-second callback
+    _receivedAudioStream.perSecondCallbackForUpdatingStats();
+
     char packet[MAX_PACKET_SIZE];

     // pack header

@@ -883,7 +904,7 @@ void Audio::sendDownstreamAudioStatsPacket() {
     dataAt += sizeof(quint16);

     // pack downstream audio stream stats
-    AudioStreamStats stats = _receivedAudioStream.updateSeqHistoryAndGetAudioStreamStats();
+    AudioStreamStats stats = _receivedAudioStream.getAudioStreamStats();
     memcpy(dataAt, &stats, sizeof(AudioStreamStats));
     dataAt += sizeof(AudioStreamStats);

@@ -916,7 +937,7 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {
     unsigned int delayCount = delay * _desiredOutputFormat.channelCount();
     unsigned int silentCount = (remaining < delayCount) ? remaining : delayCount;
     if (silentCount) {
-        _spatialAudioRingBuffer.addSilentFrame(silentCount);
+        _spatialAudioRingBuffer.addSilentSamples(silentCount);
     }

     // Recalculate the number of remaining samples

@@ -1220,8 +1241,6 @@ void Audio::selectAudioFilterSmiley() {
 void Audio::toggleScope() {
     _scopeEnabled = !_scopeEnabled;
     if (_scopeEnabled) {
-        _scopeInputOffset = 0;
-        _scopeOutputOffset = 0;
         allocateScope();
     } else {
         freeScope();

@@ -1259,6 +1278,8 @@ void Audio::selectAudioScopeFiftyFrames() {
 }

 void Audio::allocateScope() {
+    _scopeInputOffset = 0;
+    _scopeOutputOffset = 0;
     int num = _samplesPerScope * sizeof(int16_t);
     _scopeInput = new QByteArray(num, 0);
     _scopeOutputLeft = new QByteArray(num, 0);

@@ -1290,12 +1311,15 @@ void Audio::freeScope() {
     }
 }

-void Audio::addBufferToScope(
-    QByteArray* byteArray, unsigned int frameOffset, const int16_t* source, unsigned int sourceChannel, unsigned int sourceNumberOfChannels) {
+int Audio::addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamplesPerChannel,
+    unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade) {

     // Constant multiplier to map sample value to vertical size of scope
     float multiplier = (float)MULTIPLIER_SCOPE_HEIGHT / logf(2.0f);

+    // Used to scale each sample. (logf(sample) + fadeOffset) is same as logf(sample * fade).
+    float fadeOffset = logf(fade);
+
     // Temporary variable receives sample value
     float sample;
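
Reviewer note: the fadeOffset trick is just the product rule for logarithms, log(s * f) = log(s) + log(f), so only one logf per sample is needed inside the plotting loop instead of an extra multiply-then-log. A quick standalone sanity check, with example values:

    #include <cassert>
    #include <cmath>

    int main() {
        float sample = 1000.0f, fade = 0.5f;              // example values only
        float lhs = std::log(sample) + std::log(fade);    // per-sample add of precomputed offset
        float rhs = std::log(sample * fade);              // what is effectively being plotted
        assert(std::fabs(lhs - rhs) < 1e-5f);
        return 0;
    }

Note that logf(fade) is negative for fade < 1, which is exactly what pulls the plotted magnitude toward the axis.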

@@ -1306,17 +1330,41 @@ void Audio::addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamplesPerChannel,
     // Short int pointer to mapped samples in byte array
     int16_t* destination = (int16_t*) byteArray->data();

-    for (unsigned int i = 0; i < NETWORK_SAMPLES_PER_FRAME; i++) {
+    for (int i = 0; i < sourceSamplesPerChannel; i++) {
         sample = (float)source[i * sourceNumberOfChannels + sourceChannel];
-        if (sample > 0) {
-            value = (int16_t)(multiplier * logf(sample));
-        } else if (sample < 0) {
-            value = (int16_t)(-multiplier * logf(-sample));
+        if (sample > 1) {
+            value = (int16_t)(multiplier * (logf(sample) + fadeOffset));
+        } else if (sample < -1) {
+            value = (int16_t)(-multiplier * (logf(-sample) + fadeOffset));
         } else {
             value = 0;
         }
-        destination[i + frameOffset] = value;
+        destination[frameOffset] = value;
+        frameOffset = (frameOffset == _samplesPerScope - 1) ? 0 : frameOffset + 1;
     }
+
+    return frameOffset;
+}
+
+int Audio::addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples) {
+
+    QMutexLocker lock(&_guard);
+
+    // Short int pointer to mapped samples in byte array
+    int16_t* destination = (int16_t*)byteArray->data();
+
+    if (silentSamples >= _samplesPerScope) {
+        memset(destination, 0, byteArray->size());
+        return frameOffset;
+    }
+
+    int samplesToBufferEnd = _samplesPerScope - frameOffset;
+    if (silentSamples > samplesToBufferEnd) {
+        // wrap: zero to the end of the buffer, then zero the remainder from the start
+        // (the second byte count must be parenthesized, or sizeof binds to the wrong operand)
+        memset(destination + frameOffset, 0, samplesToBufferEnd * sizeof(int16_t));
+        memset(destination, 0, (silentSamples - samplesToBufferEnd) * sizeof(int16_t));
+    } else {
+        memset(destination + frameOffset, 0, silentSamples * sizeof(int16_t));
+    }
+
+    return (frameOffset + silentSamples) % _samplesPerScope;
+}

 void Audio::renderStats(const float* color, int width, int height) {
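
Reviewer note: a worked example of the wrap path, with illustrative numbers. Take _samplesPerScope = 100, frameOffset = 90, silentSamples = 25: the first memset zeroes samples 90..99 (samplesToBufferEnd = 10), the second zeroes samples 0..14, and the new write offset is (90 + 25) % 100 = 15:

    // illustration only -- standalone restatement of the wrap arithmetic above
    int samplesPerScope = 100, frameOffset = 90, silentSamples = 25;  // example values
    int samplesToBufferEnd = samplesPerScope - frameOffset;           // 10
    int tailSamplesZeroed  = samplesToBufferEnd;                      // indices 90..99
    int headSamplesZeroed  = silentSamples - samplesToBufferEnd;      // indices 0..14
    int newOffset = (frameOffset + silentSamples) % samplesPerScope;  // 15

Without the parentheses around (silentSamples - samplesToBufferEnd), sizeof(int16_t) would multiply samplesToBufferEnd alone and the head of the buffer would be zeroed for the wrong number of bytes.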

@@ -1517,17 +1565,17 @@ void Audio::renderScope(int width, int height) {
         return;

     static const float backgroundColor[4] = { 0.4f, 0.4f, 0.4f, 0.6f };
-    static const float gridColor[4] = { 0.3f, 0.3f, 0.3f, 0.6f };
+    static const float gridColor[4] = { 0.7f, 0.7f, 0.7f, 1.0f };
     static const float inputColor[4] = { 0.3f, 1.0f, 0.3f, 1.0f };
     static const float outputLeftColor[4] = { 1.0f, 0.3f, 0.3f, 1.0f };
     static const float outputRightColor[4] = { 0.3f, 0.3f, 1.0f, 1.0f };
     static const int gridRows = 2;
     int gridCols = _framesPerScope;

-    int x = (width - SCOPE_WIDTH) / 2;
-    int y = (height - SCOPE_HEIGHT) / 2;
-    int w = SCOPE_WIDTH;
-    int h = SCOPE_HEIGHT;
+    int x = (width - (int)SCOPE_WIDTH) / 2;
+    int y = (height - (int)SCOPE_HEIGHT) / 2;
+    int w = (int)SCOPE_WIDTH;
+    int h = (int)SCOPE_HEIGHT;

     renderBackground(backgroundColor, x, y, w, h);
     renderGrid(gridColor, x, y, w, h, gridRows, gridCols);

@@ -1717,7 +1765,7 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo) {
     // setup our general output device for audio-mixer audio
     _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
     _audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFrameSize * sizeof(int16_t));
-    qDebug() << "Ring Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize;
+    qDebug() << "Output Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize;

     _audioOutputIODevice.start();
     _audioOutput->start(&_audioOutputIODevice);

@@ -1792,13 +1840,11 @@ float Audio::getInputRingBufferMsecsAvailable() const {
 }

 qint64 Audio::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
-    MixedProcessedAudioStream& receivedAUdioStream = _parent._receivedAudioStream;
-
     int samplesRequested = maxSize / sizeof(int16_t);
     int samplesPopped;
     int bytesWritten;
-    if ((samplesPopped = receivedAUdioStream.popSamples(samplesRequested, false)) > 0) {
-        AudioRingBuffer::ConstIterator lastPopOutput = receivedAUdioStream.getLastPopOutput();
+    if ((samplesPopped = _receivedAudioStream.popSamples(samplesRequested, false)) > 0) {
+        AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput();
         lastPopOutput.readSamples((int16_t*)data, samplesPopped);
         bytesWritten = samplesPopped * sizeof(int16_t);
     } else {
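
Reviewer note: this narrows the IO device's dependency from the whole Audio object to just the stream it reads (see the header change in the next hunk), which also makes the class constructible in isolation. The Qt pull model that drives it: QAudioOutput::start(QIODevice*) calls readData() whenever the hardware buffer has room, so whatever popSamples() returns is what actually reaches the speakers. A caller-side sketch, with deviceInfo/format/parent as assumed placeholder names:

    // sketch only: how the pull model is wired (mirrors switchOutputToAudioDevice above)
    Audio::AudioOutputIODevice ioDevice(receivedAudioStream);   // no Audio& needed anymore
    QAudioOutput* output = new QAudioOutput(deviceInfo, format, parent);
    ioDevice.start();              // open(QIODevice::ReadOnly)
    output->start(&ioDevice);      // Qt polls ioDevice.readData() as the hardware buffer drains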

@@ -51,14 +51,14 @@ public:

 class AudioOutputIODevice : public QIODevice {
 public:
-    AudioOutputIODevice(Audio& parent) : _parent(parent) {};
+    AudioOutputIODevice(MixedProcessedAudioStream& receivedAudioStream) : _receivedAudioStream(receivedAudioStream) {};

     void start() { open(QIODevice::ReadOnly); }
     void stop() { close(); }
     qint64 readData(char * data, qint64 maxSize);
     qint64 writeData(const char * data, qint64 maxSize) { return 0; }
 private:
-    Audio& _parent;
+    MixedProcessedAudioStream& _receivedAudioStream;
 };

@@ -74,10 +74,7 @@ public:
     virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
     virtual void startDrumSound(float volume, float frequency, float duration, float decay);

-    void setDynamicJitterBuffers(bool dynamicJitterBuffers) { _receivedAudioStream.setDynamicJitterBuffers(dynamicJitterBuffers); }
-    void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { _receivedAudioStream.setStaticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames); }
-    void setMaxFramesOverDesired(int maxFramesOverDesired) { _receivedAudioStream.setMaxFramesOverDesired(maxFramesOverDesired); }
+    void setReceivedAudioStreamSettings(const InboundAudioStream::Settings& settings) { _receivedAudioStream.setSettings(settings); }

     int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); }
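
Reviewer note: folding three independent setters into one settings object keeps Audio, Menu, and the preferences dialog synchronized through a single type. The struct itself lives in InboundAudioStream.h and is not shown in this excerpt; a plausible shape, inferred purely from the member names used in the Menu and PreferencesDialog hunks later in this diff:

    // inferred sketch -- see InboundAudioStream.h for the real definition
    class InboundAudioStream {
    public:
        class Settings {
        public:
            bool _dynamicJitterBuffers;
            int _staticDesiredJitterBufferFrames;
            int _maxFramesOverDesired;
            bool _useStDevForJitterCalc;
            int _windowStarveThreshold;
            int _windowSecondsForDesiredCalcOnTooManyStarves;
            int _windowSecondsForDesiredReduction;
            bool _repetitionWithFade;
        };
        // ...
    };

Every field above appears verbatim in loadSettings()/saveSettings() below, so the list should be complete for this PR, though ordering and access level are assumptions.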

@@ -114,7 +111,6 @@ public slots:
     void addReceivedAudioToStream(const QByteArray& audioByteArray);
     void parseAudioStreamStatsPacket(const QByteArray& packet);
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
-    void processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
     void handleAudioInput();
     void reset();
     void resetStats();

@@ -131,6 +127,10 @@ public slots:
     void selectAudioScopeFiveFrames();
     void selectAudioScopeTwentyFrames();
     void selectAudioScopeFiftyFrames();
+    void addStereoSilenceToScope(int silentSamplesPerChannel);
+    void addLastFrameRepeatedWithFadeToScope(int samplesPerChannel);
+    void addStereoSamplesToScope(const QByteArray& samples);
+    void processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
     void toggleAudioFilter();
     void selectAudioFilterFlat();
     void selectAudioFilterTrebleCut();

@@ -257,8 +257,9 @@ private:
     void reallocateScope(int frames);

     // Audio scope methods for data acquisition
-    void addBufferToScope(
-        QByteArray* byteArray, unsigned int frameOffset, const int16_t* source, unsigned int sourceChannel, unsigned int sourceNumberOfChannels);
+    int addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamples,
+        unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade = 1.0f);
+    int addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples);

     // Audio scope methods for rendering
     void renderBackground(const float* color, int x, int y, int width, int height);

@@ -290,6 +291,7 @@ private:
     QByteArray* _scopeInput;
     QByteArray* _scopeOutputLeft;
     QByteArray* _scopeOutputRight;
+    QByteArray _scopeLastFrame;
 #ifdef _WIN32
     static const unsigned int STATS_WIDTH = 1500;
 #else

@@ -48,6 +48,7 @@ void DatagramProcessor::processDatagrams() {
     // only process this packet if we have a match on the packet version
     switch (packetTypeForPacket(incomingPacket)) {
         case PacketTypeMixedAudio:
+        case PacketTypeSilentAudioFrame:
             QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToStream", Qt::QueuedConnection,
                                       Q_ARG(QByteArray, incomingPacket));
             break;

@@ -82,8 +82,7 @@ const int CONSOLE_HEIGHT = 200;

 Menu::Menu() :
     _actionHash(),
-    _audioJitterBufferFrames(0),
-    _maxFramesOverDesired(0),
+    _receivedAudioStreamSettings(),
     _bandwidthDialog(NULL),
     _fieldOfView(DEFAULT_FIELD_OF_VIEW_DEGREES),
     _realWorldFieldOfView(DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES),

@@ -680,8 +679,15 @@ void Menu::loadSettings(QSettings* settings) {
         lockedSettings = true;
     }

-    _audioJitterBufferFrames = loadSetting(settings, "audioJitterBufferFrames", 0);
-    _maxFramesOverDesired = loadSetting(settings, "maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED);
+    _receivedAudioStreamSettings._dynamicJitterBuffers = settings->value("dynamicJitterBuffers", DEFAULT_DYNAMIC_JITTER_BUFFERS).toBool();
+    _receivedAudioStreamSettings._maxFramesOverDesired = settings->value("maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED).toInt();
+    _receivedAudioStreamSettings._staticDesiredJitterBufferFrames = settings->value("staticDesiredJitterBufferFrames", DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES).toInt();
+    _receivedAudioStreamSettings._useStDevForJitterCalc = settings->value("useStDevForJitterCalc", DEFAULT_USE_STDEV_FOR_JITTER_CALC).toBool();
+    _receivedAudioStreamSettings._windowStarveThreshold = settings->value("windowStarveThreshold", DEFAULT_WINDOW_STARVE_THRESHOLD).toInt();
+    _receivedAudioStreamSettings._windowSecondsForDesiredCalcOnTooManyStarves = settings->value("windowSecondsForDesiredCalcOnTooManyStarves", DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES).toInt();
+    _receivedAudioStreamSettings._windowSecondsForDesiredReduction = settings->value("windowSecondsForDesiredReduction", DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION).toInt();
+    _receivedAudioStreamSettings._repetitionWithFade = settings->value("repetitionWithFade", DEFAULT_REPETITION_WITH_FADE).toBool();

     _fieldOfView = loadSetting(settings, "fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES);
     _realWorldFieldOfView = loadSetting(settings, "realWorldFieldOfView", DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES);
     _faceshiftEyeDeflection = loadSetting(settings, "faceshiftEyeDeflection", DEFAULT_FACESHIFT_EYE_DEFLECTION);

@@ -735,8 +741,15 @@ void Menu::saveSettings(QSettings* settings) {
         lockedSettings = true;
     }

-    settings->setValue("audioJitterBufferFrames", _audioJitterBufferFrames);
-    settings->setValue("maxFramesOverDesired", _maxFramesOverDesired);
+    settings->setValue("dynamicJitterBuffers", _receivedAudioStreamSettings._dynamicJitterBuffers);
+    settings->setValue("maxFramesOverDesired", _receivedAudioStreamSettings._maxFramesOverDesired);
+    settings->setValue("staticDesiredJitterBufferFrames", _receivedAudioStreamSettings._staticDesiredJitterBufferFrames);
+    settings->setValue("useStDevForJitterCalc", _receivedAudioStreamSettings._useStDevForJitterCalc);
+    settings->setValue("windowStarveThreshold", _receivedAudioStreamSettings._windowStarveThreshold);
+    settings->setValue("windowSecondsForDesiredCalcOnTooManyStarves", _receivedAudioStreamSettings._windowSecondsForDesiredCalcOnTooManyStarves);
+    settings->setValue("windowSecondsForDesiredReduction", _receivedAudioStreamSettings._windowSecondsForDesiredReduction);
+    settings->setValue("repetitionWithFade", _receivedAudioStreamSettings._repetitionWithFade);

     settings->setValue("fieldOfView", _fieldOfView);
     settings->setValue("faceshiftEyeDeflection", _faceshiftEyeDeflection);
     settings->setValue("maxVoxels", _maxVoxels);

@@ -89,10 +89,8 @@ public:
     void triggerOption(const QString& menuOption);
     QAction* getActionForOption(const QString& menuOption);

-    float getAudioJitterBufferFrames() const { return _audioJitterBufferFrames; }
-    void setAudioJitterBufferFrames(float audioJitterBufferSamples) { _audioJitterBufferFrames = audioJitterBufferSamples; }
-    int getMaxFramesOverDesired() const { return _maxFramesOverDesired; }
-    void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
+    const InboundAudioStream::Settings& getReceivedAudioStreamSettings() const { return _receivedAudioStreamSettings; }
+    void setReceivedAudioStreamSettings(const InboundAudioStream::Settings& receivedAudioStreamSettings) { _receivedAudioStreamSettings = receivedAudioStreamSettings; }
     float getFieldOfView() const { return _fieldOfView; }
     void setFieldOfView(float fieldOfView) { _fieldOfView = fieldOfView; }
     float getRealWorldFieldOfView() const { return _realWorldFieldOfView; }

@@ -265,8 +263,7 @@ private:

     QHash<QString, QAction*> _actionHash;
-    int _audioJitterBufferFrames; /// number of extra samples to wait before starting audio playback
-    int _maxFramesOverDesired;
+    InboundAudioStream::Settings _receivedAudioStreamSettings;
     BandwidthDialog* _bandwidthDialog;
     float _fieldOfView; /// in Degrees, doesn't apply to HMD like Oculus
     float _realWorldFieldOfView; // The actual FOV set by the user's monitor size and view distance

@@ -149,9 +149,16 @@ void PreferencesDialog::loadPreferences() {
     ui.faceshiftEyeDeflectionSider->setValue(menuInstance->getFaceshiftEyeDeflection() *
                                              ui.faceshiftEyeDeflectionSider->maximum());

-    ui.audioJitterSpin->setValue(menuInstance->getAudioJitterBufferFrames());
-    ui.maxFramesOverDesiredSpin->setValue(menuInstance->getMaxFramesOverDesired());
+    const InboundAudioStream::Settings& streamSettings = menuInstance->getReceivedAudioStreamSettings();
+
+    ui.dynamicJitterBuffersCheckBox->setChecked(streamSettings._dynamicJitterBuffers);
+    ui.staticDesiredJitterBufferFramesSpin->setValue(streamSettings._staticDesiredJitterBufferFrames);
+    ui.maxFramesOverDesiredSpin->setValue(streamSettings._maxFramesOverDesired);
+    ui.useStdevForJitterCalcCheckBox->setChecked(streamSettings._useStDevForJitterCalc);
+    ui.windowStarveThresholdSpin->setValue(streamSettings._windowStarveThreshold);
+    ui.windowSecondsForDesiredCalcOnTooManyStarvesSpin->setValue(streamSettings._windowSecondsForDesiredCalcOnTooManyStarves);
+    ui.windowSecondsForDesiredReductionSpin->setValue(streamSettings._windowSecondsForDesiredReduction);
+    ui.repetitionWithFadeCheckBox->setChecked(streamSettings._repetitionWithFade);

     ui.realWorldFieldOfViewSpin->setValue(menuInstance->getRealWorldFieldOfView());

@@ -241,16 +248,18 @@ void PreferencesDialog::savePreferences() {

     Menu::getInstance()->setInvertSixenseButtons(ui.invertSixenseButtonsCheckBox->isChecked());

-    Menu::getInstance()->setAudioJitterBufferFrames(ui.audioJitterSpin->value());
-    if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
-        Application::getInstance()->getAudio()->setDynamicJitterBuffers(false);
-        Application::getInstance()->getAudio()->setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
-    } else {
-        Application::getInstance()->getAudio()->setDynamicJitterBuffers(true);
-    }
+    InboundAudioStream::Settings streamSettings;
+    streamSettings._dynamicJitterBuffers = ui.dynamicJitterBuffersCheckBox->isChecked();
+    streamSettings._staticDesiredJitterBufferFrames = ui.staticDesiredJitterBufferFramesSpin->value();
+    streamSettings._maxFramesOverDesired = ui.maxFramesOverDesiredSpin->value();
+    streamSettings._useStDevForJitterCalc = ui.useStdevForJitterCalcCheckBox->isChecked();
+    streamSettings._windowStarveThreshold = ui.windowStarveThresholdSpin->value();
+    streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = ui.windowSecondsForDesiredCalcOnTooManyStarvesSpin->value();
+    streamSettings._windowSecondsForDesiredReduction = ui.windowSecondsForDesiredReductionSpin->value();
+    streamSettings._repetitionWithFade = ui.repetitionWithFadeCheckBox->isChecked();

-    Menu::getInstance()->setMaxFramesOverDesired(ui.maxFramesOverDesiredSpin->value());
-    Application::getInstance()->getAudio()->setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired());
+    Menu::getInstance()->setReceivedAudioStreamSettings(streamSettings);
+    Application::getInstance()->getAudio()->setReceivedAudioStreamSettings(streamSettings);

     Application::getInstance()->resizeGL(Application::getInstance()->getGLWidget()->width(),
                                          Application::getInstance()->getGLWidget()->height());

@@ -1464,6 +1464,97 @@ padding: 10px;margin-top:10px</string>
       </item>
      </layout>
     </item>
+
+    <!-- dynamic jitter buffers ____________________________________________________________________________ -->
+    <item>
+     <layout class="QHBoxLayout" name="horizontalLayout_23">
+      <property name="spacing"><number>0</number></property>
+      <property name="topMargin"><number>10</number></property>
+      <property name="rightMargin"><number>0</number></property>
+      <property name="bottomMargin"><number>10</number></property>
+      <item>
+       <widget class="QLabel" name="label_20">
+        <property name="font"><font><family>Arial</family></font></property>
+        <property name="styleSheet"><string notr="true">color: rgb(51, 51, 51)</string></property>
+        <property name="text"><string>Enable Dynamic Jitter Buffers</string></property>
+        <property name="indent"><number>15</number></property>
+        <property name="buddy"><cstring>dynamicJitterBuffersCheckBox</cstring></property>
+       </widget>
+      </item>
+      <item>
+       <spacer name="horizontalSpacer_17">
+        <property name="font"><font><family>Arial</family></font></property>
+        <property name="orientation"><enum>Qt::Horizontal</enum></property>
+        <property name="sizeHint" stdset="0"><size><width>40</width><height>20</height></size></property>
+       </spacer>
+      </item>
+      <item>
+       <widget class="QCheckBox" name="dynamicJitterBuffersCheckBox">
+        <property name="sizePolicy"><sizepolicy hsizetype="Minimum" vsizetype="Fixed"><horstretch>0</horstretch><verstretch>0</verstretch></sizepolicy></property>
+        <property name="minimumSize"><size><width>32</width><height>0</height></size></property>
+        <property name="baseSize"><size><width>0</width><height>0</height></size></property>
+        <property name="text"><string/></property>
+        <property name="iconSize"><size><width>32</width><height>32</height></size></property>
+       </widget>
+      </item>
+     </layout>
+    </item>
+
+    <!-- static desired jitter frames____________________________________________________________________________ -->
     <item>
      <layout class="QHBoxLayout" name="horizontalLayout_8">
       <property name="spacing">

@@ -1489,13 +1580,13 @@ padding: 10px;margin-top:10px</string>
        <string notr="true">color: rgb(51, 51, 51)</string>
       </property>
       <property name="text">
-       <string>Audio Jitter Buffer Frames (0 for automatic)</string>
+       <string>Static Jitter Buffer Frames</string>
      </property>
      <property name="indent">
       <number>15</number>
      </property>
      <property name="buddy">
-      <cstring>audioJitterSpin</cstring>
+      <cstring>staticDesiredJitterBufferFramesSpin</cstring>
      </property>
     </widget>
    </item>

@@ -1518,7 +1609,7 @@ padding: 10px;margin-top:10px</string>
      </spacer>
     </item>
     <item>
-     <widget class="QSpinBox" name="audioJitterSpin">
+     <widget class="QSpinBox" name="staticDesiredJitterBufferFramesSpin">
       <property name="sizePolicy">
        <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
         <horstretch>0</horstretch>

@@ -1555,6 +1646,7 @@ padding: 10px;margin-top:10px</string>
      </item>
     </layout>
    </item>
+   <!-- max frames over desired ____________________________________________________________________________ -->
    <item>
     <layout class="QHBoxLayout" name="horizontalLayout_13">
     <property name="spacing">

@@ -1591,7 +1683,7 @@ padding: 10px;margin-top:10px</string>
     </widget>
    </item>
    <item>
-    <spacer name="horizontalSpacer_12">
+    <spacer name="horizontalSpacer_20">
      <property name="font">
      <font>
       <family>Arial</family>

@@ -1648,6 +1740,466 @@ padding: 10px;margin-top:10px</string>
     </item>
+
+   <!-- use stdev for jitter calc ____________________________________________________________________________ -->
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_19">
+     <property name="spacing"><number>0</number></property>
+     <property name="topMargin"><number>10</number></property>
+     <property name="rightMargin"><number>0</number></property>
+     <property name="bottomMargin"><number>10</number></property>
+     <item>
+      <widget class="QLabel" name="label_16">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="styleSheet"><string notr="true">color: rgb(51, 51, 51)</string></property>
+       <property name="text"><string>Use Stdev for Dynamic Jitter Calc</string></property>
+       <property name="indent"><number>15</number></property>
+       <property name="buddy"><cstring>useStdevForJitterCalcCheckBox</cstring></property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_21">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="orientation"><enum>Qt::Horizontal</enum></property>
+       <property name="sizeHint" stdset="0"><size><width>40</width><height>20</height></size></property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QCheckBox" name="useStdevForJitterCalcCheckBox">
+       <property name="sizePolicy"><sizepolicy hsizetype="Minimum" vsizetype="Fixed"><horstretch>0</horstretch><verstretch>0</verstretch></sizepolicy></property>
+       <property name="minimumSize"><size><width>32</width><height>0</height></size></property>
+       <property name="baseSize"><size><width>0</width><height>0</height></size></property>
+       <property name="text"><string/></property>
+       <property name="iconSize"><size><width>32</width><height>32</height></size></property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+
+   <!-- window starve threshold ____________________________________________________________________________ -->
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_20">
+     <property name="spacing"><number>0</number></property>
+     <property name="topMargin"><number>10</number></property>
+     <property name="rightMargin"><number>0</number></property>
+     <property name="bottomMargin"><number>10</number></property>
+     <item alignment="Qt::AlignLeft">
+      <widget class="QLabel" name="label_17">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="styleSheet"><string notr="true">color: rgb(51, 51, 51)</string></property>
+       <property name="text"><string>Window A Starve Threshold</string></property>
+       <property name="indent"><number>15</number></property>
+       <property name="buddy"><cstring>windowStarveThresholdSpin</cstring></property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_22">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="orientation"><enum>Qt::Horizontal</enum></property>
+       <property name="sizeHint" stdset="0"><size><width>40</width><height>20</height></size></property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QSpinBox" name="windowStarveThresholdSpin">
+       <property name="sizePolicy"><sizepolicy hsizetype="Fixed" vsizetype="Fixed"><horstretch>0</horstretch><verstretch>0</verstretch></sizepolicy></property>
+       <property name="minimumSize"><size><width>95</width><height>36</height></size></property>
+       <property name="maximumSize"><size><width>70</width><height>16777215</height></size></property>
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="minimum"><number>0</number></property>
+       <property name="maximum"><number>10000</number></property>
+       <property name="value"><number>1</number></property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+
+   <!-- window A seconds ____________________________________________________________________________ -->
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_21">
+     <property name="spacing"><number>0</number></property>
+     <property name="topMargin"><number>10</number></property>
+     <property name="rightMargin"><number>0</number></property>
+     <property name="bottomMargin"><number>10</number></property>
+     <item alignment="Qt::AlignLeft">
+      <widget class="QLabel" name="label_18">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="styleSheet"><string notr="true">color: rgb(51, 51, 51)</string></property>
+       <property name="text"><string>Window A (raise desired on N starves) Seconds</string></property>
+       <property name="indent"><number>15</number></property>
+       <property name="buddy"><cstring>windowSecondsForDesiredCalcOnTooManyStarvesSpin</cstring></property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_23">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="orientation"><enum>Qt::Horizontal</enum></property>
+       <property name="sizeHint" stdset="0"><size><width>40</width><height>20</height></size></property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QSpinBox" name="windowSecondsForDesiredCalcOnTooManyStarvesSpin">
+       <property name="sizePolicy"><sizepolicy hsizetype="Fixed" vsizetype="Fixed"><horstretch>0</horstretch><verstretch>0</verstretch></sizepolicy></property>
+       <property name="minimumSize"><size><width>95</width><height>36</height></size></property>
+       <property name="maximumSize"><size><width>70</width><height>16777215</height></size></property>
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="minimum"><number>0</number></property>
+       <property name="maximum"><number>10000</number></property>
+       <property name="value"><number>1</number></property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+
+   <!-- window B seconds ____________________________________________________________________________ -->
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_22">
+     <property name="spacing"><number>0</number></property>
+     <property name="topMargin"><number>10</number></property>
+     <property name="rightMargin"><number>0</number></property>
+     <property name="bottomMargin"><number>10</number></property>
+     <item alignment="Qt::AlignLeft">
+      <widget class="QLabel" name="label_19">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="styleSheet"><string notr="true">color: rgb(51, 51, 51)</string></property>
+       <property name="text"><string>Window B (desired ceiling) Seconds</string></property>
+       <property name="indent"><number>15</number></property>
+       <property name="buddy"><cstring>windowSecondsForDesiredReductionSpin</cstring></property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_24">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="orientation"><enum>Qt::Horizontal</enum></property>
+       <property name="sizeHint" stdset="0"><size><width>40</width><height>20</height></size></property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QSpinBox" name="windowSecondsForDesiredReductionSpin">
+       <property name="sizePolicy"><sizepolicy hsizetype="Fixed" vsizetype="Fixed"><horstretch>0</horstretch><verstretch>0</verstretch></sizepolicy></property>
+       <property name="minimumSize"><size><width>95</width><height>36</height></size></property>
+       <property name="maximumSize"><size><width>70</width><height>16777215</height></size></property>
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="minimum"><number>0</number></property>
+       <property name="maximum"><number>10000</number></property>
+       <property name="value"><number>1</number></property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+
+   <!-- repetition with fade ____________________________________________________________________________ -->
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_24">
+     <property name="spacing"><number>0</number></property>
+     <property name="topMargin"><number>10</number></property>
+     <property name="rightMargin"><number>0</number></property>
+     <property name="bottomMargin"><number>10</number></property>
+     <item>
+      <widget class="QLabel" name="label_21">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="styleSheet"><string notr="true">color: rgb(51, 51, 51)</string></property>
+       <property name="text"><string>Repetition with Fade</string></property>
+       <property name="indent"><number>15</number></property>
+       <property name="buddy"><cstring>repetitionWithFadeCheckBox</cstring></property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_25">
+       <property name="font"><font><family>Arial</family></font></property>
+       <property name="orientation"><enum>Qt::Horizontal</enum></property>
+       <property name="sizeHint" stdset="0"><size><width>40</width><height>20</height></size></property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QCheckBox" name="repetitionWithFadeCheckBox">
+       <property name="sizePolicy"><sizepolicy hsizetype="Minimum" vsizetype="Fixed"><horstretch>0</horstretch><verstretch>0</verstretch></sizepolicy></property>
+       <property name="minimumSize"><size><width>32</width><height>0</height></size></property>
+       <property name="baseSize"><size><width>0</width><height>0</height></size></property>
+       <property name="text"><string/></property>
+       <property name="iconSize"><size><width>32</width><height>32</height></size></property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+
    <item>
     <layout class="QHBoxLayout" name="horizontalLayout_6">
     <property name="spacing">

@@ -22,16 +22,14 @@
 AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode, int numFramesCapacity) :
     _frameCapacity(numFramesCapacity),
     _sampleCapacity(numFrameSamples * numFramesCapacity),
-    _isFull(false),
+    _bufferLength(numFrameSamples * (numFramesCapacity + 1)),
     _numFrameSamples(numFrameSamples),
     _randomAccessMode(randomAccessMode),
     _overflowCount(0)
 {
     if (numFrameSamples) {
-        _buffer = new int16_t[_sampleCapacity];
-        if (_randomAccessMode) {
-            memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
-        }
+        _buffer = new int16_t[_bufferLength];
+        memset(_buffer, 0, _bufferLength * sizeof(int16_t));
         _nextOutput = _buffer;
         _endOfLastWrite = _buffer;
     } else {
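
Reviewer note: dropping the _isFull flag works because the allocation now carries one frame of slack beyond _sampleCapacity. In a circular buffer where the read and write pointers chase each other, pointer equality is ambiguous (empty or full?) unless the writer can never completely catch up to the reader; sizing the storage at capacity plus one frame guarantees at least one unwritable slot, so equality always means empty. A minimal sketch of the resulting occupancy arithmetic, restated outside the class:

    // sketch only: why one frame of slack removes the full/empty ambiguity.
    // _bufferLength == _sampleCapacity + _numFrameSamples, so a writer that
    // respects _sampleCapacity can never wrap all the way onto the reader.
    int diff = _endOfLastWrite - _nextOutput;
    int samplesAvailable = (diff >= 0) ? diff : diff + _bufferLength;
    // samplesAvailable == 0 now unambiguously means "empty";
    // "full" tops out at _sampleCapacity and never reaches _bufferLength.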

@@ -53,22 +51,23 @@ void AudioRingBuffer::reset() {

 void AudioRingBuffer::resizeForFrameSize(int numFrameSamples) {
     delete[] _buffer;
     _sampleCapacity = numFrameSamples * _frameCapacity;
+    _bufferLength = numFrameSamples * (_frameCapacity + 1);
     _numFrameSamples = numFrameSamples;
-    _buffer = new int16_t[_sampleCapacity];
+    _buffer = new int16_t[_bufferLength];
+    memset(_buffer, 0, _bufferLength * sizeof(int16_t));
     if (_randomAccessMode) {
-        memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
+        memset(_buffer, 0, _bufferLength * sizeof(int16_t));
     }
     reset();
 }

 void AudioRingBuffer::clear() {
-    _isFull = false;
     _endOfLastWrite = _buffer;
     _nextOutput = _buffer;
 }

 int AudioRingBuffer::readSamples(int16_t* destination, int maxSamples) {
-    return readData((char*) destination, maxSamples * sizeof(int16_t));
+    return readData((char*)destination, maxSamples * sizeof(int16_t)) / sizeof(int16_t);
 }

 int AudioRingBuffer::readData(char *data, int maxSize) {
||||||
|
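A note on the hunks above: the buffer now allocates one frame of slack beyond _sampleCapacity (_bufferLength = numFrameSamples * (numFramesCapacity + 1)), so a completely full ring never has its write pointer land back on its read pointer. That is what lets this commit delete the _isFull flag: full versus empty is decided by pointer distance alone. A minimal standalone sketch of that invariant (index-based; the names are mine, not the repo's API):

    #include <cassert>

    // A ring of length capacity+1 holds at most 'capacity' samples, so
    // "empty" (distance 0) and "full" (distance == capacity) never collide.
    int samplesAvailable(int writeIndex, int readIndex, int bufferLength) {
        int diff = writeIndex - readIndex;
        return diff < 0 ? diff + bufferLength : diff;
    }

    int main() {
        const int capacity = 4;
        const int bufferLength = capacity + 1;
        assert(samplesAvailable(0, 0, bufferLength) == 0);        // empty
        assert(samplesAvailable(4, 0, bufferLength) == capacity); // full, no flag needed
        assert(samplesAvailable(1, 3, bufferLength) == 3);        // wrapped write pointer
        return 0;
    }

In the real class the slack is a whole frame rather than a single sample, but the full/empty arithmetic is the same.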
@ -83,11 +82,11 @@ int AudioRingBuffer::readData(char *data, int maxSize) {
         numReadSamples = _endOfLastWrite ? (maxSize / sizeof(int16_t)) : 0;
     }
 
-    if (_nextOutput + numReadSamples > _buffer + _sampleCapacity) {
+    if (_nextOutput + numReadSamples > _buffer + _bufferLength) {
         // we're going to need to do two reads to get this data, it wraps around the edge
 
         // read to the end of the buffer
-        int numSamplesToEnd = (_buffer + _sampleCapacity) - _nextOutput;
+        int numSamplesToEnd = (_buffer + _bufferLength) - _nextOutput;
         memcpy(data, _nextOutput, numSamplesToEnd * sizeof(int16_t));
         if (_randomAccessMode) {
             memset(_nextOutput, 0, numSamplesToEnd * sizeof(int16_t)); // clear it

@ -108,15 +107,12 @@ int AudioRingBuffer::readData(char *data, int maxSize) {
 
     // push the position of _nextOutput by the number of samples read
     _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numReadSamples);
-    if (numReadSamples > 0) {
-        _isFull = false;
-    }
 
     return numReadSamples * sizeof(int16_t);
 }
 
 int AudioRingBuffer::writeSamples(const int16_t* source, int maxSamples) {
-    return writeData((const char*) source, maxSamples * sizeof(int16_t));
+    return writeData((const char*)source, maxSamples * sizeof(int16_t)) / sizeof(int16_t);
 }
 
 int AudioRingBuffer::writeData(const char* data, int maxSize) {

@ -133,18 +129,15 @@ int AudioRingBuffer::writeData(const char* data, int maxSize) {
         qDebug() << "Overflowed ring buffer! Overwriting old data";
     }
 
-    if (_endOfLastWrite + samplesToCopy <= _buffer + _sampleCapacity) {
+    if (_endOfLastWrite + samplesToCopy <= _buffer + _bufferLength) {
         memcpy(_endOfLastWrite, data, samplesToCopy * sizeof(int16_t));
     } else {
-        int numSamplesToEnd = (_buffer + _sampleCapacity) - _endOfLastWrite;
+        int numSamplesToEnd = (_buffer + _bufferLength) - _endOfLastWrite;
         memcpy(_endOfLastWrite, data, numSamplesToEnd * sizeof(int16_t));
         memcpy(_buffer, data + (numSamplesToEnd * sizeof(int16_t)), (samplesToCopy - numSamplesToEnd) * sizeof(int16_t));
     }
 
     _endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, samplesToCopy);
-    if (samplesToCopy > 0 && _endOfLastWrite == _nextOutput) {
-        _isFull = true;
-    }
 
     return samplesToCopy * sizeof(int16_t);
 }
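The readSamples()/writeSamples() changes above are not just a cast-style cleanup: both now divide the byte count returned by readData()/writeData() by sizeof(int16_t), so they return sample counts rather than byte counts. A hedged caller-side sketch (assumes this commit's AudioRingBuffer.h is on the include path; the helper function is mine):

    #include <cstdint>
    #include "AudioRingBuffer.h"

    // After this change the return value compares directly against the
    // requested number of samples, with no sizeof() scaling at the call site.
    bool popOneFrame(AudioRingBuffer& ring, int16_t* dest, int frameSamples) {
        int samplesRead = ring.readSamples(dest, frameSamples);
        return samplesRead == frameSamples; // false on underrun
    }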
@ -158,61 +151,52 @@ const int16_t& AudioRingBuffer::operator[] (const int index) const {
 }
 
 void AudioRingBuffer::shiftReadPosition(unsigned int numSamples) {
-    if (numSamples > 0) {
-        _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
-        _isFull = false;
-    }
+    _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
 }
 
 int AudioRingBuffer::samplesAvailable() const {
     if (!_endOfLastWrite) {
         return 0;
     }
-    if (_isFull) {
-        return _sampleCapacity;
-    }
 
     int sampleDifference = _endOfLastWrite - _nextOutput;
     if (sampleDifference < 0) {
-        sampleDifference += _sampleCapacity;
+        sampleDifference += _bufferLength;
     }
     return sampleDifference;
 }
 
-int AudioRingBuffer::addSilentFrame(int numSilentSamples) {
+int AudioRingBuffer::addSilentSamples(int silentSamples) {
 
     int samplesRoomFor = _sampleCapacity - samplesAvailable();
-    if (numSilentSamples > samplesRoomFor) {
+    if (silentSamples > samplesRoomFor) {
         // there's not enough room for this write. write as many silent samples as we have room for
-        numSilentSamples = samplesRoomFor;
+        silentSamples = samplesRoomFor;
         qDebug() << "Dropping some silent samples to prevent ring buffer overflow";
     }
 
     // memset zeroes into the buffer, accomodate a wrap around the end
     // push the _endOfLastWrite to the correct spot
-    if (_endOfLastWrite + numSilentSamples <= _buffer + _sampleCapacity) {
-        memset(_endOfLastWrite, 0, numSilentSamples * sizeof(int16_t));
+    if (_endOfLastWrite + silentSamples <= _buffer + _bufferLength) {
+        memset(_endOfLastWrite, 0, silentSamples * sizeof(int16_t));
     } else {
-        int numSamplesToEnd = (_buffer + _sampleCapacity) - _endOfLastWrite;
+        int numSamplesToEnd = (_buffer + _bufferLength) - _endOfLastWrite;
         memset(_endOfLastWrite, 0, numSamplesToEnd * sizeof(int16_t));
-        memset(_buffer, 0, (numSilentSamples - numSamplesToEnd) * sizeof(int16_t));
+        memset(_buffer, 0, (silentSamples - numSamplesToEnd) * sizeof(int16_t));
     }
-    _endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, numSilentSamples);
-    if (numSilentSamples > 0 && _nextOutput == _endOfLastWrite) {
-        _isFull = true;
-    }
 
-    return numSilentSamples * sizeof(int16_t);
+    _endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, silentSamples);
+
+    return silentSamples;
 }
 
 int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const {
 
-    if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + _sampleCapacity) {
+    if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + _bufferLength) {
         // this shift will wrap the position around to the beginning of the ring
-        return position + numSamplesShift - _sampleCapacity;
+        return position + numSamplesShift - _bufferLength;
     } else if (numSamplesShift < 0 && position + numSamplesShift < _buffer) {
         // this shift will go around to the end of the ring
-        return position + numSamplesShift + _sampleCapacity;
+        return position + numSamplesShift + _bufferLength;
     } else {
         return position + numSamplesShift;
     }

@ -221,7 +205,7 @@ int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int
 float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
     float loudness = 0.0f;
     const int16_t* sampleAt = frameStart;
-    const int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
+    const int16_t* _bufferLastAt = _buffer + _bufferLength - 1;
 
     for (int i = 0; i < _numFrameSamples; ++i) {
         loudness += fabsf(*sampleAt);

@ -234,6 +218,9 @@ float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
 }
 
 float AudioRingBuffer::getFrameLoudness(ConstIterator frameStart) const {
+    if (frameStart.isNull()) {
+        return 0.0f;
+    }
     return getFrameLoudness(&(*frameStart));
 }
 
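To make the clamp at the top of addSilentSamples() concrete: the silence written can never exceed the free space in the ring; the excess is logged and discarded. A standalone arithmetic check (names are mine):

    #include <algorithm>
    #include <cassert>

    // Mirrors the clamp in addSilentSamples(): write only what fits.
    int clampSilentSamples(int requested, int sampleCapacity, int available) {
        int roomFor = sampleCapacity - available;
        return std::min(requested, roomFor);
    }

    int main() {
        // capacity 4800 samples with 4500 buffered: a 480-sample request is cut to 300
        assert(clampSilentSamples(480, 4800, 4500) == 300);
        // plenty of room: the request passes through unchanged
        assert(clampSilentSamples(480, 4800, 1000) == 480);
        return 0;
    }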
@ -241,3 +228,44 @@ float AudioRingBuffer::getNextOutputFrameLoudness() const {
     return getFrameLoudness(_nextOutput);
 }
 
+int AudioRingBuffer::writeSamples(ConstIterator source, int maxSamples) {
+    int samplesToCopy = std::min(maxSamples, _sampleCapacity);
+    int samplesRoomFor = _sampleCapacity - samplesAvailable();
+    if (samplesToCopy > samplesRoomFor) {
+        // there's not enough room for this write. erase old data to make room for this new data
+        int samplesToDelete = samplesToCopy - samplesRoomFor;
+        _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, samplesToDelete);
+        _overflowCount++;
+        qDebug() << "Overflowed ring buffer! Overwriting old data";
+    }
+
+    int16_t* bufferLast = _buffer + _bufferLength - 1;
+    for (int i = 0; i < samplesToCopy; i++) {
+        *_endOfLastWrite = *source;
+        _endOfLastWrite = (_endOfLastWrite == bufferLast) ? _buffer : _endOfLastWrite + 1;
+        ++source;
+    }
+
+    return samplesToCopy;
+}
+
+int AudioRingBuffer::writeSamplesWithFade(ConstIterator source, int maxSamples, float fade) {
+    int samplesToCopy = std::min(maxSamples, _sampleCapacity);
+    int samplesRoomFor = _sampleCapacity - samplesAvailable();
+    if (samplesToCopy > samplesRoomFor) {
+        // there's not enough room for this write. erase old data to make room for this new data
+        int samplesToDelete = samplesToCopy - samplesRoomFor;
+        _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, samplesToDelete);
+        _overflowCount++;
+        qDebug() << "Overflowed ring buffer! Overwriting old data";
+    }
+
+    int16_t* bufferLast = _buffer + _bufferLength - 1;
+    for (int i = 0; i < samplesToCopy; i++) {
+        *_endOfLastWrite = (int16_t)((float)(*source) * fade);
+        _endOfLastWrite = (_endOfLastWrite == bufferLast) ? _buffer : _endOfLastWrite + 1;
+        ++source;
+    }
+
+    return samplesToCopy;
+}
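writeSamplesWithFade() above copies sample-by-sample because every sample is scaled by fade on its way in; the bulk memcpy() path in writeData() only works for unscaled copies. A hedged usage sketch (assumes this commit's header; the helper is mine):

    #include "AudioRingBuffer.h"

    // Re-append the most recently written frame at half volume, e.g. to
    // paper over a one-frame gap in the incoming stream.
    void appendHalfVolumeRepeat(AudioRingBuffer& ring, int frameSamples) {
        AudioRingBuffer::ConstIterator lastFrame = ring.lastFrameWritten();
        if (!lastFrame.isNull()) {
            ring.writeSamplesWithFade(lastFrame, frameSamples, 0.5f);
        }
    }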
AudioRingBuffer.h

@ -68,7 +68,7 @@
     int getOverflowCount() const { return _overflowCount; } /// how many times the ring buffer has overwritten old data
 
-    int addSilentFrame(int numSilentSamples);
+    int addSilentSamples(int samples);
 
 private:
     float getFrameLoudness(const int16_t* frameStart) const;

@ -82,7 +82,7 @@ protected:
     int _frameCapacity;
     int _sampleCapacity;
-    bool _isFull;
+    int _bufferLength; // actual length of _buffer: will be one frame larger than _sampleCapacity
     int _numFrameSamples;
     int16_t* _nextOutput;
     int16_t* _endOfLastWrite;

@ -95,23 +95,25 @@ public:
 class ConstIterator { //public std::iterator < std::forward_iterator_tag, int16_t > {
 public:
     ConstIterator()
-        : _capacity(0),
+        : _bufferLength(0),
         _bufferFirst(NULL),
         _bufferLast(NULL),
         _at(NULL) {}
 
     ConstIterator(int16_t* bufferFirst, int capacity, int16_t* at)
-        : _capacity(capacity),
+        : _bufferLength(capacity),
         _bufferFirst(bufferFirst),
         _bufferLast(bufferFirst + capacity - 1),
         _at(at) {}
 
+    bool isNull() const { return _at == NULL; }
+
     bool operator==(const ConstIterator& rhs) { return _at == rhs._at; }
     bool operator!=(const ConstIterator& rhs) { return _at != rhs._at; }
     const int16_t& operator*() { return *_at; }
 
     ConstIterator& operator=(const ConstIterator& rhs) {
-        _capacity = rhs._capacity;
+        _bufferLength = rhs._bufferLength;
         _bufferFirst = rhs._bufferFirst;
         _bufferLast = rhs._bufferLast;
         _at = rhs._at;

@ -145,40 +147,54 @@ public:
     }
 
     ConstIterator operator+(int i) {
-        return ConstIterator(_bufferFirst, _capacity, atShiftedBy(i));
+        return ConstIterator(_bufferFirst, _bufferLength, atShiftedBy(i));
     }
 
     ConstIterator operator-(int i) {
-        return ConstIterator(_bufferFirst, _capacity, atShiftedBy(-i));
+        return ConstIterator(_bufferFirst, _bufferLength, atShiftedBy(-i));
     }
 
     void readSamples(int16_t* dest, int numSamples) {
+        int16_t* at = _at;
         for (int i = 0; i < numSamples; i++) {
-            *dest = *(*this);
+            *dest = *at;
             ++dest;
-            ++(*this);
+            at = (at == _bufferLast) ? _bufferFirst : at + 1;
+        }
+    }
+
+    void readSamplesWithFade(int16_t* dest, int numSamples, float fade) {
+        int16_t* at = _at;
+        for (int i = 0; i < numSamples; i++) {
+            *dest = (float)*at * fade;
+            ++dest;
+            at = (at == _bufferLast) ? _bufferFirst : at + 1;
         }
     }
 
 private:
     int16_t* atShiftedBy(int i) {
-        i = (_at - _bufferFirst + i) % _capacity;
+        i = (_at - _bufferFirst + i) % _bufferLength;
         if (i < 0) {
-            i += _capacity;
+            i += _bufferLength;
         }
         return _bufferFirst + i;
     }
 
 private:
-    int _capacity;
+    int _bufferLength;
     int16_t* _bufferFirst;
     int16_t* _bufferLast;
     int16_t* _at;
 };
 
-    ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); }
+    ConstIterator nextOutput() const { return ConstIterator(_buffer, _bufferLength, _nextOutput); }
+    ConstIterator lastFrameWritten() const { return ConstIterator(_buffer, _bufferLength, _endOfLastWrite) - _numFrameSamples; }
 
     float getFrameLoudness(ConstIterator frameStart) const;
 
+    int writeSamples(ConstIterator source, int maxSamples);
+    int writeSamplesWithFade(ConstIterator source, int maxSamples, float fade);
 };
 
 #endif // hifi_AudioRingBuffer_h
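lastFrameWritten() above is built by stepping a ConstIterator back _numFrameSamples from _endOfLastWrite, which leans on atShiftedBy() handling negative shifts. Since C++'s % operator keeps the sign of the dividend, the fix-up branch is required; a standalone sketch of the same index math (names are mine):

    #include <cassert>

    // Same arithmetic as ConstIterator::atShiftedBy(): a shift may be
    // negative, and (negative % n) is negative in C++, hence the fix-up.
    int shiftedIndex(int at, int shift, int bufferLength) {
        int i = (at + shift) % bufferLength;
        if (i < 0) {
            i += bufferLength;
        }
        return i;
    }

    int main() {
        assert(shiftedIndex(2, -5, 8) == 5); // wraps backwards past the start
        assert(shiftedIndex(6, 5, 8) == 3);  // wraps forwards past the end
        return 0;
    }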
InboundAudioStream.cpp

@ -14,30 +14,37 @@
 #include "InboundAudioStream.h"
 #include "PacketHeaders.h"
 
-InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity,
-    bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc) :
+const int STARVE_HISTORY_CAPACITY = 50;
+
+InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings) :
     _ringBuffer(numFrameSamples, false, numFramesCapacity),
     _lastPopSucceeded(false),
     _lastPopOutput(),
-    _dynamicJitterBuffers(dynamicJitterBuffers),
-    _staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
-    _useStDevForJitterCalc(useStDevForJitterCalc),
-    _calculatedJitterBufferFramesUsingMaxGap(0),
-    _calculatedJitterBufferFramesUsingStDev(0),
-    _desiredJitterBufferFrames(dynamicJitterBuffers ? 1 : staticDesiredJitterBufferFrames),
-    _maxFramesOverDesired(maxFramesOverDesired),
+    _dynamicJitterBuffers(settings._dynamicJitterBuffers),
+    _staticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames),
+    _useStDevForJitterCalc(settings._useStDevForJitterCalc),
+    _desiredJitterBufferFrames(settings._dynamicJitterBuffers ? 1 : settings._staticDesiredJitterBufferFrames),
+    _maxFramesOverDesired(settings._maxFramesOverDesired),
     _isStarved(true),
     _hasStarted(false),
     _consecutiveNotMixedCount(0),
     _starveCount(0),
     _silentFramesDropped(0),
     _oldFramesDropped(0),
-    _incomingSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS),
-    _lastFrameReceivedTime(0),
-    _interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS),
-    _interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS),
+    _incomingSequenceNumberStats(STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
+    _lastPacketReceivedTime(0),
+    _timeGapStatsForDesiredCalcOnTooManyStarves(0, settings._windowSecondsForDesiredCalcOnTooManyStarves),
+    _calculatedJitterBufferFramesUsingMaxGap(0),
+    _stdevStatsForDesiredCalcOnTooManyStarves(),
+    _calculatedJitterBufferFramesUsingStDev(0),
+    _timeGapStatsForDesiredReduction(0, settings._windowSecondsForDesiredReduction),
+    _starveHistoryWindowSeconds(settings._windowSecondsForDesiredCalcOnTooManyStarves),
+    _starveHistory(STARVE_HISTORY_CAPACITY),
+    _starveThreshold(settings._windowStarveThreshold),
     _framesAvailableStat(),
-    _currentJitterBufferFrames(0)
+    _currentJitterBufferFrames(0),
+    _timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
+    _repetitionWithFade(settings._repetitionWithFade)
 {
 }
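One reading of the reshuffled initializer list above: C++ runs member initializers in declaration order, not in list order, so the list presumably tracks the reordered members in InboundAudioStream.h (and keeps -Wreorder quiet). A standalone illustration:

    // Declaration order wins: _a is initialized before _b even though the
    // list names _b first. Keeping the list in declaration order, as the
    // constructor above now does, avoids compiler reorder warnings.
    struct Example {
        int _a;
        int _b;
        Example() : _b(2), _a(1) {}
    };

    int main() {
        Example e;
        return e._a + e._b; // 3
    }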
@ -59,11 +66,14 @@ void InboundAudioStream::resetStats() {
     _silentFramesDropped = 0;
     _oldFramesDropped = 0;
     _incomingSequenceNumberStats.reset();
-    _lastFrameReceivedTime = 0;
-    _interframeTimeGapStatsForJitterCalc.reset();
-    _interframeTimeGapStatsForStatsPacket.reset();
+    _lastPacketReceivedTime = 0;
+    _timeGapStatsForDesiredCalcOnTooManyStarves.reset();
+    _stdevStatsForDesiredCalcOnTooManyStarves = StDev();
+    _timeGapStatsForDesiredReduction.reset();
+    _starveHistory.clear();
     _framesAvailableStat.reset();
     _currentJitterBufferFrames = 0;
+    _timeGapStatsForStatsPacket.reset();
 }
 
 void InboundAudioStream::clearBuffer() {

@ -72,8 +82,11 @@ void InboundAudioStream::clearBuffer() {
     _currentJitterBufferFrames = 0;
 }
 
-int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
-    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
+void InboundAudioStream::perSecondCallbackForUpdatingStats() {
+    _incomingSequenceNumberStats.pushStatsToHistory();
+    _timeGapStatsForDesiredCalcOnTooManyStarves.currentIntervalComplete();
+    _timeGapStatsForDesiredReduction.currentIntervalComplete();
+    _timeGapStatsForStatsPacket.currentIntervalComplete();
 }
 
 int InboundAudioStream::parseData(const QByteArray& packet) {
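perSecondCallbackForUpdatingStats() is declared as a slot in the header diff further down, so the natural driver is a one-second QTimer. A hedged sketch of such a hookup, not code from this commit:

    #include <QTimer>
    #include "InboundAudioStream.h"

    // Drives the once-per-second stats rollover the slot's comment asks for.
    void startStatsTimer(InboundAudioStream* stream, QObject* parent) {
        QTimer* timer = new QTimer(parent);
        QObject::connect(timer, SIGNAL(timeout()),
                         stream, SLOT(perSecondCallbackForUpdatingStats()));
        timer->start(1000); // milliseconds
    }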
@ -83,36 +96,51 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
 
     // parse header
     int numBytesHeader = numBytesForPacketHeader(packet);
-    const char* sequenceAt = packet.constData() + numBytesHeader;
+    const char* dataAt = packet.constData() + numBytesHeader;
     int readBytes = numBytesHeader;
 
     // parse sequence number and track it
-    quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));
+    quint16 sequence = *(reinterpret_cast<const quint16*>(dataAt));
+    dataAt += sizeof(quint16);
     readBytes += sizeof(quint16);
     SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID);
 
-    frameReceivedUpdateTimingStats();
+    packetReceivedUpdateTimingStats();
 
-    // TODO: handle generalized silent packet here?????
+    int networkSamples;
 
-    // parse the info after the seq number and before the audio data.(the stream properties)
-    int numAudioSamples;
-    readBytes += parseStreamProperties(packetType, packet.mid(readBytes), numAudioSamples);
+    if (packetType == PacketTypeSilentAudioFrame) {
+        quint16 numSilentSamples = *(reinterpret_cast<const quint16*>(dataAt));
+        readBytes += sizeof(quint16);
+        networkSamples = (int)numSilentSamples;
+    } else {
+        // parse the info after the seq number and before the audio data (the stream properties)
+        readBytes += parseStreamProperties(packetType, packet.mid(readBytes), networkSamples);
+    }
 
     // handle this packet based on its arrival status.
-    // For now, late packets are ignored. It may be good in the future to insert the late audio frame
-    // into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
     switch (arrivalInfo._status) {
         case SequenceNumberStats::Early: {
+            // Packet is early; write droppable silent samples for each of the skipped packets.
+            // NOTE: we assume that each dropped packet contains the same number of samples
+            // as the packet we just received.
             int packetsDropped = arrivalInfo._seqDiffFromExpected;
-            writeSamplesForDroppedPackets(packetsDropped * numAudioSamples);
+            writeSamplesForDroppedPackets(packetsDropped * networkSamples);
+
             // fall through to OnTime case
         }
         case SequenceNumberStats::OnTime: {
-            readBytes += parseAudioData(packetType, packet.mid(readBytes), numAudioSamples);
+            // Packet is on time; parse its data to the ringbuffer
+            if (packetType == PacketTypeSilentAudioFrame) {
+                writeDroppableSilentSamples(networkSamples);
+            } else {
+                readBytes += parseAudioData(packetType, packet.mid(readBytes), networkSamples);
+            }
             break;
         }
         default: {
+            // For now, late packets are ignored. It may be good in the future to insert the late audio packet data
+            // into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
             break;
         }
     }
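As parsed above, a PacketTypeSilentAudioFrame carries no audio payload: after the packet header come a quint16 sequence number and a quint16 count of silent samples. A standalone sketch of that post-header layout (the struct and names are mine; the real code reads the fields with reinterpret_cast):

    #include <cstdint>
    #include <cstring>

    // Post-header layout read by parseData() for a silent frame:
    //   [uint16 sequence][uint16 numSilentSamples]  -- and nothing else
    struct SilentFramePayload {
        uint16_t sequence;
        uint16_t numSilentSamples;
    };

    SilentFramePayload readSilentPayload(const char* afterHeader) {
        SilentFramePayload p;
        std::memcpy(&p.sequence, afterHeader, sizeof(uint16_t));
        std::memcpy(&p.numSilentSamples, afterHeader + sizeof(uint16_t), sizeof(uint16_t));
        return p; // assumes sender and receiver agree on byte order, as the code above does
    }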
@ -139,6 +167,43 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
     return readBytes;
 }
 
+int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+    // mixed audio packets do not have any info between the seq num and the audio data.
+    numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
+    return 0;
+}
+
+int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
+}
+
+int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
+    // calculate how many silent frames we should drop.
+    int samplesPerFrame = _ringBuffer.getNumFrameSamples();
+    int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
+    int numSilentFramesToDrop = 0;
+
+    if (silentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
+
+        // our avg jitter buffer size exceeds its desired value, so ignore some silent
+        // frames to get that size as close to desired as possible
+        int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
+        int numSilentFramesReceived = silentSamples / samplesPerFrame;
+        numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);
+
+        // dont reset _currentJitterBufferFrames here; we want to be able to drop further silent frames
+        // without waiting for _framesAvailableStat to fill up to 10s of samples.
+        _currentJitterBufferFrames -= numSilentFramesToDrop;
+        _silentFramesDropped += numSilentFramesToDrop;
+
+        _framesAvailableStat.reset();
+    }
+
+    int ret = _ringBuffer.addSilentSamples(silentSamples - numSilentFramesToDrop * samplesPerFrame);
+
+    return ret;
+}
+
 int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped) {
     int samplesPopped = 0;
     int samplesAvailable = _ringBuffer.samplesAvailable();
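Worked example for the dropping logic above: with 480 samples per frame, a padded target of 4 frames, and _currentJitterBufferFrames at 7, a two-frame silent packet wants 3 frames dropped but only has 2 to give, so nothing reaches the ring. A standalone check (names are mine):

    #include <algorithm>
    #include <cassert>

    // Mirrors writeDroppableSilentSamples(): drop whole silent frames while
    // the buffer is over its padded target, write the remainder.
    int silentSamplesToWrite(int silentSamples, int samplesPerFrame,
                             int currentFrames, int desiredFramesPlusPadding) {
        int framesToDrop = 0;
        if (silentSamples >= samplesPerFrame && currentFrames > desiredFramesPlusPadding) {
            framesToDrop = std::min(currentFrames - desiredFramesPlusPadding,
                                    silentSamples / samplesPerFrame);
        }
        return silentSamples - framesToDrop * samplesPerFrame;
    }

    int main() {
        assert(silentSamplesToWrite(960, 480, 7, 4) == 0);   // both frames dropped
        assert(silentSamplesToWrite(960, 480, 5, 4) == 480); // one dropped, one written
        assert(silentSamplesToWrite(960, 480, 3, 4) == 960); // under target: keep all
        return 0;
    }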
@ -216,12 +281,61 @@ void InboundAudioStream::framesAvailableChanged() {
 }
 
 void InboundAudioStream::setToStarved() {
-    _isStarved = true;
     _consecutiveNotMixedCount = 0;
     _starveCount++;
     // if we have more than the desired frames when setToStarved() is called, then we'll immediately
     // be considered refilled. in that case, there's no need to set _isStarved to true.
     _isStarved = (_ringBuffer.framesAvailable() < _desiredJitterBufferFrames);
+
+    // record the time of this starve in the starve history
+    quint64 now = usecTimestampNow();
+    _starveHistory.insert(now);
+
+    if (_dynamicJitterBuffers) {
+        // dynamic jitter buffers are enabled. check if this starve put us over the window
+        // starve threshold
+        quint64 windowEnd = now - _starveHistoryWindowSeconds * USECS_PER_SECOND;
+        RingBufferHistory<quint64>::Iterator starvesIterator = _starveHistory.begin();
+        RingBufferHistory<quint64>::Iterator end = _starveHistory.end();
+        int starvesInWindow = 1;
+        do {
+            ++starvesIterator;
+            if (*starvesIterator < windowEnd) {
+                break;
+            }
+            starvesInWindow++;
+        } while (starvesIterator != end);
+
+        // this starve put us over the starve threshold. update _desiredJitterBufferFrames to
+        // value determined by window A.
+        if (starvesInWindow >= _starveThreshold) {
+            int calculatedJitterBufferFrames;
+            if (_useStDevForJitterCalc) {
+                calculatedJitterBufferFrames = _calculatedJitterBufferFramesUsingStDev;
+            } else {
+                // we don't know when the next packet will arrive, so it's possible the gap between the last packet and the
+                // next packet will exceed the max time gap in the window. If the time since the last packet has already exceeded
+                // the window max gap, then we should use that value to calculate desired frames.
+                int framesSinceLastPacket = ceilf((float)(now - _lastPacketReceivedTime) / (float)BUFFER_SEND_INTERVAL_USECS);
+                calculatedJitterBufferFrames = std::max(_calculatedJitterBufferFramesUsingMaxGap, framesSinceLastPacket);
+            }
+            // make sure _desiredJitterBufferFrames does not become lower here
+            if (calculatedJitterBufferFrames >= _desiredJitterBufferFrames) {
+                _desiredJitterBufferFrames = calculatedJitterBufferFrames;
+            }
+        }
+    }
+}
+
+void InboundAudioStream::setSettings(const Settings& settings) {
+    setMaxFramesOverDesired(settings._maxFramesOverDesired);
+    setDynamicJitterBuffers(settings._dynamicJitterBuffers);
+    setStaticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames);
+    setUseStDevForJitterCalc(settings._useStDevForJitterCalc);
+    setWindowStarveThreshold(settings._windowStarveThreshold);
+    setWindowSecondsForDesiredCalcOnTooManyStarves(settings._windowSecondsForDesiredCalcOnTooManyStarves);
+    setWindowSecondsForDesiredReduction(settings._windowSecondsForDesiredReduction);
+    setRepetitionWithFade(settings._repetitionWithFade);
 }
 
 void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
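The loop above counts how many entries of _starveHistory (iterated newest first) fall inside the trailing window; reaching _starveThreshold triggers the desired-frames increase. A standalone miniature of the same policy (the container and names are mine):

    #include <cstdint>
    #include <deque>

    // starveTimes is ordered newest-first, like iterating _starveHistory.
    bool tooManyStarves(const std::deque<uint64_t>& starveTimes,
                        uint64_t now, uint64_t windowUsecs, int threshold) {
        int inWindow = 0;
        for (uint64_t t : starveTimes) {
            if (t < now - windowUsecs) {
                break; // ordered history: older entries can't re-enter the window
            }
            ++inWindow;
        }
        return inWindow >= threshold;
    }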
@ -229,6 +343,7 @@ void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
         _desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
     } else {
         if (!_dynamicJitterBuffers) {
+            // if we're enabling dynamic jitter buffer frames, start desired frames at 1
             _desiredJitterBufferFrames = 1;
         }
     }
@ -242,90 +357,102 @@ void InboundAudioStream::setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) {
     }
 }
 
+void InboundAudioStream::setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves) {
+    _timeGapStatsForDesiredCalcOnTooManyStarves.setWindowIntervals(windowSecondsForDesiredCalcOnTooManyStarves);
+    _starveHistoryWindowSeconds = windowSecondsForDesiredCalcOnTooManyStarves;
+}
+
+void InboundAudioStream::setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction) {
+    _timeGapStatsForDesiredReduction.setWindowIntervals(windowSecondsForDesiredReduction);
+}
+
 int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
     const int MIN_FRAMES_DESIRED = 0;
     const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity();
     return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED);
 }
 
-void InboundAudioStream::frameReceivedUpdateTimingStats() {
+void InboundAudioStream::packetReceivedUpdateTimingStats() {
 
     // update our timegap stats and desired jitter buffer frames if necessary
     // discard the first few packets we receive since they usually have gaps that aren't represensative of normal jitter
     const int NUM_INITIAL_PACKETS_DISCARD = 3;
     quint64 now = usecTimestampNow();
     if (_incomingSequenceNumberStats.getReceived() > NUM_INITIAL_PACKETS_DISCARD) {
-        quint64 gap = now - _lastFrameReceivedTime;
-        _interframeTimeGapStatsForStatsPacket.update(gap);
+        quint64 gap = now - _lastPacketReceivedTime;
+        _timeGapStatsForStatsPacket.update(gap);
 
-        const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;
+        // update all stats used for desired frames calculations under dynamic jitter buffer mode
+        _timeGapStatsForDesiredCalcOnTooManyStarves.update(gap);
+        _stdevStatsForDesiredCalcOnTooManyStarves.addValue(gap);
+        _timeGapStatsForDesiredReduction.update(gap);
 
-        // update stats for Freddy's method of jitter calc
-        _interframeTimeGapStatsForJitterCalc.update(gap);
-        if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) {
-            _calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
-            _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag();
-
-            if (_dynamicJitterBuffers && !_useStDevForJitterCalc) {
-                _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingMaxGap);
-            }
+        if (_timeGapStatsForDesiredCalcOnTooManyStarves.getNewStatsAvailableFlag()) {
+            _calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_timeGapStatsForDesiredCalcOnTooManyStarves.getWindowMax()
+                / (float)BUFFER_SEND_INTERVAL_USECS);
+            _timeGapStatsForDesiredCalcOnTooManyStarves.clearNewStatsAvailableFlag();
         }
 
-        // update stats for Philip's method of jitter calc
-        _stdev.addValue(gap);
         const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;
-        if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
+        if (_stdevStatsForDesiredCalcOnTooManyStarves.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
             const float NUM_STANDARD_DEVIATIONS = 3.0f;
-            _calculatedJitterBufferFramesUsingStDev = (int)ceilf(NUM_STANDARD_DEVIATIONS * _stdev.getStDev() / USECS_PER_FRAME);
-            _stdev.reset();
+            _calculatedJitterBufferFramesUsingStDev = ceilf(NUM_STANDARD_DEVIATIONS * _stdevStatsForDesiredCalcOnTooManyStarves.getStDev()
+                / (float)BUFFER_SEND_INTERVAL_USECS);
+            _stdevStatsForDesiredCalcOnTooManyStarves.reset();
+        }
 
-            if (_dynamicJitterBuffers && _useStDevForJitterCalc) {
-                _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingStDev);
+        if (_dynamicJitterBuffers) {
+            // if the max gap in window B (_timeGapStatsForDesiredReduction) corresponds to a smaller number of frames
+            // than _desiredJitterBufferFrames, then reduce _desiredJitterBufferFrames to that number of frames.
+            if (_timeGapStatsForDesiredReduction.getNewStatsAvailableFlag() && _timeGapStatsForDesiredReduction.isWindowFilled()) {
+                int calculatedJitterBufferFrames = ceilf((float)_timeGapStatsForDesiredReduction.getWindowMax()
+                    / (float)BUFFER_SEND_INTERVAL_USECS);
+                if (calculatedJitterBufferFrames < _desiredJitterBufferFrames) {
+                    _desiredJitterBufferFrames = calculatedJitterBufferFrames;
+                }
+                _timeGapStatsForDesiredReduction.clearNewStatsAvailableFlag();
             }
         }
     }
-    _lastFrameReceivedTime = now;
+
+    _lastPacketReceivedTime = now;
 }
 
-int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) {
-
-    // calculate how many silent frames we should drop.
-    int samplesPerFrame = _ringBuffer.getNumFrameSamples();
-    int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
-    int numSilentFramesToDrop = 0;
-
-    if (numSilentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
-
-        // our avg jitter buffer size exceeds its desired value, so ignore some silent
-        // frames to get that size as close to desired as possible
-        int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
-        int numSilentFramesReceived = numSilentSamples / samplesPerFrame;
-        numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);
-
-        // dont reset _currentJitterBufferFrames here; we want to be able to drop further silent frames
-        // without waiting for _framesAvailableStat to fill up to 10s of samples.
-        _currentJitterBufferFrames -= numSilentFramesToDrop;
-        _silentFramesDropped += numSilentFramesToDrop;
-
-        _framesAvailableStat.reset();
-    }
-
-    return _ringBuffer.addSilentFrame(numSilentSamples - numSilentFramesToDrop * samplesPerFrame);
+int InboundAudioStream::writeSamplesForDroppedPackets(int networkSamples) {
+    if (_repetitionWithFade) {
+        return writeLastFrameRepeatedWithFade(networkSamples);
+    }
+    return writeDroppableSilentSamples(networkSamples);
 }
 
-int InboundAudioStream::writeSamplesForDroppedPackets(int numSamples) {
-    return writeDroppableSilentSamples(numSamples);
+int InboundAudioStream::writeLastFrameRepeatedWithFade(int samples) {
+    AudioRingBuffer::ConstIterator frameToRepeat = _ringBuffer.lastFrameWritten();
+    int frameSize = _ringBuffer.getNumFrameSamples();
+    int samplesToWrite = samples;
+    int indexOfRepeat = 0;
+    do {
+        int samplesToWriteThisIteration = std::min(samplesToWrite, frameSize);
+        float fade = calculateRepeatedFrameFadeFactor(indexOfRepeat);
+        if (fade == 1.0f) {
+            samplesToWrite -= _ringBuffer.writeSamples(frameToRepeat, samplesToWriteThisIteration);
+        } else {
+            samplesToWrite -= _ringBuffer.writeSamplesWithFade(frameToRepeat, samplesToWriteThisIteration, fade);
+        }
+        indexOfRepeat++;
+    } while (samplesToWrite > 0);
+
+    return samples;
 }
 
 AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
     AudioStreamStats streamStats;
 
-    streamStats._timeGapMin = _interframeTimeGapStatsForStatsPacket.getMin();
-    streamStats._timeGapMax = _interframeTimeGapStatsForStatsPacket.getMax();
-    streamStats._timeGapAverage = _interframeTimeGapStatsForStatsPacket.getAverage();
-    streamStats._timeGapWindowMin = _interframeTimeGapStatsForStatsPacket.getWindowMin();
-    streamStats._timeGapWindowMax = _interframeTimeGapStatsForStatsPacket.getWindowMax();
-    streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage();
+    streamStats._timeGapMin = _timeGapStatsForStatsPacket.getMin();
+    streamStats._timeGapMax = _timeGapStatsForStatsPacket.getMax();
+    streamStats._timeGapAverage = _timeGapStatsForStatsPacket.getAverage();
+    streamStats._timeGapWindowMin = _timeGapStatsForStatsPacket.getWindowMin();
+    streamStats._timeGapWindowMax = _timeGapStatsForStatsPacket.getWindowMax();
+    streamStats._timeGapWindowAverage = _timeGapStatsForStatsPacket.getWindowAverage();
 
     streamStats._framesAvailable = _ringBuffer.framesAvailable();
     streamStats._framesAvailableAverage = _framesAvailableStat.getAverage();
@ -341,7 +468,24 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
     return streamStats;
 }
 
-AudioStreamStats InboundAudioStream::updateSeqHistoryAndGetAudioStreamStats() {
-    _incomingSequenceNumberStats.pushStatsToHistory();
-    return getAudioStreamStats();
+float calculateRepeatedFrameFadeFactor(int indexOfRepeat) {
+    // fade factor scheme is from this paper:
+    // http://inst.eecs.berkeley.edu/~ee290t/sp04/lectures/packet_loss_recov_paper11.pdf
+
+    const float INITIAL_MSECS_NO_FADE = 20.0f;
+    const float MSECS_FADE_TO_ZERO = 320.0f;
+
+    const float INITIAL_FRAMES_NO_FADE = INITIAL_MSECS_NO_FADE * (float)USECS_PER_MSEC / (float)BUFFER_SEND_INTERVAL_USECS;
+    const float FRAMES_FADE_TO_ZERO = MSECS_FADE_TO_ZERO * (float)USECS_PER_MSEC / (float)BUFFER_SEND_INTERVAL_USECS;
+
+    const float SAMPLE_RANGE = std::numeric_limits<int16_t>::max();
+
+    if (indexOfRepeat <= INITIAL_FRAMES_NO_FADE) {
+        return 1.0f;
+    } else if (indexOfRepeat <= INITIAL_FRAMES_NO_FADE + FRAMES_FADE_TO_ZERO) {
+        return pow(SAMPLE_RANGE, -(indexOfRepeat - INITIAL_FRAMES_NO_FADE) / FRAMES_FADE_TO_ZERO);
+
+        //return 1.0f - ((indexOfRepeat - INITIAL_FRAMES_NO_FADE) / FRAMES_FADE_TO_ZERO);
+    }
+    return 0.0f;
 }
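About the curve in calculateRepeatedFrameFadeFactor() above: pow(SAMPLE_RANGE, -x) is 1.0 at x = 0 and exactly 1/32767 (one 16-bit quantization step) at x = 1, so the repeated frame decays to the noise floor precisely at the end of the fade window. A standalone check of the endpoints:

    #include <cassert>
    #include <cmath>

    int main() {
        const float SAMPLE_RANGE = 32767.0f;
        float start = std::pow(SAMPLE_RANGE, -0.0f); // x = 0: full volume
        float end = std::pow(SAMPLE_RANGE, -1.0f);   // x = 1: one quantization step
        assert(start == 1.0f);
        assert(end > 0.0f && end < 1.5f / SAMPLE_RANGE);
        return 0;
    }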
@ -22,43 +22,84 @@
|
||||||
#include "TimeWeightedAvg.h"
|
#include "TimeWeightedAvg.h"
|
||||||
|
|
||||||
// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames.
|
// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames.
|
||||||
// The larger this value is, the less aggressive we are about reducing the jitter buffer length.
|
// The larger this value is, the less frames we drop when attempting to reduce the jitter buffer length.
|
||||||
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames long when dropping frames,
|
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames when dropping frames,
|
||||||
// which could lead to a starve soon after.
|
// which could lead to a starve soon after.
|
||||||
const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
|
const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
|
||||||
|
|
||||||
// the time gaps stats for _desiredJitterBufferFrames calculation
|
// this controls the length of the window for stats used in the stats packet (not the stats used in
|
||||||
// will recalculate the max for the past 5000 samples every 500 samples
|
// _desiredJitterBufferFrames calculation)
|
||||||
const int TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES = 500;
|
const int STATS_FOR_STATS_PACKET_WINDOW_SECONDS = 30;
|
||||||
const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10;
|
|
||||||
|
|
||||||
// the time gap stats for constructing AudioStreamStats will
|
|
||||||
// recalculate min/max/avg every ~1 second for the past ~30 seconds of time gap data
|
|
||||||
const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
|
|
||||||
const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30;
|
|
||||||
|
|
||||||
// this controls the window size of the time-weighted avg of frames available. Every time the window fills up,
|
// this controls the window size of the time-weighted avg of frames available. Every time the window fills up,
|
||||||
// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
|
// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
|
||||||
const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 2 * USECS_PER_SECOND;
|
const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND;
|
||||||
|
|
||||||
// the internal history buffer of the incoming seq stats will cover 30s to calculate
|
|
||||||
// packet loss % over last 30s
|
|
||||||
const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
|
|
||||||
|
|
||||||
const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
|
|
||||||
|
|
||||||
|
// default values for members of the Settings struct
|
||||||
const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10;
|
const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10;
|
||||||
const int DEFAULT_DESIRED_JITTER_BUFFER_FRAMES = 1;
|
const bool DEFAULT_DYNAMIC_JITTER_BUFFERS = true;
|
||||||
|
const int DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES = 1;
|
||||||
|
const bool DEFAULT_USE_STDEV_FOR_JITTER_CALC = false;
|
||||||
|
const int DEFAULT_WINDOW_STARVE_THRESHOLD = 3;
|
||||||
|
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES = 50;
|
||||||
|
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION = 10;
|
||||||
|
const bool DEFAULT_REPETITION_WITH_FADE = true;
|
||||||
|
|
||||||
class InboundAudioStream : public NodeData {
|
class InboundAudioStream : public NodeData {
|
||||||
Q_OBJECT
|
Q_OBJECT
|
||||||
public:
|
public:
|
||||||
InboundAudioStream(int numFrameSamples, int numFramesCapacity,
|
class Settings {
|
||||||
bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired,
|
public:
|
||||||
bool useStDevForJitterCalc = false);
|
Settings()
|
||||||
|
: _maxFramesOverDesired(DEFAULT_MAX_FRAMES_OVER_DESIRED),
|
||||||
|
_dynamicJitterBuffers(DEFAULT_DYNAMIC_JITTER_BUFFERS),
|
||||||
|
_staticDesiredJitterBufferFrames(DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES),
|
||||||
|
_useStDevForJitterCalc(DEFAULT_USE_STDEV_FOR_JITTER_CALC),
|
||||||
|
_windowStarveThreshold(DEFAULT_WINDOW_STARVE_THRESHOLD),
|
||||||
|
_windowSecondsForDesiredCalcOnTooManyStarves(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES),
|
||||||
|
_windowSecondsForDesiredReduction(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION),
|
||||||
|
_repetitionWithFade(DEFAULT_REPETITION_WITH_FADE)
|
||||||
|
{}
|
||||||
|
|
||||||
|
Settings(int maxFramesOverDesired, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
|
||||||
|
bool useStDevForJitterCalc, int windowStarveThreshold, int windowSecondsForDesiredCalcOnTooManyStarves,
|
||||||
|
int _windowSecondsForDesiredReduction, bool repetitionWithFade)
|
||||||
|
: _maxFramesOverDesired(maxFramesOverDesired),
|
||||||
|
_dynamicJitterBuffers(dynamicJitterBuffers),
|
||||||
|
_staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
|
||||||
|
_useStDevForJitterCalc(useStDevForJitterCalc),
|
||||||
|
_windowStarveThreshold(windowStarveThreshold),
|
||||||
|
_windowSecondsForDesiredCalcOnTooManyStarves(windowSecondsForDesiredCalcOnTooManyStarves),
|
||||||
|
_windowSecondsForDesiredReduction(windowSecondsForDesiredCalcOnTooManyStarves),
|
||||||
|
_repetitionWithFade(repetitionWithFade)
|
||||||
|
{}
|
||||||
|
|
||||||
|
// max number of frames over desired in the ringbuffer.
|
||||||
|
int _maxFramesOverDesired;
|
||||||
|
|
||||||
|
// if false, _desiredJitterBufferFrames will always be _staticDesiredJitterBufferFrames. Otherwise,
|
||||||
|
// either fred or philip's method will be used to calculate _desiredJitterBufferFrames based on packet timegaps.
|
||||||
|
bool _dynamicJitterBuffers;
|
||||||
|
|
||||||
|
// settings for static jitter buffer mode
|
||||||
|
int _staticDesiredJitterBufferFrames;
|
||||||
|
|
||||||
|
// settings for dynamic jitter buffer mode
|
||||||
|
bool _useStDevForJitterCalc; // if true, philip's method is used. otherwise, fred's method is used.
|
||||||
|
int _windowStarveThreshold;
|
||||||
|
int _windowSecondsForDesiredCalcOnTooManyStarves;
|
||||||
|
int _windowSecondsForDesiredReduction;
|
||||||
|
|
||||||
|
// if true, the prev frame will be repeated (fading to silence) for dropped frames.
|
||||||
|
// otherwise, silence will be inserted.
|
||||||
|
bool _repetitionWithFade;
|
||||||
|
};
|
||||||
|
|
||||||
|
public:
|
||||||
|
InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings);
|
||||||
|
|
||||||
void reset();
|
void reset();
|
||||||
void resetStats();
|
virtual void resetStats();
|
||||||
void clearBuffer();
|
void clearBuffer();
|
||||||
|
|
||||||
virtual int parseData(const QByteArray& packet);
|
virtual int parseData(const QByteArray& packet);
|
||||||
|
@ -73,13 +114,17 @@ public:
|
||||||
void setToStarved();
|
void setToStarved();
|
||||||
|
|
||||||
|
|
||||||
void setDynamicJitterBuffers(bool dynamicJitterBuffers);
|
void setSettings(const Settings& settings);
|
||||||
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames);
|
|
||||||
|
|
||||||
/// this function should be called once per second to ensure the seq num stats history spans ~30 seconds
|
|
||||||
AudioStreamStats updateSeqHistoryAndGetAudioStreamStats();
|
|
||||||
|
|
||||||
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
|
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
|
||||||
|
void setDynamicJitterBuffers(bool setDynamicJitterBuffers);
|
||||||
|
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames);
|
||||||
|
void setUseStDevForJitterCalc(bool useStDevForJitterCalc) { _useStDevForJitterCalc = useStDevForJitterCalc; }
|
||||||
|
void setWindowStarveThreshold(int windowStarveThreshold) { _starveThreshold = windowStarveThreshold; }
|
||||||
|
void setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves);
|
||||||
|
void setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction);
|
||||||
|
void setRepetitionWithFade(bool repetitionWithFade) { _repetitionWithFade = repetitionWithFade; }
|
||||||
|
|
||||||
|
|
||||||
virtual AudioStreamStats getAudioStreamStats() const;
|
virtual AudioStreamStats getAudioStreamStats() const;
|
||||||
|
|
||||||
|
@ -110,11 +155,17 @@ public:
|
||||||
|
|
||||||
int getPacketsReceived() const { return _incomingSequenceNumberStats.getReceived(); }
|
int getPacketsReceived() const { return _incomingSequenceNumberStats.getReceived(); }
|
||||||
|
|
||||||
|
public slots:
|
||||||
|
/// This function should be called every second for all the stats to function properly. If dynamic jitter buffers
|
||||||
|
/// is enabled, those stats are used to calculate _desiredJitterBufferFrames.
|
||||||
|
/// If the stats are not used and dynamic jitter buffers is disabled, it's not necessary to call this function.
|
||||||
|
void perSecondCallbackForUpdatingStats();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
void frameReceivedUpdateTimingStats();
|
void packetReceivedUpdateTimingStats();
|
||||||
int clampDesiredJitterBufferFramesValue(int desired) const;
|
int clampDesiredJitterBufferFramesValue(int desired) const;
|
||||||
|
|
||||||
int writeSamplesForDroppedPackets(int numSamples);
|
int writeSamplesForDroppedPackets(int networkSamples);
|
||||||
|
|
||||||
void popSamplesNoCheck(int samples);
|
void popSamplesNoCheck(int samples);
|
||||||
void framesAvailableChanged();
|
void framesAvailableChanged();
|
||||||
|
@ -126,13 +177,19 @@ protected:
 
     /// parses the info between the seq num and the audio data in the network packet and calculates
     /// how many audio samples this packet contains (used when filling in samples for dropped packets).
-    virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
+    /// default implementation assumes no stream properties and raw audio samples after the seq num
+    virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& networkSamples);
 
     /// parses the audio data in the network packet.
     /// default implementation assumes packet contains raw audio samples after stream properties
-    virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
+    virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int networkSamples);
 
-    int writeDroppableSilentSamples(int numSilentSamples);
+    /// writes silent samples to the buffer that may be dropped to reduce latency caused by the buffer
+    virtual int writeDroppableSilentSamples(int silentSamples);
 
+    /// writes the last written frame repeatedly, gradually fading to silence.
+    /// used for writing samples for dropped packets.
+    virtual int writeLastFrameRepeatedWithFade(int samples);
 
 protected:
 
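With parseStreamProperties() no longer pure virtual, streams whose packets carry nothing between the seq num and the audio payload can drop their overrides entirely; the default implementation presumably does what MixedAudioStream's deleted override (further down in this commit) used to do:

    // likely body of the new default implementation (sketch, not the commit's exact code)
    int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& networkSamples) {
        // no stream properties to consume; the rest of the packet is raw int16_t samples
        networkSamples = packetAfterSeqNum.size() / sizeof(int16_t);
        return 0;   // zero bytes of stream properties were read
    }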
@ -147,8 +204,6 @@ protected:
 
     // if jitter buffer is dynamic, this determines which method of calculating _desiredJitterBufferFrames is used:
     // if true, Philip's timegap std dev calculation is used. Otherwise, Freddy's max timegap calculation is used
     bool _useStDevForJitterCalc;
-    int _calculatedJitterBufferFramesUsingMaxGap;
-    int _calculatedJitterBufferFramesUsingStDev;
 
     int _desiredJitterBufferFrames;
 
@ -168,16 +223,28 @@ protected:
 
     SequenceNumberStats _incomingSequenceNumberStats;
 
-    quint64 _lastFrameReceivedTime;
-    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForJitterCalc;
-    StDev _stdev;
-    MovingMinMaxAvg<quint64> _interframeTimeGapStatsForStatsPacket;
+    quint64 _lastPacketReceivedTime;
+    MovingMinMaxAvg<quint64> _timeGapStatsForDesiredCalcOnTooManyStarves; // for Freddy's method
+    int _calculatedJitterBufferFramesUsingMaxGap;
+    StDev _stdevStatsForDesiredCalcOnTooManyStarves; // for Philip's method
+    int _calculatedJitterBufferFramesUsingStDev; // the most recent desired frames calculated by Philip's method
+    MovingMinMaxAvg<quint64> _timeGapStatsForDesiredReduction;
 
+    int _starveHistoryWindowSeconds;
+    RingBufferHistory<quint64> _starveHistory;
+    int _starveThreshold;
 
     TimeWeightedAvg<int> _framesAvailableStat;
 
-    // this value is based on the time-weighted avg from _framesAvailableStat. it is only used for
+    // this value is periodically updated with the time-weighted avg from _framesAvailableStat. it is only used for
     // dropping silent frames right now.
     int _currentJitterBufferFrames;
 
+    MovingMinMaxAvg<quint64> _timeGapStatsForStatsPacket;
 
+    bool _repetitionWithFade;
 };
 
+float calculateRepeatedFrameFadeFactor(int indexOfRepeat);
 
 #endif // hifi_InboundAudioStream_h
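The renamed members separate the two competing ways of deriving a desired jitter-buffer length that the comments above attribute to Freddy and Philip. A hedged sketch of the two calculations (constant names and the exact std-dev multiplier are assumptions, not taken from this commit):

    // Freddy's method: enough frames to cover the worst inter-packet gap seen in the window
    _calculatedJitterBufferFramesUsingMaxGap =
        ceilf((float)_timeGapStatsForDesiredCalcOnTooManyStarves.getWindowMax() / (float)USECS_PER_FRAME);

    // Philip's method: derive the frame count from the std dev of the gaps instead of the single worst one
    _calculatedJitterBufferFramesUsingStDev =
        ceilf((float)(NUM_STDEVS * _stdevStatsForDesiredCalcOnTooManyStarves.getStDev()) / (float)USECS_PER_FRAME);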
@ -19,8 +19,8 @@
 
 #include "InjectedAudioStream.h"
 
-InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
-    PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired),
+InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, const InboundAudioStream::Settings& settings) :
+    PositionalAudioStream(PositionalAudioStream::Injector, false, settings),
     _streamIdentifier(streamIdentifier),
     _radius(0.0f),
     _attenuationRatio(0)
 
@ -18,7 +18,7 @@
 
 class InjectedAudioStream : public PositionalAudioStream {
 public:
-    InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
+    InjectedAudioStream(const QUuid& streamIdentifier, const InboundAudioStream::Settings& settings);
 
     float getRadius() const { return _radius; }
     float getAttenuationRatio() const { return _attenuationRatio; }
 
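A consequence of the new signature is that the mixer can configure every injector stream from a single shared Settings value instead of threading three separate jitter-buffer arguments through; a sketch (variable names assumed):

    InboundAudioStream::Settings streamSettings;   // filled in once from mixer configuration
    InjectedAudioStream* stream = new InjectedAudioStream(streamIdentifier, streamSettings);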
@ -11,13 +11,7 @@
 
 #include "MixedAudioStream.h"
 
-MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
-    : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
+MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
+    : InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
 {
 }
 
-int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
-    // mixed audio packets do not have any info between the seq num and the audio data.
-    numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
-    return 0;
-}
 
@ -17,12 +17,9 @@
 
 class MixedAudioStream : public InboundAudioStream {
 public:
-    MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
+    MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
 
     float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
 
-protected:
-    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
 };
 
 #endif // hifi_MixedAudioStream_h
 
@ -11,30 +11,40 @@
 
 #include "MixedProcessedAudioStream.h"
 
+static const int STEREO_FACTOR = 2;
+
-MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
-    : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
+MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
+    : InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
 {
 }
 
 void MixedProcessedAudioStream::outputFormatChanged(int outputFormatChannelCountTimesSampleRate) {
     _outputFormatChannelsTimesSampleRate = outputFormatChannelCountTimesSampleRate;
-    int deviceOutputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * _outputFormatChannelsTimesSampleRate / SAMPLE_RATE;
+    int deviceOutputFrameSize = networkToDeviceSamples(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);
     _ringBuffer.resizeForFrameSize(deviceOutputFrameSize);
 }
 
-int MixedProcessedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
-    // mixed audio packets do not have any info between the seq num and the audio data.
-    int numNetworkSamples = packetAfterSeqNum.size() / sizeof(int16_t);
-
-    // since numAudioSamples is used to know how many samples to add for each dropped packet before this one,
-    // we want to set it to the number of device audio samples since this stream contains device audio samples, not network samples.
-    const int STEREO_DIVIDER = 2;
-    numAudioSamples = numNetworkSamples * _outputFormatChannelsTimesSampleRate / (STEREO_DIVIDER * SAMPLE_RATE);
-
-    return 0;
-}
+int MixedProcessedAudioStream::writeDroppableSilentSamples(int silentSamples) {
+    int deviceSilentSamplesWritten = InboundAudioStream::writeDroppableSilentSamples(networkToDeviceSamples(silentSamples));
+    emit addedSilence(deviceToNetworkSamples(deviceSilentSamplesWritten) / STEREO_FACTOR);
+    return deviceSilentSamplesWritten;
+}
 
-int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+int MixedProcessedAudioStream::writeLastFrameRepeatedWithFade(int samples) {
+    int deviceSamplesWritten = InboundAudioStream::writeLastFrameRepeatedWithFade(networkToDeviceSamples(samples));
+    emit addedLastFrameRepeatedWithFade(deviceToNetworkSamples(deviceSamplesWritten) / STEREO_FACTOR);
+    return deviceSamplesWritten;
+}
+
+int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int networkSamples) {
+    emit addedStereoSamples(packetAfterStreamProperties);
 
     QByteArray outputBuffer;
     emit processSamples(packetAfterStreamProperties, outputBuffer);
 
@ -43,3 +53,11 @@ int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray&
 
     return packetAfterStreamProperties.size();
 }
+
+int MixedProcessedAudioStream::networkToDeviceSamples(int networkSamples) {
+    return (quint64)networkSamples * (quint64)_outputFormatChannelsTimesSampleRate / (quint64)(STEREO_FACTOR * SAMPLE_RATE);
+}
+
+int MixedProcessedAudioStream::deviceToNetworkSamples(int deviceSamples) {
+    return (quint64)deviceSamples * (quint64)(STEREO_FACTOR * SAMPLE_RATE) / (quint64)_outputFormatChannelsTimesSampleRate;
+}
 
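Both converters are straight ratio rescalings between the network format (stereo at the network sample rate) and the device output format, widened to quint64 so the intermediate product cannot overflow. A worked example under assumed rates (say a 24 kHz network rate, so STEREO_FACTOR * SAMPLE_RATE = 48000, and a stereo 48 kHz output device, so _outputFormatChannelsTimesSampleRate = 96000):

    networkToDeviceSamples(480) == 480 * 96000 / 48000 == 960   // device consumes twice as many samples
    deviceToNetworkSamples(960) == 960 * 48000 / 96000 == 480   // and the round trip is exact here

The 24 kHz figure is an assumption for illustration; the actual SAMPLE_RATE constant is defined elsewhere in the codebase.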
@ -14,21 +14,32 @@
 
 #include "InboundAudioStream.h"
 
+class Audio;
+
 class MixedProcessedAudioStream : public InboundAudioStream {
     Q_OBJECT
 public:
-    MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
+    MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
 
 signals:
+    void addedSilence(int silentSamplesPerChannel);
+    void addedLastFrameRepeatedWithFade(int samplesPerChannel);
+    void addedStereoSamples(const QByteArray& samples);
 
     void processSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
 
 public:
     void outputFormatChanged(int outputFormatChannelCountTimesSampleRate);
 
 protected:
-    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
-    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
+    int writeDroppableSilentSamples(int silentSamples);
+    int writeLastFrameRepeatedWithFade(int samples);
+    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int networkSamples);
+
+private:
+    int networkToDeviceSamples(int networkSamples);
+    int deviceToNetworkSamples(int deviceSamples);
 
 private:
     int _outputFormatChannelsTimesSampleRate;
 
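The new signals let whoever owns the stream observe what the stream synthesized (silence, faded repeats) versus what actually arrived. One wiring detail worth noting: processSamples() hands out an output buffer by non-const reference, so the receiver must run synchronously for the result to be visible to the emitter; a sketch of the connections (receiver slot names are assumptions):

    connect(&receivedAudioStream, SIGNAL(processSamples(const QByteArray&, QByteArray&)),
            this, SLOT(processReceivedSamples(const QByteArray&, QByteArray&)), Qt::DirectConnection);
    connect(&receivedAudioStream, SIGNAL(addedSilence(int)), this, SLOT(recordAddedSilence(int)));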
@ -21,16 +21,16 @@
 #include <PacketHeaders.h>
 #include <UUID.h>
 
-PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers,
-    int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
+PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings) :
     InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
-        AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired),
+        AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, settings),
     _type(type),
     _position(0.0f, 0.0f, 0.0f),
     _orientation(0.0f, 0.0f, 0.0f, 0.0f),
     _shouldLoopbackForNode(false),
     _isStereo(isStereo),
     _lastPopOutputTrailingLoudness(0.0f),
+    _lastPopOutputLoudness(0.0f),
     _listenerUnattenuatedZone(NULL)
 {
     // constant defined in AudioMixer.h. However, we don't want to include this here
 
|
@ -39,18 +39,23 @@ PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, b
|
||||||
_filter.initialize(SAMPLE_RATE, (NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)) / 2);
|
_filter.initialize(SAMPLE_RATE, (NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)) / 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
void PositionalAudioStream::updateLastPopOutputTrailingLoudness() {
|
void PositionalAudioStream::resetStats() {
|
||||||
float lastPopLoudness = _ringBuffer.getFrameLoudness(_lastPopOutput);
|
_lastPopOutputTrailingLoudness = 0.0f;
|
||||||
|
_lastPopOutputLoudness = 0.0f;
|
||||||
|
}
|
||||||
|
|
||||||
|
void PositionalAudioStream::updateLastPopOutputLoudnessAndTrailingLoudness() {
|
||||||
|
_lastPopOutputLoudness = _ringBuffer.getFrameLoudness(_lastPopOutput);
|
||||||
|
|
||||||
const int TRAILING_AVERAGE_FRAMES = 100;
|
const int TRAILING_AVERAGE_FRAMES = 100;
|
||||||
const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
|
const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
|
||||||
const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
|
const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
|
||||||
const float LOUDNESS_EPSILON = 0.000001f;
|
const float LOUDNESS_EPSILON = 0.000001f;
|
||||||
|
|
||||||
if (lastPopLoudness >= _lastPopOutputTrailingLoudness) {
|
if (_lastPopOutputLoudness >= _lastPopOutputTrailingLoudness) {
|
||||||
_lastPopOutputTrailingLoudness = lastPopLoudness;
|
_lastPopOutputTrailingLoudness = _lastPopOutputLoudness;
|
||||||
} else {
|
} else {
|
||||||
_lastPopOutputTrailingLoudness = (_lastPopOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * lastPopLoudness);
|
_lastPopOutputTrailingLoudness = (_lastPopOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * _lastPopOutputLoudness);
|
||||||
|
|
||||||
if (_lastPopOutputTrailingLoudness < LOUDNESS_EPSILON) {
|
if (_lastPopOutputTrailingLoudness < LOUDNESS_EPSILON) {
|
||||||
_lastPopOutputTrailingLoudness = 0;
|
_lastPopOutputTrailingLoudness = 0;
|
||||||
|
|
|
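resetStats() zeroes both loudness values, and the update now caches the popped frame's loudness before folding it into the trailing average. The trailing average itself is asymmetric: it jumps up to a louder frame immediately but decays with a 1/100 weight per frame, so a single loud frame registers at once while silence bleeds off over roughly TRAILING_AVERAGE_FRAMES frames. Equivalently:

    trailing = max(current, 0.99f * trailing + 0.01f * current);   // then snapped to 0 once below LOUDNESS_EPSILON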
@ -29,13 +29,15 @@ public:
         Injector
     };
 
-    PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
-        int maxFramesOverDesired);
+    PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings);
+
+    virtual void resetStats();
 
     virtual AudioStreamStats getAudioStreamStats() const;
 
-    void updateLastPopOutputTrailingLoudness();
+    void updateLastPopOutputLoudnessAndTrailingLoudness();
     float getLastPopOutputTrailingLoudness() const { return _lastPopOutputTrailingLoudness; }
+    float getLastPopOutputLoudness() const { return _lastPopOutputLoudness; }
 
     bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
     bool isStereo() const { return _isStereo; }
 
@ -64,6 +66,7 @@ protected:
     bool _isStereo;
 
     float _lastPopOutputTrailingLoudness;
+    float _lastPopOutputLoudness;
     AABox* _listenerUnattenuatedZone;
 
     AudioFilterHSF1s _filter;
 
@ -49,8 +49,9 @@ PacketVersion versionForPacketType(PacketType type) {
     switch (type) {
         case PacketTypeMicrophoneAudioNoEcho:
         case PacketTypeMicrophoneAudioWithEcho:
-        case PacketTypeSilentAudioFrame:
             return 2;
+        case PacketTypeSilentAudioFrame:
+            return 3;
         case PacketTypeMixedAudio:
             return 1;
         case PacketTypeAvatarData:
 
@ -487,14 +487,6 @@ void ScriptEngine::run() {
     int numPreSequenceNumberBytes = audioPacket.size();
     packetStream << (quint16) 0;
 
-    // assume scripted avatar audio is mono and set channel flag to zero
-    packetStream << (quint8) 0;
-
-    // use the orientation and position of this avatar for the source of this audio
-    packetStream.writeRawData(reinterpret_cast<const char*>(&_avatarData->getPosition()), sizeof(glm::vec3));
-    glm::quat headOrientation = _avatarData->getHeadOrientation();
-    packetStream.writeRawData(reinterpret_cast<const char*>(&headOrientation), sizeof(glm::quat));
-
     if (silentFrame) {
         if (!_isListeningToAudioStream) {
             // if we have a silent frame and we're not listening then just send nothing and break out of here
 
@ -503,10 +495,18 @@ void ScriptEngine::run() {
 
         // write the number of silent samples so the audio-mixer can uphold timing
         packetStream.writeRawData(reinterpret_cast<const char*>(&SCRIPT_AUDIO_BUFFER_SAMPLES), sizeof(int16_t));
 
     } else if (nextSoundOutput) {
+        // assume scripted avatar audio is mono and set channel flag to zero
+        packetStream << (quint8)0;
+
+        // use the orientation and position of this avatar for the source of this audio
+        packetStream.writeRawData(reinterpret_cast<const char*>(&_avatarData->getPosition()), sizeof(glm::vec3));
+        glm::quat headOrientation = _avatarData->getHeadOrientation();
+        packetStream.writeRawData(reinterpret_cast<const char*>(&headOrientation), sizeof(glm::quat));
+
         // write the raw audio data
-        packetStream.writeRawData(reinterpret_cast<const char*>(nextSoundOutput),
-            numAvailableSamples * sizeof(int16_t));
+        packetStream.writeRawData(reinterpret_cast<const char*>(nextSoundOutput), numAvailableSamples * sizeof(int16_t));
     }
 
     // write audio packet to AudioMixer nodes
 
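Moving the channel flag, position, and orientation into the audible branch changes the silent-frame wire format, which is what the PacketTypeSilentAudioFrame version bump to 3 (earlier in this commit) signals. The resulting layouts, read off the code above:

    silent frame:  [packet header][quint16 seq][int16 numSilentSamples]
    audible frame: [packet header][quint16 seq][quint8 channel flag (0 = mono)][glm::vec3 position][glm::quat orientation][int16 samples ...]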
@ -18,45 +18,63 @@
 
 #include "RingBufferHistory.h"
 
 template <typename T>
-class MovingMinMaxAvg {
-private:
-    class Stats {
-    public:
-        Stats()
+class MinMaxAvg {
+public:
+    MinMaxAvg()
         : _min(std::numeric_limits<T>::max()),
         _max(std::numeric_limits<T>::min()),
-        _average(0.0) {}
+        _average(0.0),
+        _samples(0)
+    {}
 
-    void updateWithSample(T sample, int& numSamplesInAverage) {
+    void reset() {
+        _min = std::numeric_limits<T>::max();
+        _max = std::numeric_limits<T>::min();
+        _average = 0.0;
+        _samples = 0;
+    }
+
+    void update(T sample) {
         if (sample < _min) {
             _min = sample;
         }
         if (sample > _max) {
             _max = sample;
         }
-        _average = _average * ((double)numSamplesInAverage / (numSamplesInAverage + 1))
-            + (double)sample / (numSamplesInAverage + 1);
-        numSamplesInAverage++;
+        double totalSamples = _samples + 1;
+        _average = _average * ((double)_samples / totalSamples)
+            + (double)sample / totalSamples;
+        _samples++;
     }
 
-    void updateWithOtherStats(const Stats& other, int& numStatsInAverage) {
+    void update(const MinMaxAvg<T>& other) {
        if (other._min < _min) {
             _min = other._min;
         }
         if (other._max > _max) {
             _max = other._max;
         }
-        _average = _average * ((double)numStatsInAverage / (numStatsInAverage + 1))
-            + other._average / (numStatsInAverage + 1);
-        numStatsInAverage++;
+        double totalSamples = _samples + other._samples;
+        _average = _average * ((double)_samples / totalSamples)
+            + other._average * ((double)other._samples / totalSamples);
+        _samples += other._samples;
     }
 
+    T getMin() const { return _min; }
+    T getMax() const { return _max; }
+    double getAverage() const { return _average; }
+    int getSamples() const { return _samples; }
+    double getSum() const { return _samples * _average; }
+
+private:
     T _min;
     T _max;
     double _average;
+    int _samples;
 };
 
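Tracking _samples inside MinMaxAvg is what lets the two update() overloads keep the running average exact: each side is weighted by its own sample count. A quick worked check using the class as declared above:

    MinMaxAvg<int> a, b;
    a.update(8);  a.update(12);                 // a: min 8, max 12, avg 10.0, 2 samples
    b.update(18); b.update(20); b.update(22);   // b: min 18, max 22, avg 20.0, 3 samples
    a.update(b);                                // avg = 10*(2/5) + 20*(3/5) = 16.0; samples = 5; sum = 80.0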
+template <typename T>
+class MovingMinMaxAvg {
 public:
     // This class collects 3 stats (min, max, avg) over a moving window of samples.
     // The moving window contains _windowIntervals * _intervalLength samples.
 
@ -66,66 +84,98 @@ public:
     // this class with MovingMinMaxAvg(100, 50). If you want a moving min of the past 100 samples updated on every
     // new sample, instantiate this class with MovingMinMaxAvg(1, 100).
 
+    /// use intervalLength = 0 to use in manual mode, where the currentIntervalComplete() function must
+    /// be called to complete an interval
     MovingMinMaxAvg(int intervalLength, int windowIntervals)
         : _intervalLength(intervalLength),
         _windowIntervals(windowIntervals),
         _overallStats(),
-        _samplesCollected(0),
         _windowStats(),
-        _existingSamplesInCurrentInterval(0),
         _currentIntervalStats(),
         _intervalStats(windowIntervals),
         _newStatsAvailable(false)
     {}
 
     void reset() {
-        _overallStats = Stats();
-        _samplesCollected = 0;
-        _windowStats = Stats();
-        _existingSamplesInCurrentInterval = 0;
-        _currentIntervalStats = Stats();
+        _overallStats.reset();
+        _windowStats.reset();
+        _currentIntervalStats.reset();
         _intervalStats.clear();
         _newStatsAvailable = false;
     }
 
+    void setWindowIntervals(int windowIntervals) {
+        _windowIntervals = windowIntervals;
+        _overallStats.reset();
+        _windowStats.reset();
+        _currentIntervalStats.reset();
+        _intervalStats.setCapacity(_windowIntervals);
+        _newStatsAvailable = false;
+    }
 
     void update(T newSample) {
         // update overall stats
-        _overallStats.updateWithSample(newSample, _samplesCollected);
+        _overallStats.update(newSample);
 
         // update the current interval stats
-        _currentIntervalStats.updateWithSample(newSample, _existingSamplesInCurrentInterval);
+        _currentIntervalStats.update(newSample);
 
         // if the current interval of samples is now full, record its stats into our past intervals' stats
-        if (_existingSamplesInCurrentInterval == _intervalLength) {
+        // NOTE: if _intervalLength is 0 (manual mode), currentIntervalComplete() will not be called here.
+        if (_currentIntervalStats.getSamples() == _intervalLength) {
+            currentIntervalComplete();
+        }
+    }
+
+    /// This function can be called to manually control when each interval ends. For example, if each interval
+    /// needs to last T seconds as opposed to N samples, this function should be called every T seconds.
+    void currentIntervalComplete() {
         // record current interval's stats, then reset them
         _intervalStats.insert(_currentIntervalStats);
-        _currentIntervalStats = Stats();
-        _existingSamplesInCurrentInterval = 0;
+        _currentIntervalStats.reset();
 
         // update the window's stats by combining the intervals' stats
-        typename RingBufferHistory<Stats>::Iterator i = _intervalStats.begin();
-        typename RingBufferHistory<Stats>::Iterator end = _intervalStats.end();
-        _windowStats = Stats();
-        int intervalsIncludedInWindowStats = 0;
+        typename RingBufferHistory< MinMaxAvg<T> >::Iterator i = _intervalStats.begin();
+        typename RingBufferHistory< MinMaxAvg<T> >::Iterator end = _intervalStats.end();
+        _windowStats.reset();
         while (i != end) {
-            _windowStats.updateWithOtherStats(*i, intervalsIncludedInWindowStats);
-            i++;
+            _windowStats.update(*i);
+            ++i;
         }
 
         _newStatsAvailable = true;
     }
-    }
 
     bool getNewStatsAvailableFlag() const { return _newStatsAvailable; }
     void clearNewStatsAvailableFlag() { _newStatsAvailable = false; }
 
-    T getMin() const { return _overallStats._min; }
-    T getMax() const { return _overallStats._max; }
-    double getAverage() const { return _overallStats._average; }
-    T getWindowMin() const { return _windowStats._min; }
-    T getWindowMax() const { return _windowStats._max; }
-    double getWindowAverage() const { return _windowStats._average; }
+    T getMin() const { return _overallStats.getMin(); }
+    T getMax() const { return _overallStats.getMax(); }
+    double getAverage() const { return _overallStats.getAverage(); }
+    int getSamples() const { return _overallStats.getSamples(); }
+    double getSum() const { return _overallStats.getSum(); }
+
+    T getWindowMin() const { return _windowStats.getMin(); }
+    T getWindowMax() const { return _windowStats.getMax(); }
+    double getWindowAverage() const { return _windowStats.getAverage(); }
+    int getWindowSamples() const { return _windowStats.getSamples(); }
+    double getWindowSum() const { return _windowStats.getSum(); }
+
+    T getCurrentIntervalMin() const { return _currentIntervalStats.getMin(); }
+    T getCurrentIntervalMax() const { return _currentIntervalStats.getMax(); }
+    double getCurrentIntervalAverage() const { return _currentIntervalStats.getAverage(); }
+    int getCurrentIntervalSamples() const { return _currentIntervalStats.getSamples(); }
+    double getCurrentIntervalSum() const { return _currentIntervalStats.getSum(); }
+
+    const MinMaxAvg<T>& getOverallStats() const { return _overallStats; }
+    const MinMaxAvg<T>& getWindowStats() const { return _windowStats; }
+    const MinMaxAvg<T>& getCurrentIntervalStats() const { return _currentIntervalStats; }
+
+    MinMaxAvg<T> getLastCompleteIntervalStats() const {
+        const MinMaxAvg<T>* stats = _intervalStats.getNewestEntry();
+        return stats == NULL ? MinMaxAvg<T>() : *stats;
+    }
 
     bool isWindowFilled() const { return _intervalStats.isFilled(); }
 
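Manual mode (intervalLength == 0) is the piece that makes wall-clock windows possible: update() never closes an interval on its own, so a once-per-second caller decides when each interval ends. A usage sketch:

    MovingMinMaxAvg<quint64> perCallTimeStats(0, 30);   // manual intervals; ~30 s window when closed once per second

    // wherever the measured event happens, any number of times per second:
    perCallTimeStats.update(elapsedUsecs);

    // from a once-per-second callback:
    perCallTimeStats.currentIntervalComplete();         // closes this second's interval and refreshes window stats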
@ -134,18 +184,16 @@ private:
     int _windowIntervals;
 
     // these are min/max/avg stats for all samples collected.
-    Stats _overallStats;
-    int _samplesCollected;
+    MinMaxAvg<T> _overallStats;
 
     // these are the min/max/avg stats for the samples in the moving window
-    Stats _windowStats;
-    int _existingSamplesInCurrentInterval;
+    MinMaxAvg<T> _windowStats;
 
-    // these are the min/max/avg stats for the current interval
-    Stats _currentIntervalStats;
+    // these are the min/max/avg stats for the samples in the current interval
+    MinMaxAvg<T> _currentIntervalStats;
 
     // these are stored stats for the past intervals in the window
-    RingBufferHistory<Stats> _intervalStats;
+    RingBufferHistory< MinMaxAvg<T> > _intervalStats;
 
     bool _newStatsAvailable;
 };
 
@ -35,6 +35,14 @@ public:
         _numEntries = 0;
     }
 
+    void setCapacity(int capacity) {
+        _size = capacity + 1;
+        _capacity = capacity;
+        _newestEntryAtIndex = 0;
+        _numEntries = 0;
+        _buffer.resize(_size);
+    }
 
     void insert(const T& entry) {
         // increment newest entry index cyclically
         _newestEntryAtIndex = (_newestEntryAtIndex == _size - 1) ? 0 : _newestEntryAtIndex + 1;
 
@ -83,9 +91,14 @@ private:
     QVector<T> _buffer;
 
 public:
-    class Iterator : public std::iterator < std::forward_iterator_tag, T > {
+    class Iterator : public std::iterator < std::random_access_iterator_tag, T > {
     public:
-        Iterator(T* bufferFirst, T* bufferLast, T* at) : _bufferFirst(bufferFirst), _bufferLast(bufferLast), _at(at) {}
+        Iterator(T* bufferFirst, T* bufferLast, T* newestAt, T* at)
+            : _bufferFirst(bufferFirst),
+            _bufferLast(bufferLast),
+            _bufferLength(bufferLast - bufferFirst + 1),
+            _newestAt(newestAt),
+            _at(at) {}
 
         bool operator==(const Iterator& rhs) { return _at == rhs._at; }
        bool operator!=(const Iterator& rhs) { return _at != rhs._at; }
 
@ -103,20 +116,95 @@ public:
             return tmp;
         }
 
+        Iterator& operator--() {
+            _at = (_at == _bufferLast) ? _bufferFirst : _at + 1;
+            return *this;
+        }
+
+        Iterator operator--(int) {
+            Iterator tmp(*this);
+            --(*this);
+            return tmp;
+        }
+
+        Iterator operator+(int add) {
+            Iterator sum(*this);
+            sum._at = atShiftedBy(add);
+            return sum;
+        }
+
+        Iterator operator-(int sub) {
+            Iterator sum(*this);
+            sum._at = atShiftedBy(-sub);
+            return sum;
+        }
+
+        Iterator& operator+=(int add) {
+            _at = atShiftedBy(add);
+            return *this;
+        }
+
+        Iterator& operator-=(int sub) {
+            _at = atShiftedBy(-sub);
+            return *this;
+        }
+
+        T& operator[](int i) {
+            return *(atShiftedBy(i));
+        }
+
+        bool operator<(const Iterator& rhs) {
+            return age() < rhs.age();
+        }
+
+        bool operator>(const Iterator& rhs) {
+            return age() > rhs.age();
+        }
+
+        bool operator<=(const Iterator& rhs) {
+            return age() <= rhs.age();
+        }
+
+        bool operator>=(const Iterator& rhs) {
+            return age() >= rhs.age();
+        }
+
+        int operator-(const Iterator& rhs) {
+            return age() - rhs.age();
+        }
+
     private:
-        T* const _bufferFirst;
-        T* const _bufferLast;
+        T* atShiftedBy(int i) { // shifts i places towards _bufferFirst (towards older entries)
+            i = (_at - _bufferFirst - i) % _bufferLength;
+            if (i < 0) {
+                i += _bufferLength;
+            }
+            return _bufferFirst + i;
+        }
+
+        int age() {
+            int age = _newestAt - _at;
+            if (age < 0) {
+                age += _bufferLength;
+            }
+            return age;
+        }
+
+        T* _bufferFirst;
+        T* _bufferLast;
+        int _bufferLength;
+        T* _newestAt;
         T* _at;
     };
 
-    Iterator begin() { return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[_newestEntryAtIndex]); }
+    Iterator begin() { return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[_newestEntryAtIndex], &_buffer[_newestEntryAtIndex]); }
 
     Iterator end() {
         int endAtIndex = _newestEntryAtIndex - _numEntries;
         if (endAtIndex < 0) {
             endAtIndex += _size;
         }
-        return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[endAtIndex]);
+        return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[_newestEntryAtIndex], &_buffer[endAtIndex]);
     }
 };
 
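With begin() anchored at the newest entry and the random-access operators defined in terms of age() (0 = newest, growing toward older entries), distance arithmetic reads naturally. A traversal sketch, assuming the capacity-taking constructor used elsewhere in this commit:

    RingBufferHistory<int> history(10);
    // ... history.insert(value) as samples arrive ...
    for (RingBufferHistory<int>::Iterator it = history.begin(); it != history.end(); ++it) {
        int entryAge = it - history.begin();   // 0 for the newest entry, 1 for the one before it, ...
        // *it is the entry inserted entryAge insertions ago
    }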
@ -36,19 +36,19 @@ void AudioRingBufferTests::runAllTests() {
     readIndexAt = 0;
 
     // write 73 samples, 73 samples in buffer
-    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 73) / sizeof(int16_t);
+    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 73);
     assertBufferSize(ringBuffer, 73);
 
     // read 43 samples, 30 samples in buffer
-    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 43) / sizeof(int16_t);
+    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 43);
     assertBufferSize(ringBuffer, 30);
 
     // write 70 samples, 100 samples in buffer (full)
-    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 70) / sizeof(int16_t);
+    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 70);
     assertBufferSize(ringBuffer, 100);
 
     // read 100 samples, 0 samples in buffer (empty)
-    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 100) / sizeof(int16_t);
+    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 100);
     assertBufferSize(ringBuffer, 0);
 
@ -65,15 +65,15 @@ void AudioRingBufferTests::runAllTests() {
     readIndexAt = 0;
 
     // write 59 samples, 59 samples in buffer
-    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 59) / sizeof(int16_t);
+    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 59);
     assertBufferSize(ringBuffer, 59);
 
     // write 99 samples, 100 samples in buffer
-    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 99) / sizeof(int16_t);
+    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 99);
     assertBufferSize(ringBuffer, 100);
 
     // read 100 samples, 0 samples in buffer
-    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 100) / sizeof(int16_t);
+    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 100);
     assertBufferSize(ringBuffer, 0);
 
     // verify 100 samples of read data
 
@ -88,23 +88,23 @@ void AudioRingBufferTests::runAllTests() {
     readIndexAt = 0;
 
     // write 77 samples, 77 samples in buffer
-    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 77) / sizeof(int16_t);
+    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 77);
     assertBufferSize(ringBuffer, 77);
 
     // write 24 samples, 100 samples in buffer (overwrote one sample: "0")
-    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 24) / sizeof(int16_t);
+    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 24);
     assertBufferSize(ringBuffer, 100);
 
     // write 29 silent samples, 100 samples in buffer, make sure none were added
     int samplesWritten;
-    if ((samplesWritten = ringBuffer.addSilentFrame(29)) != 0) {
-        qDebug("addSilentFrame(29) incorrect! Expected: 0 Actual: %d", samplesWritten);
+    if ((samplesWritten = ringBuffer.addSilentSamples(29)) != 0) {
+        qDebug("addSilentSamples(29) incorrect! Expected: 0 Actual: %d", samplesWritten);
         return;
     }
     assertBufferSize(ringBuffer, 100);
 
     // read 3 samples, 97 samples in buffer (expect to read "1", "2", "3")
-    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 3) / sizeof(int16_t);
+    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 3);
     for (int i = 0; i < 3; i++) {
         if (readData[i] != i + 1) {
             qDebug("Second readData[%d] incorrect! Expected: %d Actual: %d", i, i + 1, readData[i]);
 
@ -114,14 +114,14 @@ void AudioRingBufferTests::runAllTests() {
     assertBufferSize(ringBuffer, 97);
 
     // write 4 silent samples, 100 samples in buffer
-    if ((samplesWritten = ringBuffer.addSilentFrame(4) / sizeof(int16_t)) != 3) {
-        qDebug("addSilentFrame(4) incorrect! Expected: 3 Actual: %d", samplesWritten);
+    if ((samplesWritten = ringBuffer.addSilentSamples(4)) != 3) {
+        qDebug("addSilentSamples(4) incorrect! Expected: 3 Actual: %d", samplesWritten);
         return;
     }
     assertBufferSize(ringBuffer, 100);
 
     // read back 97 samples (the non-silent samples), 3 samples in buffer (expect to read "4" thru "100")
-    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 97) / sizeof(int16_t);
+    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 97);
     for (int i = 3; i < 100; i++) {
         if (readData[i] != i + 1) {
             qDebug("Third readData[%d] incorrect! Expected: %d Actual: %d", i, i + 1, readData[i]);
 
@ -131,7 +131,7 @@ void AudioRingBufferTests::runAllTests() {
     assertBufferSize(ringBuffer, 3);
 
     // read back 3 silent samples, 0 samples in buffer
-    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 3) / sizeof(int16_t);
+    readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 3);
     for (int i = 100; i < 103; i++) {
         if (readData[i] != 0) {
             qDebug("Fourth readData[%d] incorrect! Expected: %d Actual: %d", i, 0, readData[i]);
 
@ -143,4 +143,3 @@ void AudioRingBufferTests::runAllTests() {
 
     qDebug() << "PASSED";
 }
 
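These test edits track an API cleanup in AudioRingBuffer: writeSamples()/readSamples() now return sample counts instead of byte counts (hence the dropped / sizeof(int16_t)), and addSilentFrame() became addSilentSamples(), which also returns samples. Callers that keep the old division would silently advance their indices at half speed:

    // old accounting: return value was in bytes
    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 73) / sizeof(int16_t);
    // new accounting: return value is already a sample count
    writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 73);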
@ -271,6 +271,7 @@ void runReceive(const char* addressOption, int port, int gap, int size, int repo
 
     quint64 networkStart = usecTimestampNow();
     n = recvfrom(sockfd, inputBuffer, size, 0, NULL, NULL); // we don't care about where it came from
 
     quint64 networkEnd = usecTimestampNow();
     float networkElapsed = (float)(networkEnd - networkStart);
 
@ -16,6 +16,7 @@ int main(int argc, char** argv) {
     MovingMinMaxAvgTests::runAllTests();
     MovingPercentileTests::runAllTests();
     AngularConstraintTests::runAllTests();
+    printf("tests complete, press enter to exit\n");
     getchar();
     return 0;
 }