merge HEAD with PR-3281

Craig Hansen-Sturm 2014-08-26 18:25:40 -07:00
commit e941e58626
34 changed files with 1894 additions and 637 deletions

View file

@@ -33,12 +33,17 @@
#include "Agent.h"
static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 10;
Agent::Agent(const QByteArray& packet) :
ThreadedAssignment(packet),
_voxelEditSender(),
_particleEditSender(),
_modelEditSender(),
_receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false, 1, 0, false),
_receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES,
InboundAudioStream::Settings(0, false, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, false,
DEFAULT_WINDOW_STARVE_THRESHOLD, DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES,
DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION, false)),
_avatarHashMap()
{
// be the parent of the script engine so it gets moved when we do
@@ -148,7 +153,7 @@ void Agent::readPendingDatagrams() {
_voxelViewer.processDatagram(mutablePacket, sourceNode);
}
} else if (datagramPacketType == PacketTypeMixedAudio) {
} else if (datagramPacketType == PacketTypeMixedAudio || datagramPacketType == PacketTypeSilentAudioFrame) {
_receivedAudioStream.parseData(receivedPacket);

View file

@@ -69,9 +69,7 @@ void attachNewNodeDataToNode(Node *newNode) {
}
}
bool AudioMixer::_useDynamicJitterBuffers = false;
int AudioMixer::_staticDesiredJitterBufferFrames = 0;
int AudioMixer::_maxFramesOverDesired = 0;
InboundAudioStream::Settings AudioMixer::_streamSettings;
bool AudioMixer::_printStreamStats = false;
@@ -87,7 +85,12 @@ AudioMixer::AudioMixer(const QByteArray& packet) :
_sumMixes(0),
_sourceUnattenuatedZone(NULL),
_listenerUnattenuatedZone(NULL),
_lastSendAudioStreamStatsTime(usecTimestampNow())
_lastPerSecondCallbackTime(usecTimestampNow()),
_sendAudioStreamStats(false),
_datagramsReadPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_timeSpentPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_timeSpentPerHashMatchCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_readPendingCallsPerSecondStats(1, READ_DATAGRAMS_STATS_WINDOW_SECONDS)
{
}
@@ -101,8 +104,37 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
AvatarAudioStream* listeningNodeStream) {
// If repetition with fade is enabled:
// If streamToAdd could not provide a frame (it was starved), then we'll mix its previously-mixed frame
// This is preferable to not mixing it at all since that's equivalent to inserting silence.
// Basically, we'll repeat that last frame until it has a frame to mix. Depending on how many times
// we've repeated that frame in a row, we'll gradually fade that repeated frame into silence.
// This improves the perceived quality of the audio slightly.
float repeatedFrameFadeFactor = 1.0f;
if (!streamToAdd->lastPopSucceeded()) {
if (_streamSettings._repetitionWithFade && !streamToAdd->getLastPopOutput().isNull()) {
// repetition with fade is enabled, and we do have a valid previous frame to repeat.
// calculate its fade factor, which depends on how many times it's already been repeated.
repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd->getConsecutiveNotMixedCount() - 1);
if (repeatedFrameFadeFactor == 0.0f) {
return 0;
}
} else {
return 0;
}
}
// at this point, we know streamToAdd's last pop output is valid
// if the frame we're about to mix is silent, bail
if (streamToAdd->getLastPopOutputLoudness() == 0.0f) {
return 0;
}
float bearingRelativeAngleToSource = 0.0f;
float attenuationCoefficient = 1.0f;
int numSamplesDelay = 0;
@@ -124,7 +156,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
if (streamToAdd->getLastPopOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
// according to mixer performance we have decided this does not get to be mixed in
// bail out
return;
return 0;
}
++_sumMixes;
@@ -224,12 +256,13 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
int delayedChannelIndex = 0;
const int SINGLE_STEREO_OFFSET = 2;
float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
// setup the int16_t variables for the two sample sets
correctStreamSample[0] = streamPopOutput[s / 2] * attenuationCoefficient;
correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationCoefficient;
correctStreamSample[0] = streamPopOutput[s / 2] * attenuationAndFade;
correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationAndFade;
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
@@ -245,7 +278,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
if (numSamplesDelay > 0) {
// if there was a sample delay for this stream, we need to pull samples prior to the popped output
// to stick at the beginning
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
float attenuationAndWeakChannelRatioAndFade = attenuationCoefficient * weakChannelAmplitudeRatio * repeatedFrameFadeFactor;
AudioRingBuffer::ConstIterator delayStreamPopOutput = streamPopOutput - numSamplesDelay;
// TODO: delayStreamPopOutput may be inside the last frame written if the ringbuffer is completely full
@@ -253,7 +286,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
for (int i = 0; i < numSamplesDelay; i++) {
int parentIndex = i * 2;
_clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatio;
_clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatioAndFade;
++delayStreamPopOutput;
}
}
@@ -264,12 +297,14 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
attenuationCoefficient = 1.0f;
}
float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) {
_clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationCoefficient),
_clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationAndFade),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
}
}
if (_enableFilter && shouldAttenuate) {
glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition();
@@ -309,36 +344,36 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
penumbraFilter.render(_clientSamples, _clientSamples, NETWORK_BUFFER_LENGTH_SAMPLES_STEREO / 2);
}
}
return 1;
}
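calculateRepeatedFrameFadeFactor() is declared in the shared audio library and is not shown in this diff. As a minimal sketch of the behavior the fade logic at the top of addStreamToMixForListeningNodeWithStream() relies on, assuming a simple linear ramp (the constant and the ramp shape are illustrative assumptions, not the library's actual implementation):

// Sketch only: map the number of consecutive repeats of a starved stream's
// last frame to a gain in [0, 1], reaching silence after an assumed maximum.
float calculateRepeatedFrameFadeFactor(int indexOfRepeat) {
    const int ASSUMED_REPEATS_BEFORE_SILENCE = 10; // illustrative tuning constant
    if (indexOfRepeat >= ASSUMED_REPEATS_BEFORE_SILENCE) {
        return 0.0f; // fully faded; the caller above then skips mixing the stream
    }
    return 1.0f - (float)indexOfRepeat / (float)ASSUMED_REPEATS_BEFORE_SILENCE;
}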
void AudioMixer::prepareMixForListeningNode(Node* node) {
int AudioMixer::prepareMixForListeningNode(Node* node) {
AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
// zero out the client mix for this node
memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
// loop through all other nodes that have sufficient audio to mix
int streamsMixed = 0;
foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
if (otherNode->getLinkedData()) {
AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
// enumerate the ARBs attached to the otherNode and add all that should be added to mix
const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
for (i = otherNodeAudioStreams.begin(); i != otherNodeAudioStreams.constEnd(); i++) {
for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
PositionalAudioStream* otherNodeStream = i.value();
if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode())
&& otherNodeStream->lastPopSucceeded()
&& otherNodeStream->getLastPopOutputTrailingLoudness() > 0.0f) {
addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
streamsMixed += addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
}
}
}
}
return streamsMixed;
}
void AudioMixer::readPendingDatagram(const QByteArray& receivedPacket, const HifiSockAddr& senderSockAddr) {
@@ -374,7 +409,7 @@ void AudioMixer::readPendingDatagram(const QByteArray& receivedPacket, const Hif
void AudioMixer::sendStatsPacket() {
static QJsonObject statsObject;
statsObject["useDynamicJitterBuffers"] = _useDynamicJitterBuffers;
statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;
@@ -400,9 +435,42 @@ void AudioMixer::sendStatsPacket() {
int sizeOfStats = 0;
int TOO_BIG_FOR_MTU = 1200; // some extra space for JSONification
QString property = "readPendingDatagram_calls_stats";
QString value = getReadPendingDatagramsCallsPerSecondsStatsString();
statsObject2[qPrintable(property)] = value;
somethingToSend = true;
sizeOfStats += property.size() + value.size();
property = "readPendingDatagram_packets_per_call_stats";
value = getReadPendingDatagramsPacketsPerCallStatsString();
statsObject2[qPrintable(property)] = value;
somethingToSend = true;
sizeOfStats += property.size() + value.size();
property = "readPendingDatagram_packets_time_per_call_stats";
value = getReadPendingDatagramsTimeStatsString();
statsObject2[qPrintable(property)] = value;
somethingToSend = true;
sizeOfStats += property.size() + value.size();
property = "readPendingDatagram_hashmatch_time_per_call_stats";
value = getReadPendingDatagramsHashMatchTimeStatsString();
statsObject2[qPrintable(property)] = value;
somethingToSend = true;
sizeOfStats += property.size() + value.size();
NodeList* nodeList = NodeList::getInstance();
int clientNumber = 0;
foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
// if we're too large, send the packet
if (sizeOfStats > TOO_BIG_FOR_MTU) {
nodeList->sendStatsToDomainServer(statsObject2);
sizeOfStats = 0;
statsObject2 = QJsonObject(); // clear it
somethingToSend = false;
}
clientNumber++;
AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
if (clientData) {
@@ -412,14 +480,6 @@ void AudioMixer::sendStatsPacket() {
somethingToSend = true;
sizeOfStats += property.size() + value.size();
}
// if we're too large, send the packet
if (sizeOfStats > TOO_BIG_FOR_MTU) {
nodeList->sendStatsToDomainServer(statsObject2);
sizeOfStats = 0;
statsObject2 = QJsonObject(); // clear it
somethingToSend = false;
}
}
if (somethingToSend) {
@@ -490,47 +550,81 @@ void AudioMixer::run() {
if (settingsObject.contains(AUDIO_GROUP_KEY)) {
QJsonObject audioGroupObject = settingsObject[AUDIO_GROUP_KEY].toObject();
// check the payload to see if we have asked for dynamicJitterBuffer support
const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "A-dynamic-jitter-buffer";
bool shouldUseDynamicJitterBuffers = audioGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
if (shouldUseDynamicJitterBuffers) {
_streamSettings._dynamicJitterBuffers = audioGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
if (_streamSettings._dynamicJitterBuffers) {
qDebug() << "Enable dynamic jitter buffers.";
_useDynamicJitterBuffers = true;
} else {
qDebug() << "Dynamic jitter buffers disabled.";
_useDynamicJitterBuffers = false;
}
bool ok;
const QString FILTER_KEY = "E-enable-filter";
_enableFilter = audioGroupObject[FILTER_KEY].toBool();
if (_enableFilter) {
qDebug() << "Filter enabled";
}
const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "B-desired-jitter-buffer-frames";
_staticDesiredJitterBufferFrames = audioGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
bool ok;
const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "B-static-desired-jitter-buffer-frames";
_streamSettings._staticDesiredJitterBufferFrames = audioGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
if (!ok) {
_staticDesiredJitterBufferFrames = DEFAULT_DESIRED_JITTER_BUFFER_FRAMES;
_streamSettings._staticDesiredJitterBufferFrames = DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES;
}
qDebug() << "Static desired jitter buffer frames:" << _staticDesiredJitterBufferFrames;
qDebug() << "Static desired jitter buffer frames:" << _streamSettings._staticDesiredJitterBufferFrames;
const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "C-max-frames-over-desired";
_maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
_streamSettings._maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
_streamSettings._maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
}
qDebug() << "Max frames over desired:" << _streamSettings._maxFramesOverDesired;
const QString USE_STDEV_FOR_DESIRED_CALC_JSON_KEY = "D-use-stdev-for-desired-calc";
_streamSettings._useStDevForJitterCalc = audioGroupObject[USE_STDEV_FOR_DESIRED_CALC_JSON_KEY].toBool();
if (_streamSettings._useStDevForJitterCalc) {
qDebug() << "Using Philip's stdev method for jitter calc if dynamic jitter buffers enabled";
} else {
qDebug() << "Using Fred's max-gap method for jitter calc if dynamic jitter buffers enabled";
}
qDebug() << "Max frames over desired:" << _maxFramesOverDesired;
const QString PRINT_STREAM_STATS_JSON_KEY = "H-print-stream-stats";
const QString WINDOW_STARVE_THRESHOLD_JSON_KEY = "E-window-starve-threshold";
_streamSettings._windowStarveThreshold = audioGroupObject[WINDOW_STARVE_THRESHOLD_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowStarveThreshold = DEFAULT_WINDOW_STARVE_THRESHOLD;
}
qDebug() << "Window A starve threshold:" << _streamSettings._windowStarveThreshold;
const QString WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY = "F-window-seconds-for-desired-calc-on-too-many-starves";
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = audioGroupObject[WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES;
}
qDebug() << "Window A length:" << _streamSettings._windowSecondsForDesiredCalcOnTooManyStarves << "seconds";
const QString WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY = "G-window-seconds-for-desired-reduction";
_streamSettings._windowSecondsForDesiredReduction = audioGroupObject[WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredReduction = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION;
}
qDebug() << "Window B length:" << _streamSettings._windowSecondsForDesiredReduction << "seconds";
const QString REPETITION_WITH_FADE_JSON_KEY = "H-repetition-with-fade";
_streamSettings._repetitionWithFade = audioGroupObject[REPETITION_WITH_FADE_JSON_KEY].toBool();
if (_streamSettings._repetitionWithFade) {
qDebug() << "Repetition with fade enabled";
} else {
qDebug() << "Repetition with fade disabled";
}
const QString PRINT_STREAM_STATS_JSON_KEY = "I-print-stream-stats";
_printStreamStats = audioGroupObject[PRINT_STREAM_STATS_JSON_KEY].toBool();
if (_printStreamStats) {
qDebug() << "Stream stats will be printed to stdout";
}
const QString UNATTENUATED_ZONE_KEY = "D-unattenuated-zone";
const QString FILTER_KEY = "J-enable-filter";
_enableFilter = audioGroupObject[FILTER_KEY].toBool();
if (_enableFilter) {
qDebug() << "Filter enabled";
}
const QString UNATTENUATED_ZONE_KEY = "Z-unattenuated-zone";
QString unattenuatedZoneString = audioGroupObject[UNATTENUATED_ZONE_KEY].toString();
if (!unattenuatedZoneString.isEmpty()) {
@@ -558,9 +652,8 @@ void AudioMixer::run() {
int nextFrame = 0;
QElapsedTimer timer;
timer.start();
char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO + sizeof(quint16)
+ numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];
char clientMixBuffer[MAX_PACKET_SIZE];
int usecToSleep = BUFFER_SEND_INTERVAL_USECS;
@@ -619,15 +712,13 @@
if (!hasRatioChanged) {
++framesSinceCutoffEvent;
}
bool sendAudioStreamStats = false;
quint64 now = usecTimestampNow();
if (now - _lastSendAudioStreamStatsTime > TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS) {
_lastSendAudioStreamStatsTime = now;
sendAudioStreamStats = true;
}
bool streamStatsPrinted = false;
quint64 now = usecTimestampNow();
if (now - _lastPerSecondCallbackTime > USECS_PER_SECOND) {
perSecondActions();
_lastPerSecondCallbackTime = now;
}
foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
if (node->getLinkedData()) {
AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
@@ -640,43 +731,52 @@
if (node->getType() == NodeType::Agent && node->getActiveSocket()
&& nodeData->getAvatarAudioStream()) {
prepareMixForListeningNode(node.data());
int streamsMixed = prepareMixForListeningNode(node.data());
// pack header
int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
char* dataAt = clientMixBuffer + numBytesPacketHeader;
char* dataAt;
if (streamsMixed > 0) {
// pack header
int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
dataAt = clientMixBuffer + numBytesPacketHeader;
// pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber();
memcpy(dataAt, &sequence, sizeof(quint16));
dataAt += sizeof(quint16);
// pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber();
memcpy(dataAt, &sequence, sizeof(quint16));
dataAt += sizeof(quint16);
// pack mixed audio samples
memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
// pack mixed audio samples
memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
} else {
// pack header
int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeSilentAudioFrame);
dataAt = clientMixBuffer + numBytesPacketHeader;
// pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber();
memcpy(dataAt, &sequence, sizeof(quint16));
dataAt += sizeof(quint16);
// pack number of silent audio samples
quint16 numSilentSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
memcpy(dataAt, &numSilentSamples, sizeof(quint16));
dataAt += sizeof(quint16);
}
// send mixed audio packet
nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node);
nodeData->incrementOutgoingMixedAudioSequenceNumber();
// send an audio stream stats packet if it's time
if (sendAudioStreamStats) {
if (_sendAudioStreamStats) {
nodeData->sendAudioStreamStatsPackets(node);
if (_printStreamStats) {
printf("\nStats for agent %s:\n", node->getUUID().toString().toLatin1().data());
nodeData->printUpstreamDownstreamStats();
streamStatsPrinted = true;
}
_sendAudioStreamStats = false;
}
++_sumListeners;
}
}
}
if (streamStatsPrinted) {
printf("\n----------------------------------------------------------------\n");
}
++_numStatFrames;
@@ -692,6 +792,90 @@
usleep(usecToSleep);
}
}
delete[] clientMixBuffer;
}
void AudioMixer::perSecondActions() {
_sendAudioStreamStats = true;
int callsLastSecond = _datagramsReadPerCallStats.getCurrentIntervalSamples();
_readPendingCallsPerSecondStats.update(callsLastSecond);
if (_printStreamStats) {
printf("\n================================================================================\n\n");
printf(" readPendingDatagram() calls per second | avg: %.2f, avg_30s: %.2f, last_second: %d\n",
_readPendingCallsPerSecondStats.getAverage(),
_readPendingCallsPerSecondStats.getWindowAverage(),
callsLastSecond);
printf(" Datagrams read per call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
_datagramsReadPerCallStats.getAverage(),
_datagramsReadPerCallStats.getWindowAverage(),
_datagramsReadPerCallStats.getCurrentIntervalAverage());
printf(" Usecs spent per readPendingDatagram() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
_timeSpentPerCallStats.getAverage(),
_timeSpentPerCallStats.getWindowAverage(),
_timeSpentPerCallStats.getCurrentIntervalAverage());
printf(" Usecs spent per packetVersionAndHashMatch() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
_timeSpentPerHashMatchCallStats.getAverage(),
_timeSpentPerHashMatchCallStats.getWindowAverage(),
_timeSpentPerHashMatchCallStats.getCurrentIntervalAverage());
double WINDOW_LENGTH_USECS = READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND;
printf(" %% time spent in readPendingDatagram() calls | avg_30s: %.6f%%, last_second: %.6f%%\n",
_timeSpentPerCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
_timeSpentPerCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);
printf("%% time spent in packetVersionAndHashMatch() calls: | avg_30s: %.6f%%, last_second: %.6f%%\n",
_timeSpentPerHashMatchCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
_timeSpentPerHashMatchCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);
foreach(const SharedNodePointer& node, NodeList::getInstance()->getNodeHash()) {
if (node->getLinkedData()) {
AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
printf("\nStats for agent %s --------------------------------\n",
node->getUUID().toString().toLatin1().data());
nodeData->printUpstreamDownstreamStats();
}
}
}
}
_datagramsReadPerCallStats.currentIntervalComplete();
_timeSpentPerCallStats.currentIntervalComplete();
_timeSpentPerHashMatchCallStats.currentIntervalComplete();
}
QString AudioMixer::getReadPendingDatagramsCallsPerSecondsStatsString() const {
QString result = "calls_per_sec_avg_30s: " + QString::number(_readPendingCallsPerSecondStats.getWindowAverage(), 'f', 2)
+ " calls_last_sec: " + QString::number(_readPendingCallsPerSecondStats.getLastCompleteIntervalStats().getSum() + 0.5, 'f', 0);
return result;
}
QString AudioMixer::getReadPendingDatagramsPacketsPerCallStatsString() const {
QString result = "pkts_per_call_avg_30s: " + QString::number(_datagramsReadPerCallStats.getWindowAverage(), 'f', 2)
+ " pkts_per_call_avg_1s: " + QString::number(_datagramsReadPerCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2);
return result;
}
QString AudioMixer::getReadPendingDatagramsTimeStatsString() const {
QString result = "usecs_per_call_avg_30s: " + QString::number(_timeSpentPerCallStats.getWindowAverage(), 'f', 2)
+ " usecs_per_call_avg_1s: " + QString::number(_timeSpentPerCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2)
+ " prct_time_in_call_30s: " + QString::number(_timeSpentPerCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0, 'f', 6) + "%"
+ " prct_time_in_call_1s: " + QString::number(_timeSpentPerCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0, 'f', 6) + "%";
return result;
}
QString AudioMixer::getReadPendingDatagramsHashMatchTimeStatsString() const {
QString result = "usecs_per_hashmatch_avg_30s: " + QString::number(_timeSpentPerHashMatchCallStats.getWindowAverage(), 'f', 2)
+ " usecs_per_hashmatch_avg_1s: " + QString::number(_timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2)
+ " prct_time_in_hashmatch_30s: " + QString::number(_timeSpentPerHashMatchCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0, 'f', 6) + "%"
+ " prct_time_in_hashmatch_1s: " + QString::number(_timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0, 'f', 6) + "%";
return result;
}
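The four stats-string getters above and the per-second printout read from the same MovingMinMaxAvg lifecycle: update() folds samples into the current one-second interval, the window getters aggregate the last READ_DATAGRAMS_STATS_WINDOW_SECONDS intervals, and currentIntervalComplete() rolls the interval over. A condensed sketch of that lifecycle, restricted to calls that appear in this commit (the two driver functions are illustrative):

// Hypothetical driver showing the stats lifecycle used by perSecondActions().
MovingMinMaxAvg<quint64> timeSpentPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS);

void onReadPendingDatagrams(quint64 usecsSpentThisCall) {
    timeSpentPerCallStats.update(usecsSpentThisCall); // accumulate into the current 1s interval
}

void everySecond() {
    printf("avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
           timeSpentPerCallStats.getAverage(),                 // lifetime average
           timeSpentPerCallStats.getWindowAverage(),           // 30-second window
           timeSpentPerCallStats.getCurrentIntervalAverage()); // current second
    timeSpentPerCallStats.currentIntervalComplete();           // close the 1s interval
}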

View file

@@ -21,7 +21,8 @@ class AvatarAudioStream;
const int SAMPLE_PHASE_DELAY_AT_90 = 20;
const quint64 TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;
const int READ_DATAGRAMS_STATS_WINDOW_SECONDS = 30;
/// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
class AudioMixer : public ThreadedAssignment {
@@ -38,21 +39,37 @@ public slots:
void sendStatsPacket();
static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; }
static int getStaticDesiredJitterBufferFrames() { return _staticDesiredJitterBufferFrames; }
static int getMaxFramesOverDesired() { return _maxFramesOverDesired; }
static const InboundAudioStream::Settings& getStreamSettings() { return _streamSettings; }
private:
/// adds one stream to the mix for a listening node
void addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
int addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
AvatarAudioStream* listeningNodeStream);
/// prepares and sends a mix to one Node
void prepareMixForListeningNode(Node* node);
int prepareMixForListeningNode(Node* node);
// client samples capacity is larger than what will be sent to optimize mixing
// we are MMX adding 4 samples at a time so we need client samples to have an extra 4
int16_t _clientSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)];
void perSecondActions();
QString getReadPendingDatagramsCallsPerSecondsStatsString() const;
QString getReadPendingDatagramsPacketsPerCallStatsString() const;
QString getReadPendingDatagramsTimeStatsString() const;
QString getReadPendingDatagramsHashMatchTimeStatsString() const;
float _trailingSleepRatio;
float _minAudibilityThreshold;
@@ -63,14 +80,23 @@ private:
AABox* _sourceUnattenuatedZone;
AABox* _listenerUnattenuatedZone;
static bool _useDynamicJitterBuffers;
static int _staticDesiredJitterBufferFrames;
static int _maxFramesOverDesired;
static InboundAudioStream::Settings _streamSettings;
static bool _printStreamStats;
static bool _enableFilter;
quint64 _lastSendAudioStreamStatsTime;
quint64 _lastPerSecondCallbackTime;
bool _sendAudioStreamStats;
// stats
MovingMinMaxAvg<int> _datagramsReadPerCallStats; // update with # of datagrams read for each readPendingDatagrams call
MovingMinMaxAvg<quint64> _timeSpentPerCallStats; // update with usecs spent inside each readPendingDatagrams call
MovingMinMaxAvg<quint64> _timeSpentPerHashMatchCallStats; // update with usecs spent inside each packetVersionAndHashMatch call
MovingMinMaxAvg<int> _readPendingCallsPerSecondStats; // update with # of readPendingDatagrams calls in the last second
};
#endif // hifi_AudioMixer_h
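For orientation, the fields this commit reads and writes on _streamSettings imply that InboundAudioStream::Settings carries roughly the following (names and types inferred from usage in this diff; field order, defaults, and constructors are not shown here, so treat this as a sketch rather than the header's actual declaration):

// Inferred from usage in this commit; nested inside InboundAudioStream.
class Settings {
public:
    bool _dynamicJitterBuffers;
    int _staticDesiredJitterBufferFrames;
    int _maxFramesOverDesired;
    bool _useStDevForJitterCalc;
    int _windowStarveThreshold;
    int _windowSecondsForDesiredCalcOnTooManyStarves;
    int _windowSecondsForDesiredReduction;
    bool _repetitionWithFade;
};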

View file

@@ -74,9 +74,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
quint8 channelFlag = *(reinterpret_cast<const quint8*>(channelFlagAt));
bool isStereo = channelFlag == 1;
_audioStreams.insert(nullUUID,
matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers(),
AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
_audioStreams.insert(nullUUID, matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getStreamSettings()));
} else {
matchingStream = _audioStreams.value(nullUUID);
}
@@ -88,9 +86,8 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(bytesBeforeStreamIdentifier, NUM_BYTES_RFC4122_UUID));
if (!_audioStreams.contains(streamIdentifier)) {
_audioStreams.insert(streamIdentifier,
matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(),
AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
// we don't have this injected stream yet, so add it
_audioStreams.insert(streamIdentifier, matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getStreamSettings()));
} else {
matchingStream = _audioStreams.value(streamIdentifier);
}
@@ -105,18 +102,15 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, A
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
PositionalAudioStream* stream = i.value();
if (stream->popFrames(1, true) > 0) {
// this is a ring buffer that is ready to go
// calculate the trailing avg loudness for the next frame
// that would be mixed in
stream->updateLastPopOutputTrailingLoudness();
if (checkSourceZone && checkSourceZone->contains(stream->getPosition())) {
stream->setListenerUnattenuatedZone(listenerZone);
} else {
stream->setListenerUnattenuatedZone(NULL);
}
stream->updateLastPopOutputLoudnessAndTrailingLoudness();
}
if (checkSourceZone && checkSourceZone->contains(stream->getPosition())) {
stream->setListenerUnattenuatedZone(listenerZone);
} else {
stream->setListenerUnattenuatedZone(NULL);
}
}
}
@@ -185,7 +179,9 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
// pack the calculated number of stream stats
for (int i = 0; i < numStreamStatsToPack; i++) {
AudioStreamStats streamStats = audioStreamsIterator.value()->updateSeqHistoryAndGetAudioStreamStats();
PositionalAudioStream* stream = audioStreamsIterator.value();
stream->perSecondCallbackForUpdatingStats();
AudioStreamStats streamStats = stream->getAudioStreamStats();
memcpy(dataAt, &streamStats, sizeof(AudioStreamStats));
dataAt += sizeof(AudioStreamStats);

View file

@@ -13,8 +13,8 @@
#include "AvatarAudioStream.h"
AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired)
AvatarAudioStream::AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings) :
PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, settings)
{
}
@@ -38,26 +38,9 @@ int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray&
// read the positional data
readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));
if (type == PacketTypeSilentAudioFrame) {
int16_t numSilentSamples;
memcpy(&numSilentSamples, packetAfterSeqNum.data() + readBytes, sizeof(int16_t));
readBytes += sizeof(int16_t);
numAudioSamples = numSilentSamples;
} else {
int numAudioBytes = packetAfterSeqNum.size() - readBytes;
numAudioSamples = numAudioBytes / sizeof(int16_t);
}
return readBytes;
}
int AvatarAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
int readBytes = 0;
if (type == PacketTypeSilentAudioFrame) {
writeDroppableSilentSamples(numAudioSamples);
} else {
// there is audio data to read
readBytes += _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}
// calculate how many samples are in this packet
int numAudioBytes = packetAfterSeqNum.size() - readBytes;
numAudioSamples = numAudioBytes / sizeof(int16_t);
return readBytes;
}
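The silent-frame parsing removed above moves down into the shared stream code. Based solely on the packing code elsewhere in this commit, the two PacketTypeSilentAudioFrame layouts now on the wire are:

// Upstream (Interface -> mixer), packed in Audio::handleAudioInput():
//   [packet header] [quint16 sequence] [quint16 numSilentSamples]
// Downstream (mixer -> Interface), packed in AudioMixer::run():
//   [packet header] [quint16 sequence] [quint16 numSilentSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO]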

View file

@@ -18,7 +18,7 @@
class AvatarAudioStream : public PositionalAudioStream {
public:
AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings);
private:
// disallow copying of AvatarAudioStream objects
@@ -26,7 +26,6 @@ private:
AvatarAudioStream& operator= (const AvatarAudioStream&);
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
};
#endif // hifi_AvatarAudioStream_h

View file

@@ -9,8 +9,8 @@
"help": "Dynamically buffer client audio based on perceived jitter in packet receipt timing",
"default": false
},
"B-desired-jitter-buffer-frames": {
"label": "Desired Jitter Buffer Frames",
"B-static-desired-jitter-buffer-frames": {
"label": "Static Desired Jitter Buffer Frames",
"help": "If dynamic jitter buffers is disabled, this determines the target number of frames maintained by the AudioMixer's jitter buffers",
"placeholder": "1",
"default": "1"
@@ -21,19 +21,49 @@
"placeholder": "10",
"default": "10"
},
"H-print-stream-stats": {
"D-use-stdev-for-desired-calc": {
"type": "checkbox",
"label": "Use Stdev for Desired Jitter Frames Calc:",
"help": "If checked, Philip's method (stdev of timegaps) is used to calculate desired jitter frames. Otherwise, Fred's method (max timegap) is used",
"default": false
},
"E-window-starve-threshold": {
"label": "Window Starve Threshold",
"help": "If this many starves occur in an N-second window (N is the number in the next field), then the desired jitter frames will be re-evaluated using Window A.",
"placeholder": "3",
"default": "3"
},
"F-window-seconds-for-desired-calc-on-too-many-starves": {
"label": "Timegaps Window (A) Seconds:",
"help": "Window A contains a history of timegaps. Its max timegap is used to re-evaluate the desired jitter frames when too many starves occur within it.",
"placeholder": "50",
"default": "50"
},
"G-window-seconds-for-desired-reduction": {
"label": "Timegaps Window (B) Seconds:",
"help": "Window B contains a history of timegaps. Its max timegap is used as a ceiling for the desired jitter frames value.",
"placeholder": "10",
"default": "10"
},
"H-repetition-with-fade": {
"type": "checkbox",
"label": "Repetition with Fade:",
"help": "If enabled, dropped frames and mixing during starves will repeat the last frame, eventually fading to silence",
"default": false
},
"I-print-stream-stats": {
"type": "checkbox",
"label": "Print Stream Stats:",
"help": "If enabled, audio upstream and downstream stats of each agent will be printed each second to stdout",
"default": false
},
"D-unattenuated-zone": {
"Z-unattenuated-zone": {
"label": "Unattenuated Zone",
"help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)",
"placeholder": "no zone",
"default": ""
},
"E-enable-filter": {
"J-enable-filter": {
"type": "checkbox",
"label": "Enable Positional Filter",
"help": "If enabled, positional audio stream uses lowpass filter",

View file

@@ -1772,14 +1772,7 @@ void Application::init() {
_lastTimeUpdated.start();
Menu::getInstance()->loadSettings();
if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
_audio.setDynamicJitterBuffers(false);
_audio.setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
} else {
_audio.setDynamicJitterBuffers(true);
}
_audio.setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired());
_audio.setReceivedAudioStreamSettings(Menu::getInstance()->getReceivedAudioStreamSettings());
qDebug("Loaded settings");

View file

@@ -72,7 +72,7 @@ Audio::Audio(QObject* parent) :
_proceduralAudioOutput(NULL),
_proceduralOutputDevice(NULL),
_inputRingBuffer(0),
_receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, true, 0, 0, true),
_receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, InboundAudioStream::Settings()),
_isStereoInput(false),
_averagedLatency(0.0),
_lastInputLoudness(0),
@@ -105,6 +105,7 @@ Audio::Audio(QObject* parent) :
_scopeInput(0),
_scopeOutputLeft(0),
_scopeOutputRight(0),
_scopeLastFrame(),
_statsEnabled(false),
_statsShowInjectedStreams(false),
_outgoingAvatarAudioSequenceNumber(0),
@@ -113,14 +114,17 @@ Audio::Audio(QObject* parent) :
_audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_lastSentAudioPacket(0),
_packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS),
_audioOutputIODevice(*this)
_audioOutputIODevice(_receivedAudioStream)
{
// clear the array of locally injected samples
memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
// Create the noise sample array
_noiseSampleFrames = new float[NUMBER_OF_NOISE_SAMPLE_FRAMES];
connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedAudioStreamSamples, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedSilence, this, &Audio::addStereoSilenceToScope, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedLastFrameRepeatedWithFade, this, &Audio::addLastFrameRepeatedWithFadeToScope, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedStereoSamples, this, &Audio::addStereoSamplesToScope, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedSamples, Qt::DirectConnection);
}
void Audio::init(QGLWidget *parent) {
@@ -460,9 +464,12 @@ void Audio::handleAudioInput() {
static char audioDataPacket[MAX_PACKET_SIZE];
static int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMicrophoneAudioNoEcho);
static int leadingBytes = numBytesPacketHeader + sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
static int16_t* networkAudioSamples = (int16_t*) (audioDataPacket + leadingBytes);
// NOTE: we assume PacketTypeMicrophoneAudioWithEcho has same size headers as
// PacketTypeMicrophoneAudioNoEcho. If not, then networkAudioSamples will be pointing to the wrong place for writing
// audio samples with echo.
static int leadingBytes = numBytesPacketHeader + sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
static int16_t* networkAudioSamples = (int16_t*)(audioDataPacket + leadingBytes);
float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
@@ -668,9 +675,7 @@ void Audio::handleAudioInput() {
if (!_isStereoInput && _scopeEnabled && !_scopeEnabledPause) {
unsigned int numMonoAudioChannels = 1;
unsigned int monoAudioChannel = 0;
addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, monoAudioChannel, numMonoAudioChannels);
_scopeInputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeInputOffset %= _samplesPerScope;
_scopeInputOffset = addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, NETWORK_SAMPLES_PER_FRAME, monoAudioChannel, numMonoAudioChannels);
}
NodeList* nodeList = NodeList::getInstance();
@@ -686,19 +691,11 @@
glm::vec3 headPosition = interfaceAvatar->getHead()->getPosition();
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientationInWorldFrame();
quint8 isStereo = _isStereoInput ? 1 : 0;
int numAudioBytes = 0;
PacketType packetType;
if (_lastInputLoudness == 0) {
packetType = PacketTypeSilentAudioFrame;
// we need to indicate how many silent samples this is to the audio mixer
networkAudioSamples[0] = numNetworkSamples;
numAudioBytes = sizeof(int16_t);
} else {
numAudioBytes = numNetworkBytes;
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
packetType = PacketTypeMicrophoneAudioWithEcho;
} else {
@@ -707,21 +704,31 @@
}
char* currentPacketPtr = audioDataPacket + populatePacketHeader(audioDataPacket, packetType);
// pack sequence number
memcpy(currentPacketPtr, &_outgoingAvatarAudioSequenceNumber, sizeof(quint16));
currentPacketPtr += sizeof(quint16);
// set the mono/stereo byte
*currentPacketPtr++ = isStereo;
if (packetType == PacketTypeSilentAudioFrame) {
// pack num silent samples
quint16 numSilentSamples = numNetworkSamples;
memcpy(currentPacketPtr, &numSilentSamples, sizeof(quint16));
currentPacketPtr += sizeof(quint16);
} else {
// set the mono/stereo byte
*currentPacketPtr++ = isStereo;
// memcpy the three float positions
memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
currentPacketPtr += (sizeof(headPosition));
// memcpy the three float positions
memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
currentPacketPtr += (sizeof(headPosition));
// memcpy our orientation
memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
currentPacketPtr += sizeof(headOrientation);
// memcpy our orientation
memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
currentPacketPtr += sizeof(headOrientation);
// audio samples have already been packed (written to networkAudioSamples)
currentPacketPtr += numNetworkBytes;
}
// first time this is 0
if (_lastSentAudioPacket == 0) {
@@ -733,18 +740,58 @@
_lastSentAudioPacket = now;
}
nodeList->writeDatagram(audioDataPacket, numAudioBytes + leadingBytes, audioMixer);
int packetBytes = currentPacketPtr - audioDataPacket;
nodeList->writeDatagram(audioDataPacket, packetBytes, audioMixer);
_outgoingAvatarAudioSequenceNumber++;
Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
.updateValue(numAudioBytes + leadingBytes);
.updateValue(packetBytes);
}
delete[] inputAudioSamples;
}
}
void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
const int STEREO_FACTOR = 2;
void Audio::addStereoSilenceToScope(int silentSamplesPerChannel) {
if (!_scopeEnabled || _scopeEnabledPause) {
return;
}
addSilenceToScope(_scopeOutputLeft, _scopeOutputOffset, silentSamplesPerChannel);
_scopeOutputOffset = addSilenceToScope(_scopeOutputRight, _scopeOutputOffset, silentSamplesPerChannel);
}
void Audio::addStereoSamplesToScope(const QByteArray& samples) {
if (!_scopeEnabled || _scopeEnabledPause) {
return;
}
const int16_t* samplesData = reinterpret_cast<const int16_t*>(samples.data());
int samplesPerChannel = samples.size() / sizeof(int16_t) / STEREO_FACTOR;
addBufferToScope(_scopeOutputLeft, _scopeOutputOffset, samplesData, samplesPerChannel, 0, STEREO_FACTOR);
_scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset, samplesData, samplesPerChannel, 1, STEREO_FACTOR);
_scopeLastFrame = samples.right(NETWORK_BUFFER_LENGTH_BYTES_STEREO);
}
void Audio::addLastFrameRepeatedWithFadeToScope(int samplesPerChannel) {
const int16_t* lastFrameData = reinterpret_cast<const int16_t*>(_scopeLastFrame.data());
int samplesRemaining = samplesPerChannel;
int indexOfRepeat = 0;
do {
int samplesToWriteThisIteration = std::min(samplesRemaining, (int)NETWORK_SAMPLES_PER_FRAME);
float fade = calculateRepeatedFrameFadeFactor(indexOfRepeat);
addBufferToScope(_scopeOutputLeft, _scopeOutputOffset, lastFrameData, samplesToWriteThisIteration, 0, STEREO_FACTOR, fade);
_scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset, lastFrameData, samplesToWriteThisIteration, 1, STEREO_FACTOR, fade);
samplesRemaining -= samplesToWriteThisIteration;
indexOfRepeat++;
} while (samplesRemaining > 0);
}
void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t);
const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
@@ -789,30 +836,6 @@ void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QBy
numNetworkOutputSamples,
numDeviceOutputSamples,
_desiredOutputFormat, _outputFormat);
if (_scopeEnabled && !_scopeEnabledPause) {
unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
const int16_t* samples = receivedSamples;
for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
unsigned int audioChannel = 0;
addBufferToScope(
_scopeOutputLeft,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
audioChannel = 1;
addBufferToScope(
_scopeOutputRight,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
_scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeOutputOffset %= _samplesPerScope;
samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
}
}
}
void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
@@ -825,9 +848,6 @@ void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
}
void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
int numBytesPacketHeader = numBytesForPacketHeader(packet);
@@ -860,12 +880,13 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
void Audio::sendDownstreamAudioStatsPacket() {
// since this function is called every second, we'll sample some of our stats here
// since this function is called every second, we'll sample for some of our stats here
_inputRingBufferMsecsAvailableStats.update(getInputRingBufferMsecsAvailable());
_audioOutputMsecsUnplayedStats.update(getAudioOutputMsecsUnplayed());
// also, call _receivedAudioStream's per-second callback
_receivedAudioStream.perSecondCallbackForUpdatingStats();
char packet[MAX_PACKET_SIZE];
// pack header
@@ -883,7 +904,7 @@ void Audio::sendDownstreamAudioStatsPacket() {
dataAt += sizeof(quint16);
// pack downstream audio stream stats
AudioStreamStats stats = _receivedAudioStream.updateSeqHistoryAndGetAudioStreamStats();
AudioStreamStats stats = _receivedAudioStream.getAudioStreamStats();
memcpy(dataAt, &stats, sizeof(AudioStreamStats));
dataAt += sizeof(AudioStreamStats);
@@ -916,7 +937,7 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& s
unsigned int delayCount = delay * _desiredOutputFormat.channelCount();
unsigned int silentCount = (remaining < delayCount) ? remaining : delayCount;
if (silentCount) {
_spatialAudioRingBuffer.addSilentFrame(silentCount);
_spatialAudioRingBuffer.addSilentSamples(silentCount);
}
// Recalculate the number of remaining samples
@@ -1220,8 +1241,6 @@ void Audio::selectAudioFilterSmiley() {
void Audio::toggleScope() {
_scopeEnabled = !_scopeEnabled;
if (_scopeEnabled) {
_scopeInputOffset = 0;
_scopeOutputOffset = 0;
allocateScope();
} else {
freeScope();
@@ -1259,6 +1278,8 @@ void Audio::selectAudioScopeFiftyFrames() {
}
void Audio::allocateScope() {
_scopeInputOffset = 0;
_scopeOutputOffset = 0;
int num = _samplesPerScope * sizeof(int16_t);
_scopeInput = new QByteArray(num, 0);
_scopeOutputLeft = new QByteArray(num, 0);
@@ -1290,12 +1311,15 @@ void Audio::freeScope() {
}
}
void Audio::addBufferToScope(
QByteArray* byteArray, unsigned int frameOffset, const int16_t* source, unsigned int sourceChannel, unsigned int sourceNumberOfChannels) {
int Audio::addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamplesPerChannel,
unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade) {
// Constant multiplier to map sample value to vertical size of scope
float multiplier = (float)MULTIPLIER_SCOPE_HEIGHT / logf(2.0f);
// Used to scale each sample. (logf(sample) + fadeOffset) is same as logf(sample * fade).
float fadeOffset = logf(fade);
// Temporary variable receives sample value
float sample;
@@ -1306,17 +1330,41 @@
// Short int pointer to mapped samples in byte array
int16_t* destination = (int16_t*) byteArray->data();
for (unsigned int i = 0; i < NETWORK_SAMPLES_PER_FRAME; i++) {
for (int i = 0; i < sourceSamplesPerChannel; i++) {
sample = (float)source[i * sourceNumberOfChannels + sourceChannel];
if (sample > 0) {
value = (int16_t)(multiplier * logf(sample));
} else if (sample < 0) {
value = (int16_t)(-multiplier * logf(-sample));
if (sample > 1) {
value = (int16_t)(multiplier * (logf(sample) + fadeOffset));
} else if (sample < -1) {
value = (int16_t)(-multiplier * (logf(-sample) + fadeOffset));
} else {
value = 0;
}
destination[i + frameOffset] = value;
destination[frameOffset] = value;
frameOffset = (frameOffset == _samplesPerScope - 1) ? 0 : frameOffset + 1;
}
return frameOffset;
}
int Audio::addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples) {
QMutexLocker lock(&_guard);
// Short int pointer to mapped samples in byte array
int16_t* destination = (int16_t*)byteArray->data();
if (silentSamples >= _samplesPerScope) {
memset(destination, 0, byteArray->size());
return frameOffset;
}
int samplesToBufferEnd = _samplesPerScope - frameOffset;
if (silentSamples > samplesToBufferEnd) {
memset(destination + frameOffset, 0, samplesToBufferEnd * sizeof(int16_t));
memset(destination, 0, (silentSamples - samplesToBufferEnd) * sizeof(int16_t)); // zero the remainder that wraps to the start
} else {
memset(destination + frameOffset, 0, silentSamples * sizeof(int16_t));
}
return (frameOffset + silentSamples) % _samplesPerScope;
}
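A quick worked example of the wraparound above, assuming _samplesPerScope = 100: a call with frameOffset = 90 and silentSamples = 15 zeroes the 10 samples at offsets 90..99, then the 5 samples at offsets 0..4, and returns (90 + 15) % 100 = 5, the offset for the next write.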
void Audio::renderStats(const float* color, int width, int height) {
@@ -1517,17 +1565,17 @@ void Audio::renderScope(int width, int height) {
return;
static const float backgroundColor[4] = { 0.4f, 0.4f, 0.4f, 0.6f };
static const float gridColor[4] = { 0.3f, 0.3f, 0.3f, 0.6f };
static const float gridColor[4] = { 0.7f, 0.7f, 0.7f, 1.0f };
static const float inputColor[4] = { 0.3f, 1.0f, 0.3f, 1.0f };
static const float outputLeftColor[4] = { 1.0f, 0.3f, 0.3f, 1.0f };
static const float outputRightColor[4] = { 0.3f, 0.3f, 1.0f, 1.0f };
static const int gridRows = 2;
int gridCols = _framesPerScope;
int x = (width - SCOPE_WIDTH) / 2;
int y = (height - SCOPE_HEIGHT) / 2;
int w = SCOPE_WIDTH;
int h = SCOPE_HEIGHT;
int x = (width - (int)SCOPE_WIDTH) / 2;
int y = (height - (int)SCOPE_HEIGHT) / 2;
int w = (int)SCOPE_WIDTH;
int h = (int)SCOPE_HEIGHT;
renderBackground(backgroundColor, x, y, w, h);
renderGrid(gridColor, x, y, w, h, gridRows, gridCols);
@@ -1717,7 +1765,7 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
// setup our general output device for audio-mixer audio
_audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
_audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFrameSize * sizeof(int16_t));
qDebug() << "Ring Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize;
qDebug() << "Output Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize;
_audioOutputIODevice.start();
_audioOutput->start(&_audioOutputIODevice);
@@ -1792,13 +1840,11 @@ float Audio::getInputRingBufferMsecsAvailable() const {
}
qint64 Audio::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
MixedProcessedAudioStream& receivedAUdioStream = _parent._receivedAudioStream;
int samplesRequested = maxSize / sizeof(int16_t);
int samplesPopped;
int bytesWritten;
if ((samplesPopped = receivedAUdioStream.popSamples(samplesRequested, false)) > 0) {
AudioRingBuffer::ConstIterator lastPopOutput = receivedAUdioStream.getLastPopOutput();
if ((samplesPopped = _receivedAudioStream.popSamples(samplesRequested, false)) > 0) {
AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput();
lastPopOutput.readSamples((int16_t*)data, samplesPopped);
bytesWritten = samplesPopped * sizeof(int16_t);
} else {

View file

@@ -51,14 +51,14 @@ public:
class AudioOutputIODevice : public QIODevice {
public:
AudioOutputIODevice(Audio& parent) : _parent(parent) {};
AudioOutputIODevice(MixedProcessedAudioStream& receivedAudioStream) : _receivedAudioStream(receivedAudioStream) {};
void start() { open(QIODevice::ReadOnly); }
void stop() { close(); }
qint64 readData(char * data, qint64 maxSize);
qint64 writeData(const char * data, qint64 maxSize) { return 0; }
private:
Audio& _parent;
MixedProcessedAudioStream& _receivedAudioStream;
};
@@ -74,10 +74,7 @@ public:
virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
virtual void startDrumSound(float volume, float frequency, float duration, float decay);
void setDynamicJitterBuffers(bool dynamicJitterBuffers) { _receivedAudioStream.setDynamicJitterBuffers(dynamicJitterBuffers); }
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { _receivedAudioStream.setStaticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames); }
void setMaxFramesOverDesired(int maxFramesOverDesired) { _receivedAudioStream.setMaxFramesOverDesired(maxFramesOverDesired); }
void setReceivedAudioStreamSettings(const InboundAudioStream::Settings& settings) { _receivedAudioStream.setSettings(settings); }
int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); }
@@ -114,7 +111,6 @@ public slots:
void addReceivedAudioToStream(const QByteArray& audioByteArray);
void parseAudioStreamStatsPacket(const QByteArray& packet);
void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
void processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
void handleAudioInput();
void reset();
void resetStats();
@@ -131,6 +127,10 @@ public slots:
void selectAudioScopeFiveFrames();
void selectAudioScopeTwentyFrames();
void selectAudioScopeFiftyFrames();
void addStereoSilenceToScope(int silentSamplesPerChannel);
void addLastFrameRepeatedWithFadeToScope(int samplesPerChannel);
void addStereoSamplesToScope(const QByteArray& samples);
void processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
void toggleAudioFilter();
void selectAudioFilterFlat();
void selectAudioFilterTrebleCut();
@@ -257,8 +257,9 @@ private:
void reallocateScope(int frames);
// Audio scope methods for data acquisition
void addBufferToScope(
QByteArray* byteArray, unsigned int frameOffset, const int16_t* source, unsigned int sourceChannel, unsigned int sourceNumberOfChannels);
int addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamples,
unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade = 1.0f);
int addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples);
// Audio scope methods for rendering
void renderBackground(const float* color, int x, int y, int width, int height);
@@ -290,6 +291,7 @@ private:
QByteArray* _scopeInput;
QByteArray* _scopeOutputLeft;
QByteArray* _scopeOutputRight;
QByteArray _scopeLastFrame;
#ifdef _WIN32
static const unsigned int STATS_WIDTH = 1500;
#else

View file

@@ -48,6 +48,7 @@ void DatagramProcessor::processDatagrams() {
// only process this packet if we have a match on the packet version
switch (packetTypeForPacket(incomingPacket)) {
case PacketTypeMixedAudio:
case PacketTypeSilentAudioFrame:
QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToStream", Qt::QueuedConnection,
Q_ARG(QByteArray, incomingPacket));
break;

View file

@@ -82,8 +82,7 @@ const int CONSOLE_HEIGHT = 200;
Menu::Menu() :
_actionHash(),
_audioJitterBufferFrames(0),
_maxFramesOverDesired(0),
_receivedAudioStreamSettings(),
_bandwidthDialog(NULL),
_fieldOfView(DEFAULT_FIELD_OF_VIEW_DEGREES),
_realWorldFieldOfView(DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES),
@@ -680,8 +679,15 @@ void Menu::loadSettings(QSettings* settings) {
lockedSettings = true;
}
_audioJitterBufferFrames = loadSetting(settings, "audioJitterBufferFrames", 0);
_maxFramesOverDesired = loadSetting(settings, "maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED);
_receivedAudioStreamSettings._dynamicJitterBuffers = settings->value("dynamicJitterBuffers", DEFAULT_DYNAMIC_JITTER_BUFFERS).toBool();
_receivedAudioStreamSettings._maxFramesOverDesired = settings->value("maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED).toInt();
_receivedAudioStreamSettings._staticDesiredJitterBufferFrames = settings->value("staticDesiredJitterBufferFrames", DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES).toInt();
_receivedAudioStreamSettings._useStDevForJitterCalc = settings->value("useStDevForJitterCalc", DEFAULT_USE_STDEV_FOR_JITTER_CALC).toBool();
_receivedAudioStreamSettings._windowStarveThreshold = settings->value("windowStarveThreshold", DEFAULT_WINDOW_STARVE_THRESHOLD).toInt();
_receivedAudioStreamSettings._windowSecondsForDesiredCalcOnTooManyStarves = settings->value("windowSecondsForDesiredCalcOnTooManyStarves", DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES).toInt();
_receivedAudioStreamSettings._windowSecondsForDesiredReduction = settings->value("windowSecondsForDesiredReduction", DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION).toInt();
_receivedAudioStreamSettings._repetitionWithFade = settings->value("repetitionWithFade", DEFAULT_REPETITION_WITH_FADE).toBool();
_fieldOfView = loadSetting(settings, "fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES);
_realWorldFieldOfView = loadSetting(settings, "realWorldFieldOfView", DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES);
_faceshiftEyeDeflection = loadSetting(settings, "faceshiftEyeDeflection", DEFAULT_FACESHIFT_EYE_DEFLECTION);
@@ -735,8 +741,15 @@ void Menu::saveSettings(QSettings* settings) {
lockedSettings = true;
}
settings->setValue("audioJitterBufferFrames", _audioJitterBufferFrames);
settings->setValue("maxFramesOverDesired", _maxFramesOverDesired);
settings->setValue("dynamicJitterBuffers", _receivedAudioStreamSettings._dynamicJitterBuffers);
settings->setValue("maxFramesOverDesired", _receivedAudioStreamSettings._maxFramesOverDesired);
settings->setValue("staticDesiredJitterBufferFrames", _receivedAudioStreamSettings._staticDesiredJitterBufferFrames);
settings->setValue("useStDevForJitterCalc", _receivedAudioStreamSettings._useStDevForJitterCalc);
settings->setValue("windowStarveThreshold", _receivedAudioStreamSettings._windowStarveThreshold);
settings->setValue("windowSecondsForDesiredCalcOnTooManyStarves", _receivedAudioStreamSettings._windowSecondsForDesiredCalcOnTooManyStarves);
settings->setValue("windowSecondsForDesiredReduction", _receivedAudioStreamSettings._windowSecondsForDesiredReduction);
settings->setValue("repetitionWithFade", _receivedAudioStreamSettings._repetitionWithFade);
settings->setValue("fieldOfView", _fieldOfView);
settings->setValue("faceshiftEyeDeflection", _faceshiftEyeDeflection);
settings->setValue("maxVoxels", _maxVoxels);


@@ -89,10 +89,8 @@ public:
void triggerOption(const QString& menuOption);
QAction* getActionForOption(const QString& menuOption);
float getAudioJitterBufferFrames() const { return _audioJitterBufferFrames; }
void setAudioJitterBufferFrames(float audioJitterBufferSamples) { _audioJitterBufferFrames = audioJitterBufferSamples; }
int getMaxFramesOverDesired() const { return _maxFramesOverDesired; }
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
const InboundAudioStream::Settings& getReceivedAudioStreamSettings() const { return _receivedAudioStreamSettings; }
void setReceivedAudioStreamSettings(const InboundAudioStream::Settings& receivedAudioStreamSettings) { _receivedAudioStreamSettings = receivedAudioStreamSettings; }
float getFieldOfView() const { return _fieldOfView; }
void setFieldOfView(float fieldOfView) { _fieldOfView = fieldOfView; }
float getRealWorldFieldOfView() const { return _realWorldFieldOfView; }
@@ -265,8 +263,7 @@ private:
QHash<QString, QAction*> _actionHash;
int _audioJitterBufferFrames; /// number of extra frames to wait before starting audio playback
int _maxFramesOverDesired;
InboundAudioStream::Settings _receivedAudioStreamSettings;
BandwidthDialog* _bandwidthDialog;
float _fieldOfView; /// in Degrees, doesn't apply to HMD like Oculus
float _realWorldFieldOfView; // The actual FOV set by the user's monitor size and view distance


@@ -149,9 +149,16 @@ void PreferencesDialog::loadPreferences() {
ui.faceshiftEyeDeflectionSider->setValue(menuInstance->getFaceshiftEyeDeflection() *
ui.faceshiftEyeDeflectionSider->maximum());
ui.audioJitterSpin->setValue(menuInstance->getAudioJitterBufferFrames());
const InboundAudioStream::Settings& streamSettings = menuInstance->getReceivedAudioStreamSettings();
ui.maxFramesOverDesiredSpin->setValue(menuInstance->getMaxFramesOverDesired());
ui.dynamicJitterBuffersCheckBox->setChecked(streamSettings._dynamicJitterBuffers);
ui.staticDesiredJitterBufferFramesSpin->setValue(streamSettings._staticDesiredJitterBufferFrames);
ui.maxFramesOverDesiredSpin->setValue(streamSettings._maxFramesOverDesired);
ui.useStdevForJitterCalcCheckBox->setChecked(streamSettings._useStDevForJitterCalc);
ui.windowStarveThresholdSpin->setValue(streamSettings._windowStarveThreshold);
ui.windowSecondsForDesiredCalcOnTooManyStarvesSpin->setValue(streamSettings._windowSecondsForDesiredCalcOnTooManyStarves);
ui.windowSecondsForDesiredReductionSpin->setValue(streamSettings._windowSecondsForDesiredReduction);
ui.repetitionWithFadeCheckBox->setChecked(streamSettings._repetitionWithFade);
ui.realWorldFieldOfViewSpin->setValue(menuInstance->getRealWorldFieldOfView());
@@ -241,16 +248,18 @@ void PreferencesDialog::savePreferences() {
Menu::getInstance()->setInvertSixenseButtons(ui.invertSixenseButtonsCheckBox->isChecked());
Menu::getInstance()->setAudioJitterBufferFrames(ui.audioJitterSpin->value());
if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
Application::getInstance()->getAudio()->setDynamicJitterBuffers(false);
Application::getInstance()->getAudio()->setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
} else {
Application::getInstance()->getAudio()->setDynamicJitterBuffers(true);
}
InboundAudioStream::Settings streamSettings;
streamSettings._dynamicJitterBuffers = ui.dynamicJitterBuffersCheckBox->isChecked();
streamSettings._staticDesiredJitterBufferFrames = ui.staticDesiredJitterBufferFramesSpin->value();
streamSettings._maxFramesOverDesired = ui.maxFramesOverDesiredSpin->value();
streamSettings._useStDevForJitterCalc = ui.useStdevForJitterCalcCheckBox->isChecked();
streamSettings._windowStarveThreshold = ui.windowStarveThresholdSpin->value();
streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = ui.windowSecondsForDesiredCalcOnTooManyStarvesSpin->value();
streamSettings._windowSecondsForDesiredReduction = ui.windowSecondsForDesiredReductionSpin->value();
streamSettings._repetitionWithFade = ui.repetitionWithFadeCheckBox->isChecked();
Menu::getInstance()->setMaxFramesOverDesired(ui.maxFramesOverDesiredSpin->value());
Application::getInstance()->getAudio()->setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired());
Menu::getInstance()->setReceivedAudioStreamSettings(streamSettings);
Application::getInstance()->getAudio()->setReceivedAudioStreamSettings(streamSettings);
Application::getInstance()->resizeGL(Application::getInstance()->getGLWidget()->width(),
Application::getInstance()->getGLWidget()->height());


@@ -1464,6 +1464,97 @@ padding: 10px;margin-top:10px</string>
</item>
</layout>
</item>
<!-- dynamic jitter buffers ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_23">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item>
<widget class="QLabel" name="label_20">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Enable Dynamic Jitter Buffers</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>dynamicJitterBuffersCheckBox</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_17">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QCheckBox" name="dynamicJitterBuffersCheckBox">
<property name="sizePolicy">
<sizepolicy hsizetype="Minimum" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>32</width>
<height>0</height>
</size>
</property>
<property name="baseSize">
<size>
<width>0</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
<property name="iconSize">
<size>
<width>32</width>
<height>32</height>
</size>
</property>
</widget>
</item>
</layout>
</item>
<!-- static desired jitter frames____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_8">
<property name="spacing">
@@ -1489,13 +1580,13 @@ padding: 10px;margin-top:10px</string>
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Audio Jitter Buffer Frames (0 for automatic)</string>
<string>Static Jitter Buffer Frames</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>audioJitterSpin</cstring>
<cstring>staticDesiredJitterBufferFramesSpin</cstring>
</property>
</widget>
</item>
@@ -1518,7 +1609,7 @@ padding: 10px;margin-top:10px</string>
</spacer>
</item>
<item>
<widget class="QSpinBox" name="audioJitterSpin">
<widget class="QSpinBox" name="staticDesiredJitterBufferFramesSpin">
<property name="sizePolicy">
<sizepolicy hsizetype="Fixed" vsizetype="Fixed">
<horstretch>0</horstretch>
@@ -1555,6 +1646,7 @@ padding: 10px;margin-top:10px</string>
</item>
</layout>
</item>
<!-- max frames over desired ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_13">
<property name="spacing">
@@ -1591,7 +1683,7 @@ padding: 10px;margin-top:10px</string>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_12">
<spacer name="horizontalSpacer_20">
<property name="font">
<font>
<family>Arial</family>
@@ -1646,7 +1738,467 @@ padding: 10px;margin-top:10px</string>
</item>
</layout>
</item>
<!-- use stdev for jitter calc ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_19">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item>
<widget class="QLabel" name="label_16">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Use Stdev for Dynamic Jitter Calc</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>useStdevForJitterCalcCheckBox</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_21">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QCheckBox" name="useStdevForJitterCalcCheckBox">
<property name="sizePolicy">
<sizepolicy hsizetype="Minimum" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>32</width>
<height>0</height>
</size>
</property>
<property name="baseSize">
<size>
<width>0</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
<property name="iconSize">
<size>
<width>32</width>
<height>32</height>
</size>
</property>
</widget>
</item>
</layout>
</item>
<!-- window starve threshold ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_20">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item alignment="Qt::AlignLeft">
<widget class="QLabel" name="label_17">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Window A Starve Threshold</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>windowStarveThresholdSpin</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_22">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QSpinBox" name="windowStarveThresholdSpin">
<property name="sizePolicy">
<sizepolicy hsizetype="Fixed" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>95</width>
<height>36</height>
</size>
</property>
<property name="maximumSize">
<size>
<width>70</width>
<height>16777215</height>
</size>
</property>
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="minimum">
<number>0</number>
</property>
<property name="maximum">
<number>10000</number>
</property>
<property name="value">
<number>1</number>
</property>
</widget>
</item>
</layout>
</item>
<!-- window A seconds ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_21">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item alignment="Qt::AlignLeft">
<widget class="QLabel" name="label_18">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Window A (raise desired on N starves) Seconds</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>windowSecondsForDesiredCalcOnTooManyStarvesSpin</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_23">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QSpinBox" name="windowSecondsForDesiredCalcOnTooManyStarvesSpin">
<property name="sizePolicy">
<sizepolicy hsizetype="Fixed" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>95</width>
<height>36</height>
</size>
</property>
<property name="maximumSize">
<size>
<width>70</width>
<height>16777215</height>
</size>
</property>
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="minimum">
<number>0</number>
</property>
<property name="maximum">
<number>10000</number>
</property>
<property name="value">
<number>1</number>
</property>
</widget>
</item>
</layout>
</item>
<!-- window B seconds ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_22">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item alignment="Qt::AlignLeft">
<widget class="QLabel" name="label_19">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Window B (desired ceiling) Seconds</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>windowSecondsForDesiredReductionSpin</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_24">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QSpinBox" name="windowSecondsForDesiredReductionSpin">
<property name="sizePolicy">
<sizepolicy hsizetype="Fixed" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>95</width>
<height>36</height>
</size>
</property>
<property name="maximumSize">
<size>
<width>70</width>
<height>16777215</height>
</size>
</property>
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="minimum">
<number>0</number>
</property>
<property name="maximum">
<number>10000</number>
</property>
<property name="value">
<number>1</number>
</property>
</widget>
</item>
</layout>
</item>
<!-- repetition with fade ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_24">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item>
<widget class="QLabel" name="label_21">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Repetition with Fade</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>repetitionWithFadeCheckBox</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_25">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QCheckBox" name="repetitionWithFadeCheckBox">
<property name="sizePolicy">
<sizepolicy hsizetype="Minimum" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>32</width>
<height>0</height>
</size>
</property>
<property name="baseSize">
<size>
<width>0</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
<property name="iconSize">
<size>
<width>32</width>
<height>32</height>
</size>
</property>
</widget>
</item>
</layout>
</item>
<item>
<layout class="QHBoxLayout" name="horizontalLayout_6">


@@ -20,18 +20,16 @@
#include "AudioRingBuffer.h"
AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode, int numFramesCapacity) :
_frameCapacity(numFramesCapacity),
_sampleCapacity(numFrameSamples * numFramesCapacity),
_isFull(false),
_numFrameSamples(numFrameSamples),
_randomAccessMode(randomAccessMode),
_overflowCount(0)
_frameCapacity(numFramesCapacity),
_sampleCapacity(numFrameSamples * numFramesCapacity),
_bufferLength(numFrameSamples * (numFramesCapacity + 1)),
_numFrameSamples(numFrameSamples),
_randomAccessMode(randomAccessMode),
_overflowCount(0)
{
if (numFrameSamples) {
_buffer = new int16_t[_sampleCapacity];
if (_randomAccessMode) {
memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
}
_buffer = new int16_t[_bufferLength];
memset(_buffer, 0, _bufferLength * sizeof(int16_t));
_nextOutput = _buffer;
_endOfLastWrite = _buffer;
} else {
@@ -53,28 +51,29 @@ void AudioRingBuffer::reset() {
void AudioRingBuffer::resizeForFrameSize(int numFrameSamples) {
delete[] _buffer;
_sampleCapacity = numFrameSamples * _frameCapacity;
_bufferLength = numFrameSamples * (_frameCapacity + 1);
_numFrameSamples = numFrameSamples;
_buffer = new int16_t[_sampleCapacity];
_buffer = new int16_t[_bufferLength];
memset(_buffer, 0, _bufferLength * sizeof(int16_t));
if (_randomAccessMode) {
memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
memset(_buffer, 0, _bufferLength * sizeof(int16_t));
}
reset();
}
void AudioRingBuffer::clear() {
_isFull = false;
_endOfLastWrite = _buffer;
_nextOutput = _buffer;
}
int AudioRingBuffer::readSamples(int16_t* destination, int maxSamples) {
return readData((char*) destination, maxSamples * sizeof(int16_t));
return readData((char*)destination, maxSamples * sizeof(int16_t)) / sizeof(int16_t);
}
int AudioRingBuffer::readData(char *data, int maxSize) {
// only copy up to the number of samples we have available
int numReadSamples = std::min((int) (maxSize / sizeof(int16_t)), samplesAvailable());
int numReadSamples = std::min((int)(maxSize / sizeof(int16_t)), samplesAvailable());
// If we're in random access mode, then we consider our number of available read samples slightly
// differently. Namely, if anything has been written, we say we have as many samples as they ask for
@@ -83,16 +82,16 @@ int AudioRingBuffer::readData(char *data, int maxSize) {
numReadSamples = _endOfLastWrite ? (maxSize / sizeof(int16_t)) : 0;
}
if (_nextOutput + numReadSamples > _buffer + _sampleCapacity) {
if (_nextOutput + numReadSamples > _buffer + _bufferLength) {
// we're going to need to do two reads to get this data, it wraps around the edge
// read to the end of the buffer
int numSamplesToEnd = (_buffer + _sampleCapacity) - _nextOutput;
int numSamplesToEnd = (_buffer + _bufferLength) - _nextOutput;
memcpy(data, _nextOutput, numSamplesToEnd * sizeof(int16_t));
if (_randomAccessMode) {
memset(_nextOutput, 0, numSamplesToEnd * sizeof(int16_t)); // clear it
}
// read the rest from the beginning of the buffer
memcpy(data + (numSamplesToEnd * sizeof(int16_t)), _buffer, (numReadSamples - numSamplesToEnd) * sizeof(int16_t));
if (_randomAccessMode) {
@@ -108,22 +107,19 @@ int AudioRingBuffer::readData(char *data, int maxSize) {
// push the position of _nextOutput by the number of samples read
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numReadSamples);
if (numReadSamples > 0) {
_isFull = false;
}
return numReadSamples * sizeof(int16_t);
}
int AudioRingBuffer::writeSamples(const int16_t* source, int maxSamples) {
return writeData((const char*) source, maxSamples * sizeof(int16_t));
int AudioRingBuffer::writeSamples(const int16_t* source, int maxSamples) {
return writeData((const char*)source, maxSamples * sizeof(int16_t)) / sizeof(int16_t);
}
int AudioRingBuffer::writeData(const char* data, int maxSize) {
// make sure we have enough bytes left for this to be the right amount of audio
// otherwise we should not copy that data, and leave the buffer pointers where they are
int samplesToCopy = std::min((int)(maxSize / sizeof(int16_t)), _sampleCapacity);
int samplesRoomFor = _sampleCapacity - samplesAvailable();
if (samplesToCopy > samplesRoomFor) {
// there's not enough room for this write. erase old data to make room for this new data
@@ -132,19 +128,16 @@ int AudioRingBuffer::writeData(const char* data, int maxSize) {
_overflowCount++;
qDebug() << "Overflowed ring buffer! Overwriting old data";
}
if (_endOfLastWrite + samplesToCopy <= _buffer + _sampleCapacity) {
if (_endOfLastWrite + samplesToCopy <= _buffer + _bufferLength) {
memcpy(_endOfLastWrite, data, samplesToCopy * sizeof(int16_t));
} else {
int numSamplesToEnd = (_buffer + _sampleCapacity) - _endOfLastWrite;
int numSamplesToEnd = (_buffer + _bufferLength) - _endOfLastWrite;
memcpy(_endOfLastWrite, data, numSamplesToEnd * sizeof(int16_t));
memcpy(_buffer, data + (numSamplesToEnd * sizeof(int16_t)), (samplesToCopy - numSamplesToEnd) * sizeof(int16_t));
}
_endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, samplesToCopy);
if (samplesToCopy > 0 && _endOfLastWrite == _nextOutput) {
_isFull = true;
}
return samplesToCopy * sizeof(int16_t);
}
@@ -158,61 +151,52 @@ const int16_t& AudioRingBuffer::operator[] (const int index) const {
}
void AudioRingBuffer::shiftReadPosition(unsigned int numSamples) {
if (numSamples > 0) {
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
_isFull = false;
}
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
}
int AudioRingBuffer::samplesAvailable() const {
if (!_endOfLastWrite) {
return 0;
}
if (_isFull) {
return _sampleCapacity;
}
int sampleDifference = _endOfLastWrite - _nextOutput;
if (sampleDifference < 0) {
sampleDifference += _sampleCapacity;
sampleDifference += _bufferLength;
}
return sampleDifference;
}
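Dropping the _isFull flag works because _bufferLength is allocated one frame larger than _sampleCapacity: the write pointer can never advance onto the read pointer while valid data remains, so pointer equality now unambiguously means empty. A minimal standalone sketch of the same arithmetic (helper name hypothetical):
// Mirrors samplesAvailable() above: with storage one frame larger than the
// usable capacity, a full buffer still leaves the pointers one frame apart,
// so "equal pointers" can only mean "empty".
static int ringSamplesAvailable(const int16_t* nextOutput, const int16_t* endOfLastWrite, int bufferLength) {
    int diff = (int)(endOfLastWrite - nextOutput);
    return (diff < 0) ? diff + bufferLength : diff;
}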
int AudioRingBuffer::addSilentFrame(int numSilentSamples) {
int AudioRingBuffer::addSilentSamples(int silentSamples) {
int samplesRoomFor = _sampleCapacity - samplesAvailable();
if (numSilentSamples > samplesRoomFor) {
if (silentSamples > samplesRoomFor) {
// there's not enough room for this write. write as many silent samples as we have room for
numSilentSamples = samplesRoomFor;
silentSamples = samplesRoomFor;
qDebug() << "Dropping some silent samples to prevent ring buffer overflow";
}
// memset zeroes into the buffer, accommodate a wrap around the end
// push the _endOfLastWrite to the correct spot
if (_endOfLastWrite + numSilentSamples <= _buffer + _sampleCapacity) {
memset(_endOfLastWrite, 0, numSilentSamples * sizeof(int16_t));
if (_endOfLastWrite + silentSamples <= _buffer + _bufferLength) {
memset(_endOfLastWrite, 0, silentSamples * sizeof(int16_t));
} else {
int numSamplesToEnd = (_buffer + _sampleCapacity) - _endOfLastWrite;
int numSamplesToEnd = (_buffer + _bufferLength) - _endOfLastWrite;
memset(_endOfLastWrite, 0, numSamplesToEnd * sizeof(int16_t));
memset(_buffer, 0, (numSilentSamples - numSamplesToEnd) * sizeof(int16_t));
}
_endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, numSilentSamples);
if (numSilentSamples > 0 && _nextOutput == _endOfLastWrite) {
_isFull = true;
memset(_buffer, 0, (silentSamples - numSamplesToEnd) * sizeof(int16_t));
}
_endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, silentSamples);
return numSilentSamples * sizeof(int16_t);
return silentSamples;
}
int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const {
if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + _sampleCapacity) {
if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + _bufferLength) {
// this shift will wrap the position around to the beginning of the ring
return position + numSamplesShift - _sampleCapacity;
return position + numSamplesShift - _bufferLength;
} else if (numSamplesShift < 0 && position + numSamplesShift < _buffer) {
// this shift will go around to the end of the ring
return position + numSamplesShift + _sampleCapacity;
return position + numSamplesShift + _bufferLength;
} else {
return position + numSamplesShift;
}
@@ -221,7 +205,7 @@ int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int
float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
float loudness = 0.0f;
const int16_t* sampleAt = frameStart;
const int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
const int16_t* _bufferLastAt = _buffer + _bufferLength - 1;
for (int i = 0; i < _numFrameSamples; ++i) {
loudness += fabsf(*sampleAt);
@@ -229,11 +213,14 @@ float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
}
loudness /= _numFrameSamples;
loudness /= MAX_SAMPLE_VALUE;
return loudness;
}
float AudioRingBuffer::getFrameLoudness(ConstIterator frameStart) const {
if (frameStart.isNull()) {
return 0.0f;
}
return getFrameLoudness(&(*frameStart));
}
@@ -241,3 +228,44 @@ float AudioRingBuffer::getNextOutputFrameLoudness() const {
return getFrameLoudness(_nextOutput);
}
int AudioRingBuffer::writeSamples(ConstIterator source, int maxSamples) {
int samplesToCopy = std::min(maxSamples, _sampleCapacity);
int samplesRoomFor = _sampleCapacity - samplesAvailable();
if (samplesToCopy > samplesRoomFor) {
// there's not enough room for this write. erase old data to make room for this new data
int samplesToDelete = samplesToCopy - samplesRoomFor;
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, samplesToDelete);
_overflowCount++;
qDebug() << "Overflowed ring buffer! Overwriting old data";
}
int16_t* bufferLast = _buffer + _bufferLength - 1;
for (int i = 0; i < samplesToCopy; i++) {
*_endOfLastWrite = *source;
_endOfLastWrite = (_endOfLastWrite == bufferLast) ? _buffer : _endOfLastWrite + 1;
++source;
}
return samplesToCopy;
}
int AudioRingBuffer::writeSamplesWithFade(ConstIterator source, int maxSamples, float fade) {
int samplesToCopy = std::min(maxSamples, _sampleCapacity);
int samplesRoomFor = _sampleCapacity - samplesAvailable();
if (samplesToCopy > samplesRoomFor) {
// there's not enough room for this write. erase old data to make room for this new data
int samplesToDelete = samplesToCopy - samplesRoomFor;
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, samplesToDelete);
_overflowCount++;
qDebug() << "Overflowed ring buffer! Overwriting old data";
}
int16_t* bufferLast = _buffer + _bufferLength - 1;
for (int i = 0; i < samplesToCopy; i++) {
*_endOfLastWrite = (int16_t)((float)(*source) * fade);
_endOfLastWrite = (_endOfLastWrite == bufferLast) ? _buffer : _endOfLastWrite + 1;
++source;
}
return samplesToCopy;
}
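A hedged usage sketch of the new iterator-based write path (the ringBuffer variable is hypothetical): repeating the most recently written frame at half amplitude, which is the pattern writeLastFrameRepeatedWithFade() in InboundAudioStream builds on:
// Hypothetical usage: re-write the last frame at 50% volume.
AudioRingBuffer::ConstIterator lastFrame = ringBuffer.lastFrameWritten();
int written = ringBuffer.writeSamplesWithFade(lastFrame, ringBuffer.getNumFrameSamples(), 0.5f);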


@@ -28,7 +28,7 @@ const int NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL = 512;
const int NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL / sizeof(int16_t);
const unsigned int BUFFER_SEND_INTERVAL_USECS = floorf((NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
/ (float) SAMPLE_RATE) * USECS_PER_SECOND);
/ (float)SAMPLE_RATE) * USECS_PER_SECOND);
const int MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max();
const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
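As a worked example of the interval arithmetic (the sample rate here is illustrative; SAMPLE_RATE is defined elsewhere): 512 bytes per channel is 256 int16_t samples, so at a 24 kHz rate BUFFER_SEND_INTERVAL_USECS = floorf(256 / 24000.0f * 1000000.0f) = 10666 microseconds per network buffer.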
@@ -42,33 +42,33 @@ public:
void reset();
void resizeForFrameSize(int numFrameSamples);
void clear();
int getSampleCapacity() const { return _sampleCapacity; }
int getFrameCapacity() const { return _frameCapacity; }
int readSamples(int16_t* destination, int maxSamples);
int writeSamples(const int16_t* source, int maxSamples);
int readData(char* data, int maxSize);
int writeData(const char* data, int maxSize);
int16_t& operator[](const int index);
const int16_t& operator[] (const int index) const;
void shiftReadPosition(unsigned int numSamples);
float getNextOutputFrameLoudness() const;
int samplesAvailable() const;
int framesAvailable() const { return samplesAvailable() / _numFrameSamples; }
int getNumFrameSamples() const { return _numFrameSamples; }
int getOverflowCount() const { return _overflowCount; } /// how many times the ring buffer has overwritten old data
int addSilentFrame(int numSilentSamples);
int addSilentSamples(int samples);
private:
float getFrameLoudness(const int16_t* frameStart) const;
@@ -77,12 +77,12 @@ protected:
// disallow copying of AudioRingBuffer objects
AudioRingBuffer(const AudioRingBuffer&);
AudioRingBuffer& operator= (const AudioRingBuffer&);
int16_t* shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const;
int _frameCapacity;
int _sampleCapacity;
bool _isFull;
int _bufferLength; // actual length of _buffer: will be one frame larger than _sampleCapacity
int _numFrameSamples;
int16_t* _nextOutput;
int16_t* _endOfLastWrite;
@@ -95,23 +95,25 @@ public:
class ConstIterator { //public std::iterator < std::forward_iterator_tag, int16_t > {
public:
ConstIterator()
: _capacity(0),
: _bufferLength(0),
_bufferFirst(NULL),
_bufferLast(NULL),
_at(NULL) {}
ConstIterator(int16_t* bufferFirst, int capacity, int16_t* at)
: _capacity(capacity),
: _bufferLength(capacity),
_bufferFirst(bufferFirst),
_bufferLast(bufferFirst + capacity - 1),
_at(at) {}
bool isNull() const { return _at == NULL; }
bool operator==(const ConstIterator& rhs) { return _at == rhs._at; }
bool operator!=(const ConstIterator& rhs) { return _at != rhs._at; }
const int16_t& operator*() { return *_at; }
ConstIterator& operator=(const ConstIterator& rhs) {
_capacity = rhs._capacity;
_bufferLength = rhs._bufferLength;
_bufferFirst = rhs._bufferFirst;
_bufferLast = rhs._bufferLast;
_at = rhs._at;
@@ -145,40 +147,54 @@ public:
}
ConstIterator operator+(int i) {
return ConstIterator(_bufferFirst, _capacity, atShiftedBy(i));
return ConstIterator(_bufferFirst, _bufferLength, atShiftedBy(i));
}
ConstIterator operator-(int i) {
return ConstIterator(_bufferFirst, _capacity, atShiftedBy(-i));
return ConstIterator(_bufferFirst, _bufferLength, atShiftedBy(-i));
}
void readSamples(int16_t* dest, int numSamples) {
int16_t* at = _at;
for (int i = 0; i < numSamples; i++) {
*dest = *(*this);
*dest = *at;
++dest;
++(*this);
at = (at == _bufferLast) ? _bufferFirst : at + 1;
}
}
void readSamplesWithFade(int16_t* dest, int numSamples, float fade) {
int16_t* at = _at;
for (int i = 0; i < numSamples; i++) {
*dest = (int16_t)((float)*at * fade);
++dest;
at = (at == _bufferLast) ? _bufferFirst : at + 1;
}
}
private:
int16_t* atShiftedBy(int i) {
i = (_at - _bufferFirst + i) % _capacity;
i = (_at - _bufferFirst + i) % _bufferLength;
if (i < 0) {
i += _capacity;
i += _bufferLength;
}
return _bufferFirst + i;
}
private:
int _capacity;
int _bufferLength;
int16_t* _bufferFirst;
int16_t* _bufferLast;
int16_t* _at;
};
ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); }
ConstIterator nextOutput() const { return ConstIterator(_buffer, _bufferLength, _nextOutput); }
ConstIterator lastFrameWritten() const { return ConstIterator(_buffer, _bufferLength, _endOfLastWrite) - _numFrameSamples; }
float getFrameLoudness(ConstIterator frameStart) const;
int writeSamples(ConstIterator source, int maxSamples);
int writeSamplesWithFade(ConstIterator source, int maxSamples, float fade);
};
#endif // hifi_AudioRingBuffer_h
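On the read side, a hedged sketch of the iterator API declared above (the ringBuffer and scratch variables are hypothetical): because readSamples() and readSamplesWithFade() walk a local copy of _at, the same frame can be re-read at different fade levels without being consumed:
// Hypothetical usage: peek at the next output frame at 25% volume.
int16_t scratch[NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL];
AudioRingBuffer::ConstIterator out = ringBuffer.nextOutput();
out.readSamplesWithFade(scratch, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL, 0.25f);
// 'out' still points at the same frame; the read did not advance it.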


@@ -14,30 +14,37 @@
#include "InboundAudioStream.h"
#include "PacketHeaders.h"
InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity,
bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc) :
const int STARVE_HISTORY_CAPACITY = 50;
InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings) :
_ringBuffer(numFrameSamples, false, numFramesCapacity),
_lastPopSucceeded(false),
_lastPopOutput(),
_dynamicJitterBuffers(dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(useStDevForJitterCalc),
_calculatedJitterBufferFramesUsingMaxGap(0),
_calculatedJitterBufferFramesUsingStDev(0),
_desiredJitterBufferFrames(dynamicJitterBuffers ? 1 : staticDesiredJitterBufferFrames),
_maxFramesOverDesired(maxFramesOverDesired),
_dynamicJitterBuffers(settings._dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(settings._useStDevForJitterCalc),
_desiredJitterBufferFrames(settings._dynamicJitterBuffers ? 1 : settings._staticDesiredJitterBufferFrames),
_maxFramesOverDesired(settings._maxFramesOverDesired),
_isStarved(true),
_hasStarted(false),
_consecutiveNotMixedCount(0),
_starveCount(0),
_silentFramesDropped(0),
_oldFramesDropped(0),
_incomingSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS),
_lastFrameReceivedTime(0),
_interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS),
_interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS),
_incomingSequenceNumberStats(STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
_lastPacketReceivedTime(0),
_timeGapStatsForDesiredCalcOnTooManyStarves(0, settings._windowSecondsForDesiredCalcOnTooManyStarves),
_calculatedJitterBufferFramesUsingMaxGap(0),
_stdevStatsForDesiredCalcOnTooManyStarves(),
_calculatedJitterBufferFramesUsingStDev(0),
_timeGapStatsForDesiredReduction(0, settings._windowSecondsForDesiredReduction),
_starveHistoryWindowSeconds(settings._windowSecondsForDesiredCalcOnTooManyStarves),
_starveHistory(STARVE_HISTORY_CAPACITY),
_starveThreshold(settings._windowStarveThreshold),
_framesAvailableStat(),
_currentJitterBufferFrames(0)
_currentJitterBufferFrames(0),
_timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
_repetitionWithFade(settings._repetitionWithFade)
{
}
@@ -59,11 +66,14 @@ void InboundAudioStream::resetStats() {
_silentFramesDropped = 0;
_oldFramesDropped = 0;
_incomingSequenceNumberStats.reset();
_lastFrameReceivedTime = 0;
_interframeTimeGapStatsForJitterCalc.reset();
_interframeTimeGapStatsForStatsPacket.reset();
_lastPacketReceivedTime = 0;
_timeGapStatsForDesiredCalcOnTooManyStarves.reset();
_stdevStatsForDesiredCalcOnTooManyStarves = StDev();
_timeGapStatsForDesiredReduction.reset();
_starveHistory.clear();
_framesAvailableStat.reset();
_currentJitterBufferFrames = 0;
_timeGapStatsForStatsPacket.reset();
}
void InboundAudioStream::clearBuffer() {
@@ -72,8 +82,11 @@ void InboundAudioStream::clearBuffer() {
_currentJitterBufferFrames = 0;
}
int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
void InboundAudioStream::perSecondCallbackForUpdatingStats() {
_incomingSequenceNumberStats.pushStatsToHistory();
_timeGapStatsForDesiredCalcOnTooManyStarves.currentIntervalComplete();
_timeGapStatsForDesiredReduction.currentIntervalComplete();
_timeGapStatsForStatsPacket.currentIntervalComplete();
}
int InboundAudioStream::parseData(const QByteArray& packet) {
@@ -83,36 +96,51 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
// parse header
int numBytesHeader = numBytesForPacketHeader(packet);
const char* sequenceAt = packet.constData() + numBytesHeader;
const char* dataAt = packet.constData() + numBytesHeader;
int readBytes = numBytesHeader;
// parse sequence number and track it
quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));
quint16 sequence = *(reinterpret_cast<const quint16*>(dataAt));
dataAt += sizeof(quint16);
readBytes += sizeof(quint16);
SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID);
frameReceivedUpdateTimingStats();
packetReceivedUpdateTimingStats();
// TODO: handle generalized silent packet here?
int networkSamples;
// parse the info after the seq number and before the audio data.(the stream properties)
int numAudioSamples;
readBytes += parseStreamProperties(packetType, packet.mid(readBytes), numAudioSamples);
if (packetType == PacketTypeSilentAudioFrame) {
quint16 numSilentSamples = *(reinterpret_cast<const quint16*>(dataAt));
readBytes += sizeof(quint16);
networkSamples = (int)numSilentSamples;
} else {
// parse the info after the seq number and before the audio data (the stream properties)
readBytes += parseStreamProperties(packetType, packet.mid(readBytes), networkSamples);
}
// handle this packet based on its arrival status.
// For now, late packets are ignored. It may be good in the future to insert the late audio frame
// into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
switch (arrivalInfo._status) {
case SequenceNumberStats::Early: {
// Packet is early; write droppable silent samples for each of the skipped packets.
// NOTE: we assume that each dropped packet contains the same number of samples
// as the packet we just received.
int packetsDropped = arrivalInfo._seqDiffFromExpected;
writeSamplesForDroppedPackets(packetsDropped * numAudioSamples);
writeSamplesForDroppedPackets(packetsDropped * networkSamples);
// fall through to OnTime case
}
case SequenceNumberStats::OnTime: {
readBytes += parseAudioData(packetType, packet.mid(readBytes), numAudioSamples);
// Packet is on time; parse its data to the ringbuffer
if (packetType == PacketTypeSilentAudioFrame) {
writeDroppableSilentSamples(networkSamples);
} else {
readBytes += parseAudioData(packetType, packet.mid(readBytes), networkSamples);
}
break;
}
default: {
// For now, late packets are ignored. It may be good in the future to insert the late audio packet data
// into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
break;
}
}
@@ -139,6 +167,43 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
return readBytes;
}
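For reference, the wire layout this parse implies for a PacketTypeSilentAudioFrame (inferred from the code above, not quoted from a spec):
// [ packet header ][ quint16 sequence ][ quint16 numSilentSamples ]
// No audio payload follows; the receiver synthesizes the silence (or a faded
// repeat of the last frame) locally, so a silent interval costs a few bytes
// on the wire instead of full audio frames.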
int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
// mixed audio packets do not have any info between the seq num and the audio data.
numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
return 0;
}
int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}
int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
// calculate how many silent frames we should drop.
int samplesPerFrame = _ringBuffer.getNumFrameSamples();
int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
int numSilentFramesToDrop = 0;
if (silentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
// our avg jitter buffer size exceeds its desired value, so ignore some silent
// frames to get that size as close to desired as possible
int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
int numSilentFramesReceived = silentSamples / samplesPerFrame;
numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);
// don't reset _currentJitterBufferFrames here; we want to be able to drop further silent frames
// without waiting for _framesAvailableStat to fill up with 10 seconds of samples.
_currentJitterBufferFrames -= numSilentFramesToDrop;
_silentFramesDropped += numSilentFramesToDrop;
_framesAvailableStat.reset();
}
int ret = _ringBuffer.addSilentSamples(silentSamples - numSilentFramesToDrop * samplesPerFrame);
return ret;
}
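A worked example with hypothetical numbers, mirroring the logic above: desired = 3 frames, padding = 1, _currentJitterBufferFrames = 7, 256 samples per frame, and an incoming packet carrying 5 frames of silence:
int desiredPlusPadding = 3 + 1;                       // 4
int framesToDropDesired = 7 - desiredPlusPadding;     // 3
int framesToDrop = std::min(framesToDropDesired, 5);  // 3 frames of silence discarded
int silentSamplesWritten = (5 - framesToDrop) * 256;  // 512 samples actually buffered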
int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped) {
int samplesPopped = 0;
int samplesAvailable = _ringBuffer.samplesAvailable();
@@ -216,12 +281,61 @@ void InboundAudioStream::framesAvailableChanged() {
}
void InboundAudioStream::setToStarved() {
_isStarved = true;
_consecutiveNotMixedCount = 0;
_starveCount++;
// if we have more than the desired frames when setToStarved() is called, then we'll immediately
// be considered refilled. in that case, there's no need to set _isStarved to true.
_isStarved = (_ringBuffer.framesAvailable() < _desiredJitterBufferFrames);
// record the time of this starve in the starve history
quint64 now = usecTimestampNow();
_starveHistory.insert(now);
if (_dynamicJitterBuffers) {
// dynamic jitter buffers are enabled. check if this starve put us over the window
// starve threshold
quint64 windowEnd = now - _starveHistoryWindowSeconds * USECS_PER_SECOND;
RingBufferHistory<quint64>::Iterator starvesIterator = _starveHistory.begin();
RingBufferHistory<quint64>::Iterator end = _starveHistory.end();
int starvesInWindow = 1;
do {
++starvesIterator;
if (*starvesIterator < windowEnd) {
break;
}
starvesInWindow++;
} while (starvesIterator != end);
// this starve put us over the starve threshold. update _desiredJitterBufferFrames to
// value determined by window A.
if (starvesInWindow >= _starveThreshold) {
int calculatedJitterBufferFrames;
if (_useStDevForJitterCalc) {
calculatedJitterBufferFrames = _calculatedJitterBufferFramesUsingStDev;
} else {
// we don't know when the next packet will arrive, so it's possible the gap between the last packet and the
// next packet will exceed the max time gap in the window. If the time since the last packet has already exceeded
// the window max gap, then we should use that value to calculate desired frames.
int framesSinceLastPacket = ceilf((float)(now - _lastPacketReceivedTime) / (float)BUFFER_SEND_INTERVAL_USECS);
calculatedJitterBufferFrames = std::max(_calculatedJitterBufferFramesUsingMaxGap, framesSinceLastPacket);
}
// make sure _desiredJitterBufferFrames does not become lower here
if (calculatedJitterBufferFrames >= _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
}
}
}
}
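A hypothetical trace of the threshold logic, assuming _starveThreshold = 3 and a 50-second window:
// t =  0s  starve #1 -> starvesInWindow = 1, below threshold, no change
// t = 12s  starve #2 -> starvesInWindow = 2, below threshold, no change
// t = 30s  starve #3 -> starvesInWindow = 3 >= threshold:
//          _desiredJitterBufferFrames is raised to the window-A estimate
//          (max-gap or stdev based); this path never lowers it.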
void InboundAudioStream::setSettings(const Settings& settings) {
setMaxFramesOverDesired(settings._maxFramesOverDesired);
setDynamicJitterBuffers(settings._dynamicJitterBuffers);
setStaticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames);
setUseStDevForJitterCalc(settings._useStDevForJitterCalc);
setWindowStarveThreshold(settings._windowStarveThreshold);
setWindowSecondsForDesiredCalcOnTooManyStarves(settings._windowSecondsForDesiredCalcOnTooManyStarves);
setWindowSecondsForDesiredReduction(settings._windowSecondsForDesiredReduction);
setRepetitionWithFade(settings._repetitionWithFade);
}
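Because setSettings() funnels every field through its individual setter, side effects such as resetting the desired frame count or resizing the stat windows are applied consistently. A hedged usage sketch ('stream' is some existing InboundAudioStream):
InboundAudioStream::Settings settings;                        // starts from the DEFAULT_* values
settings._windowStarveThreshold = 5;                          // react after 5 starves...
settings._windowSecondsForDesiredCalcOnTooManyStarves = 30;   // ...within a 30-second window
settings._repetitionWithFade = true;
stream.setSettings(settings);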
void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
@@ -229,6 +343,7 @@ void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
_desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
} else {
if (!_dynamicJitterBuffers) {
// if we're enabling dynamic jitter buffer frames, start desired frames at 1
_desiredJitterBufferFrames = 1;
}
}
@@ -242,90 +357,102 @@ void InboundAudioStream::setStaticDesiredJitterBufferFrames(int staticDesiredJit
}
}
void InboundAudioStream::setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves) {
_timeGapStatsForDesiredCalcOnTooManyStarves.setWindowIntervals(windowSecondsForDesiredCalcOnTooManyStarves);
_starveHistoryWindowSeconds = windowSecondsForDesiredCalcOnTooManyStarves;
}
void InboundAudioStream::setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction) {
_timeGapStatsForDesiredReduction.setWindowIntervals(windowSecondsForDesiredReduction);
}
int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
const int MIN_FRAMES_DESIRED = 0;
const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity();
return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED);
}
void InboundAudioStream::frameReceivedUpdateTimingStats() {
void InboundAudioStream::packetReceivedUpdateTimingStats() {
// update our timegap stats and desired jitter buffer frames if necessary
// discard the first few packets we receive since they usually have gaps that aren't representative of normal jitter
const int NUM_INITIAL_PACKETS_DISCARD = 3;
quint64 now = usecTimestampNow();
if (_incomingSequenceNumberStats.getReceived() > NUM_INITIAL_PACKETS_DISCARD) {
quint64 gap = now - _lastFrameReceivedTime;
_interframeTimeGapStatsForStatsPacket.update(gap);
quint64 gap = now - _lastPacketReceivedTime;
_timeGapStatsForStatsPacket.update(gap);
const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;
// update all stats used for desired frames calculations under dynamic jitter buffer mode
_timeGapStatsForDesiredCalcOnTooManyStarves.update(gap);
_stdevStatsForDesiredCalcOnTooManyStarves.addValue(gap);
_timeGapStatsForDesiredReduction.update(gap);
// update stats for Freddy's method of jitter calc
_interframeTimeGapStatsForJitterCalc.update(gap);
if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) {
_calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
_interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag();
if (_dynamicJitterBuffers && !_useStDevForJitterCalc) {
_desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingMaxGap);
}
if (_timeGapStatsForDesiredCalcOnTooManyStarves.getNewStatsAvailableFlag()) {
_calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_timeGapStatsForDesiredCalcOnTooManyStarves.getWindowMax()
/ (float)BUFFER_SEND_INTERVAL_USECS);
_timeGapStatsForDesiredCalcOnTooManyStarves.clearNewStatsAvailableFlag();
}
// update stats for Philip's method of jitter calc
_stdev.addValue(gap);
const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;
if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
if (_stdevStatsForDesiredCalcOnTooManyStarves.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
const float NUM_STANDARD_DEVIATIONS = 3.0f;
_calculatedJitterBufferFramesUsingStDev = (int)ceilf(NUM_STANDARD_DEVIATIONS * _stdev.getStDev() / USECS_PER_FRAME);
_stdev.reset();
_calculatedJitterBufferFramesUsingStDev = ceilf(NUM_STANDARD_DEVIATIONS * _stdevStatsForDesiredCalcOnTooManyStarves.getStDev()
/ (float)BUFFER_SEND_INTERVAL_USECS);
_stdevStatsForDesiredCalcOnTooManyStarves.reset();
}
if (_dynamicJitterBuffers && _useStDevForJitterCalc) {
_desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingStDev);
if (_dynamicJitterBuffers) {
// if the max gap in window B (_timeGapStatsForDesiredReduction) corresponds to a smaller number of frames than _desiredJitterBufferFrames,
// then reduce _desiredJitterBufferFrames to that number of frames.
if (_timeGapStatsForDesiredReduction.getNewStatsAvailableFlag() && _timeGapStatsForDesiredReduction.isWindowFilled()) {
int calculatedJitterBufferFrames = ceilf((float)_timeGapStatsForDesiredReduction.getWindowMax() / (float)BUFFER_SEND_INTERVAL_USECS);
if (calculatedJitterBufferFrames < _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
}
_timeGapStatsForDesiredReduction.clearNewStatsAvailableFlag();
}
}
}
_lastFrameReceivedTime = now;
_lastPacketReceivedTime = now;
}
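The two competing desired-frames estimates maintained here reduce to the following, where $T$ is BUFFER_SEND_INTERVAL_USECS (the nominal microseconds per frame) and $\sigma$ is the standard deviation of inter-packet gaps:

$$\text{frames}_{\text{maxGap}} = \left\lceil \frac{\max_{\text{window}}(\text{gap})}{T} \right\rceil \qquad \text{frames}_{\text{stDev}} = \left\lceil \frac{3\sigma}{T} \right\rceil$$

A starve that trips the window-A threshold promotes one of these to the new desired length (in setToStarved()); the window-B check above is the only path that lowers it.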
int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) {
// calculate how many silent frames we should drop.
int samplesPerFrame = _ringBuffer.getNumFrameSamples();
int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
int numSilentFramesToDrop = 0;
if (numSilentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
// our avg jitter buffer size exceeds its desired value, so ignore some silent
// frames to get that size as close to desired as possible
int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
int numSilentFramesReceived = numSilentSamples / samplesPerFrame;
numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);
// dont reset _currentJitterBufferFrames here; we want to be able to drop further silent frames
// without waiting for _framesAvailableStat to fill up to 10s of samples.
_currentJitterBufferFrames -= numSilentFramesToDrop;
_silentFramesDropped += numSilentFramesToDrop;
_framesAvailableStat.reset();
int InboundAudioStream::writeSamplesForDroppedPackets(int networkSamples) {
if (_repetitionWithFade) {
return writeLastFrameRepeatedWithFade(networkSamples);
}
return _ringBuffer.addSilentFrame(numSilentSamples - numSilentFramesToDrop * samplesPerFrame);
return writeDroppableSilentSamples(networkSamples);
}
int InboundAudioStream::writeSamplesForDroppedPackets(int numSamples) {
return writeDroppableSilentSamples(numSamples);
int InboundAudioStream::writeLastFrameRepeatedWithFade(int samples) {
AudioRingBuffer::ConstIterator frameToRepeat = _ringBuffer.lastFrameWritten();
int frameSize = _ringBuffer.getNumFrameSamples();
int samplesToWrite = samples;
int indexOfRepeat = 0;
do {
int samplesToWriteThisIteration = std::min(samplesToWrite, frameSize);
float fade = calculateRepeatedFrameFadeFactor(indexOfRepeat);
if (fade == 1.0f) {
samplesToWrite -= _ringBuffer.writeSamples(frameToRepeat, samplesToWriteThisIteration);
} else {
samplesToWrite -= _ringBuffer.writeSamplesWithFade(frameToRepeat, samplesToWriteThisIteration, fade);
}
indexOfRepeat++;
} while (samplesToWrite > 0);
return samples;
}
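For example (hypothetical numbers), a 3-frame gap (samples = 3 * frameSize) runs the loop three times, consulting calculateRepeatedFrameFadeFactor(0), (1), and (2) in turn:
// iteration 0: fade(0) = 1.0            -> writeSamples(), full-volume repeat
// iteration 1: fade(1) = 1.0 (typical)  -> still inside the 20 ms no-fade window
// iteration 2: fade(2) < 1.0            -> writeSamplesWithFade(), attenuated repeat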
AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
AudioStreamStats streamStats;
streamStats._timeGapMin = _interframeTimeGapStatsForStatsPacket.getMin();
streamStats._timeGapMax = _interframeTimeGapStatsForStatsPacket.getMax();
streamStats._timeGapAverage = _interframeTimeGapStatsForStatsPacket.getAverage();
streamStats._timeGapWindowMin = _interframeTimeGapStatsForStatsPacket.getWindowMin();
streamStats._timeGapWindowMax = _interframeTimeGapStatsForStatsPacket.getWindowMax();
streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage();
streamStats._timeGapMin = _timeGapStatsForStatsPacket.getMin();
streamStats._timeGapMax = _timeGapStatsForStatsPacket.getMax();
streamStats._timeGapAverage = _timeGapStatsForStatsPacket.getAverage();
streamStats._timeGapWindowMin = _timeGapStatsForStatsPacket.getWindowMin();
streamStats._timeGapWindowMax = _timeGapStatsForStatsPacket.getWindowMax();
streamStats._timeGapWindowAverage = _timeGapStatsForStatsPacket.getWindowAverage();
streamStats._framesAvailable = _ringBuffer.framesAvailable();
streamStats._framesAvailableAverage = _framesAvailableStat.getAverage();
@@ -341,7 +468,24 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
return streamStats;
}
AudioStreamStats InboundAudioStream::updateSeqHistoryAndGetAudioStreamStats() {
_incomingSequenceNumberStats.pushStatsToHistory();
return getAudioStreamStats();
float calculateRepeatedFrameFadeFactor(int indexOfRepeat) {
// fade factor scheme is from this paper:
// http://inst.eecs.berkeley.edu/~ee290t/sp04/lectures/packet_loss_recov_paper11.pdf
const float INITIAL_MSECS_NO_FADE = 20.0f;
const float MSECS_FADE_TO_ZERO = 320.0f;
const float INITIAL_FRAMES_NO_FADE = INITIAL_MSECS_NO_FADE * (float)USECS_PER_MSEC / (float)BUFFER_SEND_INTERVAL_USECS;
const float FRAMES_FADE_TO_ZERO = MSECS_FADE_TO_ZERO * (float)USECS_PER_MSEC / (float)BUFFER_SEND_INTERVAL_USECS;
const float SAMPLE_RANGE = std::numeric_limits<int16_t>::max();
if (indexOfRepeat <= INITIAL_FRAMES_NO_FADE) {
return 1.0f;
} else if (indexOfRepeat <= INITIAL_FRAMES_NO_FADE + FRAMES_FADE_TO_ZERO) {
return pow(SAMPLE_RANGE, -(indexOfRepeat - INITIAL_FRAMES_NO_FADE) / FRAMES_FADE_TO_ZERO);
//return 1.0f - ((indexOfRepeat - INITIAL_FRAMES_NO_FADE) / FRAMES_FADE_TO_ZERO);
}
return 0.0f;
}
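In closed form (my reading of the code above, with $N_0$ = INITIAL_FRAMES_NO_FADE, $N_f$ = FRAMES_FADE_TO_ZERO, and $R = 32767$, the int16 sample range):

$$f(i) = \begin{cases} 1 & i \le N_0 \\ R^{-(i - N_0)/N_f} & N_0 < i \le N_0 + N_f \\ 0 & i > N_0 + N_f \end{cases}$$

Choosing $R$ as the base means the decay reaches $1/R$, one quantization step of an int16 sample, exactly at the end of the fade window, so the hard cut to zero afterward is inaudible. The commented-out line is a linear alternative to the same schedule.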


@@ -22,43 +22,84 @@
#include "TimeWeightedAvg.h"
// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames.
// The larger this value is, the less aggressive we are about reducing the jitter buffer length.
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames long when dropping frames,
// The larger this value is, the fewer frames we drop when attempting to reduce the jitter buffer length.
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames when dropping frames,
// which could lead to a starve soon after.
const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
// the time gaps stats for _desiredJitterBufferFrames calculation
// will recalculate the max for the past 5000 samples every 500 samples
const int TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES = 500;
const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10;
// the time gap stats for constructing AudioStreamStats will
// recalculate min/max/avg every ~1 second for the past ~30 seconds of time gap data
const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30;
// this controls the length of the window for stats used in the stats packet (not the stats used in
// _desiredJitterBufferFrames calculation)
const int STATS_FOR_STATS_PACKET_WINDOW_SECONDS = 30;
// this controls the window size of the time-weighted avg of frames available. Every time the window fills up,
// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 2 * USECS_PER_SECOND;
// the internal history buffer of the incoming seq stats will cover 30s to calculate
// packet loss % over last 30s
const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND;
// default values for members of the Settings struct
const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10;
const int DEFAULT_DESIRED_JITTER_BUFFER_FRAMES = 1;
const bool DEFAULT_DYNAMIC_JITTER_BUFFERS = true;
const int DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES = 1;
const bool DEFAULT_USE_STDEV_FOR_JITTER_CALC = false;
const int DEFAULT_WINDOW_STARVE_THRESHOLD = 3;
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES = 50;
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION = 10;
const bool DEFAULT_REPETITION_WITH_FADE = true;
class InboundAudioStream : public NodeData {
Q_OBJECT
public:
InboundAudioStream(int numFrameSamples, int numFramesCapacity,
bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired,
bool useStDevForJitterCalc = false);
class Settings {
public:
Settings()
: _maxFramesOverDesired(DEFAULT_MAX_FRAMES_OVER_DESIRED),
_dynamicJitterBuffers(DEFAULT_DYNAMIC_JITTER_BUFFERS),
_staticDesiredJitterBufferFrames(DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES),
_useStDevForJitterCalc(DEFAULT_USE_STDEV_FOR_JITTER_CALC),
_windowStarveThreshold(DEFAULT_WINDOW_STARVE_THRESHOLD),
_windowSecondsForDesiredCalcOnTooManyStarves(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES),
_windowSecondsForDesiredReduction(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION),
_repetitionWithFade(DEFAULT_REPETITION_WITH_FADE)
{}
Settings(int maxFramesOverDesired, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
bool useStDevForJitterCalc, int windowStarveThreshold, int windowSecondsForDesiredCalcOnTooManyStarves,
int windowSecondsForDesiredReduction, bool repetitionWithFade)
: _maxFramesOverDesired(maxFramesOverDesired),
_dynamicJitterBuffers(dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(useStDevForJitterCalc),
_windowStarveThreshold(windowStarveThreshold),
_windowSecondsForDesiredCalcOnTooManyStarves(windowSecondsForDesiredCalcOnTooManyStarves),
_windowSecondsForDesiredReduction(windowSecondsForDesiredReduction),
_repetitionWithFade(repetitionWithFade)
{}
// max number of frames over desired in the ringbuffer.
int _maxFramesOverDesired;
// if false, _desiredJitterBufferFrames will always be _staticDesiredJitterBufferFrames. Otherwise,
// either Fred's or Philip's method will be used to calculate _desiredJitterBufferFrames based on packet timegaps.
bool _dynamicJitterBuffers;
// settings for static jitter buffer mode
int _staticDesiredJitterBufferFrames;
// settings for dynamic jitter buffer mode
bool _useStDevForJitterCalc; // if true, Philip's method is used; otherwise, Fred's method is used.
int _windowStarveThreshold;
int _windowSecondsForDesiredCalcOnTooManyStarves;
int _windowSecondsForDesiredReduction;
// if true, the prev frame will be repeated (fading to silence) for dropped frames.
// otherwise, silence will be inserted.
bool _repetitionWithFade;
};
public:
InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings);
void reset();
void resetStats();
virtual void resetStats();
void clearBuffer();
virtual int parseData(const QByteArray& packet);
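For reference, a minimal configuration sketch using the new Settings aggregate; since its members are public, they can be set directly after default construction. The specific values and the frame capacity below are illustrative assumptions, not recommendations:

    // sketch: configure a stream with a static 3-frame jitter buffer
    InboundAudioStream::Settings settings;             // starts at the DEFAULT_* values
    settings._dynamicJitterBuffers = false;            // use the static path
    settings._staticDesiredJitterBufferFrames = 3;     // assumed value
    settings._maxFramesOverDesired = 10;               // assumed value

    MixedAudioStream stream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO,
                            10 /* frames of capacity, assumed */, settings);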
@ -72,14 +113,18 @@ public:
void setToStarved();
void setDynamicJitterBuffers(bool dynamicJitterBuffers);
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames);
/// this function should be called once per second to ensure the seq num stats history spans ~30 seconds
AudioStreamStats updateSeqHistoryAndGetAudioStreamStats();
void setSettings(const Settings& settings);
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
void setDynamicJitterBuffers(bool dynamicJitterBuffers);
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames);
void setUseStDevForJitterCalc(bool useStDevForJitterCalc) { _useStDevForJitterCalc = useStDevForJitterCalc; }
void setWindowStarveThreshold(int windowStarveThreshold) { _starveThreshold = windowStarveThreshold; }
void setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves);
void setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction);
void setRepetitionWithFade(bool repetitionWithFade) { _repetitionWithFade = repetitionWithFade; }
virtual AudioStreamStats getAudioStreamStats() const;
@ -110,11 +155,17 @@ public:
int getPacketsReceived() const { return _incomingSequenceNumberStats.getReceived(); }
public slots:
/// This function should be called every second for all the stats to function properly. If dynamic jitter buffers
/// are enabled, those stats are used to calculate _desiredJitterBufferFrames.
/// If the stats are not used and dynamic jitter buffers are disabled, it's not necessary to call this function.
void perSecondCallbackForUpdatingStats();
private:
void frameReceivedUpdateTimingStats();
void packetReceivedUpdateTimingStats();
int clampDesiredJitterBufferFramesValue(int desired) const;
int writeSamplesForDroppedPackets(int numSamples);
int writeSamplesForDroppedPackets(int networkSamples);
void popSamplesNoCheck(int samples);
void framesAvailableChanged();
@ -126,13 +177,19 @@ protected:
/// parses the info between the seq num and the audio data in the network packet and calculates
/// how many audio samples this packet contains (used when filling in samples for dropped packets).
virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
/// default implementation assumes no stream properties and raw audio samples after stream properties
virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& networkSamples);
/// parses the audio data in the network packet.
/// default implementation assumes packet contains raw audio samples after stream properties
virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int networkSamples);
int writeDroppableSilentSamples(int numSilentSamples);
/// writes silent samples to the buffer; these silent samples may be dropped to reduce latency caused by the buffer
virtual int writeDroppableSilentSamples(int silentSamples);
/// writes the last written frame repeatedly, gradually fading to silence.
/// used for writing samples for dropped packets.
virtual int writeLastFrameRepeatedWithFade(int samples);
protected:
@ -147,8 +204,6 @@ protected:
// if jitter buffer is dynamic, this determines what method of calculating _desiredJitterBufferFrames
// if true, Philip's timegap std dev calculation is used. Otherwise, Freddy's max timegap calculation is used
bool _useStDevForJitterCalc;
int _calculatedJitterBufferFramesUsingMaxGap;
int _calculatedJitterBufferFramesUsingStDev;
int _desiredJitterBufferFrames;
@ -168,16 +223,28 @@ protected:
SequenceNumberStats _incomingSequenceNumberStats;
quint64 _lastFrameReceivedTime;
MovingMinMaxAvg<quint64> _interframeTimeGapStatsForJitterCalc;
StDev _stdev;
MovingMinMaxAvg<quint64> _interframeTimeGapStatsForStatsPacket;
quint64 _lastPacketReceivedTime;
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredCalcOnTooManyStarves; // for Freddy's method
int _calculatedJitterBufferFramesUsingMaxGap;
StDev _stdevStatsForDesiredCalcOnTooManyStarves; // for Philip's method
int _calculatedJitterBufferFramesUsingStDev; // the most recent desired frames calculated by Philip's method
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredReduction;
int _starveHistoryWindowSeconds;
RingBufferHistory<quint64> _starveHistory;
int _starveThreshold;
TimeWeightedAvg<int> _framesAvailableStat;
// this value is based on the time-weighted avg from _framesAvailableStat. it is only used for
// this value is periodically updated with the time-weighted avg from _framesAvailableStat. it is only used for
// dropping silent frames right now.
int _currentJitterBufferFrames;
MovingMinMaxAvg<quint64> _timeGapStatsForStatsPacket;
bool _repetitionWithFade;
};
float calculateRepeatedFrameFadeFactor(int indexOfRepeat);
#endif // hifi_InboundAudioStream_h
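Taken together, the virtuals above form the per-packet pipeline: parseStreamProperties() reports how many network samples a packet carries, parseAudioData() consumes the payload, and gaps from dropped packets are filled with either droppable silence or the last frame repeated with fade, depending on _repetitionWithFade. A toy model of just that fill-policy dispatch (names and the frame size are illustrative; the real methods write into the ring buffer):

    #include <cstdio>

    struct ToyStream {
        bool repetitionWithFade;

        int writeDroppableSilentSamples(int n) {
            printf("filling %d samples with droppable silence\n", n);
            return n;
        }
        int writeLastFrameRepeatedWithFade(int n) {
            printf("filling %d samples by repeating the last frame, faded\n", n);
            return n;
        }
        // models writeSamplesForDroppedPackets(): choose the fill strategy
        int writeSamplesForDroppedPackets(int networkSamples) {
            return repetitionWithFade ? writeLastFrameRepeatedWithFade(networkSamples)
                                      : writeDroppableSilentSamples(networkSamples);
        }
    };

    int main() {
        ToyStream withFade{true};
        ToyStream silenceOnly{false};
        withFade.writeSamplesForDroppedPackets(480);     // one dropped frame (assumed size)
        silenceOnly.writeSamplesForDroppedPackets(480);
        return 0;
    }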

View file

@ -19,8 +19,8 @@
#include "InjectedAudioStream.h"
InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired),
InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, const InboundAudioStream::Settings& settings) :
PositionalAudioStream(PositionalAudioStream::Injector, false, settings),
_streamIdentifier(streamIdentifier),
_radius(0.0f),
_attenuationRatio(0)

View file

@ -18,7 +18,7 @@
class InjectedAudioStream : public PositionalAudioStream {
public:
InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
InjectedAudioStream(const QUuid& streamIdentifier, const InboundAudioStream::Settings& settings);
float getRadius() const { return _radius; }
float getAttenuationRatio() const { return _attenuationRatio; }

View file

@ -11,13 +11,7 @@
#include "MixedAudioStream.h"
MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
: InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
: InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
{
}
int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
// mixed audio packets do not have any info between the seq num and the audio data.
numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
return 0;
}

View file

@ -17,12 +17,9 @@
class MixedAudioStream : public InboundAudioStream {
public:
MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
protected:
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
};
#endif // hifi_MixedAudioStream_h

View file

@ -11,35 +11,53 @@
#include "MixedProcessedAudioStream.h"
MixedProcessedAudioStream ::MixedProcessedAudioStream (int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
: InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
static const int STEREO_FACTOR = 2;
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
: InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
{
}
void MixedProcessedAudioStream::outputFormatChanged(int outputFormatChannelCountTimesSampleRate) {
_outputFormatChannelsTimesSampleRate = outputFormatChannelCountTimesSampleRate;
int deviceOutputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * _outputFormatChannelsTimesSampleRate / SAMPLE_RATE;
int deviceOutputFrameSize = networkToDeviceSamples(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);
_ringBuffer.resizeForFrameSize(deviceOutputFrameSize);
}
int MixedProcessedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
// mixed audio packets do not have any info between the seq num and the audio data.
int numNetworkSamples = packetAfterSeqNum.size() / sizeof(int16_t);
int MixedProcessedAudioStream::writeDroppableSilentSamples(int silentSamples) {
int deviceSilentSamplesWritten = InboundAudioStream::writeDroppableSilentSamples(networkToDeviceSamples(silentSamples));
emit addedSilence(deviceToNetworkSamples(deviceSilentSamplesWritten) / STEREO_FACTOR);
// since numAudioSamples is used to know how many samples to add for each dropped packet before this one,
// we want to set it to the number of device audio samples since this stream contains device audio samples, not network samples.
const int STEREO_DIVIDER = 2;
numAudioSamples = numNetworkSamples * _outputFormatChannelsTimesSampleRate / (STEREO_DIVIDER * SAMPLE_RATE);
return 0;
return deviceSilentSamplesWritten;
}
int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
int MixedProcessedAudioStream::writeLastFrameRepeatedWithFade(int samples) {
int deviceSamplesWritten = InboundAudioStream::writeLastFrameRepeatedWithFade(networkToDeviceSamples(samples));
emit addedLastFrameRepeatedWithFade(deviceToNetworkSamples(deviceSamplesWritten) / STEREO_FACTOR);
return deviceSamplesWritten;
}
int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int networkSamples) {
emit addedStereoSamples(packetAfterStreamProperties);
QByteArray outputBuffer;
emit processSamples(packetAfterStreamProperties, outputBuffer);
_ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());
return packetAfterStreamProperties.size();
}
int MixedProcessedAudioStream::networkToDeviceSamples(int networkSamples) {
return (quint64)networkSamples * (quint64)_outputFormatChannelsTimesSampleRate / (quint64)(STEREO_FACTOR * SAMPLE_RATE);
}
int MixedProcessedAudioStream::deviceToNetworkSamples(int deviceSamples) {
return (quint64)deviceSamples * (quint64)(STEREO_FACTOR * SAMPLE_RATE) / (quint64)_outputFormatChannelsTimesSampleRate;
}
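The two conversion helpers are plain channel-count/sample-rate ratios, promoted to quint64 to avoid 32-bit overflow in the intermediate product. A worked example, assuming a 24 kHz stereo network format feeding a 48 kHz stereo output device (so _outputFormatChannelsTimesSampleRate = 96000); both rates are assumptions for illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int STEREO_FACTOR = 2;
        const int SAMPLE_RATE = 24000;                        // assumed network sample rate
        const int outputFormatChannelsTimesSampleRate = 2 * 48000;

        int networkSamples = 480;                             // assumed network frame size
        int deviceSamples = (int)((uint64_t)networkSamples * outputFormatChannelsTimesSampleRate
                                  / (uint64_t)(STEREO_FACTOR * SAMPLE_RATE));
        int roundTrip = (int)((uint64_t)deviceSamples * (STEREO_FACTOR * SAMPLE_RATE)
                              / (uint64_t)outputFormatChannelsTimesSampleRate);

        // 480 network samples -> 960 device samples -> 480 network samples
        printf("%d -> %d -> %d\n", networkSamples, deviceSamples, roundTrip);
        return 0;
    }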

View file

@ -14,21 +14,32 @@
#include "InboundAudioStream.h"
class Audio;
class MixedProcessedAudioStream : public InboundAudioStream {
Q_OBJECT
public:
MixedProcessedAudioStream (int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
signals:
void addedSilence(int silentSamplesPerChannel);
void addedLastFrameRepeatedWithFade(int samplesPerChannel);
void addedStereoSamples(const QByteArray& samples);
void processSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
public:
void outputFormatChanged(int outputFormatChannelCountTimesSampleRate);
protected:
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
int writeDroppableSilentSamples(int silentSamples);
int writeLastFrameRepeatedWithFade(int samples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int networkSamples);
private:
int networkToDeviceSamples(int networkSamples);
int deviceToNetworkSamples(int deviceSamples);
private:
int _outputFormatChannelsTimesSampleRate;

View file

@ -21,16 +21,16 @@
#include <PacketHeaders.h>
#include <UUID.h>
PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers,
int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings) :
InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired),
AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, settings),
_type(type),
_position(0.0f, 0.0f, 0.0f),
_orientation(0.0f, 0.0f, 0.0f, 0.0f),
_shouldLoopbackForNode(false),
_isStereo(isStereo),
_lastPopOutputTrailingLoudness(0.0f),
_lastPopOutputLoudness(0.0f),
_listenerUnattenuatedZone(NULL)
{
// constant defined in AudioMixer.h. However, we don't want to include this here
@ -39,18 +39,23 @@ PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, b
_filter.initialize(SAMPLE_RATE, (NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)) / 2);
}
void PositionalAudioStream::updateLastPopOutputTrailingLoudness() {
float lastPopLoudness = _ringBuffer.getFrameLoudness(_lastPopOutput);
void PositionalAudioStream::resetStats() {
_lastPopOutputTrailingLoudness = 0.0f;
_lastPopOutputLoudness = 0.0f;
}
void PositionalAudioStream::updateLastPopOutputLoudnessAndTrailingLoudness() {
_lastPopOutputLoudness = _ringBuffer.getFrameLoudness(_lastPopOutput);
const int TRAILING_AVERAGE_FRAMES = 100;
const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
const float LOUDNESS_EPSILON = 0.000001f;
if (lastPopLoudness >= _lastPopOutputTrailingLoudness) {
_lastPopOutputTrailingLoudness = lastPopLoudness;
if (_lastPopOutputLoudness >= _lastPopOutputTrailingLoudness) {
_lastPopOutputTrailingLoudness = _lastPopOutputLoudness;
} else {
_lastPopOutputTrailingLoudness = (_lastPopOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * lastPopLoudness);
_lastPopOutputTrailingLoudness = (_lastPopOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * _lastPopOutputLoudness);
if (_lastPopOutputTrailingLoudness < LOUDNESS_EPSILON) {
_lastPopOutputTrailingLoudness = 0;

View file

@ -29,13 +29,15 @@ public:
Injector
};
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
int maxFramesOverDesired);
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings);
virtual void resetStats();
virtual AudioStreamStats getAudioStreamStats() const;
void updateLastPopOutputTrailingLoudness();
void updateLastPopOutputLoudnessAndTrailingLoudness();
float getLastPopOutputTrailingLoudness() const { return _lastPopOutputTrailingLoudness; }
float getLastPopOutputLoudness() const { return _lastPopOutputLoudness; }
bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
bool isStereo() const { return _isStereo; }
@ -64,6 +66,7 @@ protected:
bool _isStereo;
float _lastPopOutputTrailingLoudness;
float _lastPopOutputLoudness;
AABox* _listenerUnattenuatedZone;
AudioFilterHSF1s _filter;

View file

@ -49,8 +49,9 @@ PacketVersion versionForPacketType(PacketType type) {
switch (type) {
case PacketTypeMicrophoneAudioNoEcho:
case PacketTypeMicrophoneAudioWithEcho:
case PacketTypeSilentAudioFrame:
return 2;
case PacketTypeSilentAudioFrame:
return 3;
case PacketTypeMixedAudio:
return 1;
case PacketTypeAvatarData:

View file

@ -486,14 +486,6 @@ void ScriptEngine::run() {
// pack a placeholder value for sequence number for now, will be packed when destination node is known
int numPreSequenceNumberBytes = audioPacket.size();
packetStream << (quint16) 0;
// assume scripted avatar audio is mono and set channel flag to zero
packetStream << (quint8) 0;
// use the orientation and position of this avatar for the source of this audio
packetStream.writeRawData(reinterpret_cast<const char*>(&_avatarData->getPosition()), sizeof(glm::vec3));
glm::quat headOrientation = _avatarData->getHeadOrientation();
packetStream.writeRawData(reinterpret_cast<const char*>(&headOrientation), sizeof(glm::quat));
if (silentFrame) {
if (!_isListeningToAudioStream) {
@ -503,12 +495,20 @@ void ScriptEngine::run() {
// write the number of silent samples so the audio-mixer can uphold timing
packetStream.writeRawData(reinterpret_cast<const char*>(&SCRIPT_AUDIO_BUFFER_SAMPLES), sizeof(int16_t));
} else if (nextSoundOutput) {
// write the raw audio data
packetStream.writeRawData(reinterpret_cast<const char*>(nextSoundOutput),
numAvailableSamples * sizeof(int16_t));
}
} else if (nextSoundOutput) {
// assume scripted avatar audio is mono and set channel flag to zero
packetStream << (quint8)0;
// use the orientation and position of this avatar for the source of this audio
packetStream.writeRawData(reinterpret_cast<const char*>(&_avatarData->getPosition()), sizeof(glm::vec3));
glm::quat headOrientation = _avatarData->getHeadOrientation();
packetStream.writeRawData(reinterpret_cast<const char*>(&headOrientation), sizeof(glm::quat));
// write the raw audio data
packetStream.writeRawData(reinterpret_cast<const char*>(nextSoundOutput), numAvailableSamples * sizeof(int16_t));
}
// write audio packet to AudioMixer nodes
NodeList* nodeList = NodeList::getInstance();
foreach(const SharedNodePointer& node, nodeList->getNodeHash()) {

View file

@ -18,45 +18,63 @@
#include "RingBufferHistory.h"
template <typename T>
class MovingMinMaxAvg {
class MinMaxAvg {
public:
MinMaxAvg()
: _min(std::numeric_limits<T>::max()),
_max(std::numeric_limits<T>::min()),
_average(0.0),
_samples(0)
{}
void reset() {
_min = std::numeric_limits<T>::max();
_max = std::numeric_limits<T>::min();
_average = 0.0;
_samples = 0;
}
void update(T sample) {
if (sample < _min) {
_min = sample;
}
if (sample > _max) {
_max = sample;
}
double totalSamples = _samples + 1;
_average = _average * ((double)_samples / totalSamples)
+ (double)sample / totalSamples;
_samples++;
}
void update(const MinMaxAvg<T>& other) {
if (other._min < _min) {
_min = other._min;
}
if (other._max > _max) {
_max = other._max;
}
double totalSamples = _samples + other._samples;
_average = _average * ((double)_samples / totalSamples)
+ other._average * ((double)other._samples / totalSamples);
_samples += other._samples;
}
T getMin() const { return _min; }
T getMax() const { return _max; }
double getAverage() const { return _average; }
int getSamples() const { return _samples; }
double getSum() const { return _samples * _average; }
private:
class Stats {
public:
Stats()
: _min(std::numeric_limits<T>::max()),
_max(std::numeric_limits<T>::min()),
_average(0.0) {}
void updateWithSample(T sample, int& numSamplesInAverage) {
if (sample < _min) {
_min = sample;
}
if (sample > _max) {
_max = sample;
}
_average = _average * ((double)numSamplesInAverage / (numSamplesInAverage + 1))
+ (double)sample / (numSamplesInAverage + 1);
numSamplesInAverage++;
}
void updateWithOtherStats(const Stats& other, int& numStatsInAverage) {
if (other._min < _min) {
_min = other._min;
}
if (other._max > _max) {
_max = other._max;
}
_average = _average * ((double)numStatsInAverage / (numStatsInAverage + 1))
+ other._average / (numStatsInAverage + 1);
numStatsInAverage++;
}
T _min;
T _max;
double _average;
};
T _min;
T _max;
double _average;
int _samples;
};
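The weighted merge in update(const MinMaxAvg<T>&) keeps the combined average exact: each side's average is scaled by its share of the total sample count. A quick numeric check using the class exactly as declared above:

    // merging {1, 2, 3} (avg 2.0) with {10, 20} (avg 15.0)
    MinMaxAvg<int> a;
    a.update(1); a.update(2); a.update(3);
    MinMaxAvg<int> b;
    b.update(10); b.update(20);

    a.update(b);
    // a.getAverage() == 2.0 * 3/5 + 15.0 * 2/5 == 7.2  (the true mean of all 5 samples)
    // a.getMin() == 1, a.getMax() == 20, a.getSamples() == 5, a.getSum() == 36.0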
template <typename T>
class MovingMinMaxAvg {
public:
// This class collects 3 stats (min, max, avg) over a moving window of samples.
// The moving window contains _windowIntervals * _intervalLength samples.
@ -66,66 +84,98 @@ public:
// this class with MovingMinMaxAvg(100, 50). If you want a moving min of the past 100 samples updated on every
// new sample, instantiate this class with MovingMinMaxAvg(1, 100).
/// use intervalLength = 0 to use in manual mode, where the currentIntervalComplete() function must
/// be called to complete an interval
MovingMinMaxAvg(int intervalLength, int windowIntervals)
: _intervalLength(intervalLength),
_windowIntervals(windowIntervals),
_overallStats(),
_samplesCollected(0),
_windowStats(),
_existingSamplesInCurrentInterval(0),
_currentIntervalStats(),
_intervalStats(windowIntervals),
_newStatsAvailable(false)
{}
void reset() {
_overallStats = Stats();
_samplesCollected = 0;
_windowStats = Stats();
_existingSamplesInCurrentInterval = 0;
_currentIntervalStats = Stats();
_overallStats.reset();
_windowStats.reset();
_currentIntervalStats.reset();
_intervalStats.clear();
_newStatsAvailable = false;
}
void setWindowIntervals(int windowIntervals) {
_windowIntervals = windowIntervals;
_overallStats.reset();
_windowStats.reset();
_currentIntervalStats.reset();
_intervalStats.setCapacity(_windowIntervals);
_newStatsAvailable = false;
}
void update(T newSample) {
// update overall stats
_overallStats.updateWithSample(newSample, _samplesCollected);
_overallStats.update(newSample);
// update the current interval stats
_currentIntervalStats.updateWithSample(newSample, _existingSamplesInCurrentInterval);
_currentIntervalStats.update(newSample);
// if the current interval of samples is now full, record its stats into our past intervals' stats
if (_existingSamplesInCurrentInterval == _intervalLength) {
// record current interval's stats, then reset them
_intervalStats.insert(_currentIntervalStats);
_currentIntervalStats = Stats();
_existingSamplesInCurrentInterval = 0;
// update the window's stats by combining the intervals' stats
typename RingBufferHistory<Stats>::Iterator i = _intervalStats.begin();
typename RingBufferHistory<Stats>::Iterator end = _intervalStats.end();
_windowStats = Stats();
int intervalsIncludedInWindowStats = 0;
while (i != end) {
_windowStats.updateWithOtherStats(*i, intervalsIncludedInWindowStats);
i++;
}
_newStatsAvailable = true;
// NOTE: if _intervalLength is 0 (manual mode), currentIntervalComplete() will not be called here.
if (_currentIntervalStats.getSamples() == _intervalLength) {
currentIntervalComplete();
}
}
/// This function can be called to manually control when each interval ends. For example, if each interval
/// needs to last T seconds as opposed to N samples, this function should be called every T seconds.
void currentIntervalComplete() {
// record current interval's stats, then reset them
_intervalStats.insert(_currentIntervalStats);
_currentIntervalStats.reset();
// update the window's stats by combining the intervals' stats
typename RingBufferHistory< MinMaxAvg<T> >::Iterator i = _intervalStats.begin();
typename RingBufferHistory< MinMaxAvg<T> >::Iterator end = _intervalStats.end();
_windowStats.reset();
while (i != end) {
_windowStats.update(*i);
++i;
}
_newStatsAvailable = true;
}
bool getNewStatsAvailableFlag() const { return _newStatsAvailable; }
void clearNewStatsAvailableFlag() { _newStatsAvailable = false; }
T getMin() const { return _overallStats._min; }
T getMax() const { return _overallStats._max; }
double getAverage() const { return _overallStats._average; }
T getWindowMin() const { return _windowStats._min; }
T getWindowMax() const { return _windowStats._max; }
double getWindowAverage() const { return _windowStats._average; }
T getMin() const { return _overallStats.getMin(); }
T getMax() const { return _overallStats.getMax(); }
double getAverage() const { return _overallStats.getAverage(); }
int getSamples() const { return _overallStats.getSamples(); }
double getSum() const { return _overallStats.getSum(); }
T getWindowMin() const { return _windowStats.getMin(); }
T getWindowMax() const { return _windowStats.getMax(); }
double getWindowAverage() const { return _windowStats.getAverage(); }
int getWindowSamples() const { return _windowStats.getSamples(); }
double getWindowSum() const { return _windowStats.getSum(); }
T getCurrentIntervalMin() const { return _currentIntervalStats.getMin(); }
T getCurrentIntervalMax() const { return _currentIntervalStats.getMax(); }
double getCurrentIntervalAverage() const { return _currentIntervalStats.getAverage(); }
int getCurrentIntervalSamples() const { return _currentIntervalStats.getSamples(); }
double getCurrentIntervalSum() const { return _currentIntervalStats.getSum(); }
const MinMaxAvg<T>& getOverallStats() const{ return _overallStats; }
const MinMaxAvg<T>& getWindowStats() const{ return _windowStats; }
const MinMaxAvg<T>& getCurrentIntervalStats() const { return _currentIntervalStats; }
MinMaxAvg<T> getLastCompleteIntervalStats() const {
const MinMaxAvg<T>* stats = _intervalStats.getNewestEntry();
return stats == NULL ? MinMaxAvg<T>() : *stats;
}
bool isWindowFilled() const { return _intervalStats.isFilled(); }
@ -134,18 +184,16 @@ private:
int _windowIntervals;
// these are min/max/avg stats for all samples collected.
Stats _overallStats;
int _samplesCollected;
MinMaxAvg<T> _overallStats;
// these are the min/max/avg stats for the samples in the moving window
Stats _windowStats;
int _existingSamplesInCurrentInterval;
MinMaxAvg<T> _windowStats;
// these are the min/max/avg stats for the current interval
Stats _currentIntervalStats;
// these are the min/max/avg stats for the samples in the current interval
MinMaxAvg<T> _currentIntervalStats;
// these are stored stats for the past intervals in the window
RingBufferHistory<Stats> _intervalStats;
RingBufferHistory< MinMaxAvg<T> > _intervalStats;
bool _newStatsAvailable;
};
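A short usage sketch covering both modes described above: fixed-length intervals driven purely by update(), and manual mode (intervalLength = 0) where the caller closes each interval, e.g. once per second to get a time-based window. The values are illustrative:

    // window = 50 intervals of 100 samples each (window stats recalculated every 100 samples)
    MovingMinMaxAvg<int> sampleWindow(100, 50);
    // manual mode: 30 intervals whose length the caller controls
    MovingMinMaxAvg<quint64> timeWindow(0, 30);

    for (int i = 0; i < 1000; i++) {
        sampleWindow.update(i);
        timeWindow.update((quint64)i);
    }
    timeWindow.currentIntervalComplete();    // e.g. called from a once-per-second timer

    if (sampleWindow.getNewStatsAvailableFlag()) {
        double avg = sampleWindow.getWindowAverage();    // avg over the moving window
        int max = sampleWindow.getWindowMax();
        sampleWindow.clearNewStatsAvailableFlag();
    }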

View file

@ -35,6 +35,14 @@ public:
_numEntries = 0;
}
void setCapacity(int capacity) {
_size = capacity + 1;
_capacity = capacity;
_newestEntryAtIndex = 0;
_numEntries = 0;
_buffer.resize(_size);
}
void insert(const T& entry) {
// increment newest entry index cyclically
_newestEntryAtIndex = (_newestEntryAtIndex == _size - 1) ? 0 : _newestEntryAtIndex + 1;
@ -83,9 +91,14 @@ private:
QVector<T> _buffer;
public:
class Iterator : public std::iterator < std::forward_iterator_tag, T > {
class Iterator : public std::iterator < std::random_access_iterator_tag, T > {
public:
Iterator(T* bufferFirst, T* bufferLast, T* at) : _bufferFirst(bufferFirst), _bufferLast(bufferLast), _at(at) {}
Iterator(T* bufferFirst, T* bufferLast, T* newestAt, T* at)
: _bufferFirst(bufferFirst),
_bufferLast(bufferLast),
_bufferLength(bufferLast - bufferFirst + 1),
_newestAt(newestAt),
_at(at) {}
bool operator==(const Iterator& rhs) { return _at == rhs._at; }
bool operator!=(const Iterator& rhs) { return _at != rhs._at; }
@ -103,20 +116,95 @@ public:
return tmp;
}
Iterator& operator--() {
_at = (_at == _bufferLast) ? _bufferFirst : _at + 1;
return *this;
}
Iterator operator--(int) {
Iterator tmp(*this);
--(*this);
return tmp;
}
Iterator operator+(int add) {
Iterator sum(*this);
sum._at = atShiftedBy(add);
return sum;
}
Iterator operator-(int sub) {
Iterator sum(*this);
sum._at = atShiftedBy(-sub);
return sum;
}
Iterator& operator+=(int add) {
_at = atShiftedBy(add);
return *this;
}
Iterator& operator-=(int sub) {
_at = atShiftedBy(-sub);
return *this;
}
T& operator[](int i) {
return *(atShiftedBy(i));
}
bool operator<(const Iterator& rhs) {
return age() < rhs.age();
}
bool operator>(const Iterator& rhs) {
return age() > rhs.age();
}
bool operator<=(const Iterator& rhs) {
return age() <= rhs.age();
}
bool operator>=(const Iterator& rhs) {
return age() >= rhs.age();
}
int operator-(const Iterator& rhs) {
return age() - rhs.age();
}
private:
T* const _bufferFirst;
T* const _bufferLast;
T* atShiftedBy(int i) { // shifts i places towards _bufferFirst (towards older entries)
i = (_at - _bufferFirst - i) % _bufferLength;
if (i < 0) {
i += _bufferLength;
}
return _bufferFirst + i;
}
int age() {
int age = _newestAt - _at;
if (age < 0) {
age += _bufferLength;
}
return age;
}
T* _bufferFirst;
T* _bufferLast;
int _bufferLength;
T* _newestAt;
T* _at;
};
Iterator begin() { return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[_newestEntryAtIndex]); }
Iterator begin() { return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[_newestEntryAtIndex], &_buffer[_newestEntryAtIndex]); }
Iterator end() {
int endAtIndex = _newestEntryAtIndex - _numEntries;
if (endAtIndex < 0) {
endAtIndex += _size;
}
return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[endAtIndex]);
return Iterator(&_buffer.first(), &_buffer.last(), &_buffer[_newestEntryAtIndex], &_buffer[endAtIndex]);
}
};
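With the iterator upgraded to random access, consumers can walk entries newest-to-oldest or index by age directly. A small sketch; the capacity-taking constructor is an assumption, since only begin()/end() and the iterator operations appear in this diff:

    RingBufferHistory<int> history(10);      // assumed: constructor taking a capacity
    for (int i = 0; i < 25; i++) {
        history.insert(i);                   // only the newest 10 entries survive
    }

    RingBufferHistory<int>::Iterator it = history.begin();
    int newest = *it;                        // 24: begin() points at the newest entry
    int thirdNewest = it[2];                 // 22: operator[] indexes by age
    for (; it != history.end(); ++it) {
        // ++ walks toward older entries: visits 24, 23, ..., 15
    }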

View file

@ -27,28 +27,28 @@ void AudioRingBufferTests::runAllTests() {
int16_t readData[10000];
int readIndexAt;
AudioRingBuffer ringBuffer(10, false, 10); // makes buffer of 100 int16_t samples
for (int T = 0; T < 300; T++) {
writeIndexAt = 0;
readIndexAt = 0;
// write 73 samples, 73 samples in buffer
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 73) / sizeof(int16_t);
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 73);
assertBufferSize(ringBuffer, 73);
// read 43 samples, 30 samples in buffer
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 43) / sizeof(int16_t);
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 43);
assertBufferSize(ringBuffer, 30);
// write 70 samples, 100 samples in buffer (full)
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 70) / sizeof(int16_t);
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 70);
assertBufferSize(ringBuffer, 100);
// read 100 samples, 0 samples in buffer (empty)
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 100) / sizeof(int16_t);
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 100);
assertBufferSize(ringBuffer, 0);
@ -65,15 +65,15 @@ void AudioRingBufferTests::runAllTests() {
readIndexAt = 0;
// write 59 samples, 59 samples in buffer
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 59) / sizeof(int16_t);
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 59);
assertBufferSize(ringBuffer, 59);
// write 99 samples, 100 samples in buffer
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 99) / sizeof(int16_t);
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 99);
assertBufferSize(ringBuffer, 100);
// read 100 samples, 0 samples in buffer
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 100) / sizeof(int16_t);
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 100);
assertBufferSize(ringBuffer, 0);
// verify 100 samples of read data
@ -88,23 +88,23 @@ void AudioRingBufferTests::runAllTests() {
readIndexAt = 0;
// write 77 samples, 77 samples in buffer
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 77) / sizeof(int16_t);
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 77);
assertBufferSize(ringBuffer, 77);
// write 24 samples, 100 samples in buffer (overwrote one sample: "0")
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 24) / sizeof(int16_t);
writeIndexAt += ringBuffer.writeSamples(&writeData[writeIndexAt], 24);
assertBufferSize(ringBuffer, 100);
// write 29 silent samples, 100 samples in buffer, make sure none were added
int samplesWritten;
if ((samplesWritten = ringBuffer.addSilentFrame(29)) != 0) {
qDebug("addSilentFrame(29) incorrect! Expected: 0 Actual: %d", samplesWritten);
if ((samplesWritten = ringBuffer.addSilentSamples(29)) != 0) {
qDebug("addSilentSamples(29) incorrect! Expected: 0 Actual: %d", samplesWritten);
return;
}
assertBufferSize(ringBuffer, 100);
// read 3 samples, 97 samples in buffer (expect to read "1", "2", "3")
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 3) / sizeof(int16_t);
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 3);
for (int i = 0; i < 3; i++) {
if (readData[i] != i + 1) {
qDebug("Second readData[%d] incorrect! Expcted: %d Actual: %d", i, i + 1, readData[i]);
@ -114,14 +114,14 @@ void AudioRingBufferTests::runAllTests() {
assertBufferSize(ringBuffer, 97);
// write 4 silent samples, 100 samples in buffer
if ((samplesWritten = ringBuffer.addSilentFrame(4) / sizeof(int16_t)) != 3) {
qDebug("addSilentFrame(4) incorrect! Exptected: 3 Actual: %d", samplesWritten);
if ((samplesWritten = ringBuffer.addSilentSamples(4)) != 3) {
qDebug("addSilentSamples(4) incorrect! Exptected: 3 Actual: %d", samplesWritten);
return;
}
assertBufferSize(ringBuffer, 100);
// read back 97 samples (the non-silent samples), 3 samples in buffer (expect to read "4" thru "100")
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 97) / sizeof(int16_t);
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 97);
for (int i = 3; i < 100; i++) {
if (readData[i] != i + 1) {
qDebug("third readData[%d] incorrect! Expcted: %d Actual: %d", i, i + 1, readData[i]);
@ -131,7 +131,7 @@ void AudioRingBufferTests::runAllTests() {
assertBufferSize(ringBuffer, 3);
// read back 3 silent samples, 0 samples in buffer
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 3) / sizeof(int16_t);
readIndexAt += ringBuffer.readSamples(&readData[readIndexAt], 3);
for (int i = 100; i < 103; i++) {
if (readData[i] != 0) {
qDebug("Fourth readData[%d] incorrect! Expcted: %d Actual: %d", i, 0, readData[i]);
@ -143,4 +143,3 @@ void AudioRingBufferTests::runAllTests() {
qDebug() << "PASSED";
}

View file

@ -271,9 +271,10 @@ void runReceive(const char* addressOption, int port, int gap, int size, int repo
quint64 networkStart = usecTimestampNow();
n = recvfrom(sockfd, inputBuffer, size, 0, NULL, NULL); // we don't care about where it came from
quint64 networkEnd = usecTimestampNow();
float networkElapsed = (float)(networkEnd - networkStart);
if (n < 0) {
std::cout << "Receive error: " << strerror(errno) << "\n";
}

View file

@ -16,6 +16,7 @@ int main(int argc, char** argv) {
MovingMinMaxAvgTests::runAllTests();
MovingPercentileTests::runAllTests();
AngularConstraintTests::runAllTests();
printf("tests complete, press enter to exit\n");
getchar();
return 0;
}