added staticDesiredJitterBufferFrames feature; AudioMixer crashing
moved some callbacks from Application::updateMyAvatar() to Application::update()
Parent: efd4b7ad67
Commit: daeb2a898d
16 changed files with 122 additions and 100 deletions
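The heart of the change: each InboundAudioStream now carries a configurable static desired jitter buffer length next to the existing dynamic mode, and the AudioMixer and Interface client both plumb that value through. A minimal standalone sketch of the selection rule (simplified names, not the actual classes from this diff):

    // Sketch: how the desired jitter buffer length is chosen after this commit.
    // Static mode pins the target to a configured frame count; dynamic mode
    // starts at 1 frame and adapts to measured packet jitter.
    struct JitterBufferConfig {
        bool dynamicJitterBuffers;           // "A-dynamic-jitter-buffer" setting
        int staticDesiredJitterBufferFrames; // "B-desired-jitter-buffer-frames" setting
    };

    int initialDesiredFrames(const JitterBufferConfig& config) {
        // Mirrors the new InboundAudioStream constructor initializer.
        return config.dynamicJitterBuffers ? 1 : config.staticDesiredJitterBufferFrames;
    }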
@@ -68,6 +68,7 @@ void attachNewNodeDataToNode(Node *newNode) {
 }
 
 bool AudioMixer::_useDynamicJitterBuffers = false;
+int AudioMixer::_staticDesiredJitterBufferFrames = 0;
 int AudioMixer::_maxFramesOverDesired = 0;
 
 AudioMixer::AudioMixer(const QByteArray& packet) :
@@ -436,48 +437,58 @@ void AudioMixer::run() {
     if (settingsObject.contains(AUDIO_GROUP_KEY)) {
         QJsonObject audioGroupObject = settingsObject[AUDIO_GROUP_KEY].toObject();
 
-        const QString UNATTENUATED_ZONE_KEY = "unattenuated-zone";
-
-        QString unattenuatedZoneString = audioGroupObject[UNATTENUATED_ZONE_KEY].toString();
-        if (!unattenuatedZoneString.isEmpty()) {
-            QStringList zoneStringList = unattenuatedZoneString.split(',');
-
-            glm::vec3 sourceCorner(zoneStringList[0].toFloat(), zoneStringList[1].toFloat(), zoneStringList[2].toFloat());
-            glm::vec3 sourceDimensions(zoneStringList[3].toFloat(), zoneStringList[4].toFloat(), zoneStringList[5].toFloat());
-
-            glm::vec3 listenerCorner(zoneStringList[6].toFloat(), zoneStringList[7].toFloat(), zoneStringList[8].toFloat());
-            glm::vec3 listenerDimensions(zoneStringList[9].toFloat(), zoneStringList[10].toFloat(), zoneStringList[11].toFloat());
-
-            _sourceUnattenuatedZone = new AABox(sourceCorner, sourceDimensions);
-            _listenerUnattenuatedZone = new AABox(listenerCorner, listenerDimensions);
-
-            glm::vec3 sourceCenter = _sourceUnattenuatedZone->calcCenter();
-            glm::vec3 destinationCenter = _listenerUnattenuatedZone->calcCenter();
-
-            qDebug() << "There is an unattenuated zone with source center at"
-                << QString("%1, %2, %3").arg(sourceCenter.x).arg(sourceCenter.y).arg(sourceCenter.z);
-            qDebug() << "Buffers inside this zone will not be attenuated inside a box with center at"
-                << QString("%1, %2, %3").arg(destinationCenter.x).arg(destinationCenter.y).arg(destinationCenter.z);
-        }
 
         // check the payload to see if we have asked for dynamicJitterBuffer support
-        const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "dynamic-jitter-buffer";
+        const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "A-dynamic-jitter-buffer";
         bool shouldUseDynamicJitterBuffers = audioGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
         if (shouldUseDynamicJitterBuffers) {
             qDebug() << "Enable dynamic jitter buffers.";
             _useDynamicJitterBuffers = true;
         } else {
-            qDebug() << "Dynamic jitter buffers disabled, using old behavior.";
+            qDebug() << "Dynamic jitter buffers disabled.";
             _useDynamicJitterBuffers = false;
         }
 
-        const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "max-frames-over-desired";
         bool ok;
+
+        const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "B-desired-jitter-buffer-frames";
+        _staticDesiredJitterBufferFrames = audioGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
+        if (!ok) {
+            _staticDesiredJitterBufferFrames = DEFAULT_DESIRED_JITTER_BUFFER_FRAMES;
+        }
+        qDebug() << "Static desired jitter buffer frames:" << _staticDesiredJitterBufferFrames;
+
+        const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "C-max-frames-over-desired";
         _maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
         if (!ok) {
             _maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
         }
         qDebug() << "Max frames over desired:" << _maxFramesOverDesired;
 
+        const QString UNATTENUATED_ZONE_KEY = "D-unattenuated-zone";
+
+        QString unattenuatedZoneString = audioGroupObject[UNATTENUATED_ZONE_KEY].toString();
+        if (!unattenuatedZoneString.isEmpty()) {
+            QStringList zoneStringList = unattenuatedZoneString.split(',');
+
+            glm::vec3 sourceCorner(zoneStringList[0].toFloat(), zoneStringList[1].toFloat(), zoneStringList[2].toFloat());
+            glm::vec3 sourceDimensions(zoneStringList[3].toFloat(), zoneStringList[4].toFloat(), zoneStringList[5].toFloat());
+
+            glm::vec3 listenerCorner(zoneStringList[6].toFloat(), zoneStringList[7].toFloat(), zoneStringList[8].toFloat());
+            glm::vec3 listenerDimensions(zoneStringList[9].toFloat(), zoneStringList[10].toFloat(), zoneStringList[11].toFloat());
+
+            _sourceUnattenuatedZone = new AABox(sourceCorner, sourceDimensions);
+            _listenerUnattenuatedZone = new AABox(listenerCorner, listenerDimensions);
+
+            glm::vec3 sourceCenter = _sourceUnattenuatedZone->calcCenter();
+            glm::vec3 destinationCenter = _listenerUnattenuatedZone->calcCenter();
+
+            qDebug() << "There is an unattenuated zone with source center at"
+                << QString("%1, %2, %3").arg(sourceCenter.x).arg(sourceCenter.y).arg(sourceCenter.z);
+            qDebug() << "Buffers inside this zone will not be attenuated inside a box with center at"
+                << QString("%1, %2, %3").arg(destinationCenter.x).arg(destinationCenter.y).arg(destinationCenter.z);
+        }
     }
 
     int nextFrame = 0;
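The settings parsing above leans on QString::toInt()'s ok flag to fall back to a compile-time default whenever a domain settings field is empty or non-numeric. A self-contained sketch of that pattern (the helper name and example defaults are illustrative, not part of the repository):

    #include <QJsonObject>
    #include <QString>

    // Read an integer setting that arrives as a JSON string, falling back to a
    // default when the field is missing, empty, or not a number.
    int readIntSetting(const QJsonObject& group, const QString& key, int defaultValue) {
        bool ok = false;
        int value = group[key].toString().toInt(&ok);
        return ok ? value : defaultValue;
    }

    // Usage against an audio group like the one parsed in AudioMixer::run():
    //   int desiredFrames = readIntSetting(audioGroupObject, "B-desired-jitter-buffer-frames", 1);
    //   int maxOverDesired = readIntSetting(audioGroupObject, "C-max-frames-over-desired", 10);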
@@ -38,6 +38,7 @@ public slots:
     void sendStatsPacket();
 
     static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; }
+    static int getStaticDesiredJitterBufferFrames() { return _staticDesiredJitterBufferFrames; }
     static int getMaxFramesOverDesired() { return _maxFramesOverDesired; }
 
 private:
@@ -62,6 +63,7 @@ private:
     AABox* _listenerUnattenuatedZone;
 
     static bool _useDynamicJitterBuffers;
+    static int _staticDesiredJitterBufferFrames;
     static int _maxFramesOverDesired;
 
     quint64 _lastSendAudioStreamStatsTime;
@@ -74,7 +74,8 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
             bool isStereo = channelFlag == 1;
 
             _audioStreams.insert(nullUUID,
-                matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers(), AudioMixer::getMaxFramesOverDesired()));
+                matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers(),
+                    AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
         } else {
             matchingStream = _audioStreams.value(nullUUID);
         }
@@ -87,9 +88,8 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
 
         if (!_audioStreams.contains(streamIdentifier)) {
             _audioStreams.insert(streamIdentifier,
-                matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(), AudioMixer::getMaxFramesOverDesired()));
-        } else {
-            matchingStream = _audioStreams.value(streamIdentifier);
+                matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(),
+                    AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
         }
     }
 
@@ -13,8 +13,8 @@
 
 #include "AvatarAudioStream.h"
 
-AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int maxFramesOverDesired) :
-    PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer, maxFramesOverDesired)
+AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
+    PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired)
 {
 }
 
@@ -18,7 +18,7 @@
 
 class AvatarAudioStream : public PositionalAudioStream {
 public:
-    AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int maxFramesOverDesired);
+    AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
 
 private:
     // disallow copying of AvatarAudioStream objects
@@ -3,23 +3,29 @@
     "label": "Audio",
     "assignment-types": [0],
     "settings": {
-        "unattenuated-zone": {
-            "label": "Unattenuated Zone",
-            "help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)",
-            "placeholder": "no zone",
-            "default": ""
-        },
-        "max-frames-over-desired": {
-            "label": "Max Frames Over Desired",
-            "help": "The highest number of frames an AudioMixer's ringbuffer can exceed the desired jitter buffer frames by",
-            "placeholder": "10",
-            "default": ""
-        },
-        "dynamic-jitter-buffer": {
+        "A-dynamic-jitter-buffer": {
             "type": "checkbox",
             "label": "Dynamic Jitter Buffers",
             "help": "Dynamically buffer client audio based on perceived jitter in packet receipt timing",
             "default": false
         },
+        "B-desired-jitter-buffer-frames": {
+            "label": "Desired Jitter Buffer Frames",
+            "help": "If dynamic jitter buffers is disabled, this determines the target number of frames maintained by the AudioMixer's jitter buffers",
+            "placeholder": "1",
+            "default": "1"
+        },
+        "C-max-frames-over-desired": {
+            "label": "Max Frames Over Desired",
+            "help": "The highest number of frames an AudioMixer's ringbuffer can exceed the desired jitter buffer frames by",
+            "placeholder": "10",
+            "default": "10"
+        },
+        "D-unattenuated-zone": {
+            "label": "Unattenuated Zone",
+            "help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)",
+            "placeholder": "no zone",
+            "default": ""
+        }
     }
 }
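The settings keys gain A-/B-/C-/D- prefixes, which appears intended to control their order on the domain settings page while keeping the lookups in AudioMixer::run() in step with the descriptions above. A hedged sketch of building an equivalent audio group, for example in a test (values are examples only):

    #include <QJsonObject>

    // Assemble an audio settings group with the keys described above.
    // The numeric fields are strings because the settings page submits text.
    QJsonObject makeAudioSettingsGroup() {
        QJsonObject audioGroup;
        audioGroup["A-dynamic-jitter-buffer"] = false;   // checkbox -> bool
        audioGroup["B-desired-jitter-buffer-frames"] = "1";
        audioGroup["C-max-frames-over-desired"] = "10";
        audioGroup["D-unattenuated-zone"] = "";          // empty string means no zone
        return audioGroup;
    }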
@@ -1709,7 +1709,10 @@ void Application::init() {
 
     Menu::getInstance()->loadSettings();
     if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
-        _audio.overrideDesiredJitterBufferFramesTo(Menu::getInstance()->getAudioJitterBufferFrames());
+        _audio.setDynamicJitterBuffers(false);
+        _audio.setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
+    } else {
+        _audio.setDynamicJitterBuffers(true);
     }
 
     _audio.setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired());
@@ -2107,21 +2110,6 @@ void Application::update(float deltaTime) {
         // let external parties know we're updating
         emit simulating(deltaTime);
     }
-}
-
-void Application::updateMyAvatar(float deltaTime) {
-    bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
-    PerformanceWarning warn(showWarnings, "Application::updateMyAvatar()");
-
-    _myAvatar->update(deltaTime);
-
-    {
-        // send head/hand data to the avatar mixer and voxel server
-        PerformanceTimer perfTimer("send");
-        QByteArray packet = byteArrayWithPopulatedHeader(PacketTypeAvatarData);
-        packet.append(_myAvatar->toByteArray());
-        controlledBroadcastToNodes(packet, NodeSet() << NodeType::AvatarMixer);
-    }
 
     // Update _viewFrustum with latest camera and view frustum data...
     // NOTE: we get this from the view frustum, to make it simpler, since the
@@ -2164,16 +2152,32 @@ void Application::updateMyAvatar(float deltaTime) {
         }
     }
 
     // send packet containing downstream audio stats to the AudioMixer
     {
         quint64 sinceLastNack = now - _lastSendDownstreamAudioStats;
         if (sinceLastNack > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) {
             _lastSendDownstreamAudioStats = now;
 
             QMetaObject::invokeMethod(&_audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
         }
     }
 }
 
+void Application::updateMyAvatar(float deltaTime) {
+    bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
+    PerformanceWarning warn(showWarnings, "Application::updateMyAvatar()");
+
+    _myAvatar->update(deltaTime);
+
+    {
+        // send head/hand data to the avatar mixer and voxel server
+        PerformanceTimer perfTimer("send");
+        QByteArray packet = byteArrayWithPopulatedHeader(PacketTypeAvatarData);
+        packet.append(_myAvatar->toByteArray());
+        controlledBroadcastToNodes(packet, NodeSet() << NodeType::AvatarMixer);
+    }
+}
 
 int Application::sendNackPackets() {
 
     if (Menu::getInstance()->isOptionChecked(MenuOption::DisableNackPackets)) {
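Application::update() now owns the downstream-stats send that previously lived alongside updateMyAvatar(), gated by a timestamp so the queued cross-thread call fires roughly once per interval. A standalone sketch of that throttling pattern (the interval constant and timestamp source are assumptions, not values from this diff):

    #include <QMetaObject>
    #include <QObject>

    // Rate-limit a queued cross-thread invocation with a "last sent" timestamp.
    static quint64 lastSendUsecs = 0;
    const quint64 SEND_INTERVAL_USECS = 1000 * 1000; // assume ~1 second

    void maybeSendStats(QObject* audio, quint64 nowUsecs) {
        if (nowUsecs - lastSendUsecs > SEND_INTERVAL_USECS) {
            lastSendUsecs = nowUsecs;
            // Queued so the slot runs on the audio object's own thread.
            QMetaObject::invokeMethod(audio, "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
        }
    }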
@@ -751,10 +751,6 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
     }
 }
 
-AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
-    return _receivedAudioStream.getAudioStreamStats();
-}
-
 void Audio::sendDownstreamAudioStatsPacket() {
 
     // since this function is called every second, we'll sample some of our stats here
@@ -57,8 +57,8 @@ public:
     virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
     virtual void startDrumSound(float volume, float frequency, float duration, float decay);
 
-    void overrideDesiredJitterBufferFramesTo(int desired) { _receivedAudioStream.overrideDesiredJitterBufferFramesTo(desired); }
-    void unoverrideDesiredJitterBufferFrames() { _receivedAudioStream.unoverrideDesiredJitterBufferFrames(); }
+    void setDynamicJitterBuffers(bool dynamicJitterBuffers) { _receivedAudioStream.setDynamicJitterBuffers(dynamicJitterBuffers); }
+    void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { _receivedAudioStream.setStaticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames); }
 
     void setMaxFramesOverDesired(int maxFramesOverDesired) { _receivedAudioStream.setMaxFramesOverDesired(maxFramesOverDesired); }
 
@@ -113,7 +113,6 @@ public slots:
 
     virtual void handleAudioByteArray(const QByteArray& audioByteArray);
 
-    AudioStreamStats getDownstreamAudioStreamStats() const;
     void sendDownstreamAudioStatsPacket();
 
     bool switchInputToAudioDevice(const QString& inputDeviceName);
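On the client, Audio's override/unoverride pair is replaced by two explicit setters, so callers state the mode and the static target separately, as Application::init() and PreferencesDialog::savePreferences() now do. A condensed usage sketch (the template wrapper is illustrative; only the two setter names come from the diff):

    // Apply a user preference to any object exposing the two new setters.
    // A non-zero frame count pins the buffer; zero lets it adapt dynamically.
    template <typename AudioT>
    void applyJitterBufferPreference(AudioT& audio, int menuJitterBufferFrames) {
        if (menuJitterBufferFrames != 0) {
            audio.setDynamicJitterBuffers(false);
            audio.setStaticDesiredJitterBufferFrames(menuJitterBufferFrames);
        } else {
            audio.setDynamicJitterBuffers(true);
        }
    }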
@@ -243,9 +243,10 @@ void PreferencesDialog::savePreferences() {
 
     Menu::getInstance()->setAudioJitterBufferFrames(ui.audioJitterSpin->value());
     if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
-        Application::getInstance()->getAudio()->overrideDesiredJitterBufferFramesTo(Menu::getInstance()->getAudioJitterBufferFrames());
+        Application::getInstance()->getAudio()->setDynamicJitterBuffers(false);
+        Application::getInstance()->getAudio()->setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
     } else {
-        Application::getInstance()->getAudio()->unoverrideDesiredJitterBufferFrames();
+        Application::getInstance()->getAudio()->setDynamicJitterBuffers(true);
     }
 
     Menu::getInstance()->setMaxFramesOverDesired(ui.maxFramesOverDesiredSpin->value());
@@ -13,16 +13,16 @@
 #include "PacketHeaders.h"
 
 InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity,
-        bool dynamicJitterBuffers, int maxFramesOverDesired, bool useStDevForJitterCalc) :
+        bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc) :
     _ringBuffer(numFrameSamples, false, numFramesCapacity),
     _lastPopSucceeded(false),
     _lastPopOutput(),
     _dynamicJitterBuffers(dynamicJitterBuffers),
-    _dynamicJitterBuffersOverride(false),
+    _staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
     _useStDevForJitterCalc(useStDevForJitterCalc),
     _calculatedJitterBufferFramesUsingMaxGap(0),
     _calculatedJitterBufferFramesUsingStDev(0),
-    _desiredJitterBufferFrames(1),
+    _desiredJitterBufferFrames(dynamicJitterBuffers ? 1 : staticDesiredJitterBufferFrames),
     _maxFramesOverDesired(maxFramesOverDesired),
     _isStarved(true),
    _hasStarted(false),
@@ -49,7 +49,9 @@ void InboundAudioStream::reset() {
 }
 
 void InboundAudioStream::resetStats() {
-    _desiredJitterBufferFrames = 1;
+    if (_dynamicJitterBuffers) {
+        _desiredJitterBufferFrames = 1;
+    }
     _consecutiveNotMixedCount = 0;
     _starveCount = 0;
     _silentFramesDropped = 0;
@@ -178,16 +180,15 @@ void InboundAudioStream::starved() {
     _starveCount++;
 }
 
-void InboundAudioStream::overrideDesiredJitterBufferFramesTo(int desired) {
-    _dynamicJitterBuffersOverride = true;
-    _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(desired);
-}
-
-void InboundAudioStream::unoverrideDesiredJitterBufferFrames() {
-    _dynamicJitterBuffersOverride = false;
-    if (!_dynamicJitterBuffers) {
-        _desiredJitterBufferFrames = 1;
+void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
+    if (!dynamicJitterBuffers) {
+        _desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
+    } else {
+        if (!_dynamicJitterBuffers) {
+            _desiredJitterBufferFrames = 1;
+        }
     }
+    _dynamicJitterBuffers = dynamicJitterBuffers;
 }
 
 int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
@@ -216,7 +217,7 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS
     _calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
     _interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag();
 
-    if (_dynamicJitterBuffers && !_dynamicJitterBuffersOverride && !_useStDevForJitterCalc) {
+    if (_dynamicJitterBuffers && !_useStDevForJitterCalc) {
         _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingMaxGap);
     }
 }
@@ -229,7 +230,7 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS
     _calculatedJitterBufferFramesUsingStDev = (int)ceilf(NUM_STANDARD_DEVIATIONS * _stdev.getStDev() / USECS_PER_FRAME);
     _stdev.reset();
 
-    if (_dynamicJitterBuffers && !_dynamicJitterBuffersOverride && _useStDevForJitterCalc) {
+    if (_dynamicJitterBuffers && _useStDevForJitterCalc) {
         _desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingStDev);
     }
 }
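setDynamicJitterBuffers() is the interesting piece of state handling: switching to static mode snaps the target to the stored static value, while switching to dynamic mode resets the target to 1 frame only when the stream was previously static, so an already-adapting stream keeps its current estimate. A minimal restatement of that rule (simplified class, not the real InboundAudioStream):

    // Simplified mode switch for a jitter buffer target, mirroring the
    // InboundAudioStream::setDynamicJitterBuffers() logic above.
    class JitterTarget {
    public:
        JitterTarget(bool dynamic, int staticFrames)
            : _dynamic(dynamic), _staticFrames(staticFrames),
              _desiredFrames(dynamic ? 1 : staticFrames) {}

        void setDynamic(bool dynamic) {
            if (!dynamic) {
                _desiredFrames = _staticFrames; // pin to the configured value
            } else if (!_dynamic) {
                _desiredFrames = 1;             // fresh dynamic stream starts small
            }                                   // already dynamic: keep current estimate
            _dynamic = dynamic;
        }

        int desiredFrames() const { return _desiredFrames; }

    private:
        bool _dynamic;
        int _staticFrames;
        int _desiredFrames;
    };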
@@ -48,13 +48,13 @@ const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
 const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
 
 const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10;
 
+const int DEFAULT_DESIRED_JITTER_BUFFER_FRAMES = 1;
+
 class InboundAudioStream : public NodeData {
     Q_OBJECT
 public:
     InboundAudioStream(int numFrameSamples, int numFramesCapacity,
-            bool dynamicJitterBuffers, int maxFramesOverDesired,
+            bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired,
             bool useStDevForJitterCalc = false);
 
     void reset();
@@ -72,9 +72,9 @@ public:
 
     void setToStarved();
 
-    /// turns off dyanmic jitter buffers and sets the desired jitter buffer frames to specified value
-    void overrideDesiredJitterBufferFramesTo(int desired);
-    void unoverrideDesiredJitterBufferFrames();
+    void setDynamicJitterBuffers(bool dynamicJitterBuffers);
+    void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { _staticDesiredJitterBufferFrames = staticDesiredJitterBufferFrames; }
 
     /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds
     AudioStreamStats updateSeqHistoryAndGetAudioStreamStats();
@@ -140,7 +140,7 @@ protected:
     AudioRingBuffer::ConstIterator _lastPopOutput;
 
     bool _dynamicJitterBuffers; // if false, _desiredJitterBufferFrames is locked at 1 (old behavior)
-    bool _dynamicJitterBuffersOverride; // used for locking the _desiredJitterBufferFrames to some number while running
+    int _staticDesiredJitterBufferFrames;
 
     // if jitter buffer is dynamic, this determines what method of calculating _desiredJitterBufferFrames
     // if true, Philip's timegap std dev calculation is used. Otherwise, Freddy's max timegap calculation is used
@@ -19,8 +19,8 @@
 
 #include "InjectedAudioStream.h"
 
-InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int maxFramesOverDesired) :
-    PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer, maxFramesOverDesired),
+InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
+    PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired),
     _streamIdentifier(streamIdentifier),
     _radius(0.0f),
     _attenuationRatio(0)
@@ -18,7 +18,7 @@
 
 class InjectedAudioStream : public PositionalAudioStream {
 public:
-    InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int maxFramesOverDesired);
+    InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
 
     float getRadius() const { return _radius; }
     float getAttenuationRatio() const { return _attenuationRatio; }
@@ -21,9 +21,10 @@
 #include <PacketHeaders.h>
 #include <UUID.h>
 
-PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int maxFramesOverDesired) :
+PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers,
+    int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
     InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
-        AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers, maxFramesOverDesired),
+        AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired),
     _type(type),
     _position(0.0f, 0.0f, 0.0f),
     _orientation(0.0f, 0.0f, 0.0f, 0.0f),
@@ -27,7 +27,8 @@ public:
         Injector
     };
 
-    PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int maxFramesOverDesired);
+    PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
+        int maxFramesOverDesired);
 
     virtual AudioStreamStats getAudioStreamStats() const;
 
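Taken together, the new int parameter threads from the mixer's parsed settings down through every stream constructor: AudioMixer settings -> AvatarAudioStream / InjectedAudioStream -> PositionalAudioStream -> InboundAudioStream. An illustrative mixer-side construction, matching the AudioMixerClientData change earlier in the diff (assumes those headers are available; argument values are examples only):

    AvatarAudioStream* stream = new AvatarAudioStream(
        /* isStereo */ false,
        AudioMixer::getUseDynamicJitterBuffers(),
        AudioMixer::getStaticDesiredJitterBufferFrames(),
        AudioMixer::getMaxFramesOverDesired());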