Merge branch 'master' of https://github.com/highfidelity/hifi into address-bar-text-area-changes

howard-stearns 2016-09-26 11:32:59 -07:00
commit 2dfc5ca268
37 changed files with 411 additions and 688 deletions

View file

@ -48,11 +48,8 @@ static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 10;
Agent::Agent(ReceivedMessage& message) :
ThreadedAssignment(message),
_entityEditSender(),
_receivedAudioStream(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES,
InboundAudioStream::Settings(0, false, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, false,
DEFAULT_WINDOW_STARVE_THRESHOLD, DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES,
DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION, false))
{
_receivedAudioStream(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO,
RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES) {
DependencyManager::get<EntityScriptingInterface>()->setPacketSender(&_entityEditSender);
ResourceManager::init();

View file

@ -61,15 +61,14 @@
#include "AudioMixer.h"
const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f;
const float DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE = 0.5f; // attenuation = -6dB * log2(distance)
const float DEFAULT_NOISE_MUTING_THRESHOLD = 0.003f;
const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
const QString AUDIO_ENV_GROUP_KEY = "audio_env";
const QString AUDIO_BUFFER_GROUP_KEY = "audio_buffer";
InboundAudioStream::Settings AudioMixer::_streamSettings;
static const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f;
static const float DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE = 0.5f; // attenuation = -6dB * log2(distance)
static const float DEFAULT_NOISE_MUTING_THRESHOLD = 0.003f;
static const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
static const QString AUDIO_ENV_GROUP_KEY = "audio_env";
static const QString AUDIO_BUFFER_GROUP_KEY = "audio_buffer";
int AudioMixer::_numStaticJitterFrames{ -1 };
bool AudioMixer::_enableFilter = true;
bool AudioMixer::shouldMute(float quietestFrame) {
@ -269,7 +268,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData&
if (!streamToAdd.lastPopSucceeded()) {
bool forceSilentBlock = true;
if (_streamSettings._repetitionWithFade && !streamToAdd.getLastPopOutput().isNull()) {
if (!streamToAdd.getLastPopOutput().isNull()) {
// repetition with fade is enabled, and we do have a valid previous frame to repeat
// so we mix the previously-mixed block
@ -641,7 +640,7 @@ QString AudioMixer::percentageForMixStats(int counter) {
void AudioMixer::sendStatsPacket() {
static QJsonObject statsObject;
statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
statsObject["useDynamicJitterBuffers"] = _numStaticJitterFrames == -1;
statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;
@ -902,63 +901,62 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
// check the payload to see if we have asked for dynamicJitterBuffer support
const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "dynamic_jitter_buffer";
_streamSettings._dynamicJitterBuffers = audioBufferGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
if (_streamSettings._dynamicJitterBuffers) {
qDebug() << "Enable dynamic jitter buffers.";
bool enableDynamicJitterBuffer = audioBufferGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
if (enableDynamicJitterBuffer) {
qDebug() << "Enabling dynamic jitter buffers.";
bool ok;
const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "static_desired_jitter_buffer_frames";
_numStaticJitterFrames = audioBufferGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
if (!ok) {
_numStaticJitterFrames = InboundAudioStream::DEFAULT_STATIC_JITTER_FRAMES;
}
qDebug() << "Static desired jitter buffer frames:" << _numStaticJitterFrames;
} else {
qDebug() << "Dynamic jitter buffers disabled.";
qDebug() << "Disabling dynamic jitter buffers.";
_numStaticJitterFrames = -1;
}
// check for deprecated audio settings
auto deprecationNotice = [](const QString& setting, const QString& value) {
qInfo().nospace() << "[DEPRECATION NOTICE] " << setting << "(" << value << ") has been deprecated, and has no effect";
};
bool ok;
const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "static_desired_jitter_buffer_frames";
_streamSettings._staticDesiredJitterBufferFrames = audioBufferGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._staticDesiredJitterBufferFrames = DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES;
}
qDebug() << "Static desired jitter buffer frames:" << _streamSettings._staticDesiredJitterBufferFrames;
const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "max_frames_over_desired";
_streamSettings._maxFramesOverDesired = audioBufferGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
}
qDebug() << "Max frames over desired:" << _streamSettings._maxFramesOverDesired;
const QString USE_STDEV_FOR_DESIRED_CALC_JSON_KEY = "use_stdev_for_desired_calc";
_streamSettings._useStDevForJitterCalc = audioBufferGroupObject[USE_STDEV_FOR_DESIRED_CALC_JSON_KEY].toBool();
if (_streamSettings._useStDevForJitterCalc) {
qDebug() << "Using stdev method for jitter calc if dynamic jitter buffers enabled";
} else {
qDebug() << "Using max-gap method for jitter calc if dynamic jitter buffers enabled";
int maxFramesOverDesired = audioBufferGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
if (ok && maxFramesOverDesired != InboundAudioStream::MAX_FRAMES_OVER_DESIRED) {
deprecationNotice(MAX_FRAMES_OVER_DESIRED_JSON_KEY, QString::number(maxFramesOverDesired));
}
const QString WINDOW_STARVE_THRESHOLD_JSON_KEY = "window_starve_threshold";
_streamSettings._windowStarveThreshold = audioBufferGroupObject[WINDOW_STARVE_THRESHOLD_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowStarveThreshold = DEFAULT_WINDOW_STARVE_THRESHOLD;
int windowStarveThreshold = audioBufferGroupObject[WINDOW_STARVE_THRESHOLD_JSON_KEY].toString().toInt(&ok);
if (ok && windowStarveThreshold != InboundAudioStream::WINDOW_STARVE_THRESHOLD) {
deprecationNotice(WINDOW_STARVE_THRESHOLD_JSON_KEY, QString::number(windowStarveThreshold));
}
qDebug() << "Window A starve threshold:" << _streamSettings._windowStarveThreshold;
const QString WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY = "window_seconds_for_desired_calc_on_too_many_starves";
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES;
int windowSecondsForDesiredCalcOnTooManyStarves = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY].toString().toInt(&ok);
if (ok && windowSecondsForDesiredCalcOnTooManyStarves != InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES) {
deprecationNotice(WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY, QString::number(windowSecondsForDesiredCalcOnTooManyStarves));
}
qDebug() << "Window A length:" << _streamSettings._windowSecondsForDesiredCalcOnTooManyStarves << "seconds";
const QString WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY = "window_seconds_for_desired_reduction";
_streamSettings._windowSecondsForDesiredReduction = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredReduction = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION;
int windowSecondsForDesiredReduction = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY].toString().toInt(&ok);
if (ok && windowSecondsForDesiredReduction != InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_REDUCTION) {
deprecationNotice(WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY, QString::number(windowSecondsForDesiredReduction));
}
const QString USE_STDEV_FOR_JITTER_JSON_KEY = "use_stdev_for_desired_calc";
bool useStDevForJitterCalc = audioBufferGroupObject[USE_STDEV_FOR_JITTER_JSON_KEY].toBool();
if (useStDevForJitterCalc != InboundAudioStream::USE_STDEV_FOR_JITTER) {
deprecationNotice(USE_STDEV_FOR_JITTER_JSON_KEY, useStDevForJitterCalc ? "true" : "false");
}
qDebug() << "Window B length:" << _streamSettings._windowSecondsForDesiredReduction << "seconds";
const QString REPETITION_WITH_FADE_JSON_KEY = "repetition_with_fade";
_streamSettings._repetitionWithFade = audioBufferGroupObject[REPETITION_WITH_FADE_JSON_KEY].toBool();
if (_streamSettings._repetitionWithFade) {
qDebug() << "Repetition with fade enabled";
} else {
qDebug() << "Repetition with fade disabled";
bool repetitionWithFade = audioBufferGroupObject[REPETITION_WITH_FADE_JSON_KEY].toBool();
if (repetitionWithFade != InboundAudioStream::REPETITION_WITH_FADE) {
deprecationNotice(REPETITION_WITH_FADE_JSON_KEY, repetitionWithFade ? "true" : "false");
}
}
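
The hunk above replaces per-setting stream configuration with a deprecation check: each legacy audio_buffer key is still parsed, but only to warn when an operator has overridden a value that is now a hard-coded InboundAudioStream constant. A minimal standalone sketch of that check, assuming Qt's QJsonObject and qInfo; the helper name and the sample call are illustrative, not part of the commit:

#include <QJsonObject>
#include <QString>
#include <QDebug>

// Warn when a legacy integer setting is present and differs from the value
// that is now baked into the stream implementation.
static void checkDeprecatedIntSetting(const QJsonObject& group, const QString& key, int builtInValue) {
    auto deprecationNotice = [](const QString& setting, const QString& value) {
        qInfo().nospace() << "[DEPRECATION NOTICE] " << setting << "(" << value << ") has been deprecated, and has no effect";
    };
    bool ok;
    int value = group[key].toString().toInt(&ok);
    if (ok && value != builtInValue) {
        deprecationNotice(key, QString::number(value));
    }
}

// e.g. checkDeprecatedIntSetting(audioBufferGroupObject, "max_frames_over_desired",
//                                InboundAudioStream::MAX_FRAMES_OVER_DESIRED);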

View file

@ -39,7 +39,7 @@ public slots:
void sendStatsPacket() override;
static const InboundAudioStream::Settings& getStreamSettings() { return _streamSettings; }
static int getStaticJitterFrames() { return _numStaticJitterFrames; }
private slots:
void broadcastMixes();
@ -112,7 +112,7 @@ private:
};
QVector<ReverbSettings> _zoneReverbSettings;
static InboundAudioStream::Settings _streamSettings;
static int _numStaticJitterFrames; // -1 denotes dynamic jitter buffering
static bool _enableFilter;
};

View file

@ -109,7 +109,7 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
bool isStereo = channelFlag == 1;
auto avatarAudioStream = new AvatarAudioStream(isStereo, AudioMixer::getStreamSettings());
auto avatarAudioStream = new AvatarAudioStream(isStereo, AudioMixer::getStaticJitterFrames());
avatarAudioStream->setupCodec(_codec, _selectedCodecName, AudioConstants::MONO);
qDebug() << "creating new AvatarAudioStream... codec:" << _selectedCodecName;
@ -143,7 +143,7 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
if (streamIt == _audioStreams.end()) {
// we don't have this injected stream yet, so add it
auto injectorStream = new InjectedAudioStream(streamIdentifier, isStereo, AudioMixer::getStreamSettings());
auto injectorStream = new InjectedAudioStream(streamIdentifier, isStereo, AudioMixer::getStaticJitterFrames());
#if INJECTORS_SUPPORT_CODECS
injectorStream->setupCodec(_codec, _selectedCodecName, isStereo ? AudioConstants::STEREO : AudioConstants::MONO);

View file

@ -13,10 +13,8 @@
#include "AvatarAudioStream.h"
AvatarAudioStream::AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings) :
PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, settings)
{
}
AvatarAudioStream::AvatarAudioStream(bool isStereo, int numStaticJitterFrames) :
PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, numStaticJitterFrames) {}
int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
int readBytes = 0;

View file

@ -18,7 +18,7 @@
class AvatarAudioStream : public PositionalAudioStream {
public:
AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings);
AvatarAudioStream(bool isStereo, int numStaticJitterFrames = -1);
private:
// disallow copying of AvatarAudioStream objects
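
Throughout this commit the InboundAudioStream::Settings struct is replaced by a single numStaticJitterFrames parameter whose default of -1 selects dynamic jitter buffering (matching "_numStaticJitterFrames; // -1 denotes dynamic jitter buffering" in AudioMixer.h above). A hedged usage sketch mirroring the call sites in AudioMixerClientData::parseData; the wrapper function is illustrative only:

#include "AvatarAudioStream.h"  // declares AvatarAudioStream(bool isStereo, int numStaticJitterFrames = -1)

// Returns a stream whose jitter buffer is either dynamically sized (-1, the default)
// or pinned at a fixed number of frames (any value >= 0), which is how the mixer
// forwards AudioMixer::getStaticJitterFrames().
static AvatarAudioStream* makeAvatarStream(bool isStereo, int staticJitterFrames = -1) {
    return new AvatarAudioStream(isStereo, staticJitterFrames);
}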

View file

@ -1033,65 +1033,41 @@
"name": "dynamic_jitter_buffer",
"type": "checkbox",
"label": "Dynamic Jitter Buffers",
"help": "Dynamically buffer client audio based on perceived jitter in packet receipt timing",
"help": "Dynamically buffer inbound audio streams based on perceived jitter in packet receipt timing.",
"default": true,
"advanced": true
},
{
"name": "static_desired_jitter_buffer_frames",
"label": "Static Desired Jitter Buffer Frames",
"help": "If dynamic jitter buffers is disabled, this determines the target number of frames maintained by the AudioMixer's jitter buffers",
"help": "If dynamic jitter buffers is disabled, this determines the size of the jitter buffers of inbound audio streams in the mixer. Higher numbers introduce more latency.",
"placeholder": "1",
"default": "1",
"advanced": true
},
{
"name": "max_frames_over_desired",
"label": "Max Frames Over Desired",
"help": "The highest number of frames an AudioMixer's ringbuffer can exceed the desired jitter buffer frames by",
"placeholder": "10",
"default": "10",
"advanced": true
"name": "max_frames_over_desired",
"deprecated": true
},
{
"name": "use_stdev_for_desired_calc",
"type": "checkbox",
"label": "Stdev for Desired Jitter Frames Calc",
"help": "Use Philip's method (stdev of timegaps) to calculate desired jitter frames (otherwise Fred's max timegap method is used)",
"default": false,
"advanced": true
"name": "window_starve_threshold",
"deprecated": true
},
{
"name": "window_starve_threshold",
"label": "Window Starve Threshold",
"help": "If this many starves occur in an N-second window (N is the number in the next field), then the desired jitter frames will be re-evaluated using Window A.",
"placeholder": "3",
"default": "3",
"advanced": true
"name": "window_seconds_for_desired_calc_on_too_many_starves",
"deprecated": true
},
{
"name": "window_seconds_for_desired_calc_on_too_many_starves",
"label": "Timegaps Window (A) Seconds",
"help": "Window A contains a history of timegaps. Its max timegap is used to re-evaluate the desired jitter frames when too many starves occur within it.",
"placeholder": "50",
"default": "50",
"advanced": true
"name": "window_seconds_for_desired_reduction",
"deprecated": true
},
{
"name": "window_seconds_for_desired_reduction",
"label": "Timegaps Window (B) Seconds",
"help": "Window B contains a history of timegaps. Its max timegap is used as a ceiling for the desired jitter frames value.",
"placeholder": "10",
"default": "10",
"advanced": true
"name": "use_stdev_for_desired_calc",
"deprecated": true
},
{
"name": "repetition_with_fade",
"type": "checkbox",
"label": "Repetition with Fade",
"help": "Dropped frames and mixing during starves repeat the last frame, eventually fading to silence",
"default": false,
"advanced": true
"name": "repetition_with_fade",
"deprecated": true
}
]
},

View file

@ -75,7 +75,7 @@ span.port {
color: #666666;
}
.advanced-setting {
.advanced-setting, .deprecated-setting {
display: none;
}

View file

@ -40,7 +40,8 @@
<script id="panels-template" type="text/template">
<% _.each(descriptions, function(group){ %>
<% split_settings = _.partition(group.settings, function(value, index) { return !value.advanced }) %>
<% var settings = _.partition(group.settings, function(value, index) { return !value.deprecated })[0] %>
<% split_settings = _.partition(settings, function(value, index) { return !value.advanced }) %>
<% isAdvanced = _.isEmpty(split_settings[0]) %>
<% if (isAdvanced) { %>
<% $("a[href=#" + group.name + "]").addClass('advanced-setting').hide() %>

View file

@ -2,6 +2,7 @@ var Settings = {
showAdvanced: false,
METAVERSE_URL: 'https://metaverse.highfidelity.com',
ADVANCED_CLASS: 'advanced-setting',
DEPRECATED_CLASS: 'deprecated-setting',
TRIGGER_CHANGE_CLASS: 'trigger-change',
DATA_ROW_CLASS: 'value-row',
DATA_COL_CLASS: 'value-col',
@ -42,7 +43,10 @@ var Settings = {
var viewHelpers = {
getFormGroup: function(keypath, setting, values, isAdvanced) {
form_group = "<div class='form-group " + (isAdvanced ? Settings.ADVANCED_CLASS : "") + "' data-keypath='" + keypath + "'>";
form_group = "<div class='form-group " +
(isAdvanced ? Settings.ADVANCED_CLASS : "") + " " +
(setting.deprecated ? Settings.DEPRECATED_CLASS : "" ) + "' " +
"data-keypath='" + keypath + "'>";
setting_value = _(values).valueForKeyPath(keypath);
if (_.isUndefined(setting_value) || _.isNull(setting_value)) {
@ -454,7 +458,7 @@ function setupHFAccountButton() {
}
// use the existing getFormGroup helper to ask for a button
var buttonGroup = viewHelpers.getFormGroup('', buttonSetting, Settings.data.values, false);
var buttonGroup = viewHelpers.getFormGroup('', buttonSetting, Settings.data.values);
// add the button group to the top of the metaverse panel
$('#metaverse .panel-body').prepend(buttonGroup);
@ -665,7 +669,7 @@ function setupPlacesTable() {
}
// get a table for the places
var placesTableGroup = viewHelpers.getFormGroup('', placesTableSetting, Settings.data.values, false);
var placesTableGroup = viewHelpers.getFormGroup('', placesTableSetting, Settings.data.values);
// append the places table in the right place
$('#places_paths .panel-body').prepend(placesTableGroup);

View file

@ -1278,41 +1278,43 @@ void DomainServer::handleMetaverseHeartbeatError(QNetworkReply& requestReply) {
}
void DomainServer::sendICEServerAddressToMetaverseAPI() {
if (!_iceServerSocket.isNull()) {
const QString ICE_SERVER_ADDRESS = "ice_server_address";
const QString ICE_SERVER_ADDRESS = "ice_server_address";
QJsonObject domainObject;
QJsonObject domainObject;
if (!_connectedToICEServer || _iceServerSocket.isNull()) {
domainObject[ICE_SERVER_ADDRESS] = "0.0.0.0";
} else {
// we're using full automatic networking and we have a current ice-server socket, use that now
domainObject[ICE_SERVER_ADDRESS] = _iceServerSocket.getAddress().toString();
const auto& temporaryDomainKey = DependencyManager::get<AccountManager>()->getTemporaryDomainKey(getID());
if (!temporaryDomainKey.isEmpty()) {
// add the temporary domain token
const QString KEY_KEY = "api_key";
domainObject[KEY_KEY] = temporaryDomainKey;
}
QString domainUpdateJSON = QString("{\"domain\": %1 }").arg(QString(QJsonDocument(domainObject).toJson()));
// make sure we hear about failure so we can retry
JSONCallbackParameters callbackParameters;
callbackParameters.errorCallbackReceiver = this;
callbackParameters.errorCallbackMethod = "handleFailedICEServerAddressUpdate";
static QString repeatedMessage = LogHandler::getInstance().addOnlyOnceMessageRegex
("Updating ice-server address in High Fidelity Metaverse API to [^ \n]+");
qDebug() << "Updating ice-server address in High Fidelity Metaverse API to"
<< _iceServerSocket.getAddress().toString();
static const QString DOMAIN_ICE_ADDRESS_UPDATE = "/api/v1/domains/%1/ice_server_address";
DependencyManager::get<AccountManager>()->sendRequest(DOMAIN_ICE_ADDRESS_UPDATE.arg(uuidStringWithoutCurlyBraces(getID())),
AccountManagerAuth::Optional,
QNetworkAccessManager::PutOperation,
callbackParameters,
domainUpdateJSON.toUtf8());
}
const auto& temporaryDomainKey = DependencyManager::get<AccountManager>()->getTemporaryDomainKey(getID());
if (!temporaryDomainKey.isEmpty()) {
// add the temporary domain token
const QString KEY_KEY = "api_key";
domainObject[KEY_KEY] = temporaryDomainKey;
}
QString domainUpdateJSON = QString("{\"domain\": %1 }").arg(QString(QJsonDocument(domainObject).toJson()));
// make sure we hear about failure so we can retry
JSONCallbackParameters callbackParameters;
callbackParameters.errorCallbackReceiver = this;
callbackParameters.errorCallbackMethod = "handleFailedICEServerAddressUpdate";
static QString repeatedMessage = LogHandler::getInstance().addOnlyOnceMessageRegex
("Updating ice-server address in High Fidelity Metaverse API to [^ \n]+");
qDebug() << "Updating ice-server address in High Fidelity Metaverse API to"
<< (_iceServerSocket.isNull() ? "" : _iceServerSocket.getAddress().toString());
static const QString DOMAIN_ICE_ADDRESS_UPDATE = "/api/v1/domains/%1/ice_server_address";
DependencyManager::get<AccountManager>()->sendRequest(DOMAIN_ICE_ADDRESS_UPDATE.arg(uuidStringWithoutCurlyBraces(getID())),
AccountManagerAuth::Optional,
QNetworkAccessManager::PutOperation,
callbackParameters,
domainUpdateJSON.toUtf8());
}
void DomainServer::handleFailedICEServerAddressUpdate(QNetworkReply& requestReply) {
@ -1364,6 +1366,7 @@ void DomainServer::sendHeartbeatToIceServer() {
// reset the connection flag for ICE server
_connectedToICEServer = false;
sendICEServerAddressToMetaverseAPI();
// randomize our ice-server address (and simultaneously look up any new hostnames for available ice-servers)
randomizeICEServerAddress(true);
@ -2336,6 +2339,7 @@ void DomainServer::processICEServerHeartbeatACK(QSharedPointer<ReceivedMessage>
if (!_connectedToICEServer) {
_connectedToICEServer = true;
sendICEServerAddressToMetaverseAPI();
qInfo() << "Connected to ice-server at" << _iceServerSocket;
}
}

View file

@ -1039,7 +1039,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
cameraMenuChanged();
}
// set the local loopback interface for local sounds from audio scripts
// set the local loopback interface for local sounds
AudioInjector::setLocalAudioInterface(audioIO.data());
AudioScriptingInterface::getInstance().setLocalAudioInterface(audioIO.data());
this->installEventFilter(this);

View file

@ -303,12 +303,6 @@ Menu::Menu() {
DependencyManager::get<OffscreenUi>()->toggle(QString("hifi/dialogs/AvatarPreferencesDialog.qml"), "AvatarPreferencesDialog");
});
// Settings > Audio...
action = addActionToQMenuAndActionHash(settingsMenu, "Audio...");
connect(action, &QAction::triggered, [] {
DependencyManager::get<OffscreenUi>()->toggle(QString("hifi/dialogs/AudioPreferencesDialog.qml"), "AudioPreferencesDialog");
});
// Settings > LOD...
action = addActionToQMenuAndActionHash(settingsMenu, "LOD...");
connect(action, &QAction::triggered, [] {
@ -584,6 +578,12 @@ Menu::Menu() {
// Developer > Audio >>>
MenuWrapper* audioDebugMenu = developerMenu->addMenu("Audio");
action = addActionToQMenuAndActionHash(audioDebugMenu, "Buffers...");
connect(action, &QAction::triggered, [] {
DependencyManager::get<OffscreenUi>()->toggle(QString("hifi/dialogs/AudioPreferencesDialog.qml"), "AudioPreferencesDialog");
});
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioNoiseReduction, 0, true,
audioIO.data(), SLOT(toggleAudioNoiseReduction()));
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoServerAudio, 0, false,

View file

@ -188,92 +188,32 @@ void setupPreferences() {
static const QString AUDIO("Audio");
{
auto getter = []()->bool { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getDynamicJitterBuffers(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setDynamicJitterBuffers(value); };
preferences->addPreference(new CheckPreference(AUDIO, "Enable dynamic jitter buffers", getter, setter));
auto getter = []()->bool { return !DependencyManager::get<AudioClient>()->getReceivedAudioStream().dynamicJitterBufferEnabled(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setDynamicJitterBufferEnabled(!value); };
auto preference = new CheckPreference(AUDIO, "Disable dynamic jitter buffer", getter, setter);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getDesiredJitterBufferFrames(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setStaticDesiredJitterBufferFrames(value); };
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getStaticJitterBufferFrames(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setStaticJitterBufferFrames(value); };
auto preference = new SpinnerPreference(AUDIO, "Static jitter buffer frames", getter, setter);
preference->setMin(0);
preference->setMax(10000);
preference->setMax(2000);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getMaxFramesOverDesired(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setMaxFramesOverDesired(value); };
auto preference = new SpinnerPreference(AUDIO, "Max frames over desired", getter, setter);
preference->setMax(10000);
preference->setStep(1);
auto getter = []()->bool { return !DependencyManager::get<AudioClient>()->getOutputStarveDetectionEnabled(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->setOutputStarveDetectionEnabled(!value); };
auto preference = new CheckPreference(AUDIO, "Disable output starve detection", getter, setter);
preferences->addPreference(preference);
}
{
auto getter = []()->bool { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getUseStDevForJitterCalc(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setUseStDevForJitterCalc(value); };
preferences->addPreference(new CheckPreference(AUDIO, "Use standard deviation for dynamic jitter calc", getter, setter));
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getWindowStarveThreshold(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setWindowStarveThreshold(value); };
auto preference = new SpinnerPreference(AUDIO, "Window A starve threshold", getter, setter);
preference->setMax(10000);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getWindowSecondsForDesiredCalcOnTooManyStarves(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setWindowSecondsForDesiredCalcOnTooManyStarves(value); };
auto preference = new SpinnerPreference(AUDIO, "Window A (raise desired on N starves) seconds", getter, setter);
preference->setMax(10000);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getWindowSecondsForDesiredReduction(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setWindowSecondsForDesiredReduction(value); };
auto preference = new SpinnerPreference(AUDIO, "Window B (desired ceiling) seconds", getter, setter);
preference->setMax(10000);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->bool { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getRepetitionWithFade(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setRepetitionWithFade(value); };
preferences->addPreference(new CheckPreference(AUDIO, "Repetition with fade", getter, setter));
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getOutputBufferSize(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->setOutputBufferSize(value); };
auto preference = new SpinnerPreference(AUDIO, "Output buffer initial size (frames)", getter, setter);
preference->setMin(1);
preference->setMax(20);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->bool { return DependencyManager::get<AudioClient>()->getOutputStarveDetectionEnabled(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->setOutputStarveDetectionEnabled(value); };
auto preference = new CheckPreference(AUDIO, "Output starve detection (automatic buffer size increase)", getter, setter);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getOutputStarveDetectionThreshold(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->setOutputStarveDetectionThreshold(value); };
auto preference = new SpinnerPreference(AUDIO, "Output starve detection threshold", getter, setter);
preference->setMin(1);
preference->setMax(500);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getOutputStarveDetectionPeriod(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->setOutputStarveDetectionPeriod(value); };
auto preference = new SpinnerPreference(AUDIO, "Output starve detection period (ms)", getter, setter);
preference->setMin(1);
preference->setMax((float)999999999);
auto preference = new SpinnerPreference(AUDIO, "Output buffer initial frames", getter, setter);
preference->setMin(AudioClient::MIN_BUFFER_FRAMES);
preference->setMax(AudioClient::MAX_BUFFER_FRAMES);
preference->setStep(1);
preferences->addPreference(preference);
}

View file

@ -51,22 +51,24 @@
#include "AudioClient.h"
const int AudioClient::MIN_BUFFER_FRAMES = 1;
const int AudioClient::MAX_BUFFER_FRAMES = 20;
static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 100;
static const auto DEFAULT_POSITION_GETTER = []{ return Vectors::ZERO; };
static const auto DEFAULT_ORIENTATION_GETTER = [] { return Quaternions::IDENTITY; };
Setting::Handle<bool> dynamicJitterBuffers("dynamicJitterBuffers", DEFAULT_DYNAMIC_JITTER_BUFFERS);
Setting::Handle<int> maxFramesOverDesired("maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED);
Setting::Handle<int> staticDesiredJitterBufferFrames("staticDesiredJitterBufferFrames",
DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES);
Setting::Handle<bool> useStDevForJitterCalc("useStDevForJitterCalc", DEFAULT_USE_STDEV_FOR_JITTER_CALC);
Setting::Handle<int> windowStarveThreshold("windowStarveThreshold", DEFAULT_WINDOW_STARVE_THRESHOLD);
Setting::Handle<int> windowSecondsForDesiredCalcOnTooManyStarves("windowSecondsForDesiredCalcOnTooManyStarves",
DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES);
Setting::Handle<int> windowSecondsForDesiredReduction("windowSecondsForDesiredReduction",
DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION);
Setting::Handle<bool> repetitionWithFade("repetitionWithFade", DEFAULT_REPETITION_WITH_FADE);
static const int DEFAULT_BUFFER_FRAMES = 1;
static const bool DEFAULT_STARVE_DETECTION_ENABLED = true;
static const int STARVE_DETECTION_THRESHOLD = 3;
static const int STARVE_DETECTION_PERIOD = 10 * 1000; // 10 Seconds
Setting::Handle<bool> dynamicJitterBufferEnabled("dynamicJitterBuffersEnabled",
InboundAudioStream::DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED);
Setting::Handle<int> staticJitterBufferFrames("staticJitterBufferFrames",
InboundAudioStream::DEFAULT_STATIC_JITTER_FRAMES);
// protect the Qt internal device list
using Mutex = std::mutex;
@ -113,18 +115,13 @@ AudioClient::AudioClient() :
_loopbackAudioOutput(NULL),
_loopbackOutputDevice(NULL),
_inputRingBuffer(0),
_receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, InboundAudioStream::Settings()),
_receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES),
_isStereoInput(false),
_outputStarveDetectionStartTimeMsec(0),
_outputStarveDetectionCount(0),
_outputBufferSizeFrames("audioOutputBufferSizeFrames", DEFAULT_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES),
_outputBufferSizeFrames("audioOutputBufferFrames", DEFAULT_BUFFER_FRAMES),
_sessionOutputBufferSizeFrames(_outputBufferSizeFrames.get()),
_outputStarveDetectionEnabled("audioOutputBufferStarveDetectionEnabled",
DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_ENABLED),
_outputStarveDetectionPeriodMsec("audioOutputStarveDetectionPeriod",
DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_PERIOD),
_outputStarveDetectionThreshold("audioOutputStarveDetectionThreshold",
DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_THRESHOLD),
_outputStarveDetectionEnabled("audioOutputStarveDetectionEnabled", DEFAULT_STARVE_DETECTION_ENABLED),
_lastInputLoudness(0.0f),
_timeSinceLastClip(-1.0f),
_muted(false),
@ -141,8 +138,17 @@ AudioClient::AudioClient() :
_stats(&_receivedAudioStream),
_inputGate(),
_positionGetter(DEFAULT_POSITION_GETTER),
_orientationGetter(DEFAULT_ORIENTATION_GETTER)
{
_orientationGetter(DEFAULT_ORIENTATION_GETTER) {
// deprecate legacy settings
{
Setting::Handle<int>::Deprecated("maxFramesOverDesired", InboundAudioStream::MAX_FRAMES_OVER_DESIRED);
Setting::Handle<int>::Deprecated("windowStarveThreshold", InboundAudioStream::WINDOW_STARVE_THRESHOLD);
Setting::Handle<int>::Deprecated("windowSecondsForDesiredCalcOnTooManyStarves", InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES);
Setting::Handle<int>::Deprecated("windowSecondsForDesiredReduction", InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_REDUCTION);
Setting::Handle<bool>::Deprecated("useStDevForJitterCalc", InboundAudioStream::USE_STDEV_FOR_JITTER);
Setting::Handle<bool>::Deprecated("repetitionWithFade", InboundAudioStream::REPETITION_WITH_FADE);
}
connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples,
this, &AudioClient::processReceivedSamples, Qt::DirectConnection);
connect(this, &AudioClient::changeDevice, this, [=](const QAudioDeviceInfo& outputDeviceInfo) { switchOutputToAudioDevice(outputDeviceInfo); });
@ -1232,12 +1238,12 @@ void AudioClient::outputNotify() {
if (_outputStarveDetectionEnabled.get()) {
quint64 now = usecTimestampNow() / 1000;
int dt = (int)(now - _outputStarveDetectionStartTimeMsec);
if (dt > _outputStarveDetectionPeriodMsec.get()) {
if (dt > STARVE_DETECTION_PERIOD) {
_outputStarveDetectionStartTimeMsec = now;
_outputStarveDetectionCount = 0;
} else {
_outputStarveDetectionCount += recentUnfulfilled;
if (_outputStarveDetectionCount > _outputStarveDetectionThreshold.get()) {
if (_outputStarveDetectionCount > STARVE_DETECTION_THRESHOLD) {
int oldOutputBufferSizeFrames = _sessionOutputBufferSizeFrames;
int newOutputBufferSizeFrames = setOutputBufferSize(oldOutputBufferSizeFrames + 1, false);
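
The per-user starve-detection knobs (period, threshold) become compile-time constants in this file, but the policy in outputNotify is unchanged: count unfulfilled output reads inside a rolling window and grow the output buffer by one frame once the count passes the threshold. A minimal self-contained sketch of that policy using the constant values from the diff; the timestamps are hypothetical milliseconds:

#include <algorithm>
#include <cstdint>

struct StarveDetector {
    static const int PERIOD_MS = 10 * 1000;  // STARVE_DETECTION_PERIOD
    static const int THRESHOLD = 3;          // STARVE_DETECTION_THRESHOLD
    static const int MIN_FRAMES = 1;         // AudioClient::MIN_BUFFER_FRAMES
    static const int MAX_FRAMES = 20;        // AudioClient::MAX_BUFFER_FRAMES

    int64_t windowStartMs = 0;
    int starveCount = 0;
    int bufferFrames = 1;                    // DEFAULT_BUFFER_FRAMES

    void onOutputStarve(int64_t nowMs, int recentUnfulfilled) {
        if (nowMs - windowStartMs > PERIOD_MS) {
            // the window expired: start counting again from now
            windowStartMs = nowMs;
            starveCount = 0;
        } else {
            starveCount += recentUnfulfilled;
            if (starveCount > THRESHOLD) {
                // too many starves inside one window: grow the buffer by one frame, clamped
                bufferFrames = std::min(std::max(bufferFrames + 1, MIN_FRAMES), MAX_FRAMES);
            }
        }
    }
};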
@ -1333,7 +1339,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDevice
}
int AudioClient::setOutputBufferSize(int numFrames, bool persist) {
numFrames = std::min(std::max(numFrames, MIN_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES), MAX_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES);
numFrames = std::min(std::max(numFrames, MIN_BUFFER_FRAMES), MAX_BUFFER_FRAMES);
if (numFrames != _sessionOutputBufferSizeFrames) {
qCInfo(audioclient, "Audio output buffer set to %d frames", numFrames);
_sessionOutputBufferSizeFrames = numFrames;
@ -1462,15 +1468,8 @@ void AudioClient::checkDevices() {
}
void AudioClient::loadSettings() {
_receivedAudioStream.setDynamicJitterBuffers(dynamicJitterBuffers.get());
_receivedAudioStream.setMaxFramesOverDesired(maxFramesOverDesired.get());
_receivedAudioStream.setStaticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames.get());
_receivedAudioStream.setUseStDevForJitterCalc(useStDevForJitterCalc.get());
_receivedAudioStream.setWindowStarveThreshold(windowStarveThreshold.get());
_receivedAudioStream.setWindowSecondsForDesiredCalcOnTooManyStarves(
windowSecondsForDesiredCalcOnTooManyStarves.get());
_receivedAudioStream.setWindowSecondsForDesiredReduction(windowSecondsForDesiredReduction.get());
_receivedAudioStream.setRepetitionWithFade(repetitionWithFade.get());
_receivedAudioStream.setDynamicJitterBufferEnabled(dynamicJitterBufferEnabled.get());
_receivedAudioStream.setStaticJitterBufferFrames(staticJitterBufferFrames.get());
qCDebug(audioclient) << "---- Initializing Audio Client ----";
auto codecPlugins = PluginManager::getInstance()->getCodecPlugins();
@ -1481,12 +1480,6 @@ void AudioClient::loadSettings() {
}
void AudioClient::saveSettings() {
dynamicJitterBuffers.set(_receivedAudioStream.getDynamicJitterBuffers());
maxFramesOverDesired.set(_receivedAudioStream.getMaxFramesOverDesired());
staticDesiredJitterBufferFrames.set(_receivedAudioStream.getDesiredJitterBufferFrames());
windowStarveThreshold.set(_receivedAudioStream.getWindowStarveThreshold());
windowSecondsForDesiredCalcOnTooManyStarves.set(_receivedAudioStream.
getWindowSecondsForDesiredCalcOnTooManyStarves());
windowSecondsForDesiredReduction.set(_receivedAudioStream.getWindowSecondsForDesiredReduction());
repetitionWithFade.set(_receivedAudioStream.getRepetitionWithFade());
dynamicJitterBufferEnabled.set(_receivedAudioStream.dynamicJitterBufferEnabled());
staticJitterBufferFrames.set(_receivedAudioStream.getStaticJitterBufferFrames());
}

View file

@ -61,15 +61,6 @@
#pragma warning( pop )
#endif
static const int NUM_AUDIO_CHANNELS = 2;
static const int DEFAULT_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 3;
static const int MIN_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 1;
static const int MAX_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 20;
static const int DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_ENABLED = true;
static const int DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_THRESHOLD = 3;
static const quint64 DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_PERIOD = 10 * 1000; // 10 Seconds
class QAudioInput;
class QAudioOutput;
class QIODevice;
@ -82,6 +73,9 @@ class AudioClient : public AbstractAudioInterface, public Dependency {
Q_OBJECT
SINGLETON_DEPENDENCY
public:
static const int MIN_BUFFER_FRAMES;
static const int MAX_BUFFER_FRAMES;
using AudioPositionGetter = std::function<glm::vec3()>;
using AudioOrientationGetter = std::function<glm::quat()>;
@ -115,8 +109,6 @@ public:
float getTimeSinceLastClip() const { return _timeSinceLastClip; }
float getAudioAverageInputLoudness() const { return _lastInputLoudness; }
int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); }
bool isMuted() { return _muted; }
const AudioIOStats& getStats() const { return _stats; }
@ -126,12 +118,6 @@ public:
bool getOutputStarveDetectionEnabled() { return _outputStarveDetectionEnabled.get(); }
void setOutputStarveDetectionEnabled(bool enabled) { _outputStarveDetectionEnabled.set(enabled); }
int getOutputStarveDetectionPeriod() { return _outputStarveDetectionPeriodMsec.get(); }
void setOutputStarveDetectionPeriod(int msecs) { _outputStarveDetectionPeriodMsec.set(msecs); }
int getOutputStarveDetectionThreshold() { return _outputStarveDetectionThreshold.get(); }
void setOutputStarveDetectionThreshold(int threshold) { _outputStarveDetectionThreshold.set(threshold); }
bool isSimulatingJitter() { return _gate.isSimulatingJitter(); }
void setIsSimulatingJitter(bool enable) { _gate.setIsSimulatingJitter(enable); }
@ -283,9 +269,6 @@ private:
Setting::Handle<int> _outputBufferSizeFrames;
int _sessionOutputBufferSizeFrames;
Setting::Handle<bool> _outputStarveDetectionEnabled;
Setting::Handle<int> _outputStarveDetectionPeriodMsec;
// Maximum number of starves per _outputStarveDetectionPeriod before increasing buffer size
Setting::Handle<int> _outputStarveDetectionThreshold;
StDev _stdev;
QElapsedTimer _timeSinceLastReceived;

View file

@ -28,6 +28,8 @@
int audioInjectorPtrMetaTypeId = qRegisterMetaType<AudioInjector*>();
AbstractAudioInterface* AudioInjector::_localAudioInterface{ nullptr };
AudioInjectorState operator& (AudioInjectorState lhs, AudioInjectorState rhs) {
return static_cast<AudioInjectorState>(static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs));
};
@ -37,24 +39,15 @@ AudioInjectorState& operator|= (AudioInjectorState& lhs, AudioInjectorState rhs)
return lhs;
};
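
The operator& and operator|= shown above let AudioInjectorState (a uint8_t-backed enum class) behave as a set of flags; `_state |= AudioInjectorState::PendingDelete` later in this file and stateHas() both rely on them. A minimal self-contained sketch of the idiom; the enumerator values below are hypothetical:

#include <cstdint>

enum class AudioInjectorState : uint8_t {
    NotFinished   = 1,
    Finished      = 2,
    PendingDelete = 4
};

inline AudioInjectorState operator&(AudioInjectorState lhs, AudioInjectorState rhs) {
    return static_cast<AudioInjectorState>(static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs));
}

inline AudioInjectorState& operator|=(AudioInjectorState& lhs, AudioInjectorState rhs) {
    lhs = static_cast<AudioInjectorState>(static_cast<uint8_t>(lhs) | static_cast<uint8_t>(rhs));
    return lhs;
}

// the stateHas() pattern: a flag is set iff the masked value is non-zero
inline bool stateHas(AudioInjectorState state, AudioInjectorState flag) {
    return static_cast<uint8_t>(state & flag) != 0;
}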
AudioInjector::AudioInjector(QObject* parent) :
QObject(parent)
{
}
AudioInjector::AudioInjector(const Sound& sound, const AudioInjectorOptions& injectorOptions) :
_audioData(sound.getByteArray()),
_options(injectorOptions)
AudioInjector(sound.getByteArray(), injectorOptions)
{
}
AudioInjector::AudioInjector(const QByteArray& audioData, const AudioInjectorOptions& injectorOptions) :
_audioData(audioData),
_options(injectorOptions)
{
}
bool AudioInjector::stateHas(AudioInjectorState state) const {
@ -103,26 +96,6 @@ void AudioInjector::finish() {
}
}
void AudioInjector::setupInjection() {
if (!_hasSetup) {
_hasSetup = true;
// check if we need to offset the sound by some number of seconds
if (_options.secondOffset > 0.0f) {
// convert the offset into a number of bytes
int byteOffset = (int) floorf(AudioConstants::SAMPLE_RATE * _options.secondOffset * (_options.stereo ? 2.0f : 1.0f));
byteOffset *= sizeof(int16_t);
_currentSendOffset = byteOffset;
} else {
_currentSendOffset = 0;
}
} else {
qCDebug(audio) << "AudioInjector::setupInjection called but already setup.";
}
}
void AudioInjector::restart() {
// grab the AudioInjectorManager
auto injectorManager = DependencyManager::get<AudioInjectorManager>();
@ -150,30 +123,37 @@ void AudioInjector::restart() {
// check our state to decide if we need extra handling for the restart request
if (stateHas(AudioInjectorState::Finished)) {
// we finished playing, need to reset state so we can get going again
_hasSetup = false;
_shouldStop = false;
_state = AudioInjectorState::NotFinished;
// call inject audio to start injection over again
setupInjection();
// inject locally
if(injectLocally()) {
// if not localOnly, wake the AudioInjectorManager back up if it is stuck waiting
if (!_options.localOnly) {
if (!injectorManager->restartFinishedInjector(this)) {
_state = AudioInjectorState::Finished; // we're not playing, so reset the state used by isPlaying.
}
}
} else {
_state = AudioInjectorState::Finished; // we failed to play, so we are finished again
if (!inject(&AudioInjectorManager::restartFinishedInjector)) {
qWarning() << "AudioInjector::restart failed to thread injector";
}
}
}
bool AudioInjector::inject(bool(AudioInjectorManager::*injection)(AudioInjector*)) {
_state = AudioInjectorState::NotFinished;
int byteOffset = 0;
if (_options.secondOffset > 0.0f) {
byteOffset = (int)floorf(AudioConstants::SAMPLE_RATE * _options.secondOffset * (_options.stereo ? 2.0f : 1.0f));
byteOffset *= sizeof(AudioConstants::SAMPLE_SIZE);
}
_currentSendOffset = byteOffset;
if (!injectLocally()) {
finishLocalInjection();
}
bool success = true;
if (!_options.localOnly) {
auto injectorManager = DependencyManager::get<AudioInjectorManager>();
if (!(*injectorManager.*injection)(this)) {
success = false;
finishNetworkInjection();
}
}
return success;
}
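
restart() and playSound() now funnel through inject(), which takes a pointer to an AudioInjectorManager member function so one code path can choose between threadInjector and restartFinishedInjector. The expression (*injectorManager.*injection)(this) first dereferences the shared pointer, then calls through the pointer-to-member. A minimal self-contained sketch of that dispatch with hypothetical stand-in classes:

#include <iostream>
#include <memory>

struct Injector;

struct Manager {
    bool threadInjector(Injector*)          { std::cout << "threadInjector\n";          return true; }
    bool restartFinishedInjector(Injector*) { std::cout << "restartFinishedInjector\n"; return true; }
};

struct Injector {
    bool inject(std::shared_ptr<Manager> manager, bool (Manager::*injection)(Injector*)) {
        // same shape as (*injectorManager.*injection)(this) in the diff:
        // dereference the smart pointer, then call through the member pointer
        return (*manager.*injection)(this);
    }
};

int main() {
    auto manager = std::make_shared<Manager>();
    Injector injector;
    injector.inject(manager, &Manager::threadInjector);           // fresh playback path
    injector.inject(manager, &Manager::restartFinishedInjector);  // restart path
}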
bool AudioInjector::injectLocally() {
bool success = false;
if (_localAudioInterface) {
@ -202,11 +182,6 @@ bool AudioInjector::injectLocally() {
qCDebug(audio) << "AudioInjector::injectLocally cannot inject locally with no local audio interface present.";
}
if (!success) {
// we never started so we are finished with local injection
finishLocalInjection();
}
return success;
}
@ -447,7 +422,7 @@ AudioInjector* AudioInjector::playSound(SharedSoundPointer sound, const float vo
QByteArray samples = sound->getByteArray();
if (stretchFactor == 1.0f) {
return playSoundAndDelete(samples, options, nullptr);
return playSoundAndDelete(samples, options);
}
const int standardRate = AudioConstants::SAMPLE_RATE;
@ -465,11 +440,11 @@ AudioInjector* AudioInjector::playSound(SharedSoundPointer sound, const float vo
nInputFrames);
Q_UNUSED(nOutputFrames);
return playSoundAndDelete(resampled, options, nullptr);
return playSoundAndDelete(resampled, options);
}
AudioInjector* AudioInjector::playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options, AbstractAudioInterface* localInterface) {
AudioInjector* sound = playSound(buffer, options, localInterface);
AudioInjector* AudioInjector::playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options) {
AudioInjector* sound = playSound(buffer, options);
if (sound) {
sound->_state |= AudioInjectorState::PendingDelete;
@ -479,27 +454,10 @@ AudioInjector* AudioInjector::playSoundAndDelete(const QByteArray& buffer, const
}
AudioInjector* AudioInjector::playSound(const QByteArray& buffer, const AudioInjectorOptions options, AbstractAudioInterface* localInterface) {
AudioInjector* AudioInjector::playSound(const QByteArray& buffer, const AudioInjectorOptions options) {
AudioInjector* injector = new AudioInjector(buffer, options);
injector->setLocalAudioInterface(localInterface);
// grab the AudioInjectorManager
auto injectorManager = DependencyManager::get<AudioInjectorManager>();
// setup parameters required for injection
injector->setupInjection();
// we always inject locally, except when there is no localInterface
injector->injectLocally();
// if localOnly, we are done, just return injector.
if (!options.localOnly) {
// send off to server for everyone else
if (!injectorManager->threadInjector(injector)) {
// we failed to thread the new injector (we are at the max number of injector threads)
qDebug() << "AudioInjector::playSound failed to thread injector";
}
if (!injector->inject(&AudioInjectorManager::threadInjector)) {
qWarning() << "AudioInjector::playSound failed to thread injector";
}
return injector;
}

View file

@ -48,9 +48,7 @@ AudioInjectorState& operator|= (AudioInjectorState& lhs, AudioInjectorState rhs)
// until it dies.
class AudioInjector : public QObject {
Q_OBJECT
public:
AudioInjector(QObject* parent);
AudioInjector(const Sound& sound, const AudioInjectorOptions& injectorOptions);
AudioInjector(const QByteArray& audioData, const AudioInjectorOptions& injectorOptions);
@ -66,11 +64,11 @@ public:
float getVolume() const { return _options.volume; }
glm::vec3 getPosition() const { return _options.position; }
bool isStereo() const { return _options.stereo; }
void setLocalAudioInterface(AbstractAudioInterface* localAudioInterface) { _localAudioInterface = localAudioInterface; }
bool stateHas(AudioInjectorState state) const ;
static AudioInjector* playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options, AbstractAudioInterface* localInterface);
static AudioInjector* playSound(const QByteArray& buffer, const AudioInjectorOptions options, AbstractAudioInterface* localInterface);
static void setLocalAudioInterface(AbstractAudioInterface* audioInterface) { _localAudioInterface = audioInterface; }
static AudioInjector* playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options);
static AudioInjector* playSound(const QByteArray& buffer, const AudioInjectorOptions options);
static AudioInjector* playSound(SharedSoundPointer sound, const float volume, const float stretchFactor, const glm::vec3 position);
public slots:
@ -94,20 +92,19 @@ signals:
void restarting();
private:
void setupInjection();
int64_t injectNextFrame();
bool inject(bool(AudioInjectorManager::*injection)(AudioInjector*));
bool injectLocally();
static AbstractAudioInterface* _localAudioInterface;
QByteArray _audioData;
AudioInjectorOptions _options;
AudioInjectorState _state { AudioInjectorState::NotFinished };
bool _hasSentFirstFrame { false };
bool _hasSetup { false };
bool _shouldStop { false };
float _loudness { 0.0f };
int _currentSendOffset { 0 };
std::unique_ptr<NLPacket> _currentPacket { nullptr };
AbstractAudioInterface* _localAudioInterface { nullptr };
AudioInjectorLocalBuffer* _localBuffer { nullptr };
int64_t _nextFrame { 0 };

View file

@ -157,8 +157,6 @@ bool AudioInjectorManager::threadInjector(AudioInjector* injector) {
// move the injector to the QThread
injector->moveToThread(_thread);
// handle a restart once the injector has finished
// add the injector to the queue with a send timestamp of now
_injectors.emplace(usecTimestampNow(), InjectorQPointer { injector });
@ -170,13 +168,17 @@ bool AudioInjectorManager::threadInjector(AudioInjector* injector) {
}
bool AudioInjectorManager::restartFinishedInjector(AudioInjector* injector) {
if (!_shouldStop) {
// guard the injectors vector with a mutex
Lock lock(_injectorsMutex);
if (wouldExceedLimits()) {
return false;
}
if (_shouldStop) {
qDebug() << "AudioInjectorManager::threadInjector asked to thread injector but is shutting down.";
return false;
}
// guard the injectors vector with a mutex
Lock lock(_injectorsMutex);
if (wouldExceedLimits()) {
return false;
} else {
// add the injector to the queue with a send timestamp of now
_injectors.emplace(usecTimestampNow(), InjectorQPointer { injector });

View file

@ -12,9 +12,5 @@
#include "AudioLogging.h"
Q_LOGGING_CATEGORY(audio, "hifi.audio")
#if DEV_BUILD || PR_BUILD
Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtInfoMsg)
#else
Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtWarningMsg)
#endif

View file

@ -18,43 +18,46 @@
#include "InboundAudioStream.h"
#include "AudioLogging.h"
const bool InboundAudioStream::DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED = true;
const int InboundAudioStream::DEFAULT_STATIC_JITTER_FRAMES = 1;
const int InboundAudioStream::MAX_FRAMES_OVER_DESIRED = 10;
const int InboundAudioStream::WINDOW_STARVE_THRESHOLD = 3;
const int InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES = 50;
const int InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_REDUCTION = 10;
const bool InboundAudioStream::USE_STDEV_FOR_JITTER = false;
const bool InboundAudioStream::REPETITION_WITH_FADE = true;
static const int STARVE_HISTORY_CAPACITY = 50;
// This is called 1x/s, and we want it to log the last 5s
static const int UNPLAYED_MS_WINDOW_SECS = 5;
InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings) :
// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames.
// The larger this value is, the less frames we drop when attempting to reduce the jitter buffer length.
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames when dropping frames,
// which could lead to a starve soon after.
static const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
// this controls the length of the window for stats used in the stats packet (not the stats used in
// _desiredJitterBufferFrames calculation)
static const int STATS_FOR_STATS_PACKET_WINDOW_SECONDS = 30;
// this controls the window size of the time-weighted avg of frames available. Every time the window fills up,
// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
static const quint64 FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND;
InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames) :
_ringBuffer(numFrameSamples, numFramesCapacity),
_lastPopSucceeded(false),
_lastPopOutput(),
_dynamicJitterBuffers(settings._dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(settings._useStDevForJitterCalc),
_desiredJitterBufferFrames(settings._dynamicJitterBuffers ? 1 : settings._staticDesiredJitterBufferFrames),
_maxFramesOverDesired(settings._maxFramesOverDesired),
_isStarved(true),
_hasStarted(false),
_consecutiveNotMixedCount(0),
_starveCount(0),
_silentFramesDropped(0),
_oldFramesDropped(0),
_dynamicJitterBufferEnabled(numStaticJitterFrames == -1),
_staticJitterBufferFrames(std::max(numStaticJitterFrames, DEFAULT_STATIC_JITTER_FRAMES)),
_desiredJitterBufferFrames(_dynamicJitterBufferEnabled ? 1 : _staticJitterBufferFrames),
_incomingSequenceNumberStats(STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
_lastPacketReceivedTime(0),
_timeGapStatsForDesiredCalcOnTooManyStarves(0, settings._windowSecondsForDesiredCalcOnTooManyStarves),
_calculatedJitterBufferFramesUsingMaxGap(0),
_stdevStatsForDesiredCalcOnTooManyStarves(),
_calculatedJitterBufferFramesUsingStDev(0),
_timeGapStatsForDesiredReduction(0, settings._windowSecondsForDesiredReduction),
_starveHistoryWindowSeconds(settings._windowSecondsForDesiredCalcOnTooManyStarves),
_starveHistory(STARVE_HISTORY_CAPACITY),
_starveThreshold(settings._windowStarveThreshold),
_framesAvailableStat(),
_unplayedMs(0, UNPLAYED_MS_WINDOW_SECS),
_currentJitterBufferFrames(0),
_timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
_repetitionWithFade(settings._repetitionWithFade),
_hasReverb(false)
{
_timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS) {}
InboundAudioStream::~InboundAudioStream() {
cleanupCodec();
}
void InboundAudioStream::reset() {
@ -70,7 +73,7 @@ void InboundAudioStream::reset() {
}
void InboundAudioStream::resetStats() {
if (_dynamicJitterBuffers) {
if (_dynamicJitterBufferEnabled) {
_desiredJitterBufferFrames = 1;
}
_consecutiveNotMixedCount = 0;
@ -80,7 +83,6 @@ void InboundAudioStream::resetStats() {
_incomingSequenceNumberStats.reset();
_lastPacketReceivedTime = 0;
_timeGapStatsForDesiredCalcOnTooManyStarves.reset();
_stdevStatsForDesiredCalcOnTooManyStarves = StDev();
_timeGapStatsForDesiredReduction.reset();
_starveHistory.clear();
_framesAvailableStat.reset();
@ -174,7 +176,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
}
// if the ringbuffer exceeds the desired size by more than the threshold specified,
// drop the oldest frames so the ringbuffer is down to the desired size.
if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) {
if (framesAvailable > _desiredJitterBufferFrames + MAX_FRAMES_OVER_DESIRED) {
int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
_ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples());
@ -250,7 +252,7 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
return ret;
}
int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped) {
int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing) {
int samplesPopped = 0;
int samplesAvailable = _ringBuffer.samplesAvailable();
if (_isStarved) {
@ -268,44 +270,19 @@ int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starv
popSamplesNoCheck(samplesAvailable);
samplesPopped = samplesAvailable;
} else {
// we can't pop any samples. set this stream to starved if needed
if (starveIfNoSamplesPopped) {
setToStarved();
_consecutiveNotMixedCount++;
}
// we can't pop any samples, set this stream to starved
setToStarved();
_consecutiveNotMixedCount++;
_lastPopSucceeded = false;
}
}
return samplesPopped;
}
int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing, bool starveIfNoFramesPopped) {
int framesPopped = 0;
int framesAvailable = _ringBuffer.framesAvailable();
if (_isStarved) {
// we're still refilling; don't pop
_consecutiveNotMixedCount++;
_lastPopSucceeded = false;
} else {
if (framesAvailable >= maxFrames) {
// we have enough frames to pop, so we're good to pop
popSamplesNoCheck(maxFrames * _ringBuffer.getNumFrameSamples());
framesPopped = maxFrames;
} else if (!allOrNothing && framesAvailable > 0) {
// we don't have the requested number of frames, but we do have some
// frames available, so pop all those (except in all-or-nothing mode)
popSamplesNoCheck(framesAvailable * _ringBuffer.getNumFrameSamples());
framesPopped = framesAvailable;
} else {
// we can't pop any frames. set this stream to starved if needed
if (starveIfNoFramesPopped) {
setToStarved();
_consecutiveNotMixedCount = 1;
}
_lastPopSucceeded = false;
}
}
return framesPopped;
int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing) {
int numFrameSamples = _ringBuffer.getNumFrameSamples();
int samplesPopped = popSamples(maxFrames * numFrameSamples, allOrNothing);
return samplesPopped / numFrameSamples;
}
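
popFrames() is now a thin wrapper: it converts the request to samples, reuses popSamples(), and converts the result back with integer division, so a partial frame left in the ring buffer is simply not counted. A tiny worked sketch, assuming a hypothetical frame size of 240 samples:

#include <cassert>

int main() {
    const int numFrameSamples = 240;            // hypothetical _ringBuffer.getNumFrameSamples()
    int samplesPopped = 500;                    // e.g. popSamples(3 * 240, false) only found 500 samples
    int framesPopped = samplesPopped / numFrameSamples;
    assert(framesPopped == 2);                  // integer division floors away the partial frame
    return 0;
}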
void InboundAudioStream::popSamplesNoCheck(int samples) {
@ -346,10 +323,10 @@ void InboundAudioStream::setToStarved() {
quint64 now = usecTimestampNow();
_starveHistory.insert(now);
if (_dynamicJitterBuffers) {
if (_dynamicJitterBufferEnabled) {
// dynamic jitter buffers are enabled. check if this starve put us over the window
// starve threshold
quint64 windowEnd = now - _starveHistoryWindowSeconds * USECS_PER_SECOND;
quint64 windowEnd = now - WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES * USECS_PER_SECOND;
RingBufferHistory<quint64>::Iterator starvesIterator = _starveHistory.begin();
RingBufferHistory<quint64>::Iterator end = _starveHistory.end();
int starvesInWindow = 1;
@ -363,18 +340,14 @@ void InboundAudioStream::setToStarved() {
// this starve put us over the starve threshold. update _desiredJitterBufferFrames to
// value determined by window A.
if (starvesInWindow >= _starveThreshold) {
if (starvesInWindow >= WINDOW_STARVE_THRESHOLD) {
int calculatedJitterBufferFrames;
if (_useStDevForJitterCalc) {
calculatedJitterBufferFrames = _calculatedJitterBufferFramesUsingStDev;
} else {
// we don't know when the next packet will arrive, so it's possible the gap between the last packet and the
// next packet will exceed the max time gap in the window. If the time since the last packet has already exceeded
// the window max gap, then we should use that value to calculate desired frames.
int framesSinceLastPacket = ceilf((float)(now - _lastPacketReceivedTime)
/ (float)AudioConstants::NETWORK_FRAME_USECS);
calculatedJitterBufferFrames = std::max(_calculatedJitterBufferFramesUsingMaxGap, framesSinceLastPacket);
}
// we don't know when the next packet will arrive, so it's possible the gap between the last packet and the
// next packet will exceed the max time gap in the window. If the time since the last packet has already exceeded
// the window max gap, then we should use that value to calculate desired frames.
int framesSinceLastPacket = ceilf((float)(now - _lastPacketReceivedTime)
/ (float)AudioConstants::NETWORK_FRAME_USECS);
calculatedJitterBufferFrames = std::max(_calculatedJitterBufferFrames, framesSinceLastPacket);
// make sure _desiredJitterBufferFrames does not become lower here
if (calculatedJitterBufferFrames >= _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
@@ -384,52 +357,25 @@ void InboundAudioStream::setToStarved() {
}
}
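The setToStarved() changes above boil down to: count the starves that fall inside the starve-history window, and once WINDOW_STARVE_THRESHOLD of them pile up, raise _desiredJitterBufferFrames to whatever the worst observed (or currently open) packet gap implies. A standalone sketch of that rule, with assumed constants (3 starves, 50-second window, 10 ms network frames) standing in for the class statics:
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <deque>
static const int WINDOW_STARVE_THRESHOLD = 3;        // assumed value
static const int WINDOW_SECONDS = 50;                // assumed value
static const int64_t USECS_PER_SECOND = 1000000;
static const float NETWORK_FRAME_USECS = 10000.0f;   // assumed 10 ms frames
// returns the (possibly raised) desired jitter buffer frames after a starve at `now`
int desiredFramesAfterStarve(std::deque<int64_t>& starveHistory, int64_t now,
                             int64_t windowMaxGapUsecs, int64_t usecsSinceLastPacket,
                             int currentDesiredFrames) {
    starveHistory.push_back(now);
    int64_t windowStart = now - (int64_t)WINDOW_SECONDS * USECS_PER_SECOND;
    int starvesInWindow = (int)std::count_if(starveHistory.begin(), starveHistory.end(),
                                             [&](int64_t t) { return t >= windowStart; });
    if (starvesInWindow < WINDOW_STARVE_THRESHOLD) {
        return currentDesiredFrames;
    }
    // the next packet has not arrived yet, so the gap that is already open since the
    // last packet may be worse than anything recorded in the window
    int gapFrames = (int)std::ceil((float)windowMaxGapUsecs / NETWORK_FRAME_USECS);
    int openFrames = (int)std::ceil((float)usecsSinceLastPacket / NETWORK_FRAME_USECS);
    // never lower the desired value from this path
    return std::max(std::max(gapFrames, openFrames), currentDesiredFrames);
}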
void InboundAudioStream::setSettings(const Settings& settings) {
setMaxFramesOverDesired(settings._maxFramesOverDesired);
setDynamicJitterBuffers(settings._dynamicJitterBuffers);
setStaticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames);
setUseStDevForJitterCalc(settings._useStDevForJitterCalc);
setWindowStarveThreshold(settings._windowStarveThreshold);
setWindowSecondsForDesiredCalcOnTooManyStarves(settings._windowSecondsForDesiredCalcOnTooManyStarves);
setWindowSecondsForDesiredReduction(settings._windowSecondsForDesiredReduction);
setRepetitionWithFade(settings._repetitionWithFade);
}
void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
if (!dynamicJitterBuffers) {
_desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
void InboundAudioStream::setDynamicJitterBufferEnabled(bool enable) {
if (!enable) {
_desiredJitterBufferFrames = _staticJitterBufferFrames;
} else {
if (!_dynamicJitterBuffers) {
if (!_dynamicJitterBufferEnabled) {
// if we're enabling dynamic jitter buffer frames, start desired frames at 1
_desiredJitterBufferFrames = 1;
}
}
_dynamicJitterBuffers = dynamicJitterBuffers;
_dynamicJitterBufferEnabled = enable;
}
void InboundAudioStream::setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) {
_staticDesiredJitterBufferFrames = staticDesiredJitterBufferFrames;
if (!_dynamicJitterBuffers) {
_desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
void InboundAudioStream::setStaticJitterBufferFrames(int staticJitterBufferFrames) {
_staticJitterBufferFrames = staticJitterBufferFrames;
if (!_dynamicJitterBufferEnabled) {
_desiredJitterBufferFrames = _staticJitterBufferFrames;
}
}
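A standalone sketch (illustrative names, not hifi's class) of the setter semantics above: with dynamic jitter buffering disabled the desired frame count simply tracks the static value, and turning dynamic mode on resets desired frames to 1 so the starve logic can grow it from measurements:
struct JitterState {
    bool dynamicEnabled = false;
    int staticFrames = 1;
    int desiredFrames = 1;
    void setDynamicEnabled(bool enable) {
        if (!enable) {
            desiredFrames = staticFrames;      // pinned to the static setting
        } else if (!dynamicEnabled) {
            desiredFrames = 1;                 // start small; starves will raise it
        }
        dynamicEnabled = enable;
    }
    void setStaticFrames(int frames) {
        staticFrames = frames;
        if (!dynamicEnabled) {
            desiredFrames = staticFrames;
        }
    }
};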
void InboundAudioStream::setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves) {
_timeGapStatsForDesiredCalcOnTooManyStarves.setWindowIntervals(windowSecondsForDesiredCalcOnTooManyStarves);
_starveHistoryWindowSeconds = windowSecondsForDesiredCalcOnTooManyStarves;
}
void InboundAudioStream::setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction) {
_timeGapStatsForDesiredReduction.setWindowIntervals(windowSecondsForDesiredReduction);
}
int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
const int MIN_FRAMES_DESIRED = 0;
const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity();
return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED);
}
void InboundAudioStream::packetReceivedUpdateTimingStats() {
// update our timegap stats and desired jitter buffer frames if necessary
@@ -442,25 +388,15 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
// update all stats used for desired frames calculations under dynamic jitter buffer mode
_timeGapStatsForDesiredCalcOnTooManyStarves.update(gap);
_stdevStatsForDesiredCalcOnTooManyStarves.addValue(gap);
_timeGapStatsForDesiredReduction.update(gap);
if (_timeGapStatsForDesiredCalcOnTooManyStarves.getNewStatsAvailableFlag()) {
_calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_timeGapStatsForDesiredCalcOnTooManyStarves.getWindowMax()
_calculatedJitterBufferFrames = ceilf((float)_timeGapStatsForDesiredCalcOnTooManyStarves.getWindowMax()
/ (float) AudioConstants::NETWORK_FRAME_USECS);
_timeGapStatsForDesiredCalcOnTooManyStarves.clearNewStatsAvailableFlag();
}
const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;
if (_stdevStatsForDesiredCalcOnTooManyStarves.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
const float NUM_STANDARD_DEVIATIONS = 3.0f;
_calculatedJitterBufferFramesUsingStDev = ceilf(NUM_STANDARD_DEVIATIONS
* _stdevStatsForDesiredCalcOnTooManyStarves.getStDev()
/ (float) AudioConstants::NETWORK_FRAME_USECS);
_stdevStatsForDesiredCalcOnTooManyStarves.reset();
}
if (_dynamicJitterBuffers) {
if (_dynamicJitterBufferEnabled) {
// if the max gap in window B (_timeGapStatsForDesiredReduction) corresponds to a smaller number of frames than _desiredJitterBufferFrames,
// then reduce _desiredJitterBufferFrames to that number of frames.
if (_timeGapStatsForDesiredReduction.getNewStatsAvailableFlag() && _timeGapStatsForDesiredReduction.isWindowFilled()) {
@@ -479,10 +415,7 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
}
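The reduction path commented above (window B) only ever shrinks the target: if the worst gap in the reduction window corresponds to fewer frames than we currently want, drop the desired count down to that. A standalone sketch, again assuming 10 ms network frames:
#include <algorithm>
#include <cmath>
static const float NETWORK_FRAME_USECS = 10000.0f;   // assumed 10 ms frames
int maybeReduceDesiredFrames(int desiredFrames, float reductionWindowMaxGapUsecs) {
    int gapFrames = (int)std::ceil(reductionWindowMaxGapUsecs / NETWORK_FRAME_USECS);
    return std::min(desiredFrames, gapFrames);   // never raises the target from this path
}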
int InboundAudioStream::writeSamplesForDroppedPackets(int networkSamples) {
if (_repetitionWithFade) {
return writeLastFrameRepeatedWithFade(networkSamples);
}
return writeDroppableSilentSamples(networkSamples);
return writeLastFrameRepeatedWithFade(networkSamples);
}
int InboundAudioStream::writeLastFrameRepeatedWithFade(int samples) {

View file

@@ -27,86 +27,28 @@
#include "AudioStreamStats.h"
#include "TimeWeightedAvg.h"
// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames.
// The larger this value is, the fewer frames we drop when attempting to reduce the jitter buffer length.
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames when dropping frames,
// which could lead to a starve soon after.
const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
// this controls the length of the window for stats used in the stats packet (not the stats used in
// _desiredJitterBufferFrames calculation)
const int STATS_FOR_STATS_PACKET_WINDOW_SECONDS = 30;
// this controls the window size of the time-weighted avg of frames available. Every time the window fills up,
// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
const quint64 FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND;
// default values for members of the Settings struct
const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10;
const bool DEFAULT_DYNAMIC_JITTER_BUFFERS = true;
const int DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES = 1;
const bool DEFAULT_USE_STDEV_FOR_JITTER_CALC = false;
const int DEFAULT_WINDOW_STARVE_THRESHOLD = 3;
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES = 50;
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION = 10;
const bool DEFAULT_REPETITION_WITH_FADE = true;
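DESIRED_JITTER_BUFFER_FRAMES_PADDING above matters when the stream trims an over-full ring buffer: per the comments in this header, frames are only dropped once the buffer exceeds desired + maxFramesOverDesired, and the drop targets desired + padding rather than exactly desired, so one late packet does not immediately re-starve the stream. A standalone sketch of that trimming rule as described (not a copy of the implementation):
static const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
int framesToDrop(int framesAvailable, int desiredFrames, int maxFramesOverDesired) {
    if (framesAvailable <= desiredFrames + maxFramesOverDesired) {
        return 0;                                        // within tolerance: keep everything
    }
    return framesAvailable - (desiredFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
}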
// Audio Env bitset
const int HAS_REVERB_BIT = 0; // 1st bit
class InboundAudioStream : public NodeData {
Q_OBJECT
public:
class Settings {
public:
Settings()
: _maxFramesOverDesired(DEFAULT_MAX_FRAMES_OVER_DESIRED),
_dynamicJitterBuffers(DEFAULT_DYNAMIC_JITTER_BUFFERS),
_staticDesiredJitterBufferFrames(DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES),
_useStDevForJitterCalc(DEFAULT_USE_STDEV_FOR_JITTER_CALC),
_windowStarveThreshold(DEFAULT_WINDOW_STARVE_THRESHOLD),
_windowSecondsForDesiredCalcOnTooManyStarves(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES),
_windowSecondsForDesiredReduction(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION),
_repetitionWithFade(DEFAULT_REPETITION_WITH_FADE)
{}
Settings(int maxFramesOverDesired, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
bool useStDevForJitterCalc, int windowStarveThreshold, int windowSecondsForDesiredCalcOnTooManyStarves,
int _windowSecondsForDesiredReduction, bool repetitionWithFade)
: _maxFramesOverDesired(maxFramesOverDesired),
_dynamicJitterBuffers(dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(useStDevForJitterCalc),
_windowStarveThreshold(windowStarveThreshold),
_windowSecondsForDesiredCalcOnTooManyStarves(windowSecondsForDesiredCalcOnTooManyStarves),
_windowSecondsForDesiredReduction(windowSecondsForDesiredCalcOnTooManyStarves),
_repetitionWithFade(repetitionWithFade)
{}
// max number of frames over desired in the ringbuffer.
int _maxFramesOverDesired;
// if false, _desiredJitterBufferFrames will always be _staticDesiredJitterBufferFrames. Otherwise,
// either Fred's or Philip's method will be used to calculate _desiredJitterBufferFrames based on packet timegaps.
bool _dynamicJitterBuffers;
// settings for static jitter buffer mode
int _staticDesiredJitterBufferFrames;
// settings for dynamic jitter buffer mode
bool _useStDevForJitterCalc; // if true, Philip's method is used. Otherwise, Fred's method is used.
int _windowStarveThreshold;
int _windowSecondsForDesiredCalcOnTooManyStarves;
int _windowSecondsForDesiredReduction;
// if true, the prev frame will be repeated (fading to silence) for dropped frames.
// otherwise, silence will be inserted.
bool _repetitionWithFade;
};
public:
InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings);
~InboundAudioStream() { cleanupCodec(); }
// settings
static const bool DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED;
static const int DEFAULT_STATIC_JITTER_FRAMES;
// legacy (now static) settings
static const int MAX_FRAMES_OVER_DESIRED;
static const int WINDOW_STARVE_THRESHOLD;
static const int WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES;
static const int WINDOW_SECONDS_FOR_DESIRED_REDUCTION;
// unused (eradicated) settings
static const bool USE_STDEV_FOR_JITTER;
static const bool REPETITION_WITH_FADE;
InboundAudioStream() = delete;
InboundAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames = -1);
~InboundAudioStream();
void reset();
virtual void resetStats();
@@ -114,8 +56,8 @@ public:
virtual int parseData(ReceivedMessage& packet) override;
int popFrames(int maxFrames, bool allOrNothing, bool starveIfNoFramesPopped = true);
int popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped = true);
int popFrames(int maxFrames, bool allOrNothing);
int popSamples(int maxSamples, bool allOrNothing);
bool lastPopSucceeded() const { return _lastPopSucceeded; };
const AudioRingBuffer::ConstIterator& getLastPopOutput() const { return _lastPopOutput; }
@@ -124,39 +66,18 @@ public:
void setToStarved();
void setSettings(const Settings& settings);
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
void setDynamicJitterBuffers(bool setDynamicJitterBuffers);
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames);
void setUseStDevForJitterCalc(bool useStDevForJitterCalc) { _useStDevForJitterCalc = useStDevForJitterCalc; }
void setWindowStarveThreshold(int windowStarveThreshold) { _starveThreshold = windowStarveThreshold; }
void setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves);
void setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction);
void setRepetitionWithFade(bool repetitionWithFade) { _repetitionWithFade = repetitionWithFade; }
void setDynamicJitterBufferEnabled(bool enable);
void setStaticJitterBufferFrames(int staticJitterBufferFrames);
virtual AudioStreamStats getAudioStreamStats() const;
/// returns the desired number of jitter buffer frames under the dynamic jitter buffer scheme
int getCalculatedJitterBufferFrames() const { return _useStDevForJitterCalc ?
_calculatedJitterBufferFramesUsingStDev : _calculatedJitterBufferFramesUsingMaxGap; };
/// returns the desired number of jitter buffer frames using Philip's method
int getCalculatedJitterBufferFramesUsingStDev() const { return _calculatedJitterBufferFramesUsingStDev; }
/// returns the desired number of jitter buffer frames using Freddy's method
int getCalculatedJitterBufferFramesUsingMaxGap() const { return _calculatedJitterBufferFramesUsingMaxGap; }
int getCalculatedJitterBufferFrames() const { return _calculatedJitterBufferFrames; }
int getWindowSecondsForDesiredReduction() const {
return _timeGapStatsForDesiredReduction.getWindowIntervals(); }
int getWindowSecondsForDesiredCalcOnTooManyStarves() const {
return _timeGapStatsForDesiredCalcOnTooManyStarves.getWindowIntervals(); }
bool getDynamicJitterBuffers() const { return _dynamicJitterBuffers; }
bool getRepetitionWithFade() const { return _repetitionWithFade;}
int getWindowStarveThreshold() const { return _starveThreshold;}
bool getUseStDevForJitterCalc() const { return _useStDevForJitterCalc; }
int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; }
int getMaxFramesOverDesired() const { return _maxFramesOverDesired; }
bool dynamicJitterBufferEnabled() const { return _dynamicJitterBufferEnabled; }
int getStaticJitterBufferFrames() { return _staticJitterBufferFrames; }
int getDesiredJitterBufferFrames() { return _desiredJitterBufferFrames; }
int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); }
int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); }
int getFramesAvailable() const { return _ringBuffer.framesAvailable(); }
@@ -193,7 +114,6 @@ public slots:
private:
void packetReceivedUpdateTimingStats();
int clampDesiredJitterBufferFramesValue(int desired) const;
int writeSamplesForDroppedPackets(int networkSamples);
@@ -225,64 +145,49 @@ protected:
AudioRingBuffer _ringBuffer;
bool _lastPopSucceeded;
bool _lastPopSucceeded { false };
AudioRingBuffer::ConstIterator _lastPopOutput;
bool _dynamicJitterBuffers; // if false, _desiredJitterBufferFrames is locked at 1 (old behavior)
int _staticDesiredJitterBufferFrames;
// if the jitter buffer is dynamic, this determines which method is used to calculate _desiredJitterBufferFrames:
// if true, Philip's timegap std dev calculation is used; otherwise, Freddy's max timegap calculation is used
bool _useStDevForJitterCalc;
bool _dynamicJitterBufferEnabled { DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED };
int _staticJitterBufferFrames { DEFAULT_STATIC_JITTER_FRAMES };
int _desiredJitterBufferFrames;
// if there are more than _desiredJitterBufferFrames + _maxFramesOverDesired frames, old ringbuffer frames
// will be dropped to keep audio delay from building up
int _maxFramesOverDesired;
bool _isStarved;
bool _hasStarted;
bool _isStarved { true };
bool _hasStarted { false };
// stats
int _consecutiveNotMixedCount;
int _starveCount;
int _silentFramesDropped;
int _oldFramesDropped;
int _consecutiveNotMixedCount { 0 };
int _starveCount { 0 };
int _silentFramesDropped { 0 };
int _oldFramesDropped { 0 };
SequenceNumberStats _incomingSequenceNumberStats;
quint64 _lastPacketReceivedTime;
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredCalcOnTooManyStarves; // for Freddy's method
int _calculatedJitterBufferFramesUsingMaxGap;
StDev _stdevStatsForDesiredCalcOnTooManyStarves; // for Philip's method
int _calculatedJitterBufferFramesUsingStDev; // the most recent desired frames calculated by Philip's method
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredReduction;
quint64 _lastPacketReceivedTime { 0 };
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredCalcOnTooManyStarves { 0, WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES };
int _calculatedJitterBufferFrames { 0 };
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredReduction { 0, WINDOW_SECONDS_FOR_DESIRED_REDUCTION };
int _starveHistoryWindowSeconds;
RingBufferHistory<quint64> _starveHistory;
int _starveThreshold;
TimeWeightedAvg<int> _framesAvailableStat;
MovingMinMaxAvg<float> _unplayedMs;
// this value is periodically updated with the time-weighted avg from _framesAvailableStat. it is only used for
// dropping silent frames right now.
int _currentJitterBufferFrames;
int _currentJitterBufferFrames { 0 };
MovingMinMaxAvg<quint64> _timeGapStatsForStatsPacket;
bool _repetitionWithFade;
// Reverb properties
bool _hasReverb;
float _reverbTime;
float _wetLevel;
bool _hasReverb { false };
float _reverbTime { 0.0f };
float _wetLevel { 0.0f };
CodecPluginPointer _codec;
QString _selectedCodecName;
Decoder* _decoder{ nullptr };
Decoder* _decoder { nullptr };
};
float calculateRepeatedFrameFadeFactor(int indexOfRepeat);
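calculateRepeatedFrameFadeFactor() drives the repetition-with-fade path used for dropped packets. Its exact curve is not shown in this diff; the sketch below is a simple linear fade-out, illustrative only:
#include <algorithm>
float repeatedFrameFadeFactor(int indexOfRepeat, int repeatsUntilSilent = 8) {
    if (indexOfRepeat <= 0) {
        return 1.0f;                                   // first copy plays at full level
    }
    float factor = 1.0f - (float)indexOfRepeat / (float)repeatsUntilSilent;
    return std::max(factor, 0.0f);                     // clamp once fully faded
}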

View file

@@ -19,14 +19,11 @@
#include "InjectedAudioStream.h"
InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, const bool isStereo, const InboundAudioStream::Settings& settings) :
PositionalAudioStream(PositionalAudioStream::Injector, isStereo, settings),
InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool isStereo, int numStaticJitterFrames) :
PositionalAudioStream(PositionalAudioStream::Injector, isStereo, numStaticJitterFrames),
_streamIdentifier(streamIdentifier),
_radius(0.0f),
_attenuationRatio(0)
{
}
_attenuationRatio(0) {}
const uchar MAX_INJECTOR_VOLUME = 255;

View file

@@ -18,7 +18,7 @@
class InjectedAudioStream : public PositionalAudioStream {
public:
InjectedAudioStream(const QUuid& streamIdentifier, const bool isStereo, const InboundAudioStream::Settings& settings);
InjectedAudioStream(const QUuid& streamIdentifier, bool isStereo, int numStaticJitterFrames = -1);
float getRadius() const { return _radius; }
float getAttenuationRatio() const { return _attenuationRatio; }

View file

@@ -11,7 +11,5 @@
#include "MixedAudioStream.h"
MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
: InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
{
}
MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames) :
InboundAudioStream(numFrameSamples, numFramesCapacity, numStaticJitterFrames) {}
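Hedged usage sketch of the constructor signature above; it assumes the hifi headers and build environment, and the frame-sample and capacity values are illustrative placeholders. Only the "-1 means dynamic jitter" convention comes from this diff:
#include "MixedAudioStream.h"
// dynamic jitter buffering: rely on the default numStaticJitterFrames of -1
MixedAudioStream dynamicStream(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO, 100 /* assumed capacity */);
// static jitter buffering pinned at 10 frames
MixedAudioStream staticStream(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO, 100 /* assumed capacity */, 10);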

View file

@@ -16,7 +16,7 @@
class MixedAudioStream : public InboundAudioStream {
public:
MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
MixedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames = -1);
float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
};

View file

@@ -14,10 +14,8 @@
static const int STEREO_FACTOR = 2;
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
: InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
{
}
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames)
: InboundAudioStream(numFrameSamples, numFramesCapacity, numStaticJitterFrames) {}
void MixedProcessedAudioStream::outputFormatChanged(int outputFormatChannelCountTimesSampleRate) {
_outputFormatChannelsTimesSampleRate = outputFormatChannelCountTimesSampleRate;

View file

@@ -19,7 +19,7 @@ class AudioClient;
class MixedProcessedAudioStream : public InboundAudioStream {
Q_OBJECT
public:
MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames = -1);
signals:

View file

@@ -21,11 +21,11 @@
#include <udt/PacketHeaders.h>
#include <UUID.h>
PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings) :
PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, int numStaticJitterFrames) :
InboundAudioStream(isStereo
? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
: AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, settings),
AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, numStaticJitterFrames),
_type(type),
_position(0.0f, 0.0f, 0.0f),
_orientation(0.0f, 0.0f, 0.0f, 0.0f),
@@ -36,9 +36,7 @@ PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, b
_lastPopOutputLoudness(0.0f),
_quietestTrailingFrameLoudness(std::numeric_limits<float>::max()),
_quietestFrameLoudness(0.0f),
_frameCounter(0)
{
}
_frameCounter(0) {}
void PositionalAudioStream::resetStats() {
_lastPopOutputTrailingLoudness = 0.0f;

View file

@@ -27,7 +27,7 @@ public:
Injector
};
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings);
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, int numStaticJitterFrames = -1);
const QUuid DEFAULT_STREAM_IDENTIFIER = QUuid();
virtual const QUuid& getStreamIdentifier() const { return DEFAULT_STREAM_IDENTIFIER; }

View file

@@ -545,6 +545,10 @@ QObject* OffscreenQmlSurface::load(const QUrl& qmlSource, std::function<void(QQm
return finishQmlLoad(f);
}
void OffscreenQmlSurface::clearCache() {
getRootContext()->engine()->clearComponentCache();
}
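OffscreenQmlSurface::clearCache() above just forwards to Qt: QQmlEngine::clearComponentCache() is the standard Qt call that drops cached compiled components so QML files are recompiled from source on their next load. A minimal standalone sketch of the same call:
#include <QQmlEngine>
// drop every cached compiled QML component so the next load re-reads the source
void clearQmlComponentCache(QQmlEngine* engine) {
    if (engine) {
        engine->clearComponentCache();
    }
}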
void OffscreenQmlSurface::requestUpdate() {
_polish = true;
_render = true;

View file

@@ -40,10 +40,12 @@ public:
virtual void create(QOpenGLContext* context);
void resize(const QSize& size, bool forceResize = false);
QSize size() const;
Q_INVOKABLE QObject* load(const QUrl& qmlSource, std::function<void(QQmlContext*, QObject*)> f = [](QQmlContext*, QObject*) {});
Q_INVOKABLE QObject* load(const QString& qmlSourceFile, std::function<void(QQmlContext*, QObject*)> f = [](QQmlContext*, QObject*) {}) {
return load(QUrl(qmlSourceFile), f);
}
void clearCache();
Q_INVOKABLE void executeOnUiThread(std::function<void()> function, bool blocking = false);
Q_INVOKABLE QVariant returnFromUiThread(std::function<QVariant()> function);

View file

@@ -17,3 +17,5 @@ if (NOT ANDROID)
endif ()
link_hifi_libraries(shared networking octree gpu ui procedural model model-networking recording avatars fbx entities controllers animation audio physics)
# ui includes gl, but link_hifi_libraries does not use transitive includes, so gl must be explicit
include_hifi_library_headers(gl)

View file

@@ -45,7 +45,7 @@ ScriptAudioInjector* AudioScriptingInterface::playSound(SharedSoundPointer sound
// the stereo option isn't set from script; it comes from sound metadata or the filename
AudioInjectorOptions optionsCopy = injectorOptions;
optionsCopy.stereo = sound->isStereo();
auto injector = AudioInjector::playSound(sound->getByteArray(), optionsCopy, _localAudioInterface);
auto injector = AudioInjector::playSound(sound->getByteArray(), optionsCopy);
if (!injector) {
return NULL;
}

View file

@@ -16,6 +16,8 @@
#include <UserActivityLogger.h>
#include <PathUtils.h>
#include <OffscreenUi.h>
#include "ScriptEngine.h"
#include "ScriptEngineLogging.h"
@@ -367,28 +369,21 @@ QStringList ScriptEngines::getRunningScripts() {
void ScriptEngines::stopAllScripts(bool restart) {
QReadLocker lock(&_scriptEnginesHashLock);
if (restart) {
// Delete all running scripts from cache so that they are re-downloaded when they are restarted
auto scriptCache = DependencyManager::get<ScriptCache>();
for (QHash<QUrl, ScriptEngine*>::const_iterator it = _scriptEnginesHash.constBegin();
it != _scriptEnginesHash.constEnd(); it++) {
if (!it.value()->isFinished()) {
scriptCache->deleteScript(it.key());
}
}
}
// Stop and possibly restart all currently running scripts
for (QHash<QUrl, ScriptEngine*>::const_iterator it = _scriptEnginesHash.constBegin();
it != _scriptEnginesHash.constEnd(); it++) {
// skip already stopped scripts
if (it.value()->isFinished() || it.value()->isStopping()) {
continue;
}
// queue user scripts if restarting
if (restart && it.value()->isUserLoaded()) {
connect(it.value(), &ScriptEngine::finished, this, [this](QString scriptName, ScriptEngine* engine) {
reloadScript(scriptName);
});
}
// stop all scripts
it.value()->stop(true);
qCDebug(scriptengine) << "stopping script..." << it.key();
}
@@ -431,6 +426,7 @@ void ScriptEngines::setScriptsLocation(const QString& scriptsLocation) {
void ScriptEngines::reloadAllScripts() {
DependencyManager::get<ScriptCache>()->clearCache();
DependencyManager::get<OffscreenUi>()->clearCache();
emit scriptsReloading();
stopAllScripts(true);
}

View file

@@ -19,6 +19,7 @@
#include <QtCore/QString>
#include <QtCore/QVariant>
#include <QtCore/QReadWriteLock>
#include <QtCore/QDebug>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
@@ -74,7 +75,27 @@ namespace Setting {
Handle(const QString& key, const T& defaultValue) : Interface(key), _defaultValue(defaultValue) {}
Handle(const QStringList& path, const T& defaultValue) : Handle(path.join("/"), defaultValue) {}
virtual ~Handle() { deinit(); }
static Handle Deprecated(const QString& key) {
Handle handle = Handle(key);
handle.deprecate();
return handle;
}
static Handle Deprecated(const QStringList& path) {
return Deprecated(path.join("/"));
}
static Handle Deprecated(const QString& key, const T& defaultValue) {
Handle handle = Handle(key, defaultValue);
handle.deprecate();
return handle;
}
static Handle Deprecated(const QStringList& path, const T& defaultValue) {
return Deprecated(path.join("/"), defaultValue);
}
virtual ~Handle() {
deinit();
}
// Returns setting value, returns its default value if not found
T get() const {
@@ -102,6 +123,9 @@ namespace Setting {
_isSet = true;
save();
}
if (_isDeprecated) {
deprecate();
}
}
void remove() {
@@ -117,8 +141,20 @@ namespace Setting {
virtual QVariant getVariant() override { return QVariant::fromValue(get()); }
private:
void deprecate() {
if (_isSet) {
if (get() != getDefault()) {
qInfo().nospace() << "[DEPRECATION NOTICE] " << _key << "(" << get() << ") has been deprecated, and has no effect";
} else {
remove();
}
}
_isDeprecated = true;
}
T _value;
const T _defaultValue;
bool _isDeprecated{ false };
};
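A standalone sketch (illustrative names and storage, not hifi's Setting::Handle) of the deprecation behavior introduced above: if a deprecated key was explicitly set to a non-default value, log a notice that it no longer has any effect; if it merely holds the default, quietly remove it:
#include <iostream>
#include <map>
#include <string>
// stand-in for the persisted settings store
static std::map<std::string, int> storedSettings = {
    { "audio_buffer/static_desired_jitter_buffer_frames", 4 }   // hypothetical key
};
void deprecateSetting(const std::string& key, int defaultValue) {
    auto it = storedSettings.find(key);
    if (it == storedSettings.end()) {
        return;                                    // never set: nothing to report
    }
    if (it->second != defaultValue) {
        std::cout << "[DEPRECATION NOTICE] " << key << " (" << it->second
                  << ") has been deprecated, and has no effect\n";
    } else {
        storedSettings.erase(it);                  // only held the default: drop it
    }
}
int main() {
    deprecateSetting("audio_buffer/static_desired_jitter_buffer_frames", 1);
    return 0;
}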
template <typename T>

View file

@@ -45,14 +45,20 @@ bool OculusControllerManager::activate() {
// register with UserInputMapper
auto userInputMapper = DependencyManager::get<controller::UserInputMapper>();
if (OVR_SUCCESS(ovr_GetInputState(_session, ovrControllerType_Remote, &_inputState))) {
_remote = std::make_shared<RemoteDevice>(*this);
userInputMapper->registerDevice(_remote);
unsigned int controllerConnected = ovr_GetConnectedControllerTypes(_session);
if ((controllerConnected & ovrControllerType_Remote) == ovrControllerType_Remote) {
if (OVR_SUCCESS(ovr_GetInputState(_session, ovrControllerType_Remote, &_inputState))) {
_remote = std::make_shared<RemoteDevice>(*this);
userInputMapper->registerDevice(_remote);
}
}
if (OVR_SUCCESS(ovr_GetInputState(_session, ovrControllerType_Touch, &_inputState))) {
_touch = std::make_shared<TouchDevice>(*this);
userInputMapper->registerDevice(_touch);
if ((controllerConnected & ovrControllerType_Touch) != 0) {
if (OVR_SUCCESS(ovr_GetInputState(_session, ovrControllerType_Touch, &_inputState))) {
_touch = std::make_shared<TouchDevice>(*this);
userInputMapper->registerDevice(_touch);
}
}
return true;
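The OculusControllerManager change above gates input-state queries on ovr_GetConnectedControllerTypes(). A standalone sketch of the bitmask test itself; the enum values are illustrative, not the OVR SDK's:
#include <cstdint>
enum ControllerType : uint32_t {
    ControllerRemote = 0x01,   // illustrative bit values
    ControllerTouch  = 0x02
};
// only query a device class if the runtime reports it as connected
bool shouldQueryController(uint32_t connectedMask, ControllerType type) {
    return (connectedMask & type) == static_cast<uint32_t>(type);
}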