Refining the voxelization of the sphere in the view frustum grid

samcake 2016-09-26 18:41:53 -07:00
commit 9b94a025fc
151 changed files with 3603 additions and 2199 deletions

@@ -48,11 +48,8 @@ static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 10;
Agent::Agent(ReceivedMessage& message) :
ThreadedAssignment(message),
_entityEditSender(),
_receivedAudioStream(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES,
InboundAudioStream::Settings(0, false, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, false,
DEFAULT_WINDOW_STARVE_THRESHOLD, DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES,
DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION, false))
{
_receivedAudioStream(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO,
RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES) {
DependencyManager::get<EntityScriptingInterface>()->setPacketSender(&_entityEditSender);
ResourceManager::init();
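
The mixed-audio stream here drops the InboundAudioStream::Settings blob in favor of three plain integers. A minimal sketch of the constructor shape this call site implies (parameter names are assumptions, not taken from the header):

    // Sketch only, inferred from the call above. A negative numStaticJitterFrames
    // would select dynamic jitter buffering, matching the -1 sentinel the
    // AudioMixer uses below.
    InboundAudioStream(int numFrameSamples,            // e.g. NETWORK_FRAME_SAMPLES_STEREO
                       int numFramesCapacity,          // ring-buffer capacity, in frames
                       int numStaticJitterFrames = -1);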

@@ -61,15 +61,14 @@
#include "AudioMixer.h"
const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f;
const float DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE = 0.5f; // attenuation = -6dB * log2(distance)
const float DEFAULT_NOISE_MUTING_THRESHOLD = 0.003f;
const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
const QString AUDIO_ENV_GROUP_KEY = "audio_env";
const QString AUDIO_BUFFER_GROUP_KEY = "audio_buffer";
InboundAudioStream::Settings AudioMixer::_streamSettings;
static const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f;
static const float DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE = 0.5f; // attenuation = -6dB * log2(distance)
static const float DEFAULT_NOISE_MUTING_THRESHOLD = 0.003f;
static const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
static const QString AUDIO_ENV_GROUP_KEY = "audio_env";
static const QString AUDIO_BUFFER_GROUP_KEY = "audio_buffer";
int AudioMixer::_numStaticJitterFrames{ -1 };
bool AudioMixer::_enableFilter = true;
bool AudioMixer::shouldMute(float quietestFrame) {
@@ -269,7 +268,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData&
if (!streamToAdd.lastPopSucceeded()) {
bool forceSilentBlock = true;
if (_streamSettings._repetitionWithFade && !streamToAdd.getLastPopOutput().isNull()) {
if (!streamToAdd.getLastPopOutput().isNull()) {
// repetition with fade is enabled, and we do have a valid previous frame to repeat,
// so we mix the previously-mixed block
@@ -641,7 +640,7 @@ QString AudioMixer::percentageForMixStats(int counter) {
void AudioMixer::sendStatsPacket() {
static QJsonObject statsObject;
statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
statsObject["useDynamicJitterBuffers"] = _numStaticJitterFrames == -1;
statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;
@@ -902,63 +901,62 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
// check the payload to see if we have asked for dynamicJitterBuffer support
const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "dynamic_jitter_buffer";
_streamSettings._dynamicJitterBuffers = audioBufferGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
if (_streamSettings._dynamicJitterBuffers) {
qDebug() << "Enable dynamic jitter buffers.";
bool enableDynamicJitterBuffer = audioBufferGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
if (enableDynamicJitterBuffer) {
qDebug() << "Enabling dynamic jitter buffers.";
bool ok;
const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "static_desired_jitter_buffer_frames";
_numStaticJitterFrames = audioBufferGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
if (!ok) {
_numStaticJitterFrames = InboundAudioStream::DEFAULT_STATIC_JITTER_FRAMES;
}
qDebug() << "Static desired jitter buffer frames:" << _numStaticJitterFrames;
} else {
qDebug() << "Dynamic jitter buffers disabled.";
qDebug() << "Disabling dynamic jitter buffers.";
_numStaticJitterFrames = -1;
}
// check for deprecated audio settings
auto deprecationNotice = [](const QString& setting, const QString& value) {
qInfo().nospace() << "[DEPRECATION NOTICE] " << setting << "(" << value << ") has been deprecated, and has no effect";
};
bool ok;
const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "static_desired_jitter_buffer_frames";
_streamSettings._staticDesiredJitterBufferFrames = audioBufferGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._staticDesiredJitterBufferFrames = DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES;
}
qDebug() << "Static desired jitter buffer frames:" << _streamSettings._staticDesiredJitterBufferFrames;
const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "max_frames_over_desired";
_streamSettings._maxFramesOverDesired = audioBufferGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
}
qDebug() << "Max frames over desired:" << _streamSettings._maxFramesOverDesired;
const QString USE_STDEV_FOR_DESIRED_CALC_JSON_KEY = "use_stdev_for_desired_calc";
_streamSettings._useStDevForJitterCalc = audioBufferGroupObject[USE_STDEV_FOR_DESIRED_CALC_JSON_KEY].toBool();
if (_streamSettings._useStDevForJitterCalc) {
qDebug() << "Using stdev method for jitter calc if dynamic jitter buffers enabled";
} else {
qDebug() << "Using max-gap method for jitter calc if dynamic jitter buffers enabled";
int maxFramesOverDesired = audioBufferGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
if (ok && maxFramesOverDesired != InboundAudioStream::MAX_FRAMES_OVER_DESIRED) {
deprecationNotice(MAX_FRAMES_OVER_DESIRED_JSON_KEY, QString::number(maxFramesOverDesired));
}
const QString WINDOW_STARVE_THRESHOLD_JSON_KEY = "window_starve_threshold";
_streamSettings._windowStarveThreshold = audioBufferGroupObject[WINDOW_STARVE_THRESHOLD_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowStarveThreshold = DEFAULT_WINDOW_STARVE_THRESHOLD;
int windowStarveThreshold = audioBufferGroupObject[WINDOW_STARVE_THRESHOLD_JSON_KEY].toString().toInt(&ok);
if (ok && windowStarveThreshold != InboundAudioStream::WINDOW_STARVE_THRESHOLD) {
deprecationNotice(WINDOW_STARVE_THRESHOLD_JSON_KEY, QString::number(windowStarveThreshold));
}
qDebug() << "Window A starve threshold:" << _streamSettings._windowStarveThreshold;
const QString WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY = "window_seconds_for_desired_calc_on_too_many_starves";
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES;
int windowSecondsForDesiredCalcOnTooManyStarves = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY].toString().toInt(&ok);
if (ok && windowSecondsForDesiredCalcOnTooManyStarves != InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES) {
deprecationNotice(WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY, QString::number(windowSecondsForDesiredCalcOnTooManyStarves));
}
qDebug() << "Window A length:" << _streamSettings._windowSecondsForDesiredCalcOnTooManyStarves << "seconds";
const QString WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY = "window_seconds_for_desired_reduction";
_streamSettings._windowSecondsForDesiredReduction = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredReduction = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION;
int windowSecondsForDesiredReduction = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY].toString().toInt(&ok);
if (ok && windowSecondsForDesiredReduction != InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_REDUCTION) {
deprecationNotice(WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY, QString::number(windowSecondsForDesiredReduction));
}
const QString USE_STDEV_FOR_JITTER_JSON_KEY = "use_stdev_for_desired_calc";
bool useStDevForJitterCalc = audioBufferGroupObject[USE_STDEV_FOR_JITTER_JSON_KEY].toBool();
if (useStDevForJitterCalc != InboundAudioStream::USE_STDEV_FOR_JITTER) {
deprecationNotice(USE_STDEV_FOR_JITTER_JSON_KEY, useStDevForJitterCalc ? "true" : "false");
}
qDebug() << "Window B length:" << _streamSettings._windowSecondsForDesiredReduction << "seconds";
const QString REPETITION_WITH_FADE_JSON_KEY = "repetition_with_fade";
_streamSettings._repetitionWithFade = audioBufferGroupObject[REPETITION_WITH_FADE_JSON_KEY].toBool();
if (_streamSettings._repetitionWithFade) {
qDebug() << "Repetition with fade enabled";
} else {
qDebug() << "Repetition with fade disabled";
bool repetitionWithFade = audioBufferGroupObject[REPETITION_WITH_FADE_JSON_KEY].toBool();
if (repetitionWithFade != InboundAudioStream::REPETITION_WITH_FADE) {
deprecationNotice(REPETITION_WITH_FADE_JSON_KEY, repetitionWithFade ? "true" : "false");
}
}
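
Every retired setting above funnels through one pattern: read the legacy key, and if it parses to something other than the stream's compiled-in constant, log that it no longer has any effect. Condensed, using the names from this hunk:

    auto deprecationNotice = [](const QString& setting, const QString& value) {
        qInfo().nospace() << "[DEPRECATION NOTICE] " << setting << "(" << value << ") has been deprecated, and has no effect";
    };

    bool ok;
    int maxFramesOverDesired = audioBufferGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
    if (ok && maxFramesOverDesired != InboundAudioStream::MAX_FRAMES_OVER_DESIRED) {
        deprecationNotice(MAX_FRAMES_OVER_DESIRED_JSON_KEY, QString::number(maxFramesOverDesired));
    }
    // ...and likewise for the starve-window, stdev, and repetition_with_fade keys.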

@@ -39,7 +39,7 @@ public slots:
void sendStatsPacket() override;
static const InboundAudioStream::Settings& getStreamSettings() { return _streamSettings; }
static int getStaticJitterFrames() { return _numStaticJitterFrames; }
private slots:
void broadcastMixes();
@@ -112,7 +112,7 @@ private:
};
QVector<ReverbSettings> _zoneReverbSettings;
static InboundAudioStream::Settings _streamSettings;
static int _numStaticJitterFrames; // -1 denotes dynamic jitter buffering
static bool _enableFilter;
};

@@ -109,7 +109,7 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
bool isStereo = channelFlag == 1;
auto avatarAudioStream = new AvatarAudioStream(isStereo, AudioMixer::getStreamSettings());
auto avatarAudioStream = new AvatarAudioStream(isStereo, AudioMixer::getStaticJitterFrames());
avatarAudioStream->setupCodec(_codec, _selectedCodecName, AudioConstants::MONO);
qDebug() << "creating new AvatarAudioStream... codec:" << _selectedCodecName;
@@ -143,7 +143,7 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
if (streamIt == _audioStreams.end()) {
// we don't have this injected stream yet, so add it
auto injectorStream = new InjectedAudioStream(streamIdentifier, isStereo, AudioMixer::getStreamSettings());
auto injectorStream = new InjectedAudioStream(streamIdentifier, isStereo, AudioMixer::getStaticJitterFrames());
#if INJECTORS_SUPPORT_CODECS
injectorStream->setupCodec(_codec, _selectedCodecName, isStereo ? AudioConstants::STEREO : AudioConstants::MONO);
@@ -270,6 +270,7 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() {
downstreamStats["desired"] = streamStats._desiredJitterBufferFrames;
downstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
downstreamStats["available"] = (double) streamStats._framesAvailable;
downstreamStats["unplayed"] = (double) streamStats._unplayedMs;
downstreamStats["starves"] = (double) streamStats._starveCount;
downstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
downstreamStats["overflows"] = (double) streamStats._overflowCount;
@@ -294,6 +295,7 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() {
upstreamStats["desired_calc"] = avatarAudioStream->getCalculatedJitterBufferFrames();
upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
upstreamStats["available"] = (double) streamStats._framesAvailable;
upstreamStats["unplayed"] = (double) streamStats._unplayedMs;
upstreamStats["starves"] = (double) streamStats._starveCount;
upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
upstreamStats["overflows"] = (double) streamStats._overflowCount;
@@ -323,6 +325,7 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() {
upstreamStats["desired_calc"] = injectorPair.second->getCalculatedJitterBufferFrames();
upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
upstreamStats["available"] = (double) streamStats._framesAvailable;
upstreamStats["unplayed"] = (double) streamStats._unplayedMs;
upstreamStats["starves"] = (double) streamStats._starveCount;
upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
upstreamStats["overflows"] = (double) streamStats._overflowCount;
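
Each of the three stats blocks above gains an "unplayed" field. A sketch of what the stat presumably reports (the frame-duration constant named here is an assumption, not taken from this diff):

    // milliseconds of buffered audio that has not yet been popped for mixing
    quint16 unplayedMs = (quint16)(streamStats._framesAvailable * AudioConstants::NETWORK_FRAME_MSECS);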

@@ -13,10 +13,8 @@
#include "AvatarAudioStream.h"
AvatarAudioStream::AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings) :
PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, settings)
{
}
AvatarAudioStream::AvatarAudioStream(bool isStereo, int numStaticJitterFrames) :
PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, numStaticJitterFrames) {}
int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
int readBytes = 0;

@@ -18,7 +18,7 @@
class AvatarAudioStream : public PositionalAudioStream {
public:
AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings);
AvatarAudioStream(bool isStereo, int numStaticJitterFrames = -1);
private:
// disallow copying of AvatarAudioStream objects
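
Defaulting numStaticJitterFrames to -1 keeps construction terse at call sites while still allowing a fixed buffer when the mixer asks for one. A usage sketch:

    AvatarAudioStream dynamicStream(true);      // stereo, dynamic jitter buffering (-1)
    AvatarAudioStream staticStream(true, 10);   // stereo, fixed 10-frame jitter buffer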

@@ -19,6 +19,7 @@
#include <AccountManager.h>
#include <HTTPConnection.h>
#include <LogHandler.h>
#include <shared/NetworkUtils.h>
#include <NetworkingConstants.h>
#include <NumericalConstants.h>
#include <UUID.h>

@@ -20,8 +20,8 @@ if (WIN32)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://hifi-public.s3.amazonaws.com/dependencies/ovr_sdk_win_1.3.0_public.zip
URL_MD5 a2dcf695e0f03a70fdd1ed7480585e82
URL https://hifi-public.s3.amazonaws.com/dependencies/ovr_sdk_win_1.8.0_public.zip
URL_MD5 bea17e04acc1dd8cf7cabefa1b28cc3c
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
@@ -29,8 +29,8 @@ if (WIN32)
)
ExternalProject_Get_Property(${EXTERNAL_NAME} SOURCE_DIR)
set(LIBOVR_DIR ${SOURCE_DIR}/OculusSDK/LibOVR)
message("LIBOVR dir ${SOURCE_DIR}")
set(LIBOVR_DIR ${SOURCE_DIR}/LibOVR)
if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
set(LIBOVR_LIB_DIR ${LIBOVR_DIR}/Lib/Windows/x64/Release/VS2013 CACHE TYPE INTERNAL)
else()
@@ -38,6 +38,7 @@ if (WIN32)
endif()
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${LIBOVR_DIR}/Include CACHE TYPE INTERNAL)
message("LIBOVR include dir ${${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS}")
set(${EXTERNAL_NAME_UPPER}_LIBRARIES ${LIBOVR_LIB_DIR}/LibOVR.lib CACHE TYPE INTERNAL)
elseif(APPLE)

@@ -18,6 +18,13 @@ macro(SET_PACKAGING_PARAMETERS)
set(RELEASE_TYPE $ENV{RELEASE_TYPE})
set(RELEASE_NUMBER $ENV{RELEASE_NUMBER})
string(TOLOWER "$ENV{BRANCH}" BUILD_BRANCH)
set(BUILD_GLOBAL_SERVICES "DEVELOPMENT")
set(USE_STABLE_GLOBAL_SERVICES FALSE)
message(STATUS "The BUILD_BRANCH variable is: ${BUILD_BRANCH}")
message(STATUS "The BRANCH environment variable is: $ENV{BRANCH}")
message(STATUS "The RELEASE_TYPE variable is: ${RELEASE_TYPE}")
if (RELEASE_TYPE STREQUAL "PRODUCTION")
set(DEPLOY_PACKAGE TRUE)
@@ -31,6 +38,14 @@ macro(SET_PACKAGING_PARAMETERS)
# add definition for this release type
add_definitions(-DPRODUCTION_BUILD)
# if the build is a PRODUCTION_BUILD from the "stable" branch
# then use the STABLE global services
if (BUILD_BRANCH STREQUAL "stable")
message(STATUS "The RELEASE_TYPE is PRODUCTION and the BUILD_BRANCH is stable...")
set(BUILD_GLOBAL_SERVICES "STABLE")
set(USE_STABLE_GLOBAL_SERVICES TRUE)
endif()
elseif (RELEASE_TYPE STREQUAL "PR")
set(DEPLOY_PACKAGE TRUE)
set(PR_BUILD 1)
@@ -132,6 +147,10 @@ macro(SET_PACKAGING_PARAMETERS)
set(CLIENT_COMPONENT client)
set(SERVER_COMPONENT server)
# print out some results for testing this new build feature
message(STATUS "The BUILD_GLOBAL_SERVICES variable is: ${BUILD_GLOBAL_SERVICES}")
message(STATUS "The USE_STABLE_GLOBAL_SERVICES variable is: ${USE_STABLE_GLOBAL_SERVICES}")
# create a header file our targets can use to find out the application version
file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/includes")
configure_file("${HF_CMAKE_DIR}/templates/BuildInfo.h.in" "${CMAKE_BINARY_DIR}/includes/BuildInfo.h")
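
As a worked example: a build with RELEASE_TYPE=PRODUCTION and BRANCH=stable ends up with BUILD_GLOBAL_SERVICES=STABLE and USE_STABLE_GLOBAL_SERVICES=TRUE, which the BuildInfo.h template below bakes into the binary; every other combination keeps the DEVELOPMENT/FALSE defaults set at the top of the macro.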

@@ -9,7 +9,12 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#define USE_STABLE_GLOBAL_SERVICES @USE_STABLE_GLOBAL_SERVICES@
namespace BuildInfo {
const QString MODIFIED_ORGANIZATION = "@BUILD_ORGANIZATION@";
const QString VERSION = "@BUILD_VERSION@";
const QString BUILD_BRANCH = "@BUILD_BRANCH@";
const QString BUILD_GLOBAL_SERVICES = "@BUILD_GLOBAL_SERVICES@";
}
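
This template is filled in by the configure_file() call from SetPackagingParameters above. A sketch of the generated header for a stable production build (values illustrative), together with the compile-time switch DomainServer.cpp applies to it below:

    #define USE_STABLE_GLOBAL_SERVICES 1   // TRUE on a stable production build, FALSE otherwise

    namespace BuildInfo {
        const QString BUILD_BRANCH = "stable";
        const QString BUILD_GLOBAL_SERVICES = "STABLE";
    }

    #if USE_STABLE_GLOBAL_SERVICES
    const QString ICE_SERVER_DEFAULT_HOSTNAME = "ice.highfidelity.com";
    #else
    const QString ICE_SERVER_DEFAULT_HOSTNAME = "dev-ice.highfidelity.com";
    #endif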

@@ -600,6 +600,9 @@ Section "-Core installation"
Delete "$INSTDIR\version"
Delete "$INSTDIR\xinput1_3.dll"
;Delete old Qt files
Delete "$INSTDIR\audio\qtaudio_windows.dll"
; Delete old desktop shortcuts before they were renamed during Sandbox rename
Delete "$DESKTOP\@PRE_SANDBOX_INTERFACE_SHORTCUT_NAME@.lnk"
Delete "$DESKTOP\@PRE_SANDBOX_CONSOLE_SHORTCUT_NAME@.lnk"

@@ -380,6 +380,14 @@
"default": "0",
"advanced": false
},
{
"name": "maximum_user_capacity_redirect_location",
"label": "Redirect to Location on Maximum Capacity",
"help": "Another domain to redirect clients to when the maximum number of avatars are connected.",
"placeholder": "",
"default": "",
"advanced": false
},
{
"name": "standard_permissions",
"type": "table",
@@ -1025,65 +1033,41 @@
"name": "dynamic_jitter_buffer",
"type": "checkbox",
"label": "Dynamic Jitter Buffers",
"help": "Dynamically buffer client audio based on perceived jitter in packet receipt timing",
"help": "Dynamically buffer inbound audio streams based on perceived jitter in packet receipt timing.",
"default": true,
"advanced": true
},
{
"name": "static_desired_jitter_buffer_frames",
"label": "Static Desired Jitter Buffer Frames",
"help": "If dynamic jitter buffers is disabled, this determines the target number of frames maintained by the AudioMixer's jitter buffers",
"help": "If dynamic jitter buffers is disabled, this determines the size of the jitter buffers of inbound audio streams in the mixer. Higher numbers introduce more latency.",
"placeholder": "1",
"default": "1",
"advanced": true
},
{
"name": "max_frames_over_desired",
"label": "Max Frames Over Desired",
"help": "The highest number of frames an AudioMixer's ringbuffer can exceed the desired jitter buffer frames by",
"placeholder": "10",
"default": "10",
"advanced": true
"name": "max_frames_over_desired",
"deprecated": true
},
{
"name": "use_stdev_for_desired_calc",
"type": "checkbox",
"label": "Stdev for Desired Jitter Frames Calc",
"help": "Use Philip's method (stdev of timegaps) to calculate desired jitter frames (otherwise Fred's max timegap method is used)",
"default": false,
"advanced": true
"name": "window_starve_threshold",
"deprecated": true
},
{
"name": "window_starve_threshold",
"label": "Window Starve Threshold",
"help": "If this many starves occur in an N-second window (N is the number in the next field), then the desired jitter frames will be re-evaluated using Window A.",
"placeholder": "3",
"default": "3",
"advanced": true
"name": "window_seconds_for_desired_calc_on_too_many_starves",
"deprecated": true
},
{
"name": "window_seconds_for_desired_calc_on_too_many_starves",
"label": "Timegaps Window (A) Seconds",
"help": "Window A contains a history of timegaps. Its max timegap is used to re-evaluate the desired jitter frames when too many starves occur within it.",
"placeholder": "50",
"default": "50",
"advanced": true
"name": "window_seconds_for_desired_reduction",
"deprecated": true
},
{
"name": "window_seconds_for_desired_reduction",
"label": "Timegaps Window (B) Seconds",
"help": "Window B contains a history of timegaps. Its max timegap is used as a ceiling for the desired jitter frames value.",
"placeholder": "10",
"default": "10",
"advanced": true
"name": "use_stdev_for_desired_calc",
"deprecated": true
},
{
"name": "repetition_with_fade",
"type": "checkbox",
"label": "Repetition with Fade",
"help": "Dropped frames and mixing during starves repeat the last frame, eventually fading to silence",
"default": false,
"advanced": true
"name": "repetition_with_fade",
"deprecated": true
}
]
},

@@ -75,7 +75,7 @@ span.port {
color: #666666;
}
.advanced-setting {
.advanced-setting, .deprecated-setting {
display: none;
}

@@ -40,7 +40,8 @@
<script id="panels-template" type="text/template">
<% _.each(descriptions, function(group){ %>
<% split_settings = _.partition(group.settings, function(value, index) { return !value.advanced }) %>
<% var settings = _.partition(group.settings, function(value, index) { return !value.deprecated })[0] %>
<% split_settings = _.partition(settings, function(value, index) { return !value.advanced }) %>
<% isAdvanced = _.isEmpty(split_settings[0]) %>
<% if (isAdvanced) { %>
<% $("a[href=#" + group.name + "]").addClass('advanced-setting').hide() %>

@@ -2,6 +2,7 @@ var Settings = {
showAdvanced: false,
METAVERSE_URL: 'https://metaverse.highfidelity.com',
ADVANCED_CLASS: 'advanced-setting',
DEPRECATED_CLASS: 'deprecated-setting',
TRIGGER_CHANGE_CLASS: 'trigger-change',
DATA_ROW_CLASS: 'value-row',
DATA_COL_CLASS: 'value-col',
@@ -42,7 +43,10 @@
var viewHelpers = {
getFormGroup: function(keypath, setting, values, isAdvanced) {
form_group = "<div class='form-group " + (isAdvanced ? Settings.ADVANCED_CLASS : "") + "' data-keypath='" + keypath + "'>";
form_group = "<div class='form-group " +
(isAdvanced ? Settings.ADVANCED_CLASS : "") + " " +
(setting.deprecated ? Settings.DEPRECATED_CLASS : "" ) + "' " +
"data-keypath='" + keypath + "'>";
setting_value = _(values).valueForKeyPath(keypath);
if (_.isUndefined(setting_value) || _.isNull(setting_value)) {
@@ -454,7 +458,7 @@ function setupHFAccountButton() {
}
// use the existing getFormGroup helper to ask for a button
var buttonGroup = viewHelpers.getFormGroup('', buttonSetting, Settings.data.values, false);
var buttonGroup = viewHelpers.getFormGroup('', buttonSetting, Settings.data.values);
// add the button group to the top of the metaverse panel
$('#metaverse .panel-body').prepend(buttonGroup);
@@ -665,7 +669,7 @@ function setupPlacesTable() {
}
// get a table for the places
var placesTableGroup = viewHelpers.getFormGroup('', placesTableSetting, Settings.data.values, false);
var placesTableGroup = viewHelpers.getFormGroup('', placesTableSetting, Settings.data.values);
// append the places table in the right place
$('#places_paths .panel-body').prepend(placesTableGroup);

@@ -317,6 +317,7 @@ SharedNodePointer DomainGatekeeper::processAssignmentConnectRequest(const NodeCo
}
const QString MAXIMUM_USER_CAPACITY = "security.maximum_user_capacity";
const QString MAXIMUM_USER_CAPACITY_REDIRECT_LOCATION = "security.maximum_user_capacity_redirect_location";
SharedNodePointer DomainGatekeeper::processAgentConnectRequest(const NodeConnectionData& nodeConnection,
const QString& username,
@@ -363,7 +364,7 @@ SharedNodePointer DomainGatekeeper::processAgentConnectRequest(const NodeConnect
if (!userPerms.can(NodePermissions::Permission::canConnectToDomain)) {
sendConnectionDeniedPacket("You lack the required permissions to connect to this domain.",
nodeConnection.senderSockAddr, DomainHandler::ConnectionRefusedReason::TooManyUsers);
nodeConnection.senderSockAddr, DomainHandler::ConnectionRefusedReason::NotAuthorized);
#ifdef WANT_DEBUG
qDebug() << "stalling login due to permissions:" << username;
#endif
@@ -372,8 +373,16 @@ SharedNodePointer DomainGatekeeper::processAgentConnectRequest(const NodeConnect
if (!userPerms.can(NodePermissions::Permission::canConnectPastMaxCapacity) && !isWithinMaxCapacity()) {
// we can't allow this user to connect because we are at max capacity
QString redirectOnMaxCapacity;
const QVariant* redirectOnMaxCapacityVariant =
valueForKeyPath(_server->_settingsManager.getSettingsMap(), MAXIMUM_USER_CAPACITY_REDIRECT_LOCATION);
if (redirectOnMaxCapacityVariant && redirectOnMaxCapacityVariant->canConvert<QString>()) {
redirectOnMaxCapacity = redirectOnMaxCapacityVariant->toString();
qDebug() << "Redirection domain:" << redirectOnMaxCapacity;
}
sendConnectionDeniedPacket("Too many connected users.", nodeConnection.senderSockAddr,
DomainHandler::ConnectionRefusedReason::TooManyUsers);
DomainHandler::ConnectionRefusedReason::TooManyUsers, redirectOnMaxCapacity);
#ifdef WANT_DEBUG
qDebug() << "stalling login due to max capacity:" << username;
#endif
@@ -623,22 +632,30 @@ void DomainGatekeeper::sendProtocolMismatchConnectionDenial(const HifiSockAddr&
}
void DomainGatekeeper::sendConnectionDeniedPacket(const QString& reason, const HifiSockAddr& senderSockAddr,
DomainHandler::ConnectionRefusedReason reasonCode) {
DomainHandler::ConnectionRefusedReason reasonCode,
QString extraInfo) {
// this is an agent and we've decided we won't let them connect - send them a packet to deny connection
QByteArray utfString = reason.toUtf8();
quint16 payloadSize = utfString.size();
QByteArray utfReasonString = reason.toUtf8();
quint16 reasonSize = utfReasonString.size();
QByteArray utfExtraInfo = extraInfo.toUtf8();
quint16 extraInfoSize = utfExtraInfo.size();
// setup the DomainConnectionDenied packet
auto connectionDeniedPacket = NLPacket::create(PacketType::DomainConnectionDenied,
payloadSize + sizeof(payloadSize) + sizeof(uint8_t));
sizeof(uint8_t) + // reasonCode
reasonSize + sizeof(reasonSize) +
extraInfoSize + sizeof(extraInfoSize));
// pack in the reason the connection was denied (the client displays this)
if (payloadSize > 0) {
uint8_t reasonCodeWire = (uint8_t)reasonCode;
connectionDeniedPacket->writePrimitive(reasonCodeWire);
connectionDeniedPacket->writePrimitive(payloadSize);
connectionDeniedPacket->write(utfString);
}
uint8_t reasonCodeWire = (uint8_t)reasonCode;
connectionDeniedPacket->writePrimitive(reasonCodeWire);
connectionDeniedPacket->writePrimitive(reasonSize);
connectionDeniedPacket->write(utfReasonString);
// write the extra info as well
connectionDeniedPacket->writePrimitive(extraInfoSize);
connectionDeniedPacket->write(utfExtraInfo);
// send the packet off
DependencyManager::get<LimitedNodeList>()->sendPacket(std::move(connectionDeniedPacket), senderSockAddr);
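
The denied-connection packet now always carries the reason code plus two length-prefixed UTF-8 strings, where the old code wrote nothing at all when the reason was empty. A sketch of the matching read side (hypothetical; the real parsing lives in the client's DomainHandler):

    uint8_t reasonCodeWire;
    message->readPrimitive(&reasonCodeWire);
    auto reasonCode = static_cast<DomainHandler::ConnectionRefusedReason>(reasonCodeWire);

    quint16 reasonSize;
    message->readPrimitive(&reasonSize);
    QString reason = QString::fromUtf8(message->read(reasonSize));

    quint16 extraInfoSize;
    message->readPrimitive(&extraInfoSize);
    // on TooManyUsers this can carry the maximum_user_capacity_redirect_location domain
    QString extraInfo = QString::fromUtf8(message->read(extraInfoSize));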

@@ -88,7 +88,8 @@ private:
void sendConnectionTokenPacket(const QString& username, const HifiSockAddr& senderSockAddr);
static void sendConnectionDeniedPacket(const QString& reason, const HifiSockAddr& senderSockAddr,
DomainHandler::ConnectionRefusedReason reasonCode = DomainHandler::ConnectionRefusedReason::Unknown);
DomainHandler::ConnectionRefusedReason reasonCode = DomainHandler::ConnectionRefusedReason::Unknown,
QString extraInfo = QString());
void pingPunchForConnectingPeer(const SharedNetworkPeer& peer);

@@ -23,6 +23,7 @@
#include <QStandardPaths>
#include <QTimer>
#include <QUrlQuery>
#include <QCommandLineParser>
#include <AccountManager.h>
#include <BuildInfo.h>
@@ -45,7 +46,11 @@
int const DomainServer::EXIT_CODE_REBOOT = 234923;
#if USE_STABLE_GLOBAL_SERVICES
const QString ICE_SERVER_DEFAULT_HOSTNAME = "ice.highfidelity.com";
#else
const QString ICE_SERVER_DEFAULT_HOSTNAME = "dev-ice.highfidelity.com";
#endif
DomainServer::DomainServer(int argc, char* argv[]) :
QCoreApplication(argc, argv),
@@ -62,8 +67,11 @@ DomainServer::DomainServer(int argc, char* argv[]) :
_webAuthenticationStateSet(),
_cookieSessionHash(),
_automaticNetworkingSetting(),
_settingsManager()
_settingsManager(),
_iceServerAddr(ICE_SERVER_DEFAULT_HOSTNAME),
_iceServerPort(ICE_SERVER_DEFAULT_PORT)
{
parseCommandLine();
qInstallMessageHandler(LogHandler::verboseMessageHandler);
LogUtils::init();
@@ -79,6 +87,14 @@ DomainServer::DomainServer(int argc, char* argv[]) :
qDebug() << "Setting up domain-server";
qDebug() << "[VERSION] Build sequence:" << qPrintable(applicationVersion());
qDebug() << "[VERSION] MODIFIED_ORGANIZATION:" << BuildInfo::MODIFIED_ORGANIZATION;
qDebug() << "[VERSION] VERSION:" << BuildInfo::VERSION;
qDebug() << "[VERSION] BUILD_BRANCH:" << BuildInfo::BUILD_BRANCH;
qDebug() << "[VERSION] BUILD_GLOBAL_SERVICES:" << BuildInfo::BUILD_GLOBAL_SERVICES;
qDebug() << "[VERSION] We will be using this default ICE server:" << ICE_SERVER_DEFAULT_HOSTNAME;
// make sure we have a fresh AccountManager instance
// (need this since domain-server can restart itself and maintain static variables)
DependencyManager::set<AccountManager>();
@@ -147,6 +163,53 @@ DomainServer::DomainServer(int argc, char* argv[]) :
qDebug() << "domain-server is running";
}
void DomainServer::parseCommandLine() {
QCommandLineParser parser;
parser.setApplicationDescription("High Fidelity Domain Server");
parser.addHelpOption();
const QCommandLineOption iceServerAddressOption("i", "ice-server address", "IP:PORT or HOSTNAME:PORT");
parser.addOption(iceServerAddressOption);
const QCommandLineOption domainIDOption("d", "domain-server uuid");
parser.addOption(domainIDOption);
const QCommandLineOption getTempNameOption("get-temp-name", "Request a temporary domain-name");
parser.addOption(getTempNameOption);
const QCommandLineOption masterConfigOption("master-config", "Deprecated config-file option");
parser.addOption(masterConfigOption);
if (!parser.parse(QCoreApplication::arguments())) {
qWarning() << parser.errorText() << endl;
parser.showHelp();
Q_UNREACHABLE();
}
if (parser.isSet(iceServerAddressOption)) {
// parse the IP and port combination for this target
QString hostnamePortString = parser.value(iceServerAddressOption);
_iceServerAddr = hostnamePortString.left(hostnamePortString.indexOf(':'));
_iceServerPort = (quint16) hostnamePortString.mid(hostnamePortString.indexOf(':') + 1).toUInt();
if (_iceServerPort == 0) {
_iceServerPort = ICE_SERVER_DEFAULT_PORT;
}
if (_iceServerAddr.isEmpty()) {
qWarning() << "Could not parse an IP address and port combination from" << hostnamePortString;
QMetaObject::invokeMethod(this, "quit", Qt::QueuedConnection);
}
}
if (parser.isSet(domainIDOption)) {
_overridingDomainID = QUuid(parser.value(domainIDOption));
_overrideDomainID = true;
qDebug() << "domain-server ID is" << _overridingDomainID;
}
}
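
A note on the "-i" parsing above: QString is forgiving about a missing colon, so the defaults fall out without an explicit check. A worked trace (hostnames illustrative):

    QString s = "ice.example.com:7337";
    s.left(s.indexOf(':'));                    // "ice.example.com"
    s.mid(s.indexOf(':') + 1).toUInt();        // 7337

    QString bare = "ice.example.com";          // no port supplied
    bare.left(bare.indexOf(':'));              // indexOf() == -1; left(-1) returns the whole string
    bare.mid(bare.indexOf(':') + 1).toUInt();  // mid(0) is the whole string; toUInt() fails => 0 => default port
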
DomainServer::~DomainServer() {
// destroy the LimitedNodeList before the DomainServer QCoreApplication is down
DependencyManager::destroy<LimitedNodeList>();
@@ -154,7 +217,7 @@ DomainServer::~DomainServer() {
void DomainServer::queuedQuit(QString quitMessage, int exitCode) {
if (!quitMessage.isEmpty()) {
qCritical() << qPrintable(quitMessage);
qWarning() << qPrintable(quitMessage);
}
QCoreApplication::exit(exitCode);
@@ -295,7 +358,7 @@ void DomainServer::handleTempDomainSuccess(QNetworkReply& requestReply) {
auto domainObject = jsonObject[DATA_KEY].toObject()[DOMAIN_KEY].toObject();
if (!domainObject.isEmpty()) {
auto id = domainObject[ID_KEY].toString();
auto id = _overrideDomainID ? _overridingDomainID.toString() : domainObject[ID_KEY].toString();
auto name = domainObject[NAME_KEY].toString();
auto key = domainObject[KEY_KEY].toString();
@@ -403,24 +466,30 @@ void DomainServer::setupNodeListAndAssignments() {
quint16 localHttpsPort = DOMAIN_SERVER_HTTPS_PORT;
nodeList->putLocalPortIntoSharedMemory(DOMAIN_SERVER_LOCAL_HTTPS_PORT_SMEM_KEY, this, localHttpsPort);
// set our LimitedNodeList UUID to match the UUID from our config
// nodes will currently use this to add resources to data-web that relate to our domain
const QVariant* idValueVariant = valueForKeyPath(settingsMap, METAVERSE_DOMAIN_ID_KEY_PATH);
if (idValueVariant) {
nodeList->setSessionUUID(idValueVariant->toString());
bool isMetaverseDomain = false;
if (_overrideDomainID) {
nodeList->setSessionUUID(_overridingDomainID);
isMetaverseDomain = true; // assume metaverse domain
} else {
const QVariant* idValueVariant = valueForKeyPath(settingsMap, METAVERSE_DOMAIN_ID_KEY_PATH);
if (idValueVariant) {
nodeList->setSessionUUID(idValueVariant->toString());
isMetaverseDomain = true; // if we have an ID, we'll assume we're a metaverse domain
} else {
nodeList->setSessionUUID(QUuid::createUuid()); // Use random UUID
}
}
// if we have an ID, we'll assume we're a metaverse domain
// now see if we think we're a temp domain (we have an API key) or a full domain
if (isMetaverseDomain) {
// see if we think we're a temp domain (we have an API key) or a full domain
const auto& temporaryDomainKey = DependencyManager::get<AccountManager>()->getTemporaryDomainKey(getID());
if (temporaryDomainKey.isEmpty()) {
_type = MetaverseDomain;
} else {
_type = MetaverseTemporaryDomain;
}
} else {
nodeList->setSessionUUID(QUuid::createUuid()); // Use random UUID
}
connect(nodeList.data(), &LimitedNodeList::nodeAdded, this, &DomainServer::nodeAdded);
@@ -536,7 +605,6 @@ void DomainServer::setupAutomaticNetworking() {
} else {
qDebug() << "Cannot enable domain-server automatic networking without a domain ID."
<< "Please add an ID to your config file or via the web interface.";
return;
}
}
@@ -594,12 +662,11 @@ void DomainServer::setupICEHeartbeatForFullNetworking() {
void DomainServer::updateICEServerAddresses() {
if (_iceAddressLookupID == -1) {
_iceAddressLookupID = QHostInfo::lookupHost(ICE_SERVER_DEFAULT_HOSTNAME, this, SLOT(handleICEHostInfo(QHostInfo)));
_iceAddressLookupID = QHostInfo::lookupHost(_iceServerAddr, this, SLOT(handleICEHostInfo(QHostInfo)));
}
}
void DomainServer::parseAssignmentConfigs(QSet<Assignment::Type>& excludedTypes) {
// check for configs from the command line, these take precedence
const QString ASSIGNMENT_CONFIG_REGEX_STRING = "config-([\\d]+)";
QRegExp assignmentConfigRegex(ASSIGNMENT_CONFIG_REGEX_STRING);
@@ -1211,41 +1278,43 @@ void DomainServer::handleMetaverseHeartbeatError(QNetworkReply& requestReply) {
}
void DomainServer::sendICEServerAddressToMetaverseAPI() {
if (!_iceServerSocket.isNull()) {
const QString ICE_SERVER_ADDRESS = "ice_server_address";
const QString ICE_SERVER_ADDRESS = "ice_server_address";
QJsonObject domainObject;
QJsonObject domainObject;
if (!_connectedToICEServer || _iceServerSocket.isNull()) {
domainObject[ICE_SERVER_ADDRESS] = "0.0.0.0";
} else {
// we're using full automatic networking and we have a current ice-server socket, use that now
domainObject[ICE_SERVER_ADDRESS] = _iceServerSocket.getAddress().toString();
const auto& temporaryDomainKey = DependencyManager::get<AccountManager>()->getTemporaryDomainKey(getID());
if (!temporaryDomainKey.isEmpty()) {
// add the temporary domain token
const QString KEY_KEY = "api_key";
domainObject[KEY_KEY] = temporaryDomainKey;
}
QString domainUpdateJSON = QString("{\"domain\": %1 }").arg(QString(QJsonDocument(domainObject).toJson()));
// make sure we hear about failure so we can retry
JSONCallbackParameters callbackParameters;
callbackParameters.errorCallbackReceiver = this;
callbackParameters.errorCallbackMethod = "handleFailedICEServerAddressUpdate";
static QString repeatedMessage = LogHandler::getInstance().addOnlyOnceMessageRegex
("Updating ice-server address in High Fidelity Metaverse API to [^ \n]+");
qDebug() << "Updating ice-server address in High Fidelity Metaverse API to"
<< _iceServerSocket.getAddress().toString();
static const QString DOMAIN_ICE_ADDRESS_UPDATE = "/api/v1/domains/%1/ice_server_address";
DependencyManager::get<AccountManager>()->sendRequest(DOMAIN_ICE_ADDRESS_UPDATE.arg(uuidStringWithoutCurlyBraces(getID())),
AccountManagerAuth::Optional,
QNetworkAccessManager::PutOperation,
callbackParameters,
domainUpdateJSON.toUtf8());
}
const auto& temporaryDomainKey = DependencyManager::get<AccountManager>()->getTemporaryDomainKey(getID());
if (!temporaryDomainKey.isEmpty()) {
// add the temporary domain token
const QString KEY_KEY = "api_key";
domainObject[KEY_KEY] = temporaryDomainKey;
}
QString domainUpdateJSON = QString("{\"domain\": %1 }").arg(QString(QJsonDocument(domainObject).toJson()));
// make sure we hear about failure so we can retry
JSONCallbackParameters callbackParameters;
callbackParameters.errorCallbackReceiver = this;
callbackParameters.errorCallbackMethod = "handleFailedICEServerAddressUpdate";
static QString repeatedMessage = LogHandler::getInstance().addOnlyOnceMessageRegex
("Updating ice-server address in High Fidelity Metaverse API to [^ \n]+");
qDebug() << "Updating ice-server address in High Fidelity Metaverse API to"
<< (_iceServerSocket.isNull() ? "" : _iceServerSocket.getAddress().toString());
static const QString DOMAIN_ICE_ADDRESS_UPDATE = "/api/v1/domains/%1/ice_server_address";
DependencyManager::get<AccountManager>()->sendRequest(DOMAIN_ICE_ADDRESS_UPDATE.arg(uuidStringWithoutCurlyBraces(getID())),
AccountManagerAuth::Optional,
QNetworkAccessManager::PutOperation,
callbackParameters,
domainUpdateJSON.toUtf8());
}
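
With the early-out removed, the domain-server reports "0.0.0.0" to the metaverse API whenever it has no ICE connection instead of staying silent, so the API always reflects the current state. Once connected, the PUT body becomes, for example, {"domain": {"ice_server_address": "203.0.113.5"}}, plus an "api_key" entry for temporary domains (address illustrative).
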
void DomainServer::handleFailedICEServerAddressUpdate(QNetworkReply& requestReply) {
@@ -1297,6 +1366,7 @@ void DomainServer::sendHeartbeatToIceServer() {
// reset the connection flag for ICE server
_connectedToICEServer = false;
sendICEServerAddressToMetaverseAPI();
// randomize our ice-server address (and simultaneously look up any new hostnames for available ice-servers)
randomizeICEServerAddress(true);
@@ -2269,6 +2339,7 @@ void DomainServer::processICEServerHeartbeatACK(QSharedPointer<ReceivedMessage>
if (!_connectedToICEServer) {
_connectedToICEServer = true;
sendICEServerAddressToMetaverseAPI();
qInfo() << "Connected to ice-server at" << _iceServerSocket;
}
}

@@ -105,6 +105,7 @@ signals:
private:
const QUuid& getID();
void parseCommandLine();
void setupNodeListAndAssignments();
bool optionallySetupOAuth();
@@ -205,6 +206,11 @@ private:
friend class DomainGatekeeper;
friend class DomainMetadata;
QString _iceServerAddr;
int _iceServerPort;
bool _overrideDomainID { false }; // should we override the domain-id from settings?
QUuid _overridingDomainID { QUuid() }; // what should we override it with?
};

@@ -9,10 +9,10 @@
.st1{fill:#E6E7E8;}
.st2{fill:#FFFFFF;}
</style>
<path class="st0" d="M1428.61,172H11.46c-6.27,0-11.39-5.13-11.39-11.39V49.58c0-6.27,5.13-11.39,11.39-11.39h1417.15
c6.27,0,11.39,5.13,11.39,11.39v111.03C1440,166.87,1434.87,172,1428.61,172z"/>
<path class="st1" d="M1428.61,165.81H11.46c-6.27,0-11.39-5.13-11.39-11.39V43.39c0-6.27,5.13-11.39,11.39-11.39h1417.15
c6.27,0,11.39,5.13,11.39,11.39v111.03C1440,160.68,1434.87,165.81,1428.61,165.81z"/>
<path class="st2" d="M1133.24,165.81H417.95c-4.47,0-8.12-3.65-8.12-8.12V40.11c0-4.47,3.65-8.12,8.12-8.12h715.28
c4.47,0,8.12,3.65,8.12,8.12v117.57C1141.36,162.15,1137.7,165.81,1133.24,165.81z"/>
<path class="st0" d="M1428.6,172H11.5c-6.3,0-11.4-5.1-11.4-11.4v-111c0-6.3,5.1-11.4,11.4-11.4h1417.2c6.3,0,11.4,5.1,11.4,11.4
v111C1440,166.9,1434.9,172,1428.6,172z"/>
<path class="st1" d="M1428.6,165.8H11.5c-6.3,0-11.4-5.1-11.4-11.4v-111C0.1,37.1,5.2,32,11.5,32h1417.2c6.3,0,11.4,5.1,11.4,11.4
v111C1440,160.7,1434.9,165.8,1428.6,165.8z"/>
<path class="st2" d="M1429.9,165.8H421.3c-6.3,0-11.5-3.6-11.5-8.1V40.1c0-4.5,5.1-8.1,11.5-8.1h1008.6c6.3,0,11.5,3.7,11.5,8.1
v117.6C1441.4,162.1,1436.2,165.8,1429.9,165.8z"/>
</svg>

(SVG modified: 1.4 KiB before, 1.3 KiB after)

@@ -0,0 +1,53 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 19.2.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 32 64" style="enable-background:new 0 0 32 64;" xml:space="preserve">
<style type="text/css">
.st0{fill:#168DB7;}
.st1{fill:#FFFFFF;}
.st2{opacity:0.63;fill:#58595B;enable-background:new ;}
</style>
<circle cx="15.8" cy="48.2" r="14.7"/>
<circle class="st0" cx="15.8" cy="47.6" r="14.7"/>
<circle cx="16.1" cy="44.9" r="3"/>
<path d="M18.2,50.2H14c-1.7,0-3.1,1.5-3.1,3.2V55c1.4,1.1,3.1,1.7,4.9,1.7c2.1,0,4-0.8,5.4-2.1v-1.2C21.3,51.7,19.9,50.2,18.2,50.2z
"/>
<circle cx="22.9" cy="44.9" r="1.6"/>
<path d="M24,47.9h-1.8c-0.4,0-0.8,0.2-1,0.4c0.5,0.3,1,0.7,1.4,1.1c0.5,0.5,0.7,1.2,0.7,1.9c0.1,0.4,0.1,0.8,0.1,1.2v0.3
c0.8-1,1.5-2.7,1.8-3.6v-0.1C25.2,48.8,24.6,47.9,24,47.9z"/>
<circle cx="9.1" cy="45.2" r="1.6"/>
<path d="M8.7,53.2c0-0.3,0-0.6,0-0.9v-0.1c0-0.1,0-0.2,0-0.3s0-0.1,0-0.2c0-0.1,0-0.1,0-0.2c0.1-0.9,0.7-2,1.4-2.5
c0.2-0.2,0.4-0.3,0.6-0.4c-0.2-0.2-0.6-0.3-0.9-0.3H8c-0.6,0-1.2,0.8-1.1,1.2v0.1C7.2,50.6,7.9,52.3,8.7,53.2z"/>
<circle class="st1" cx="16.1" cy="44.3" r="3"/>
<path class="st1" d="M18.2,49.6H14c-1.7,0-3.1,1.5-3.1,3.2v1.6c1.4,1.1,3.1,1.7,4.9,1.7c2.1,0,4-0.8,5.4-2.1v-1.2
C21.3,51.1,19.9,49.6,18.2,49.6z"/>
<circle class="st1" cx="22.9" cy="44.4" r="1.6"/>
<path class="st1" d="M24,47.4h-1.8c-0.4,0-0.8,0.2-1,0.4c0.5,0.3,1,0.7,1.4,1.1c0.5,0.5,0.7,1.2,0.7,1.9c0.1,0.4,0.1,0.8,0.1,1.2
v0.3c0.8-1,1.5-2.7,1.8-3.6v-0.1C25.2,48.2,24.6,47.4,24,47.4z"/>
<circle class="st1" cx="9.1" cy="44.6" r="1.6"/>
<path class="st1" d="M8.7,52.7c0-0.3,0-0.6,0-0.9v-0.1c0-0.1,0-0.2,0-0.3s0-0.1,0-0.2c0-0.1,0-0.1,0-0.2c0.1-0.9,0.7-2,1.4-2.5
c0.2-0.2,0.4-0.3,0.6-0.4c-0.2-0.2-0.6-0.3-0.9-0.3H8c-0.6,0-1.2,0.8-1.1,1.2v0.1C7.2,50,7.9,51.7,8.7,52.7z"/>
<path d="M15.9,3.4c-7,0-12.7,5.7-12.7,12.7s5.7,12.7,12.7,12.7s12.7-5.7,12.7-12.7S22.9,3.4,15.9,3.4z M15.9,27.2
c-6.1,0-11.1-5-11.1-11.1S9.8,5,15.9,5C22,4.9,27,9.9,27,16.1C27,22.2,22,27.2,15.9,27.2z"/>
<circle class="st2" cx="15.9" cy="15.5" r="12.4"/>
<path class="st1" d="M15.9,2.8c-7,0-12.7,5.7-12.7,12.7s5.7,12.7,12.7,12.7s12.7-5.7,12.7-12.7S22.9,2.8,15.9,2.8z M15.9,26.6
c-6.1,0-11.1-5-11.1-11.1s5-11.1,11.1-11.1C22,4.4,27,9.4,27,15.5S22,26.6,15.9,26.6z"/>
<circle cx="16.1" cy="12.9" r="3"/>
<path d="M18.2,18.2H14c-1.7,0-3.1,1.5-3.1,3.2V23c1.4,1.1,3.1,1.7,4.9,1.7c2.1,0,4-0.8,5.4-2.1v-1.2C21.3,19.7,19.9,18.2,18.2,18.2z
"/>
<circle cx="22.9" cy="12.9" r="1.6"/>
<path d="M24,15.9h-1.8c-0.4,0-0.8,0.2-1,0.4c0.5,0.3,1,0.7,1.4,1.1c0.5,0.5,0.7,1.2,0.7,1.9c0.1,0.4,0.1,0.8,0.1,1.2v0.3
c0.8-1,1.5-2.7,1.8-3.6v-0.1C25.2,16.8,24.6,15.9,24,15.9z"/>
<circle cx="9.1" cy="13.2" r="1.6"/>
<path d="M8.7,21.2c0-0.3,0-0.6,0-0.9v-0.1c0-0.1,0-0.2,0-0.3c0-0.1,0-0.1,0-0.2s0-0.1,0-0.2c0.1-0.9,0.7-2,1.4-2.5
c0.2-0.2,0.4-0.3,0.6-0.4c-0.2-0.2-0.6-0.3-0.9-0.3H8c-0.6,0-1.2,0.8-1.1,1.2v0.1C7.2,18.6,7.9,20.2,8.7,21.2z"/>
<circle class="st1" cx="16.1" cy="12.3" r="3"/>
<path class="st1" d="M18.2,17.6H14c-1.7,0-3.1,1.5-3.1,3.2v1.6c1.4,1.1,3.1,1.7,4.9,1.7c2.1,0,4-0.8,5.4-2.1v-1.2
C21.3,19.1,19.9,17.6,18.2,17.6z"/>
<circle class="st1" cx="22.9" cy="12.4" r="1.6"/>
<path class="st1" d="M24,15.4h-1.8c-0.4,0-0.8,0.2-1,0.4c0.5,0.3,1,0.7,1.4,1.1c0.5,0.5,0.7,1.2,0.7,1.9c0.1,0.4,0.1,0.8,0.1,1.2
v0.3c0.8-1,1.5-2.7,1.8-3.6v-0.1C25.2,16.2,24.6,15.4,24,15.4z"/>
<circle class="st1" cx="9.1" cy="12.6" r="1.6"/>
<path class="st1" d="M8.7,20.7c0-0.3,0-0.6,0-0.9v-0.1c0-0.1,0-0.2,0-0.3c0-0.1,0-0.1,0-0.2s0-0.1,0-0.2c0.1-0.9,0.7-2,1.4-2.5
c0.2-0.2,0.4-0.3,0.6-0.4c-0.2-0.2-0.6-0.3-0.9-0.3H8c-0.6,0-1.2,0.8-1.1,1.2v0.1C7.2,18,7.9,19.7,8.7,20.7z"/>
</svg>

(SVG added: 3.7 KiB)

(Binary image added, not shown: 74 KiB)

(Binary image modified, not shown: 106 KiB before, 59 KiB after)

@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 19.2.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 32 64" style="enable-background:new 0 0 32 64;" xml:space="preserve">
<style type="text/css">
.st0{opacity:0.64;fill:#58595B;}
.st1{fill:#FFFFFF;}
.st2{fill:#168DB7;}
</style>
<circle class="st0" cx="15.8" cy="15.5" r="12.5"/>
<path d="M15.8,3.4c-7,0-12.7,5.7-12.7,12.7s5.7,12.7,12.7,12.7c7,0,12.7-5.7,12.7-12.7S22.8,3.4,15.8,3.4z M15.8,27.3
c-6.2,0-11.2-5-11.2-11.2C4.6,10,9.7,5,15.8,5C22,5,27,10,27,16.1C27,22.3,22,27.3,15.8,27.3z"/>
<path class="st1" d="M15.8,2.8c-7,0-12.7,5.7-12.7,12.7s5.7,12.7,12.7,12.7c7,0,12.7-5.7,12.7-12.7S22.8,2.8,15.8,2.8z M15.8,26.7
c-6.2,0-11.2-5-11.2-11.2c0-6.2,5-11.2,11.2-11.2C22,4.4,27,9.4,27,15.6C27,21.7,22,26.7,15.8,26.7z"/>
<path d="M21.5,11.6H20V11c0-1-0.6-1.2-1.6-1.2h-5c-1,0-1.5,0.2-1.5,1.2v0.6h-1.5c-1,0-1.9,0.4-1.9,1.4v6.6c0,1,0.8,1.9,1.9,1.9h11
c1,0,1.7-1.1,1.7-2.1V13C23.3,12,22.5,11.6,21.5,11.6z M16.1,20.1c-2.3,0-4-1.8-4-4c0-2.3,1.8-4,4-4c2.3,0,4,1.8,4,4
S18.4,20.1,16.1,20.1z"/>
<circle cx="16.1" cy="16" r="2.4"/>
<path class="st1" d="M21.5,11H20v-0.6c0-1-0.6-1.2-1.6-1.2h-5c-1,0-1.5,0.2-1.5,1.2V11h-1.5c-1,0-1.9,0.4-1.9,1.4v6.6
c0,1,0.8,1.9,1.9,1.9h11c1,0,1.7-1.1,1.7-2.1v-6.4C23.3,11.5,22.5,11,21.5,11z M16.1,19.5c-2.3,0-4-1.8-4-4c0-2.3,1.8-4,4-4
c2.3,0,4,1.8,4,4S18.4,19.5,16.1,19.5z"/>
<circle class="st1" cx="16.1" cy="15.5" r="2.4"/>
<circle cx="15.8" cy="48.2" r="14.8"/>
<circle class="st2" cx="15.8" cy="47.6" r="14.8"/>
<path d="M21.5,43.6H20V43c0-1-0.6-1.2-1.6-1.2h-5c-1,0-1.5,0.2-1.5,1.2v0.6h-1.5c-1,0-1.9,0.4-1.9,1.4v6.6c0,1,0.8,1.9,1.9,1.9h11
c1,0,1.7-1.1,1.7-2.1V45C23.3,44.1,22.5,43.6,21.5,43.6z M16.1,52.1c-2.3,0-4-1.8-4-4c0-2.3,1.8-4,4-4c2.3,0,4,1.8,4,4
C20.1,50.2,18.4,52.1,16.1,52.1z"/>
<circle cx="16.1" cy="48" r="2.4"/>
<path class="st1" d="M21.5,43.1H20v-0.6c0-1-0.6-1.2-1.6-1.2h-5c-1,0-1.5,0.2-1.5,1.2v0.6h-1.5c-1,0-1.9,0.4-1.9,1.4v6.6
c0,1,0.8,1.9,1.9,1.9h11c1,0,1.7-1.1,1.7-2.1v-6.4C23.3,43.5,22.5,43.1,21.5,43.1z M16.1,51.5c-2.3,0-4-1.8-4-4c0-2.3,1.8-4,4-4
c2.3,0,4,1.8,4,4C20.1,49.7,18.4,51.5,16.1,51.5z"/>
<circle class="st1" cx="16.1" cy="47.5" r="2.4"/>
</svg>

(SVG added: 2.3 KiB)

@@ -50,16 +50,15 @@ Window {
function resetAfterTeleport() {
storyCardFrame.shown = root.shown = false;
}
function goCard(card) {
if (addressBarDialog.useFeed) {
storyCardHTML.url = addressBarDialog.metaverseServerUrl + "/user_stories/" + card.storyId;
function goCard(targetString) {
if (0 !== targetString.indexOf('hifi://')) {
storyCardHTML.url = addressBarDialog.metaverseServerUrl + targetString;
storyCardFrame.shown = true;
return;
}
addressLine.text = card.hifiUrl;
addressLine.text = targetString;
toggleOrGo(true);
}
property var allPlaces: [];
property var allStories: [];
property int cardWidth: 200;
property int cardHeight: 152;
@@ -72,7 +71,6 @@ Window {
// The buttons have their button state changed on hover, so we have to manually fix them up here
onBackEnabledChanged: backArrow.buttonState = addressBarDialog.backEnabled ? 1 : 0;
onForwardEnabledChanged: forwardArrow.buttonState = addressBarDialog.forwardEnabled ? 1 : 0;
onUseFeedChanged: updateFeedState();
onReceivedHifiSchemeURL: resetAfterTeleport();
ListModel { id: suggestions }
@@ -97,7 +95,6 @@ Window {
userName: model.username;
placeName: model.place_name;
hifiUrl: model.place_name + model.path;
imageUrl: model.image_url;
thumbnail: model.thumbnail_url;
action: model.action;
timestamp: model.created_at;
@@ -135,7 +132,10 @@ Window {
buttonState: 1
defaultState: 1
hoverState: 2
onClicked: addressBarDialog.loadHome();
onClicked: {
addressBarDialog.loadHome();
root.shown = false;
}
anchors {
left: parent.left
leftMargin: homeButton.width / 2
@@ -176,9 +176,9 @@ Window {
top: parent.top
bottom: parent.bottom
left: forwardArrow.right
right: placesButton.left
right: parent.right
leftMargin: forwardArrow.width
rightMargin: placesButton.width
rightMargin: forwardArrow.width / 2
topMargin: parent.inputAreaStep + hifi.layout.spacing
bottomMargin: parent.inputAreaStep + hifi.layout.spacing
}
@@ -188,32 +188,6 @@ Window {
helperItalic: true
onTextChanged: filterChoicesByText()
}
// These two are radio buttons.
ToolbarButton {
id: placesButton
imageURL: "../images/places.svg"
buttonState: 1
defaultState: addressBarDialog.useFeed ? 0 : 1;
hoverState: addressBarDialog.useFeed ? 2 : -1;
onClicked: addressBarDialog.useFeed ? toggleFeed() : identity()
anchors {
right: feedButton.left;
bottom: addressLine.bottom;
}
}
ToolbarButton {
id: feedButton;
imageURL: "../images/snap-feed.svg";
buttonState: 0
defaultState: addressBarDialog.useFeed ? 1 : 0;
hoverState: addressBarDialog.useFeed ? -1 : 2;
onClicked: addressBarDialog.useFeed ? identity() : toggleFeed();
anchors {
right: parent.right;
bottom: addressLine.bottom;
rightMargin: feedButton.width / 2
}
}
}
Window {
@@ -237,16 +211,6 @@ Window {
}
}
function toggleFeed() {
addressBarDialog.useFeed = !addressBarDialog.useFeed;
updateFeedState();
}
function updateFeedState() {
placesButton.buttonState = addressBarDialog.useFeed ? 0 : 1;
feedButton.buttonState = addressBarDialog.useFeed ? 1 : 0;
filterChoicesByText();
}
function getRequest(url, cb) { // cb(error, responseOfCorrectContentType) of url. General for 'get' text/html/json, but without redirects.
// TODO: make available to other .qml.
var request = new XMLHttpRequest();
@@ -271,37 +235,6 @@ Window {
request.open("GET", url, true);
request.send();
}
function asyncMap(array, iterator, cb) {
// call iterator(element, icb) once for each element of array, and then cb(error, mappedResult)
// when icb(error, mappedElement) has been called by each iterator.
// Calls to iterator are overlapped and may call icb in any order, but the mappedResults are collected in the same
// order as the elements of the array.
// Short-circuits if error. Note that iterator MUST be an asynchronous function. (Use setTimeout if necessary.)
var count = array.length, results = [];
if (!count) {
return cb(null, results);
}
array.forEach(function (element, index) {
if (count < 0) { // don't keep iterating after we short-circuit
return;
}
iterator(element, function (error, mapped) {
results[index] = mapped;
if (error || !--count) {
count = 0; // don't cb multiple times if error
cb(error, results);
}
});
});
}
// Example:
/*asyncMap([0, 1, 2, 3, 4, 5, 6], function (elt, icb) {
console.log('called', elt);
setTimeout(function () {
console.log('answering', elt);
icb(null, elt);
}, Math.random() * 1000);
}, console.log); */
function identity(x) {
return x;
@@ -321,131 +254,61 @@ Window {
cb(error);
return true;
}
function getPlace(placeData, cb) { // cb(error, side-effected-placeData), after adding path, thumbnails, and description
var url = metaverseBase + 'places/' + placeData.place_name;
getRequest(url, function (error, data) {
if (handleError(url, error, data, cb)) {
return;
}
var place = data.data.place, previews = place.previews;
placeData.path = place.path;
if (previews && previews.thumbnail) {
placeData.thumbnail_url = previews.thumbnail;
}
if (place.description) {
placeData.description = place.description;
placeData.searchText += ' ' + place.description.toUpperCase();
}
cb(error, placeData);
});
function resolveUrl(url) {
return (url.indexOf('/') === 0) ? (addressBarDialog.metaverseServerUrl + url) : url;
}
function makeModelData(data, optionalPlaceName) { // create a new obj from data
function makeModelData(data) { // create a new obj from data
// ListModel elements will only ever have those properties that are defined by the first obj that is added.
// So here we make sure that we have all the properties we need, regardless of whether it is place data or a user story.
var name = optionalPlaceName || data.place_name,
var name = data.place_name,
tags = data.tags || [data.action, data.username],
description = data.description || "",
thumbnail_url = data.thumbnail_url || "",
image_url = thumbnail_url;
if (data.details) {
try {
image_url = JSON.parse(data.details).image_url || thumbnail_url;
} catch (e) {
console.log(name, "has bad details", data.details);
}
}
thumbnail_url = data.thumbnail_url || "";
return {
place_name: name,
username: data.username || "",
path: data.path || "",
created_at: data.created_at || "",
action: data.action || "",
thumbnail_url: thumbnail_url,
image_url: image_url,
thumbnail_url: resolveUrl(thumbnail_url),
metaverseId: (data.id || "").toString(), // Some are strings from server while others are numbers. Model objects require uniformity.
tags: tags,
description: description,
online_users: data.online_users || 0,
online_users: data.details.concurrency || 0,
searchText: [name].concat(tags, description || []).join(' ').toUpperCase()
}
}
function mapDomainPlaces(domain, cb) { // cb(error, arrayOfDomainPlaceData)
function addPlace(name, icb) {
getPlace(makeModelData(domain, name), icb);
}
// IWBNI we could get these results in order with most-recent-entered first.
// In any case, we don't really need to preserve the domain.names order in the results.
asyncMap(domain.names || [], addPlace, cb);
}
function suggestable(place) {
if (addressBarDialog.useFeed) {
if (place.action === 'snapshot') {
return true;
}
return (place.place_name !== AddressManager.hostname) // Not our entry, but do show other entry points to current domain.
&& place.thumbnail_url
&& place.online_users // at least one present means it's actually online
&& place.online_users <= 20;
}
function getDomainPage(pageNumber, cb) { // cb(error) after all pages of domain data have been added to model
// Each page of results is processed completely before we start on the next page.
// For each page of domains, we process each domain in parallel, and for each domain, process each place name in parallel.
// This gives us minimum latency within the page, but we do preserve the order within the page by using asyncMap and
// only appending the collected results.
var params = [
'open', // published hours handle now
// TBD: should determine if place is actually running?
'restriction=open', // Not by whitelist, etc. TBD: If logged in, add hifi to the restriction options, in order to include places that require login?
// TBD: add maturity?
'protocol=' + encodeURIComponent(AddressManager.protocolVersion()),
'sort_by=users',
'sort_order=desc',
'page=' + pageNumber
];
var url = metaverseBase + 'domains/all?' + params.join('&');
getRequest(url, function (error, data) {
if (handleError(url, error, data, cb)) {
return;
}
asyncMap(data.data.domains, mapDomainPlaces, function (error, pageResults) {
if (error) {
return cb(error);
}
// pageResults is now [ [ placeDataOneForDomainOne, placeDataTwoForDomainOne, ...], [ placeDataTwoForDomainTwo...] ]
pageResults.forEach(function (domainResults) {
allPlaces = allPlaces.concat(domainResults);
if (!addressLine.text && !addressBarDialog.useFeed) { // Don't add if the user is already filtering
domainResults.forEach(function (place) {
if (suggestable(place)) {
suggestions.append(place);
}
});
}
});
if (data.current_page < data.total_pages) {
return getDomainPage(pageNumber + 1, cb);
}
cb();
});
});
return (place.place_name !== AddressManager.hostname); // Not our entry, but do show other entry points to current domain.
// could also require right protocolVersion
}
function getUserStoryPage(pageNumber, cb) { // cb(error) after all pages of domain data have been added to model
var url = metaverseBase + 'user_stories?page=' + pageNumber;
var options = [
'include_actions=snapshot,concurrency',
'protocol=' + encodeURIComponent(AddressManager.protocolVersion()),
'page=' + pageNumber
];
var url = metaverseBase + 'user_stories?' + options.join('&');
getRequest(url, function (error, data) {
if (handleError(url, error, data, cb)) {
return;
}
var stories = data.user_stories.map(function (story) { // explicit single-argument function
return makeModelData(story);
return makeModelData(story, url);
});
allStories = allStories.concat(stories);
if (!addressLine.text && addressBarDialog.useFeed) { // Don't add if the user is already filtering
if (!addressLine.text) { // Don't add if the user is already filtering
stories.forEach(function (story) {
suggestions.append(story);
if (suggestable(story)) {
suggestions.append(story);
}
});
}
if ((data.current_page < data.total_pages) && (data.current_page <= 10)) { // just 10 pages = 100 stories for now
@@ -457,7 +320,7 @@ Window {
function filterChoicesByText() {
suggestions.clear();
var words = addressLine.text.toUpperCase().split(/\s+/).filter(identity),
data = addressBarDialog.useFeed ? allStories : allPlaces;
data = allStories;
function matches(place) {
if (!words.length) {
return suggestable(place);
@@ -474,12 +337,8 @@ Window {
}
function fillDestinations() {
allPlaces = [];
allStories = [];
suggestions.clear();
getDomainPage(1, function (error) {
console.log('domain query', error || 'ok', allPlaces.length);
});
getUserStoryPage(1, function (error) {
console.log('user stories query', error || 'ok', allStories.length);
});

@@ -154,7 +154,6 @@ ScrollingWindow {
return i;
}
}
console.warn("Could not find tab for " + source);
return -1;
}
@@ -189,7 +188,6 @@ ScrollingWindow {
return i;
}
}
console.warn("Could not find free tab");
return -1;
}
@@ -216,7 +214,6 @@ ScrollingWindow {
var existingTabIndex = findIndexForUrl(properties.source);
if (existingTabIndex >= 0) {
console.log("Existing tab " + existingTabIndex + " found with URL " + properties.source);
var tab = tabView.getTab(existingTabIndex);
return tab.item;
}
@@ -239,16 +236,13 @@ ScrollingWindow {
var tab = tabView.getTab(freeTabIndex);
tab.title = properties.title || "Unknown";
tab.enabled = true;
console.log("New tab URL: " + properties.source)
tab.originalUrl = properties.source;
var eventBridge = properties.eventBridge;
console.log("Event bridge: " + eventBridge);
var result = tab.item;
result.enabled = true;
tabView.tabCount++;
console.log("Setting event bridge: " + eventBridge);
result.eventBridgeWrapper.eventBridge = eventBridge;
result.url = properties.source;
return result;

View file

@@ -44,7 +44,6 @@ Item {
webChannel.registeredObjects: [eventBridgeWrapper]
Component.onCompleted: {
console.log("Connecting JS messaging to Hifi Logging");
// Ensure the JS from the web-engine makes it to our logging
root.javaScriptConsoleMessage.connect(function(level, message, lineNumber, sourceID) {
console.log("Web Entity JS message: " + sourceID + " " + lineNumber + " " + message);

View file

@@ -14,6 +14,7 @@
import Hifi 1.0
import QtQuick 2.5
import QtGraphicalEffects 1.0
import "toolbars"
import "../styles-uit"
Rectangle {
@@ -23,16 +24,13 @@ Rectangle {
property string timestamp: "";
property string hifiUrl: "";
property string thumbnail: defaultThumbnail;
property string imageUrl: "";
property var goFunction: null;
property string storyId: "";
property string timePhrase: pastTime(timestamp);
property string actionPhrase: makeActionPhrase(action);
property int onlineUsers: 0;
property bool isUserStory: userName && !onlineUsers;
property int textPadding: 20;
property int textPadding: 10;
property int textSize: 24;
property int textSizeSmall: 18;
property string defaultThumbnail: Qt.resolvedUrl("../../images/default-domain.gif");
@@ -58,14 +56,6 @@ Rectangle {
}
return 'about a minute ago';
}
function makeActionPhrase(actionLabel) {
switch (actionLabel) {
case "snapshot":
return "took a snapshot";
default:
return "unknown"
}
}
Image {
id: lobby;
@@ -100,7 +90,7 @@ Rectangle {
spread: dropSpread;
}
DropShadow {
visible: desktop.gradientsSupported;
visible: users.visible && desktop.gradientsSupported;
source: users;
anchors.fill: users;
horizontalOffset: dropHorizontalOffset;
@@ -112,7 +102,7 @@ Rectangle {
}
RalewaySemiBold {
id: place;
text: isUserStory ? "" : placeName;
text: placeName;
color: hifi.colors.white;
size: textSize;
anchors {
@@ -121,14 +111,15 @@ Rectangle {
margins: textPadding;
}
}
RalewayRegular {
FiraSansRegular {
id: users;
text: isUserStory ? timePhrase : (onlineUsers + ((onlineUsers === 1) ? ' person' : ' people'));
size: textSizeSmall;
visible: action === 'concurrency';
text: onlineUsers;
size: textSize;
color: hifi.colors.white;
anchors {
bottom: parent.bottom;
right: parent.right;
verticalCenter: usersImage.verticalCenter;
right: usersImage.left;
margins: textPadding;
}
}
@@ -141,9 +132,23 @@ Rectangle {
id: zmouseArea;
anchors.fill: parent;
acceptedButtons: Qt.LeftButton;
onClicked: goFunction(parent);
onClicked: goFunction("hifi://" + hifiUrl);
hoverEnabled: true;
onEntered: hoverThunk();
onExited: unhoverThunk();
}
ToolbarButton {
id: usersImage;
imageURL: "../../images/" + action + ".svg";
size: 32;
onClicked: goFunction("/user_stories/" + storyId);
buttonState: 0;
defaultState: 0;
hoverState: 1;
anchors {
bottom: parent.bottom;
right: parent.right;
margins: textPadding;
}
}
}

View file

@@ -264,7 +264,7 @@ public:
auto elapsedMovingAverage = _movingAverage.getAverage();
if (elapsedMovingAverage > _maxElapsedAverage) {
qDebug() << "DEADLOCK WATCHDOG WARNING:"
qCDebug(interfaceapp_deadlock) << "DEADLOCK WATCHDOG WARNING:"
<< "lastHeartbeatAge:" << lastHeartbeatAge
<< "elapsedMovingAverage:" << elapsedMovingAverage
<< "maxElapsed:" << _maxElapsed
@@ -274,7 +274,7 @@ public:
_maxElapsedAverage = elapsedMovingAverage;
}
if (lastHeartbeatAge > _maxElapsed) {
qDebug() << "DEADLOCK WATCHDOG WARNING:"
qCDebug(interfaceapp_deadlock) << "DEADLOCK WATCHDOG WARNING:"
<< "lastHeartbeatAge:" << lastHeartbeatAge
<< "elapsedMovingAverage:" << elapsedMovingAverage
<< "PREVIOUS maxElapsed:" << _maxElapsed
@@ -284,7 +284,7 @@ public:
_maxElapsed = lastHeartbeatAge;
}
if (elapsedMovingAverage > WARNING_ELAPSED_HEARTBEAT) {
qDebug() << "DEADLOCK WATCHDOG WARNING:"
qCDebug(interfaceapp_deadlock) << "DEADLOCK WATCHDOG WARNING:"
<< "lastHeartbeatAge:" << lastHeartbeatAge
<< "elapsedMovingAverage:" << elapsedMovingAverage << "** OVER EXPECTED VALUE **"
<< "maxElapsed:" << _maxElapsed
@@ -293,7 +293,7 @@ public:
}
if (lastHeartbeatAge > MAX_HEARTBEAT_AGE_USECS) {
qDebug() << "DEADLOCK DETECTED -- "
qCDebug(interfaceapp_deadlock) << "DEADLOCK DETECTED -- "
<< "lastHeartbeatAge:" << lastHeartbeatAge
<< "[ lastHeartbeat :" << lastHeartbeat
<< "now:" << now << " ]"
@@ -562,6 +562,16 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
_deadlockWatchdogThread->start();
qCDebug(interfaceapp) << "[VERSION] Build sequence:" << qPrintable(applicationVersion());
qCDebug(interfaceapp) << "[VERSION] MODIFIED_ORGANIZATION:" << BuildInfo::MODIFIED_ORGANIZATION;
qCDebug(interfaceapp) << "[VERSION] VERSION:" << BuildInfo::VERSION;
qCDebug(interfaceapp) << "[VERSION] BUILD_BRANCH:" << BuildInfo::BUILD_BRANCH;
qCDebug(interfaceapp) << "[VERSION] BUILD_GLOBAL_SERVICES:" << BuildInfo::BUILD_GLOBAL_SERVICES;
#if USE_STABLE_GLOBAL_SERVICES
qCDebug(interfaceapp) << "[VERSION] We will use STABLE global services.";
#else
qCDebug(interfaceapp) << "[VERSION] We will use DEVELOPMENT global services.";
#endif
_bookmarks = new Bookmarks(); // Before setting up the menu
@@ -1029,7 +1039,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
cameraMenuChanged();
}
// set the local loopback interface for local sounds from audio scripts
// set the local loopback interface for local sounds
AudioInjector::setLocalAudioInterface(audioIO.data());
AudioScriptingInterface::getInstance().setLocalAudioInterface(audioIO.data());
this->installEventFilter(this);
@@ -1229,8 +1240,15 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
firstRun.set(false);
}
void Application::domainConnectionRefused(const QString& reasonMessage, int reasonCode) {
switch (static_cast<DomainHandler::ConnectionRefusedReason>(reasonCode)) {
void Application::domainConnectionRefused(const QString& reasonMessage, int reasonCodeInt, const QString& extraInfo) {
DomainHandler::ConnectionRefusedReason reasonCode = static_cast<DomainHandler::ConnectionRefusedReason>(reasonCodeInt);
if (reasonCode == DomainHandler::ConnectionRefusedReason::TooManyUsers && !extraInfo.isEmpty()) {
DependencyManager::get<AddressManager>()->handleLookupString(extraInfo);
return;
}
switch (reasonCode) {
case DomainHandler::ConnectionRefusedReason::ProtocolMismatch:
case DomainHandler::ConnectionRefusedReason::TooManyUsers:
case DomainHandler::ConnectionRefusedReason::Unknown: {
@@ -2005,7 +2023,7 @@ void Application::resizeGL() {
static qreal lastDevicePixelRatio = 0;
qreal devicePixelRatio = _window->devicePixelRatio();
if (offscreenUi->size() != fromGlm(uiSize) || devicePixelRatio != lastDevicePixelRatio) {
qDebug() << "Device pixel ratio changed, triggering resize to " << uiSize;
qCDebug(interfaceapp) << "Device pixel ratio changed, triggering resize to " << uiSize;
offscreenUi->resize(fromGlm(uiSize), true);
_offscreenContext->makeCurrent();
lastDevicePixelRatio = devicePixelRatio;
@@ -3260,17 +3278,17 @@ void Application::init() {
Setting::Handle<bool> firstRun { Settings::firstRun, true };
if (addressLookupString.isEmpty() && firstRun.get()) {
qDebug() << "First run and no URL passed... attempting to go to Home or Entry...";
qCDebug(interfaceapp) << "First run and no URL passed... attempting to go to Home or Entry...";
DependencyManager::get<AddressManager>()->ifLocalSandboxRunningElse([](){
qDebug() << "Home sandbox appears to be running, going to Home.";
qCDebug(interfaceapp) << "Home sandbox appears to be running, going to Home.";
DependencyManager::get<AddressManager>()->goToLocalSandbox();
},
[](){
qDebug() << "Home sandbox does not appear to be running, going to Entry.";
qCDebug(interfaceapp) << "Home sandbox does not appear to be running, going to Entry.";
DependencyManager::get<AddressManager>()->goToEntry();
});
} else {
qDebug() << "Not first run... going to" << qPrintable(addressLookupString.isEmpty() ? QString("previous location") : addressLookupString);
qCDebug(interfaceapp) << "Not first run... going to" << qPrintable(addressLookupString.isEmpty() ? QString("previous location") : addressLookupString);
DependencyManager::get<AddressManager>()->loadSettings(addressLookupString);
}
@@ -5617,7 +5635,7 @@ void Application::setActiveDisplayPlugin(const QString& pluginName) {
void Application::handleLocalServerConnection() const {
auto server = qobject_cast<QLocalServer*>(sender());
qDebug() << "Got connection on local server from additional instance - waiting for parameters";
qCDebug(interfaceapp) << "Got connection on local server from additional instance - waiting for parameters";
auto socket = server->nextPendingConnection();
@@ -5633,7 +5651,7 @@ void Application::readArgumentsFromLocalSocket() const {
auto message = socket->readAll();
socket->deleteLater();
qDebug() << "Read from connection: " << message;
qCDebug(interfaceapp) << "Read from connection: " << message;
// If we received a message, try to open it as a URL
if (message.length() > 0) {
@@ -5735,8 +5753,8 @@ void Application::updateThreadPoolCount() const {
auto reservedThreads = UI_RESERVED_THREADS + OS_RESERVED_THREADS + _displayPlugin->getRequiredThreadCount();
auto availableThreads = QThread::idealThreadCount() - reservedThreads;
auto threadPoolSize = std::max(MIN_PROCESSING_THREAD_POOL_SIZE, availableThreads);
qDebug() << "Ideal Thread Count " << QThread::idealThreadCount();
qDebug() << "Reserved threads " << reservedThreads;
qDebug() << "Setting thread pool size to " << threadPoolSize;
qCDebug(interfaceapp) << "Ideal Thread Count " << QThread::idealThreadCount();
qCDebug(interfaceapp) << "Reserved threads " << reservedThreads;
qCDebug(interfaceapp) << "Setting thread pool size to " << threadPoolSize;
QThreadPool::globalInstance()->setMaxThreadCount(threadPoolSize);
}

View file

@@ -44,12 +44,12 @@
#include <AbstractUriHandler.h>
#include <shared/RateCounter.h>
#include <ThreadSafeValueCache.h>
#include <shared/FileLogger.h>
#include "avatar/MyAvatar.h"
#include "Bookmarks.h"
#include "Camera.h"
#include "ConnectionMonitor.h"
#include "FileLogger.h"
#include "gpu/Context.h"
#include "Menu.h"
#include "octree/OctreePacketProcessor.h"
@@ -375,7 +375,7 @@ private slots:
void nodeKilled(SharedNodePointer node);
static void packetSent(quint64 length);
void updateDisplayMode();
void domainConnectionRefused(const QString& reasonMessage, int reason);
void domainConnectionRefused(const QString& reasonMessage, int reason, const QString& extraInfo);
private:
static void initDisplay();

View file

@@ -13,3 +13,4 @@
Q_LOGGING_CATEGORY(interfaceapp, "hifi.interface")
Q_LOGGING_CATEGORY(interfaceapp_timing, "hifi.interface.timing")
Q_LOGGING_CATEGORY(interfaceapp_deadlock, "hifi.interface.deadlock")

View file

@@ -16,5 +16,6 @@
Q_DECLARE_LOGGING_CATEGORY(interfaceapp)
Q_DECLARE_LOGGING_CATEGORY(interfaceapp_timing)
Q_DECLARE_LOGGING_CATEGORY(interfaceapp_deadlock)
#endif // hifi_InterfaceLogging_h
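These two headers carry the whole category plumbing that the qCDebug conversions in this commit rely on. As a reference, a self-contained sketch of the standard Qt logging-category pattern (the category name matches the one declared above; the filter rule is only an example):

#include <QLoggingCategory>

Q_DECLARE_LOGGING_CATEGORY(interfaceapp_deadlock)                    // in the header
Q_LOGGING_CATEGORY(interfaceapp_deadlock, "hifi.interface.deadlock") // in exactly one .cpp

void example() {
    // categories can be filtered at runtime without touching call sites
    QLoggingCategory::setFilterRules("hifi.interface.deadlock.debug=false");
    qCDebug(interfaceapp_deadlock) << "emitted only when the category is enabled";
}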

View file

@@ -303,12 +303,6 @@ Menu::Menu() {
DependencyManager::get<OffscreenUi>()->toggle(QString("hifi/dialogs/AvatarPreferencesDialog.qml"), "AvatarPreferencesDialog");
});
// Settings > Audio...
action = addActionToQMenuAndActionHash(settingsMenu, "Audio...");
connect(action, &QAction::triggered, [] {
DependencyManager::get<OffscreenUi>()->toggle(QString("hifi/dialogs/AudioPreferencesDialog.qml"), "AudioPreferencesDialog");
});
// Settings > LOD...
action = addActionToQMenuAndActionHash(settingsMenu, "LOD...");
connect(action, &QAction::triggered, [] {
@@ -584,6 +578,12 @@ Menu::Menu() {
// Developer > Audio >>>
MenuWrapper* audioDebugMenu = developerMenu->addMenu("Audio");
action = addActionToQMenuAndActionHash(audioDebugMenu, "Buffers...");
connect(action, &QAction::triggered, [] {
DependencyManager::get<OffscreenUi>()->toggle(QString("hifi/dialogs/AudioPreferencesDialog.qml"), "AudioPreferencesDialog");
});
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioNoiseReduction, 0, true,
audioIO.data(), SLOT(toggleAudioNoiseReduction()));
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoServerAudio, 0, false,

View file

@@ -244,14 +244,19 @@ void AvatarActionHold::doKinematicUpdate(float deltaTimeStep) {
// 3 -- ignore i of 0 1 2
// 4 -- ignore i of 1 2 3
// 5 -- ignore i of 2 3 4
if ((i + 1) % AvatarActionHold::velocitySmoothFrames == _measuredLinearVelocitiesIndex ||
(i + 2) % AvatarActionHold::velocitySmoothFrames == _measuredLinearVelocitiesIndex ||
(i + 3) % AvatarActionHold::velocitySmoothFrames == _measuredLinearVelocitiesIndex) {
continue;
}
// This code is now disabled, but I'm leaving it commented-out because I suspect it will come back.
// if ((i + 1) % AvatarActionHold::velocitySmoothFrames == _measuredLinearVelocitiesIndex ||
// (i + 2) % AvatarActionHold::velocitySmoothFrames == _measuredLinearVelocitiesIndex ||
// (i + 3) % AvatarActionHold::velocitySmoothFrames == _measuredLinearVelocitiesIndex) {
// continue;
// }
measuredLinearVelocity += _measuredLinearVelocities[i];
}
measuredLinearVelocity /= (float)(AvatarActionHold::velocitySmoothFrames - 3); // 3 because of the 3 we skipped, above
measuredLinearVelocity /= (float)(AvatarActionHold::velocitySmoothFrames
// - 3 // 3 because of the 3 we skipped, above
);
if (_kinematicSetVelocity) {
rigidBody->setLinearVelocity(glmToBullet(measuredLinearVelocity));
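The modular-index tests in the disabled block above are hard to read inline; here is the same scheme in isolation, as a hypothetical helper (names invented here) that averages a ring buffer while skipping the `skip` entries written most recently before the write index:

#include <glm/glm.hpp>

glm::vec3 smoothedVelocity(const glm::vec3* samples, int count, int writeIndex, int skip) {
    glm::vec3 sum(0.0f);
    int used = 0;
    for (int i = 0; i < count; i++) {
        bool recent = false;
        for (int j = 1; j <= skip; j++) {
            // entry i sits j slots behind the current write position
            if ((i + j) % count == writeIndex) { recent = true; break; }
        }
        if (!recent) {
            sum += samples[i];
            used++;
        }
    }
    return (used > 0) ? sum / (float)used : glm::vec3(0.0f);
}

With count = velocitySmoothFrames and skip = 3 this reproduces the commented-out tests above, including the (velocitySmoothFrames - 3) divisor via the `used` counter.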

View file

@@ -58,7 +58,7 @@ public slots:
signals:
void domainChanged(const QString& domainHostname);
void svoImportRequested(const QString& url);
void domainConnectionRefused(const QString& reasonMessage, int reasonCode);
void domainConnectionRefused(const QString& reasonMessage, int reasonCode, const QString& extraInfo);
void snapshotTaken(const QString& path, bool notify);
void snapshotShared(const QString& error);

View file

@@ -58,21 +58,19 @@ void AudioStatsDisplay::updatedDisplay(QString str) {
AudioStatsDialog::AudioStatsDialog(QWidget* parent) :
QDialog(parent, Qt::Window | Qt::WindowCloseButtonHint | Qt::WindowStaysOnTopHint) {
_shouldShowInjectedStreams = false;
setWindowTitle("Audio Network Statistics");
// Get statistics from the Audio Client
_stats = &DependencyManager::get<AudioClient>()->getStats();
// Create layout
_form = new QFormLayout();
_form->setSizeConstraint(QLayout::SetFixedSize);
QDialog::setLayout(_form);
// Load and initialize all channels
renderStats();
// Initialize channels' content (needed to correctly size channels)
updateStats();
// Create channels
_audioDisplayChannels = QVector<QVector<AudioStatsDisplay*>>(1);
_audioMixerID = addChannel(_form, _audioMixerStats, COLOR0);
@@ -80,9 +78,16 @@ AudioStatsDialog::AudioStatsDialog(QWidget* parent) :
_upstreamMixerID = addChannel(_form, _upstreamMixerStats, COLOR2);
_downstreamID = addChannel(_form, _downstreamStats, COLOR3);
_upstreamInjectedID = addChannel(_form, _upstreamInjectedStats, COLOR0);
connect(averageUpdateTimer, SIGNAL(timeout()), this, SLOT(updateTimerTimeout()));
averageUpdateTimer->start(1000);
// Initialize channels
updateChannels();
// Future renders
connect(averageUpdateTimer, SIGNAL(timeout()), this, SLOT(renderStats()));
averageUpdateTimer->start(200);
// Initial render
QDialog::setLayout(_form);
}
int AudioStatsDialog::addChannel(QFormLayout* form, QVector<QString>& stats, const unsigned color) {
@@ -99,148 +104,152 @@ int AudioStatsDialog::addChannel(QFormLayout* form, QVector<QString>& stats, con
return channelID;
}
void AudioStatsDialog::updateStats(QVector<QString>& stats, int channelID) {
void AudioStatsDialog::renderStats() {
updateStats();
updateChannels();
}
void AudioStatsDialog::updateChannels() {
updateChannel(_audioMixerStats, _audioMixerID);
updateChannel(_upstreamClientStats, _upstreamClientID);
updateChannel(_upstreamMixerStats, _upstreamMixerID);
updateChannel(_downstreamStats, _downstreamID);
updateChannel(_upstreamInjectedStats, _upstreamInjectedID);
}
void AudioStatsDialog::updateChannel(QVector<QString>& stats, int channelID) {
// Update all stat displays at specified channel
for (int i = 0; i < stats.size(); i++)
_audioDisplayChannels[channelID].at(i)->updatedDisplay(stats.at(i));
}
void AudioStatsDialog::renderStats() {
void AudioStatsDialog::updateStats() {
// Clear current stats from all vectors
clearAllChannels();
double audioInputBufferLatency = 0.0,
inputRingBufferLatency = 0.0,
networkRoundtripLatency = 0.0,
mixerRingBufferLatency = 0.0,
outputRingBufferLatency = 0.0,
audioOutputBufferLatency = 0.0;
double audioInputBufferLatency{ 0.0 };
double inputRingBufferLatency{ 0.0 };
double networkRoundtripLatency{ 0.0 };
double mixerRingBufferLatency{ 0.0 };
double outputRingBufferLatency{ 0.0 };
double audioOutputBufferLatency{ 0.0 };
AudioStreamStats downstreamAudioStreamStats = _stats->getMixerDownstreamStats();
SharedNodePointer audioMixerNodePointer = DependencyManager::get<NodeList>()->soloNodeOfType(NodeType::AudioMixer);
if (!audioMixerNodePointer.isNull()) {
audioInputBufferLatency = (double)_stats->getAudioInputMsecsReadStats().getWindowAverage();
inputRingBufferLatency = (double)_stats->getInputRungBufferMsecsAvailableStats().getWindowAverage();
networkRoundtripLatency = (double) audioMixerNodePointer->getPingMs();
mixerRingBufferLatency = (double)_stats->getMixerAvatarStreamStats()._framesAvailableAverage *
(double)AudioConstants::NETWORK_FRAME_MSECS;
outputRingBufferLatency = (double)downstreamAudioStreamStats._framesAvailableAverage *
(double)AudioConstants::NETWORK_FRAME_MSECS;
audioOutputBufferLatency = (double)_stats->getAudioOutputMsecsUnplayedStats().getWindowAverage();
if (SharedNodePointer audioMixerNodePointer = DependencyManager::get<NodeList>()->soloNodeOfType(NodeType::AudioMixer)) {
audioInputBufferLatency = (double)_stats->getInputMsRead().getWindowMax();
inputRingBufferLatency = (double)_stats->getInputMsUnplayed().getWindowMax();
networkRoundtripLatency = (double)audioMixerNodePointer->getPingMs();
mixerRingBufferLatency = (double)_stats->getMixerAvatarStreamStats()._unplayedMs;
outputRingBufferLatency = (double)_stats->getMixerDownstreamStats()._unplayedMs;
audioOutputBufferLatency = (double)_stats->getOutputMsUnplayed().getWindowMax();
}
double totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency
+ outputRingBufferLatency + audioOutputBufferLatency;
QString stats = "Audio input buffer: %1ms - avg msecs of samples read to the audio input buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(audioInputBufferLatency, 'f', 2)));
double totalLatency = audioInputBufferLatency + inputRingBufferLatency + mixerRingBufferLatency
+ outputRingBufferLatency + audioOutputBufferLatency + networkRoundtripLatency;
stats = "Input ring buffer: %1ms - avg msecs of samples read to the input ring buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(inputRingBufferLatency, 'f', 2)));
stats = "Network to mixer: %1ms - half of last ping value calculated by the node list";
_audioMixerStats.push_back(stats.arg(QString::number((networkRoundtripLatency / 2.0), 'f', 2)));
stats = "Network to client: %1ms - half of last ping value calculated by the node list";
_audioMixerStats.push_back(stats.arg(QString::number((mixerRingBufferLatency / 2.0),'f', 2)));
stats = "Output ring buffer: %1ms - avg msecs of samples in output ring buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency,'f', 2)));
stats = "Audio output buffer: %1ms - avg msecs of samples in audio output buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency,'f', 2)));
stats = "TOTAL: %1ms - avg msecs of samples in audio output buffer in last 10s";
_audioMixerStats.push_back(stats.arg(QString::number(totalLatency, 'f', 2)));
QString stats;
_audioMixerStats.push_back("PIPELINE (averaged over the past 10s)");
stats = "Input Read:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(audioInputBufferLatency, 'f', 0)));
stats = "Input Ring:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(inputRingBufferLatency, 'f', 0)));
stats = "Network (client->mixer):\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0)));
stats = "Mixer Ring:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency, 'f', 0)));
stats = "Network (mixer->client):\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0)));
stats = "Output Ring:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency, 'f', 0)));
stats = "Output Read:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(audioOutputBufferLatency, 'f', 0)));
stats = "TOTAL:\t%1 ms";
_audioMixerStats.push_back(stats.arg(QString::number(totalLatency, 'f', 0)));
const MovingMinMaxAvg<quint64>& packetSentTimeGaps = _stats->getPacketSentTimeGaps();
const MovingMinMaxAvg<quint64>& packetSentTimeGaps = _stats->getPacketTimegaps();
_upstreamClientStats.push_back("\nUpstream Mic Audio Packets Sent Gaps (by client):");
stats = "Inter-packet timegaps (overall) | min: %1, max: %2, avg: %3";
stats = "Inter-packet timegaps";
_upstreamClientStats.push_back(stats);
stats = "overall min:\t%1, max:\t%2, avg:\t%3";
stats = stats.arg(formatUsecTime(packetSentTimeGaps.getMin()),
formatUsecTime(packetSentTimeGaps.getMax()),
formatUsecTime(packetSentTimeGaps.getAverage()));
_upstreamClientStats.push_back(stats);
stats = "Inter-packet timegaps (last 30s) | min: %1, max: %2, avg: %3";
stats = "last window min:\t%1, max:\t%2, avg:\t%3";
stats = stats.arg(formatUsecTime(packetSentTimeGaps.getWindowMin()),
formatUsecTime(packetSentTimeGaps.getWindowMax()),
formatUsecTime(packetSentTimeGaps.getWindowAverage()));
_upstreamClientStats.push_back(stats);
_upstreamMixerStats.push_back("\nUpstream mic audio stats (received and reported by audio-mixer):");
_upstreamMixerStats.push_back("\nMIXER STREAM");
_upstreamMixerStats.push_back("(this client's remote mixer stream performance)");
renderAudioStreamStats(&_stats->getMixerAvatarStreamStats(), &_upstreamMixerStats, true);
renderAudioStreamStats(&_stats->getMixerAvatarStreamStats(), &_upstreamMixerStats);
_downstreamStats.push_back("\nDownstream mixed audio stats:");
_downstreamStats.push_back("\nCLIENT STREAM");
AudioStreamStats downstreamStats = _stats->getMixerDownstreamStats();
renderAudioStreamStats(&downstreamStats, &_downstreamStats, true);
renderAudioStreamStats(&downstreamStats, &_downstreamStats);
if (_shouldShowInjectedStreams) {
foreach(const AudioStreamStats& injectedStreamAudioStats, _stats->getMixerInjectedStreamStatsMap()) {
stats = "\nUpstream injected audio stats: stream ID: %1";
stats = "\nINJECTED STREAM (ID: %1)";
stats = stats.arg(injectedStreamAudioStats._streamIdentifier.toString());
_upstreamInjectedStats.push_back(stats);
renderAudioStreamStats(&injectedStreamAudioStats, &_upstreamInjectedStats, true);
renderAudioStreamStats(&injectedStreamAudioStats, &_upstreamInjectedStats);
}
}
}
void AudioStatsDialog::renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamStats, bool isDownstreamStats) {
void AudioStatsDialog::renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamStats) {
QString stats = "Packet loss | overall: %1% (%2 lost), last_30s: %3% (%4 lost)";
QString stats = "Packet Loss";
audioStreamStats->push_back(stats);
stats = "overall:\t%1%\t(%2 lost), window:\t%3%\t(%4 lost)";
stats = stats.arg(QString::number((int)(streamStats->_packetStreamStats.getLostRate() * 100.0f)),
QString::number((int)(streamStats->_packetStreamStats._lost)),
QString::number((int)(streamStats->_packetStreamWindowStats.getLostRate() * 100.0f)),
QString::number((int)(streamStats->_packetStreamWindowStats._lost)));
QString::number((int)(streamStats->_packetStreamStats._lost)),
QString::number((int)(streamStats->_packetStreamWindowStats.getLostRate() * 100.0f)),
QString::number((int)(streamStats->_packetStreamWindowStats._lost)));
audioStreamStats->push_back(stats);
if (isDownstreamStats) {
stats = "Ringbuffer frames | desired: %1, avg_available(10s): %2 + %3, available: %4 + %5";
stats = stats.arg(QString::number(streamStats->_desiredJitterBufferFrames),
QString::number(streamStats->_framesAvailableAverage),
QString::number((int)((float)_stats->getAudioInputMsecsReadStats().getWindowAverage() /
AudioConstants::NETWORK_FRAME_MSECS)),
QString::number(streamStats->_framesAvailable),
QString::number((int)(_stats->getAudioOutputMsecsUnplayedStats().getCurrentIntervalLastSample() /
AudioConstants::NETWORK_FRAME_MSECS)));
audioStreamStats->push_back(stats);
} else {
stats = "Ringbuffer frames | desired: %1, avg_available(10s): %2, available: %3";
stats = stats.arg(QString::number(streamStats->_desiredJitterBufferFrames),
QString::number(streamStats->_framesAvailableAverage),
QString::number(streamStats->_framesAvailable));
audioStreamStats->push_back(stats);
}
stats = "Ringbuffer stats | starves: %1, prev_starve_lasted: %2, frames_dropped: %3, overflows: %4";
stats = "Ringbuffer";
audioStreamStats->push_back(stats);
stats = "available frames (avg):\t%1\t(%2), desired:\t%3";
stats = stats.arg(QString::number(streamStats->_framesAvailable),
QString::number(streamStats->_framesAvailableAverage),
QString::number(streamStats->_desiredJitterBufferFrames));
audioStreamStats->push_back(stats);
stats = "starves:\t%1, last starve duration:\t%2, drops:\t%3, overflows:\t%4";
stats = stats.arg(QString::number(streamStats->_starveCount),
QString::number(streamStats->_consecutiveNotMixedCount),
QString::number(streamStats->_framesDropped),
QString::number(streamStats->_overflowCount));
QString::number(streamStats->_consecutiveNotMixedCount),
QString::number(streamStats->_framesDropped),
QString::number(streamStats->_overflowCount));
audioStreamStats->push_back(stats);
stats = "Inter-packet timegaps";
audioStreamStats->push_back(stats);
stats = "Inter-packet timegaps (overall) | min: %1, max: %2, avg: %3";
stats = "overall min:\t%1, max:\t%2, avg:\t%3";
stats = stats.arg(formatUsecTime(streamStats->_timeGapMin),
formatUsecTime(streamStats->_timeGapMax),
formatUsecTime(streamStats->_timeGapAverage));
formatUsecTime(streamStats->_timeGapMax),
formatUsecTime(streamStats->_timeGapAverage));
audioStreamStats->push_back(stats);
stats = "Inter-packet timegaps (last 30s) | min: %1, max: %2, avg: %3";
stats = "last window min:\t%1, max:\t%2, avg:\t%3";
stats = stats.arg(formatUsecTime(streamStats->_timeGapWindowMin),
formatUsecTime(streamStats->_timeGapWindowMax),
formatUsecTime(streamStats->_timeGapWindowAverage));
formatUsecTime(streamStats->_timeGapWindowMax),
formatUsecTime(streamStats->_timeGapWindowAverage));
audioStreamStats->push_back(stats);
}
void AudioStatsDialog::clearAllChannels() {
@@ -251,21 +260,6 @@ void AudioStatsDialog::clearAllChannels() {
_upstreamInjectedStats.clear();
}
void AudioStatsDialog::updateTimerTimeout() {
renderStats();
// Update all audio stats
updateStats(_audioMixerStats, _audioMixerID);
updateStats(_upstreamClientStats, _upstreamClientID);
updateStats(_upstreamMixerStats, _upstreamMixerID);
updateStats(_downstreamStats, _downstreamID);
updateStats(_upstreamInjectedStats, _upstreamInjectedID);
}
void AudioStatsDialog::paintEvent(QPaintEvent* event) {
// Repaint each stat in each channel

View file

@@ -70,18 +70,18 @@ private:
QVector<QVector<AudioStatsDisplay*>> _audioDisplayChannels;
void updateStats();
int addChannel(QFormLayout* form, QVector<QString>& stats, const unsigned color);
void updateStats(QVector<QString>& stats, const int channelID);
void renderStats();
void updateChannel(QVector<QString>& stats, const int channelID);
void updateChannels();
void clearAllChannels();
void renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamstats, bool isDownstreamStats);
void renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamstats);
const AudioIOStats* _stats;
QFormLayout* _form;
bool _isEnabled;
bool _shouldShowInjectedStreams;
bool _shouldShowInjectedStreams{ false };
signals:
@@ -93,7 +93,7 @@ signals:
void reject() override;
void updateTimerTimeout();
void renderStats();
protected:

View file

@@ -58,7 +58,10 @@ void CachesSizeDialog::confirmClicked(bool checked) {
DependencyManager::get<AnimationCache>()->setUnusedResourceCacheSize(_animations->value() * BYTES_PER_MEGABYTES);
DependencyManager::get<ModelCache>()->setUnusedResourceCacheSize(_geometries->value() * BYTES_PER_MEGABYTES);
DependencyManager::get<SoundCache>()->setUnusedResourceCacheSize(_sounds->value() * BYTES_PER_MEGABYTES);
// Disabling the texture cache because it's a liability in cases where we're overcommitting GPU memory
#if 0
DependencyManager::get<TextureCache>()->setUnusedResourceCacheSize(_textures->value() * BYTES_PER_MEGABYTES);
#endif
QDialog::close();
}
@@ -78,4 +81,4 @@ void CachesSizeDialog::reject() {
void CachesSizeDialog::closeEvent(QCloseEvent* event) {
QDialog::closeEvent(event);
emit closed();
}
}

View file

@@ -20,7 +20,7 @@
#include <QCheckBox>
#include <QSyntaxHighlighter>
#include "AbstractLoggerInterface.h"
#include <shared/AbstractLoggerInterface.h>
class KeywordHighlighter : public QSyntaxHighlighter {
Q_OBJECT

View file

@@ -188,102 +188,48 @@ void setupPreferences() {
static const QString AUDIO("Audio");
{
auto getter = []()->bool { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getDynamicJitterBuffers(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setDynamicJitterBuffers(value); };
preferences->addPreference(new CheckPreference(AUDIO, "Enable dynamic jitter buffers", getter, setter));
auto getter = []()->bool { return !DependencyManager::get<AudioClient>()->getReceivedAudioStream().dynamicJitterBufferEnabled(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setDynamicJitterBufferEnabled(!value); };
auto preference = new CheckPreference(AUDIO, "Disable dynamic jitter buffer", getter, setter);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getDesiredJitterBufferFrames(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setStaticDesiredJitterBufferFrames(value); };
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getStaticJitterBufferFrames(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setStaticJitterBufferFrames(value); };
auto preference = new SpinnerPreference(AUDIO, "Static jitter buffer frames", getter, setter);
preference->setMin(0);
preference->setMax(10000);
preference->setMax(2000);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getMaxFramesOverDesired(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setMaxFramesOverDesired(value); };
auto preference = new SpinnerPreference(AUDIO, "Max frames over desired", getter, setter);
preference->setMax(10000);
preference->setStep(1);
auto getter = []()->bool { return !DependencyManager::get<AudioClient>()->getOutputStarveDetectionEnabled(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->setOutputStarveDetectionEnabled(!value); };
auto preference = new CheckPreference(AUDIO, "Disable output starve detection", getter, setter);
preferences->addPreference(preference);
}
{
auto getter = []()->bool { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getUseStDevForJitterCalc(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setUseStDevForJitterCalc(value); };
preferences->addPreference(new CheckPreference(AUDIO, "Use standard deviation for dynamic jitter calc", getter, setter));
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getWindowStarveThreshold(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setWindowStarveThreshold(value); };
auto preference = new SpinnerPreference(AUDIO, "Window A starve threshold", getter, setter);
preference->setMax(10000);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getWindowSecondsForDesiredCalcOnTooManyStarves(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setWindowSecondsForDesiredCalcOnTooManyStarves(value); };
auto preference = new SpinnerPreference(AUDIO, "Window A (raise desired on N starves) seconds", getter, setter);
preference->setMax(10000);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getWindowSecondsForDesiredReduction(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setWindowSecondsForDesiredReduction(value); };
auto preference = new SpinnerPreference(AUDIO, "Window B (desired ceiling) seconds", getter, setter);
preference->setMax(10000);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->bool { return DependencyManager::get<AudioClient>()->getReceivedAudioStream().getRepetitionWithFade(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->getReceivedAudioStream().setRepetitionWithFade(value); };
preferences->addPreference(new CheckPreference(AUDIO, "Repetition with fade", getter, setter));
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getOutputBufferSize(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->setOutputBufferSize(value); };
auto preference = new SpinnerPreference(AUDIO, "Output buffer initial size (frames)", getter, setter);
preference->setMin(1);
preference->setMax(20);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->bool { return DependencyManager::get<AudioClient>()->getOutputStarveDetectionEnabled(); };
auto setter = [](bool value) { DependencyManager::get<AudioClient>()->setOutputStarveDetectionEnabled(value); };
auto preference = new CheckPreference(AUDIO, "Output starve detection (automatic buffer size increase)", getter, setter);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getOutputStarveDetectionThreshold(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->setOutputStarveDetectionThreshold(value); };
auto preference = new SpinnerPreference(AUDIO, "Output starve detection threshold", getter, setter);
preference->setMin(1);
preference->setMax(500);
preference->setStep(1);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getOutputStarveDetectionPeriod(); };
auto setter = [](float value) { DependencyManager::get<AudioClient>()->setOutputStarveDetectionPeriod(value); };
auto preference = new SpinnerPreference(AUDIO, "Output starve detection period (ms)", getter, setter);
preference->setMin(1);
preference->setMax((float)999999999);
auto preference = new SpinnerPreference(AUDIO, "Output buffer initial frames", getter, setter);
preference->setMin(AudioClient::MIN_BUFFER_FRAMES);
preference->setMax(AudioClient::MAX_BUFFER_FRAMES);
preference->setStep(1);
preferences->addPreference(preference);
}
#if DEV_BUILD || PR_BUILD
{
auto getter = []()->bool { return DependencyManager::get<AudioClient>()->isSimulatingJitter(); };
auto setter = [](bool value) { return DependencyManager::get<AudioClient>()->setIsSimulatingJitter(value); };
auto preference = new CheckPreference(AUDIO, "Packet jitter simulator", getter, setter);
preferences->addPreference(preference);
}
{
auto getter = []()->float { return DependencyManager::get<AudioClient>()->getGateThreshold(); };
auto setter = [](float value) { return DependencyManager::get<AudioClient>()->setGateThreshold(value); };
auto preference = new SpinnerPreference(AUDIO, "Debug gate threshold", getter, setter);
auto preference = new SpinnerPreference(AUDIO, "Packet throttle threshold", getter, setter);
preference->setMin(1);
preference->setMax((float)100);
preference->setMax(200);
preference->setStep(1);
preferences->addPreference(preference);
}
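Every block in this hunk follows one recipe: capture the AudioClient through DependencyManager in a getter and a setter lambda, wrap the pair in a CheckPreference or SpinnerPreference, and register it. A reduced sketch of that recipe with a hypothetical preference type (the real classes live in the preferences framework):

#include <functional>
#include <string>
#include <vector>

struct BoolPreference {
    std::string category;
    std::string label;
    std::function<bool()> getter;     // reads the live setting on demand
    std::function<void(bool)> setter; // writes it back when the UI changes
};

std::vector<BoolPreference> gPreferences;

void registerExample(bool* someSetting) {
    auto getter = [someSetting]() -> bool { return *someSetting; };
    auto setter = [someSetting](bool value) { *someSetting = value; };
    gPreferences.push_back({ "Audio", "Example toggle", getter, setter });
}

The lambdas keep the preference object itself stateless, which is why the inverted "Disable ..." checkboxes above can simply negate inside the getter and setter.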

View file

@@ -25,7 +25,7 @@
#include <AddressManager.h>
#include <avatar/AvatarManager.h>
#include <avatar/MyAvatar.h>
#include <FileUtils.h>
#include <shared/FileUtils.h>
#include <NodeList.h>
#include <OffscreenUi.h>
#include <SharedUtil.h>

View file

@@ -47,27 +47,28 @@
#include "PositionalAudioStream.h"
#include "AudioClientLogging.h"
#include "AudioLogging.h"
#include "AudioClient.h"
const int AudioClient::MIN_BUFFER_FRAMES = 1;
const int AudioClient::MAX_BUFFER_FRAMES = 20;
static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 100;
static const auto DEFAULT_POSITION_GETTER = []{ return Vectors::ZERO; };
static const auto DEFAULT_ORIENTATION_GETTER = [] { return Quaternions::IDENTITY; };
static const int DEFAULT_AUDIO_OUTPUT_GATE_THRESHOLD = 1;
static const int DEFAULT_BUFFER_FRAMES = 1;
Setting::Handle<bool> dynamicJitterBuffers("dynamicJitterBuffers", DEFAULT_DYNAMIC_JITTER_BUFFERS);
Setting::Handle<int> maxFramesOverDesired("maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED);
Setting::Handle<int> staticDesiredJitterBufferFrames("staticDesiredJitterBufferFrames",
DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES);
Setting::Handle<bool> useStDevForJitterCalc("useStDevForJitterCalc", DEFAULT_USE_STDEV_FOR_JITTER_CALC);
Setting::Handle<int> windowStarveThreshold("windowStarveThreshold", DEFAULT_WINDOW_STARVE_THRESHOLD);
Setting::Handle<int> windowSecondsForDesiredCalcOnTooManyStarves("windowSecondsForDesiredCalcOnTooManyStarves",
DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES);
Setting::Handle<int> windowSecondsForDesiredReduction("windowSecondsForDesiredReduction",
DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION);
Setting::Handle<bool> repetitionWithFade("repetitionWithFade", DEFAULT_REPETITION_WITH_FADE);
static const bool DEFAULT_STARVE_DETECTION_ENABLED = true;
static const int STARVE_DETECTION_THRESHOLD = 3;
static const int STARVE_DETECTION_PERIOD = 10 * 1000; // 10 Seconds
Setting::Handle<bool> dynamicJitterBufferEnabled("dynamicJitterBuffersEnabled",
InboundAudioStream::DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED);
Setting::Handle<int> staticJitterBufferFrames("staticJitterBufferFrames",
InboundAudioStream::DEFAULT_STATIC_JITTER_FRAMES);
// protect the Qt internal device list
using Mutex = std::mutex;
@@ -101,8 +102,7 @@ private:
AudioClient::AudioClient() :
AbstractAudioInterface(),
_gateThreshold("audioOutputGateThreshold", DEFAULT_AUDIO_OUTPUT_GATE_THRESHOLD),
_gate(this, _gateThreshold.get()),
_gate(this),
_audioInput(NULL),
_desiredInputFormat(),
_inputFormat(),
@@ -115,19 +115,13 @@ AudioClient::AudioClient() :
_loopbackAudioOutput(NULL),
_loopbackOutputDevice(NULL),
_inputRingBuffer(0),
_receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, InboundAudioStream::Settings()),
_receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES),
_isStereoInput(false),
_outputStarveDetectionStartTimeMsec(0),
_outputStarveDetectionCount(0),
_outputBufferSizeFrames("audioOutputBufferSizeFrames", DEFAULT_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES),
_outputBufferSizeFrames("audioOutputBufferFrames", DEFAULT_BUFFER_FRAMES),
_sessionOutputBufferSizeFrames(_outputBufferSizeFrames.get()),
_outputStarveDetectionEnabled("audioOutputBufferStarveDetectionEnabled",
DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_ENABLED),
_outputStarveDetectionPeriodMsec("audioOutputStarveDetectionPeriod",
DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_PERIOD),
_outputStarveDetectionThreshold("audioOutputStarveDetectionThreshold",
DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_THRESHOLD),
_averagedLatency(0.0f),
_outputStarveDetectionEnabled("audioOutputStarveDetectionEnabled", DEFAULT_STARVE_DETECTION_ENABLED),
_lastInputLoudness(0.0f),
_timeSinceLastClip(-1.0f),
_muted(false),
@@ -144,10 +138,16 @@ AudioClient::AudioClient() :
_stats(&_receivedAudioStream),
_inputGate(),
_positionGetter(DEFAULT_POSITION_GETTER),
_orientationGetter(DEFAULT_ORIENTATION_GETTER)
{
// clear the array of locally injected samples
memset(_localProceduralSamples, 0, AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL);
_orientationGetter(DEFAULT_ORIENTATION_GETTER) {
// deprecate legacy settings
{
Setting::Handle<int>::Deprecated("maxFramesOverDesired", InboundAudioStream::MAX_FRAMES_OVER_DESIRED);
Setting::Handle<int>::Deprecated("windowStarveThreshold", InboundAudioStream::WINDOW_STARVE_THRESHOLD);
Setting::Handle<int>::Deprecated("windowSecondsForDesiredCalcOnTooManyStarves", InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES);
Setting::Handle<int>::Deprecated("windowSecondsForDesiredReduction", InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_REDUCTION);
Setting::Handle<bool>::Deprecated("useStDevForJitterCalc", InboundAudioStream::USE_STDEV_FOR_JITTER);
Setting::Handle<bool>::Deprecated("repetitionWithFade", InboundAudioStream::REPETITION_WITH_FADE);
}
connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples,
this, &AudioClient::processReceivedSamples, Qt::DirectConnection);
@@ -185,7 +185,7 @@ AudioClient::~AudioClient() {
}
void AudioClient::handleMismatchAudioFormat(SharedNodePointer node, const QString& currentCodec, const QString& recievedCodec) {
qDebug() << __FUNCTION__ << "sendingNode:" << *node << "currentCodec:" << currentCodec << "recievedCodec:" << recievedCodec;
qCDebug(audioclient) << __FUNCTION__ << "sendingNode:" << *node << "currentCodec:" << currentCodec << "recievedCodec:" << recievedCodec;
selectAudioFormat(recievedCodec);
}
@@ -374,7 +374,8 @@ bool adjustedFormatForAudioDevice(const QAudioDeviceInfo& audioDevice,
adjustedAudioFormat = desiredAudioFormat;
#ifdef Q_OS_ANDROID
adjustedAudioFormat.setSampleRate(44100);
// FIXME: query the native sample rate of the device?
adjustedAudioFormat.setSampleRate(48000);
#else
//
@@ -443,7 +444,7 @@ void possibleResampling(AudioSRC* resampler,
if (!sampleChannelConversion(sourceSamples, destinationSamples, numSourceSamples,
sourceAudioFormat, destinationAudioFormat)) {
// no conversion, we can copy the samples directly across
memcpy(destinationSamples, sourceSamples, numSourceSamples * sizeof(int16_t));
memcpy(destinationSamples, sourceSamples, numSourceSamples * AudioConstants::SAMPLE_SIZE);
}
} else {
@@ -553,31 +554,53 @@ void AudioClient::handleAudioDataPacket(QSharedPointer<ReceivedMessage> message)
}
}
AudioClient::Gate::Gate(AudioClient* audioClient, int threshold) :
_audioClient(audioClient),
_threshold(threshold) {}
AudioClient::Gate::Gate(AudioClient* audioClient) :
_audioClient(audioClient) {}
void AudioClient::Gate::setIsSimulatingJitter(bool enable) {
std::lock_guard<std::mutex> lock(_mutex);
flush();
_isSimulatingJitter = enable;
}
void AudioClient::Gate::setThreshold(int threshold) {
std::lock_guard<std::mutex> lock(_mutex);
flush();
_threshold = std::max(threshold, 1);
}
void AudioClient::Gate::insert(QSharedPointer<ReceivedMessage> message) {
std::lock_guard<std::mutex> lock(_mutex);
// Short-circuit for normal behavior
if (_threshold == 1) {
if (_threshold == 1 && !_isSimulatingJitter) {
_audioClient->_receivedAudioStream.parseData(*message);
return;
}
// Throttle the current packet until the next flush
_queue.push(message);
_index++;
if (_index % _threshold == 0) {
// When appropriate, flush all held packets to the received audio stream
if (_isSimulatingJitter) {
// The JITTER_FLUSH_CHANCE defines the discrete probability density function of jitter (ms),
// where f(t) = pow(1 - JITTER_FLUSH_CHANCE, t / 10) * JITTER_FLUSH_CHANCE
// for t (ms) = 10, 20, ... (because typical packet timegap is 10ms),
// because there is a JITTER_FLUSH_CHANCE of any packet instigating a flush of all held packets.
static const float JITTER_FLUSH_CHANCE = 0.6f;
// It is set at 0.6 to give a low chance of spikes (>30ms, 2.56%) so that they are obvious,
// but settle within the measured 5s window in audio network stats.
if (randFloat() < JITTER_FLUSH_CHANCE) {
flush();
}
} else if (!(_index % _threshold)) {
flush();
}
}
void AudioClient::Gate::flush() {
// Send all held packets to the received audio stream to be (eventually) played
while (!_queue.empty()) {
_audioClient->_receivedAudioStream.parseData(*_queue.front());
_queue.pop();
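The 2.56% figure in the comment above checks out: a packet is held for more than 30 ms only if at least four consecutive 10 ms packet arrivals each fail the 0.6 flush chance, i.e. 0.4^4 = 0.0256. A throwaway standalone check (not part of the codebase):

#include <cstdio>
#include <random>

int main() {
    std::mt19937 rng(42);
    std::uniform_real_distribution<float> coin(0.0f, 1.0f);
    const float JITTER_FLUSH_CHANCE = 0.6f;
    const int TRIALS = 1000000;
    int spikes = 0;
    for (int i = 0; i < TRIALS; i++) {
        int heldMs = 0;
        // each 10 ms packet interval passes without a flush with probability 0.4
        while (coin(rng) >= JITTER_FLUSH_CHANCE) {
            heldMs += 10;
        }
        if (heldMs > 30) {
            spikes++;
        }
    }
    printf("P(held > 30 ms) ~= %.4f, expected 0.0256\n", (double)spikes / TRIALS);
    return 0;
}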
@@ -634,7 +657,7 @@ void AudioClient::selectAudioFormat(const QString& selectedCodecName) {
_selectedCodecName = selectedCodecName;
qDebug() << "Selected Codec:" << _selectedCodecName;
qCDebug(audioclient) << "Selected Codec:" << _selectedCodecName;
// release any old codec encoder/decoder first...
if (_codec && _encoder) {
@@ -650,7 +673,7 @@ void AudioClient::selectAudioFormat(const QString& selectedCodecName) {
_codec = plugin;
_receivedAudioStream.setupCodec(plugin, _selectedCodecName, AudioConstants::STEREO);
_encoder = plugin->createEncoder(AudioConstants::SAMPLE_RATE, AudioConstants::MONO);
qDebug() << "Selected Codec Plugin:" << _codec.get();
qCDebug(audioclient) << "Selected Codec Plugin:" << _codec.get();
break;
}
}
@@ -683,8 +706,8 @@ bool AudioClient::switchOutputToAudioDevice(const QString& outputDeviceName) {
void AudioClient::configureReverb() {
ReverbParameters p;
p.sampleRate = _outputFormat.sampleRate();
p.sampleRate = AudioConstants::SAMPLE_RATE;
p.bandwidth = _reverbOptions->getBandwidth();
p.preDelay = _reverbOptions->getPreDelay();
p.lateDelay = _reverbOptions->getLateDelay();
@@ -710,6 +733,7 @@ void AudioClient::configureReverb() {
_listenerReverb.setParameters(&p);
// used only for adding self-reverb to loopback audio
p.sampleRate = _outputFormat.sampleRate();
p.wetDryMix = 100.0f;
p.preDelay = 0.0f;
p.earlyGain = -96.0f; // disable ER
@@ -816,23 +840,22 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
static QByteArray loopBackByteArray;
int numInputSamples = inputByteArray.size() / sizeof(int16_t);
int numInputSamples = inputByteArray.size() / AudioConstants::SAMPLE_SIZE;
int numLoopbackSamples = numDestinationSamplesRequired(_inputFormat, _outputFormat, numInputSamples);
loopBackByteArray.resize(numLoopbackSamples * sizeof(int16_t));
loopBackByteArray.resize(numLoopbackSamples * AudioConstants::SAMPLE_SIZE);
int16_t* inputSamples = reinterpret_cast<int16_t*>(inputByteArray.data());
int16_t* loopbackSamples = reinterpret_cast<int16_t*>(loopBackByteArray.data());
auto NO_RESAMPLER = nullptr;
possibleResampling(NO_RESAMPLER,
inputSamples, loopbackSamples,
numInputSamples, numLoopbackSamples,
_inputFormat, _outputFormat);
// upmix mono to stereo
if (!sampleChannelConversion(inputSamples, loopbackSamples, numInputSamples, _inputFormat, _outputFormat)) {
// no conversion, just copy the samples
memcpy(loopbackSamples, inputSamples, numInputSamples * AudioConstants::SAMPLE_SIZE);
}
// apply stereo reverb at the source, to the loopback audio
if (!_shouldEchoLocally && hasReverb) {
assert(_outputFormat.channelCount() == 2);
updateReverbOptions();
_sourceReverb.render(loopbackSamples, loopbackSamples, numLoopbackSamples/2);
}
@@ -841,8 +864,12 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
}
void AudioClient::handleAudioInput() {
// input samples required to produce exactly NETWORK_FRAME_SAMPLES of output
const int inputSamplesRequired = _inputFormat.channelCount() * _inputToNetworkResampler->getMinInput(AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
const int inputSamplesRequired = (_inputToNetworkResampler ?
_inputToNetworkResampler->getMinInput(AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL) :
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL) * _inputFormat.channelCount();
const auto inputAudioSamples = std::unique_ptr<int16_t[]>(new int16_t[inputSamplesRequired]);
QByteArray inputByteArray = _inputDevice->readAll();
@@ -851,7 +878,7 @@ void AudioClient::handleAudioInput() {
_inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size());
float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
_stats.updateInputMsecsRead(audioInputMsecsRead);
_stats.updateInputMsRead(audioInputMsecsRead);
const int numNetworkBytes = _isStereoInput
? AudioConstants::NETWORK_FRAME_BYTES_STEREO
@@ -929,16 +956,20 @@ void AudioClient::handleAudioInput() {
audioTransform.setRotation(_orientationGetter());
// FIXME find a way to properly handle both playback audio and user audio concurrently
QByteArray decocedBuffer(reinterpret_cast<char*>(networkAudioSamples), numNetworkBytes);
QByteArray decodedBuffer(reinterpret_cast<char*>(networkAudioSamples), numNetworkBytes);
QByteArray encodedBuffer;
if (_encoder) {
_encoder->encode(decocedBuffer, encodedBuffer);
_encoder->encode(decodedBuffer, encodedBuffer);
} else {
encodedBuffer = decocedBuffer;
encodedBuffer = decodedBuffer;
}
emitAudioPacket(encodedBuffer.constData(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, audioTransform, packetType, _selectedCodecName);
_stats.sentPacket();
int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * AudioConstants::SAMPLE_SIZE;
float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
_stats.updateInputMsUnplayed(msecsInInputRingBuffer);
}
}
@@ -958,13 +989,9 @@ void AudioClient::handleRecordedAudioInput(const QByteArray& audio) {
emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, audioTransform, PacketType::MicrophoneAudioWithEcho, _selectedCodecName);
}
void AudioClient::mixLocalAudioInjectors(int16_t* inputBuffer) {
void AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
memset(_hrtfBuffer, 0, sizeof(_hrtfBuffer));
QVector<AudioInjector*> injectorsToRemove;
static const float INT16_TO_FLOAT_SCALE_FACTOR = 1/32768.0f;
bool injectorsHaveData = false;
// lock the injector vector
Lock lock(_injectorsMutex);
@@ -972,19 +999,17 @@
for (AudioInjector* injector : getActiveLocalAudioInjectors()) {
if (injector->getLocalBuffer()) {
qint64 samplesToRead = injector->isStereo() ?
AudioConstants::NETWORK_FRAME_BYTES_STEREO :
AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
qint64 samplesToRead = injector->isStereo() ? AudioConstants::NETWORK_FRAME_BYTES_STEREO : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
// get one frame from the injector (mono or stereo)
memset(_scratchBuffer, 0, sizeof(_scratchBuffer));
if (0 < injector->getLocalBuffer()->readData((char*)_scratchBuffer, samplesToRead)) {
injectorsHaveData = true;
if (injector->isStereo()) {
if (injector->isStereo() ) {
for(int i=0; i<AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i++) {
_hrtfBuffer[i] += (float)(_scratchBuffer[i]) * INT16_TO_FLOAT_SCALE_FACTOR;
// stereo gets directly mixed into mixBuffer
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i++) {
mixBuffer[i] += (float)_scratchBuffer[i] * (1/32768.0f);
}
} else {
@@ -995,73 +1020,66 @@
float gain = gainForSource(distance, injector->getVolume());
float azimuth = azimuthForSource(relativePosition);
injector->getLocalHRTF().render(_scratchBuffer, _hrtfBuffer, 1, azimuth, distance, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
// mono gets spatialized into mixBuffer
injector->getLocalHRTF().render(_scratchBuffer, mixBuffer, 1, azimuth, distance, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
} else {
qDebug() << "injector has no more data, marking finished for removal";
qCDebug(audioclient) << "injector has no more data, marking finished for removal";
injector->finishLocalInjection();
injectorsToRemove.append(injector);
}
} else {
qDebug() << "injector has no local buffer, marking as finished for removal";
qCDebug(audioclient) << "injector has no local buffer, marking as finished for removal";
injector->finishLocalInjection();
injectorsToRemove.append(injector);
}
}
if(injectorsHaveData) {
// mix network into the hrtfBuffer
for(int i=0; i<AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i++) {
_hrtfBuffer[i] += (float)(inputBuffer[i]) * INT16_TO_FLOAT_SCALE_FACTOR;
}
// now, use limiter to write back to the inputBuffer
_audioLimiter.render(_hrtfBuffer, inputBuffer, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
for(AudioInjector* injector : injectorsToRemove) {
qDebug() << "removing injector";
for (AudioInjector* injector : injectorsToRemove) {
qCDebug(audioclient) << "removing injector";
getActiveLocalAudioInjectors().removeOne(injector);
}
}
void AudioClient::processReceivedSamples(const QByteArray& decodedBuffer, QByteArray& outputBuffer) {
const int numDecodecSamples = decodedBuffer.size() / sizeof(int16_t);
const int numDeviceOutputSamples = _outputFrameSize;
Q_ASSERT(_outputFrameSize == numDecodecSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
/ (_desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount()));
outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
const int16_t* decodedSamples;
int16_t* outputSamples = reinterpret_cast<int16_t*>(outputBuffer.data());
QByteArray decodedBufferCopy = decodedBuffer;
const int16_t* decodedSamples = reinterpret_cast<const int16_t*>(decodedBuffer.data());
assert(decodedBuffer.size() == AudioConstants::NETWORK_FRAME_BYTES_STEREO);
if(getActiveLocalAudioInjectors().size() > 0) {
mixLocalAudioInjectors((int16_t*)decodedBufferCopy.data());
decodedSamples = reinterpret_cast<const int16_t*>(decodedBufferCopy.data());
} else {
decodedSamples = reinterpret_cast<const int16_t*>(decodedBuffer.data());
outputBuffer.resize(_outputFrameSize * AudioConstants::SAMPLE_SIZE);
int16_t* outputSamples = reinterpret_cast<int16_t*>(outputBuffer.data());
// convert network audio to float
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i++) {
_mixBuffer[i] = (float)decodedSamples[i] * (1/32768.0f);
}
// mix in active injectors
if (getActiveLocalAudioInjectors().size() > 0) {
mixLocalAudioInjectors(_mixBuffer);
}
// copy the packet from the RB to the output
possibleResampling(_networkToOutputResampler, decodedSamples, outputSamples,
numDecodecSamples, numDeviceOutputSamples,
_desiredOutputFormat, _outputFormat);
// apply stereo reverb at the listener, to the received audio
// apply stereo reverb
bool hasReverb = _reverb || _receivedAudioStream.hasReverb();
if (hasReverb) {
assert(_outputFormat.channelCount() == 2);
updateReverbOptions();
_listenerReverb.render(outputSamples, outputSamples, numDeviceOutputSamples/2);
_listenerReverb.render(_mixBuffer, _mixBuffer, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
if (_networkToOutputResampler) {
// resample to output sample rate
_audioLimiter.render(_mixBuffer, _scratchBuffer, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
_networkToOutputResampler->render(_scratchBuffer, outputSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
} else {
// no resampling needed
_audioLimiter.render(_mixBuffer, outputSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
}
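The rewritten path above converts network audio to float once, mixes injectors and reverb in float, and converts back to int16 exactly once through the limiter. The two conversions, pulled out as hypothetical helpers for clarity (in the real pipeline the limiter handles the clamping):

#include <algorithm>
#include <cstdint>

// int16 -> float: full-scale PCM maps to roughly [-1.0, 1.0)
static inline float int16ToFloat(int16_t s) {
    return (float)s * (1.0f / 32768.0f);
}

// float -> int16: clamp first, since summing several sources can exceed full scale
static inline int16_t floatToInt16(float f) {
    f = std::min(1.0f, std::max(-1.0f, f));
    return (int16_t)(f * 32767.0f);
}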
@@ -1117,10 +1135,10 @@ bool AudioClient::outputLocalInjector(bool isStereo, AudioInjector* injector) {
// Since this is invoked with invokeMethod, there _should_ be
// no reason to lock access to the vector of injectors.
if (!_activeLocalAudioInjectors.contains(injector)) {
qDebug() << "adding new injector";
qCDebug(audioclient) << "adding new injector";
_activeLocalAudioInjectors.append(injector);
} else {
qDebug() << "injector exists in active list already";
qCDebug(audioclient) << "injector exists in active list already";
}
return true;
@@ -1215,22 +1233,23 @@ bool AudioClient::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceIn
void AudioClient::outputNotify() {
int recentUnfulfilled = _audioOutputIODevice.getRecentUnfulfilledReads();
if (recentUnfulfilled > 0) {
qCInfo(audioclient, "Starve detected, %d new unfulfilled reads", recentUnfulfilled);
qCDebug(audioclient, "Starve detected, %d new unfulfilled reads", recentUnfulfilled);
if (_outputStarveDetectionEnabled.get()) {
quint64 now = usecTimestampNow() / 1000;
int dt = (int)(now - _outputStarveDetectionStartTimeMsec);
if (dt > _outputStarveDetectionPeriodMsec.get()) {
if (dt > STARVE_DETECTION_PERIOD) {
_outputStarveDetectionStartTimeMsec = now;
_outputStarveDetectionCount = 0;
} else {
_outputStarveDetectionCount += recentUnfulfilled;
if (_outputStarveDetectionCount > _outputStarveDetectionThreshold.get()) {
if (_outputStarveDetectionCount > STARVE_DETECTION_THRESHOLD) {
int oldOutputBufferSizeFrames = _sessionOutputBufferSizeFrames;
int newOutputBufferSizeFrames = setOutputBufferSize(oldOutputBufferSizeFrames + 1, false);
if (newOutputBufferSizeFrames > oldOutputBufferSizeFrames) {
qCInfo(audioclient, "Starve threshold surpassed (%d starves in %d ms)", _outputStarveDetectionCount, dt);
qCDebug(audioclient,
"Starve threshold surpassed (%d starves in %d ms)", _outputStarveDetectionCount, dt);
}
_outputStarveDetectionStartTimeMsec = now;
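With the period and threshold now compile-time constants instead of settings, the policy above reads: accumulate unfulfilled reads inside a rolling window, grow the output buffer by one frame when the count crosses the threshold, and restart the window whenever it expires or the buffer grows. A condensed sketch, with assumed values standing in for STARVE_DETECTION_PERIOD and STARVE_DETECTION_THRESHOLD:
#include <cstdint>
static const int PERIOD_MS = 10 * 1000;  // assumed window length
static const int THRESHOLD = 3;          // assumed starves-per-window limit
struct StarveDetector {
    uint64_t windowStartMs { 0 };
    int count { 0 };
    // Returns true when the caller should grow the output buffer by a frame.
    bool onStarves(int newStarves, uint64_t nowMs) {
        if ((int)(nowMs - windowStartMs) > PERIOD_MS) {
            windowStartMs = nowMs;  // window expired: start counting afresh
            count = 0;
            return false;
        }
        count += newStarves;
        if (count > THRESHOLD) {
            windowStartMs = nowMs;  // acted: reset the window
            count = 0;
            return true;
        }
        return false;
    }
};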
@@ -1290,7 +1309,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDevice
// setup our general output device for audio-mixer audio
_audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
int osDefaultBufferSize = _audioOutput->bufferSize();
int requestedSize = _sessionOutputBufferSizeFrames *_outputFrameSize * sizeof(int16_t);
int requestedSize = _sessionOutputBufferSizeFrames *_outputFrameSize * AudioConstants::SAMPLE_SIZE;
_audioOutput->setBufferSize(requestedSize);
connect(_audioOutput, &QAudioOutput::notify, this, &AudioClient::outputNotify);
@@ -1302,7 +1321,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDevice
_audioOutput->start(&_audioOutputIODevice);
lock.unlock();
qCDebug(audioclient) << "Output Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize <<
qCDebug(audioclient) << "Output Buffer capacity in frames: " << _audioOutput->bufferSize() / AudioConstants::SAMPLE_SIZE / (float)_outputFrameSize <<
"requested bytes:" << requestedSize << "actual bytes:" << _audioOutput->bufferSize() <<
"os default:" << osDefaultBufferSize << "period size:" << _audioOutput->periodSize();
@@ -1320,7 +1339,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDevice
}
int AudioClient::setOutputBufferSize(int numFrames, bool persist) {
numFrames = std::min(std::max(numFrames, MIN_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES), MAX_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES);
numFrames = std::min(std::max(numFrames, MIN_BUFFER_FRAMES), MAX_BUFFER_FRAMES);
if (numFrames != _sessionOutputBufferSizeFrames) {
qCInfo(audioclient, "Audio output buffer set to %d frames", numFrames);
_sessionOutputBufferSizeFrames = numFrames;
@@ -1364,26 +1383,10 @@ int AudioClient::calculateNumberOfInputCallbackBytes(const QAudioFormat& format)
}
int AudioClient::calculateNumberOfFrameSamples(int numBytes) const {
int frameSamples = (int)(numBytes * CALLBACK_ACCELERATOR_RATIO + 0.5f) / sizeof(int16_t);
int frameSamples = (int)(numBytes * CALLBACK_ACCELERATOR_RATIO + 0.5f) / AudioConstants::SAMPLE_SIZE;
return frameSamples;
}
float AudioClient::getInputRingBufferMsecsAvailable() const {
int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * sizeof(int16_t);
float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
return msecsInInputRingBuffer;
}
float AudioClient::getAudioOutputMsecsUnplayed() const {
if (!_audioOutput) {
return 0.0f;
}
int bytesAudioOutputUnplayed = _audioOutput->bufferSize() - _audioOutput->bytesFree();
float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_outputFormat.bytesForDuration(USECS_PER_MSEC);
return msecsAudioOutputUnplayed;
}
float AudioClient::azimuthForSource(const glm::vec3& relativePosition) {
// copied from AudioMixer, more or less
glm::quat inverseOrientation = glm::inverse(_orientationGetter());
@@ -1424,14 +1427,15 @@ float AudioClient::gainForSource(float distance, float volume) {
}
qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
auto samplesRequested = maxSize / sizeof(int16_t);
auto samplesRequested = maxSize / AudioConstants::SAMPLE_SIZE;
int samplesPopped;
int bytesWritten;
if ((samplesPopped = _receivedAudioStream.popSamples((int)samplesRequested, false)) > 0) {
qCDebug(audiostream, "Read %d samples from buffer (%d available)", samplesPopped, _receivedAudioStream.getSamplesAvailable());
AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput();
lastPopOutput.readSamples((int16_t*)data, samplesPopped);
bytesWritten = samplesPopped * sizeof(int16_t);
bytesWritten = samplesPopped * AudioConstants::SAMPLE_SIZE;
} else {
// nothing on network, don't grab anything from injectors, and just return 0s
// this will flood the log: qCDebug(audioclient, "empty/partial network buffer");
@@ -1439,8 +1443,11 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
bytesWritten = maxSize;
}
bool wasBufferStarved = _audio->_audioOutput->bufferSize() == _audio->_audioOutput->bytesFree();
if (wasBufferStarved) {
int bytesAudioOutputUnplayed = _audio->_audioOutput->bufferSize() - _audio->_audioOutput->bytesFree();
float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_audio->_outputFormat.bytesForDuration(USECS_PER_MSEC);
_audio->_stats.updateOutputMsUnplayed(msecsAudioOutputUnplayed);
if (bytesAudioOutputUnplayed == 0) {
_unfulfilledReads++;
}
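For intuition on the unplayed-time figure now computed inline above: QAudioFormat::bytesForDuration(USECS_PER_MSEC) is just bytes per millisecond, so at an assumed 48 kHz stereo 16-bit device format that is 48000 * 2 * 2 / 1000 = 192 bytes, and 3840 unplayed bytes report as 20 ms:
// Assumed example format: 48 kHz, stereo, 16-bit, i.e. 192 bytes per millisecond.
static float unplayedMs(int bufferBytes, int freeBytes, float bytesPerMs = 192.0f) {
    return (bufferBytes - freeBytes) / bytesPerMs;  // e.g. (4096 - 256) / 192 = 20 ms
}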
@@ -1461,31 +1468,18 @@ void AudioClient::checkDevices() {
}
void AudioClient::loadSettings() {
_receivedAudioStream.setDynamicJitterBuffers(dynamicJitterBuffers.get());
_receivedAudioStream.setMaxFramesOverDesired(maxFramesOverDesired.get());
_receivedAudioStream.setStaticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames.get());
_receivedAudioStream.setUseStDevForJitterCalc(useStDevForJitterCalc.get());
_receivedAudioStream.setWindowStarveThreshold(windowStarveThreshold.get());
_receivedAudioStream.setWindowSecondsForDesiredCalcOnTooManyStarves(
windowSecondsForDesiredCalcOnTooManyStarves.get());
_receivedAudioStream.setWindowSecondsForDesiredReduction(windowSecondsForDesiredReduction.get());
_receivedAudioStream.setRepetitionWithFade(repetitionWithFade.get());
_receivedAudioStream.setDynamicJitterBufferEnabled(dynamicJitterBufferEnabled.get());
_receivedAudioStream.setStaticJitterBufferFrames(staticJitterBufferFrames.get());
qDebug() << "---- Initializing Audio Client ----";
qCDebug(audioclient) << "---- Initializing Audio Client ----";
auto codecPlugins = PluginManager::getInstance()->getCodecPlugins();
for (auto& plugin : codecPlugins) {
qDebug() << "Codec available:" << plugin->getName();
qCDebug(audioclient) << "Codec available:" << plugin->getName();
}
}
void AudioClient::saveSettings() {
dynamicJitterBuffers.set(_receivedAudioStream.getDynamicJitterBuffers());
maxFramesOverDesired.set(_receivedAudioStream.getMaxFramesOverDesired());
staticDesiredJitterBufferFrames.set(_receivedAudioStream.getDesiredJitterBufferFrames());
windowStarveThreshold.set(_receivedAudioStream.getWindowStarveThreshold());
windowSecondsForDesiredCalcOnTooManyStarves.set(_receivedAudioStream.
getWindowSecondsForDesiredCalcOnTooManyStarves());
windowSecondsForDesiredReduction.set(_receivedAudioStream.getWindowSecondsForDesiredReduction());
repetitionWithFade.set(_receivedAudioStream.getRepetitionWithFade());
dynamicJitterBufferEnabled.set(_receivedAudioStream.dynamicJitterBufferEnabled());
staticJitterBufferFrames.set(_receivedAudioStream.getStaticJitterBufferFrames());
}

View file

@@ -61,15 +61,6 @@
#pragma warning( pop )
#endif
static const int NUM_AUDIO_CHANNELS = 2;
static const int DEFAULT_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 3;
static const int MIN_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 1;
static const int MAX_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 20;
static const int DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_ENABLED = true;
static const int DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_THRESHOLD = 3;
static const quint64 DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_PERIOD = 10 * 1000; // 10 Seconds
class QAudioInput;
class QAudioOutput;
class QIODevice;
@@ -82,6 +73,9 @@ class AudioClient : public AbstractAudioInterface, public Dependency {
Q_OBJECT
SINGLETON_DEPENDENCY
public:
static const int MIN_BUFFER_FRAMES;
static const int MAX_BUFFER_FRAMES;
using AudioPositionGetter = std::function<glm::vec3()>;
using AudioOrientationGetter = std::function<glm::quat()>;
@@ -115,25 +109,17 @@ public:
float getTimeSinceLastClip() const { return _timeSinceLastClip; }
float getAudioAverageInputLoudness() const { return _lastInputLoudness; }
int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); }
bool isMuted() { return _muted; }
const AudioIOStats& getStats() const { return _stats; }
float getInputRingBufferMsecsAvailable() const;
float getAudioOutputMsecsUnplayed() const;
int getOutputBufferSize() { return _outputBufferSizeFrames.get(); }
bool getOutputStarveDetectionEnabled() { return _outputStarveDetectionEnabled.get(); }
void setOutputStarveDetectionEnabled(bool enabled) { _outputStarveDetectionEnabled.set(enabled); }
int getOutputStarveDetectionPeriod() { return _outputStarveDetectionPeriodMsec.get(); }
void setOutputStarveDetectionPeriod(int msecs) { _outputStarveDetectionPeriodMsec.set(msecs); }
int getOutputStarveDetectionThreshold() { return _outputStarveDetectionThreshold.get(); }
void setOutputStarveDetectionThreshold(int threshold) { _outputStarveDetectionThreshold.set(threshold); }
bool isSimulatingJitter() { return _gate.isSimulatingJitter(); }
void setIsSimulatingJitter(bool enable) { _gate.setIsSimulatingJitter(enable); }
int getGateThreshold() { return _gate.getThreshold(); }
void setGateThreshold(int threshold) { _gate.setThreshold(threshold); }
@@ -227,13 +213,16 @@ protected:
private:
void outputFormatChanged();
void mixLocalAudioInjectors(int16_t* inputBuffer);
void mixLocalAudioInjectors(float* mixBuffer);
float azimuthForSource(const glm::vec3& relativePosition);
float gainForSource(float distance, float volume);
class Gate {
public:
Gate(AudioClient* audioClient, int threshold);
Gate(AudioClient* audioClient);
bool isSimulatingJitter() { return _isSimulatingJitter; }
void setIsSimulatingJitter(bool enable);
int getThreshold() { return _threshold; }
void setThreshold(int threshold);
@@ -245,26 +234,25 @@ private:
AudioClient* _audioClient;
std::queue<QSharedPointer<ReceivedMessage>> _queue;
std::mutex _mutex;
int _index{ 0 };
int _threshold;
int _threshold{ 1 };
bool _isSimulatingJitter{ false };
};
Setting::Handle<int> _gateThreshold;
Gate _gate;
Mutex _injectorsMutex;
QByteArray firstInputFrame;
QAudioInput* _audioInput;
QAudioFormat _desiredInputFormat;
QAudioFormat _inputFormat;
QIODevice* _inputDevice;
int _numInputCallbackBytes;
int16_t _localProceduralSamples[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL];
QAudioOutput* _audioOutput;
QAudioFormat _desiredOutputFormat;
QAudioFormat _outputFormat;
int _outputFrameSize;
int16_t _outputProcessingBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
int _numOutputCallbackBytes;
QAudioOutput* _loopbackAudioOutput;
QIODevice* _loopbackOutputDevice;
@@ -281,13 +269,9 @@ private:
Setting::Handle<int> _outputBufferSizeFrames;
int _sessionOutputBufferSizeFrames;
Setting::Handle<bool> _outputStarveDetectionEnabled;
Setting::Handle<int> _outputStarveDetectionPeriodMsec;
// Maximum number of starves per _outputStarveDetectionPeriod before increasing buffer size
Setting::Handle<int> _outputStarveDetectionThreshold;
StDev _stdev;
QElapsedTimer _timeSinceLastReceived;
float _averagedLatency;
float _lastInputLoudness;
float _timeSinceLastClip;
int _totalInputAudioSamples;
@@ -309,7 +293,7 @@ private:
AudioSRC* _networkToOutputResampler;
// for local hrtf-ing
float _hrtfBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
float _mixBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
int16_t _scratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
AudioLimiter _audioLimiter;

View file

@@ -18,54 +18,73 @@
#include "AudioIOStats.h"
const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10;
// This is called 5x/sec (see AudioStatsDialog), and we want it to log the last 5s
static const int INPUT_READS_WINDOW = 25;
static const int INPUT_UNPLAYED_WINDOW = 25;
static const int OUTPUT_UNPLAYED_WINDOW = 25;
const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.0f / AudioConstants::NETWORK_FRAME_MSECS);
static const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.0f / AudioConstants::NETWORK_FRAME_MSECS);
AudioIOStats::AudioIOStats(MixedProcessedAudioStream* receivedAudioStream) :
_receivedAudioStream(receivedAudioStream),
_audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AudioConstants::NETWORK_FRAME_MSECS * AudioClient::CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_lastSentAudioPacket(0),
_packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
_inputMsRead(0, INPUT_READS_WINDOW),
_inputMsUnplayed(0, INPUT_UNPLAYED_WINDOW),
_outputMsUnplayed(0, OUTPUT_UNPLAYED_WINDOW),
_lastSentPacketTime(0),
_packetTimegaps(0, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
{
}
AudioStreamStats AudioIOStats::getMixerDownstreamStats() const {
return _receivedAudioStream->getAudioStreamStats();
}
void AudioIOStats::reset() {
_receivedAudioStream->resetStats();
_inputMsRead.reset();
_inputMsUnplayed.reset();
_outputMsUnplayed.reset();
_packetTimegaps.reset();
_mixerAvatarStreamStats = AudioStreamStats();
_mixerInjectedStreamStatsMap.clear();
_audioInputMsecsReadStats.reset();
_inputRingBufferMsecsAvailableStats.reset();
_audioOutputMsecsUnplayedStats.reset();
_packetSentTimeGaps.reset();
}
void AudioIOStats::sentPacket() {
// on the first call this is still 0, so there is no gap to record yet
if (_lastSentAudioPacket == 0) {
_lastSentAudioPacket = usecTimestampNow();
if (_lastSentPacketTime == 0) {
_lastSentPacketTime = usecTimestampNow();
} else {
quint64 now = usecTimestampNow();
quint64 gap = now - _lastSentAudioPacket;
_packetSentTimeGaps.update(gap);
_lastSentAudioPacket = now;
quint64 gap = now - _lastSentPacketTime;
_lastSentPacketTime = now;
_packetTimegaps.update(gap);
}
}
void AudioIOStats::processStreamStatsPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
const MovingMinMaxAvg<float>& AudioIOStats::getInputMsRead() const {
_inputMsRead.currentIntervalComplete();
return _inputMsRead;
}
const MovingMinMaxAvg<float>& AudioIOStats::getInputMsUnplayed() const {
_inputMsUnplayed.currentIntervalComplete();
return _inputMsUnplayed;
}
const MovingMinMaxAvg<float>& AudioIOStats::getOutputMsUnplayed() const {
_outputMsUnplayed.currentIntervalComplete();
return _outputMsUnplayed;
}
const MovingMinMaxAvg<quint64>& AudioIOStats::getPacketTimegaps() const {
_packetTimegaps.currentIntervalComplete();
return _packetTimegaps;
}
const AudioStreamStats AudioIOStats::getMixerDownstreamStats() const {
return _receivedAudioStream->getAudioStreamStats();
}
void AudioIOStats::processStreamStatsPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
// parse the appendFlag, clear injected audio stream stats if 0
quint8 appendFlag;
message->readPrimitive(&appendFlag);
@@ -92,14 +111,9 @@ void AudioIOStats::processStreamStatsPacket(QSharedPointer<ReceivedMessage> mess
}
void AudioIOStats::sendDownstreamAudioStatsPacket() {
auto audioIO = DependencyManager::get<AudioClient>();
// since this function is called every second, we'll sample for some of our stats here
_inputRingBufferMsecsAvailableStats.update(audioIO->getInputRingBufferMsecsAvailable());
_audioOutputMsecsUnplayedStats.update(audioIO->getAudioOutputMsecsUnplayed());
// also, call _receivedAudioStream's per-second callback
// call _receivedAudioStream's per-second callback
_receivedAudioStream->perSecondCallbackForUpdatingStats();
auto nodeList = DependencyManager::get<NodeList>();

View file

@@ -29,19 +29,20 @@ public:
void reset();
void updateInputMsecsRead(float msecsRead) { _audioInputMsecsReadStats.update(msecsRead); }
void updateInputMsRead(float ms) { _inputMsRead.update(ms); }
void updateInputMsUnplayed(float ms) { _inputMsUnplayed.update(ms); }
void updateOutputMsUnplayed(float ms) { _outputMsUnplayed.update(ms); }
void sentPacket();
AudioStreamStats getMixerDownstreamStats() const;
const MovingMinMaxAvg<float>& getInputMsRead() const;
const MovingMinMaxAvg<float>& getInputMsUnplayed() const;
const MovingMinMaxAvg<float>& getOutputMsUnplayed() const;
const MovingMinMaxAvg<quint64>& getPacketTimegaps() const;
const AudioStreamStats getMixerDownstreamStats() const;
const AudioStreamStats& getMixerAvatarStreamStats() const { return _mixerAvatarStreamStats; }
const QHash<QUuid, AudioStreamStats>& getMixerInjectedStreamStatsMap() const { return _mixerInjectedStreamStatsMap; }
const MovingMinMaxAvg<float>& getAudioInputMsecsReadStats() const { return _audioInputMsecsReadStats; }
const MovingMinMaxAvg<float>& getInputRungBufferMsecsAvailableStats() const { return _inputRingBufferMsecsAvailableStats; }
const MovingMinMaxAvg<float>& getAudioOutputMsecsUnplayedStats() const { return _audioOutputMsecsUnplayedStats; }
const MovingMinMaxAvg<quint64>& getPacketSentTimeGaps() const { return _packetSentTimeGaps; }
void sendDownstreamAudioStatsPacket();
public slots:
@@ -49,17 +50,16 @@ public slots:
private:
MixedProcessedAudioStream* _receivedAudioStream;
MovingMinMaxAvg<float> _audioInputMsecsReadStats;
MovingMinMaxAvg<float> _inputRingBufferMsecsAvailableStats;
MovingMinMaxAvg<float> _audioOutputMsecsUnplayedStats;
mutable MovingMinMaxAvg<float> _inputMsRead;
mutable MovingMinMaxAvg<float> _inputMsUnplayed;
mutable MovingMinMaxAvg<float> _outputMsUnplayed;
quint64 _lastSentPacketTime;
mutable MovingMinMaxAvg<quint64> _packetTimegaps;
AudioStreamStats _mixerAvatarStreamStats;
QHash<QUuid, AudioStreamStats> _mixerInjectedStreamStatsMap;
quint64 _lastSentAudioPacket;
MovingMinMaxAvg<quint64> _packetSentTimeGaps;
};
#endif // hifi_AudioIOStats_h

View file

@@ -23,15 +23,16 @@ namespace AudioConstants {
typedef int16_t AudioSample;
const int SAMPLE_SIZE = sizeof(AudioSample);
inline const char* getAudioFrameName() { return "com.highfidelity.recording.Audio"; }
const int MAX_CODEC_NAME_LENGTH = 30;
const int MAX_CODEC_NAME_LENGTH_ON_WIRE = MAX_CODEC_NAME_LENGTH + sizeof(uint32_t);
const int NETWORK_FRAME_BYTES_STEREO = 960;
const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / sizeof(AudioSample);
const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / SAMPLE_SIZE;
const int NETWORK_FRAME_BYTES_PER_CHANNEL = NETWORK_FRAME_BYTES_STEREO / 2;
const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / sizeof(AudioSample);
const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / SAMPLE_SIZE;
const float NETWORK_FRAME_SECS = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL / float(AudioConstants::SAMPLE_RATE));
const float NETWORK_FRAME_MSECS = NETWORK_FRAME_SECS * 1000.0f;
const float NETWORK_FRAMES_PER_SEC = 1.0f / NETWORK_FRAME_SECS;
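These constants are mutually consistent at the network sample rate (AudioConstants::SAMPLE_RATE, defined earlier in this header; 24000 is assumed below): 960 stereo bytes is 480 two-byte samples, 240 per channel, and 240 / 24000 s is the 10 ms network frame behind NETWORK_FRAME_MSECS. A quick sanity check:
#include <cassert>
#include <cstdint>
int main() {
    const int SAMPLE_RATE = 24000;            // assumed value of AudioConstants::SAMPLE_RATE
    const int SAMPLE_SIZE = sizeof(int16_t);  // 2 bytes
    const int BYTES_STEREO = 960;
    const int SAMPLES_STEREO = BYTES_STEREO / SAMPLE_SIZE;   // 480
    const int SAMPLES_PER_CHANNEL = SAMPLES_STEREO / 2;      // 240
    const float FRAME_MSECS = SAMPLES_PER_CHANNEL * 1000.0f / SAMPLE_RATE;
    assert(FRAME_MSECS == 10.0f);             // one network frame is 10 ms
    return 0;
}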

View file

@@ -26,7 +26,9 @@
#include "SoundCache.h"
#include "AudioSRC.h"
//int audioInjectorPtrMetaTypeId = qRegisterMetaType<AudioInjector*>();
int audioInjectorPtrMetaTypeId = qRegisterMetaType<AudioInjector*>();
AbstractAudioInterface* AudioInjector::_localAudioInterface{ nullptr };
AudioInjectorState operator& (AudioInjectorState lhs, AudioInjectorState rhs) {
return static_cast<AudioInjectorState>(static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs));
@@ -37,24 +39,15 @@ AudioInjectorState& operator|= (AudioInjectorState& lhs, AudioInjectorState rhs)
return lhs;
};
AudioInjector::AudioInjector(QObject* parent) :
QObject(parent)
{
}
AudioInjector::AudioInjector(const Sound& sound, const AudioInjectorOptions& injectorOptions) :
_audioData(sound.getByteArray()),
_options(injectorOptions)
AudioInjector(sound.getByteArray(), injectorOptions)
{
}
AudioInjector::AudioInjector(const QByteArray& audioData, const AudioInjectorOptions& injectorOptions) :
_audioData(audioData),
_options(injectorOptions)
{
}
bool AudioInjector::stateHas(AudioInjectorState state) const {
@@ -103,26 +96,6 @@ void AudioInjector::finish() {
}
}
void AudioInjector::setupInjection() {
if (!_hasSetup) {
_hasSetup = true;
// check if we need to offset the sound by some number of seconds
if (_options.secondOffset > 0.0f) {
// convert the offset into a number of bytes
int byteOffset = (int) floorf(AudioConstants::SAMPLE_RATE * _options.secondOffset * (_options.stereo ? 2.0f : 1.0f));
byteOffset *= sizeof(int16_t);
_currentSendOffset = byteOffset;
} else {
_currentSendOffset = 0;
}
} else {
qCDebug(audio) << "AudioInjector::setupInjection called but already setup.";
}
}
void AudioInjector::restart() {
// grab the AudioInjectorManager
auto injectorManager = DependencyManager::get<AudioInjectorManager>();
@@ -150,30 +123,37 @@ void AudioInjector::restart() {
// check our state to decide if we need extra handling for the restart request
if (stateHas(AudioInjectorState::Finished)) {
// we finished playing, need to reset state so we can get going again
_hasSetup = false;
_shouldStop = false;
_state = AudioInjectorState::NotFinished;
// call inject audio to start injection over again
setupInjection();
// inject locally
if(injectLocally()) {
// if not localOnly, wake the AudioInjectorManager back up if it is stuck waiting
if (!_options.localOnly) {
if (!injectorManager->restartFinishedInjector(this)) {
_state = AudioInjectorState::Finished; // we're not playing, so reset the state used by isPlaying.
}
}
} else {
_state = AudioInjectorState::Finished; // we failed to play, so we are finished again
if (!inject(&AudioInjectorManager::restartFinishedInjector)) {
qWarning() << "AudioInjector::restart failed to thread injector";
}
}
}
bool AudioInjector::inject(bool(AudioInjectorManager::*injection)(AudioInjector*)) {
_state = AudioInjectorState::NotFinished;
int byteOffset = 0;
if (_options.secondOffset > 0.0f) {
byteOffset = (int)floorf(AudioConstants::SAMPLE_RATE * _options.secondOffset * (_options.stereo ? 2.0f : 1.0f));
byteOffset *= AudioConstants::SAMPLE_SIZE;
}
_currentSendOffset = byteOffset;
if (!injectLocally()) {
finishLocalInjection();
}
bool success = true;
if (!_options.localOnly) {
auto injectorManager = DependencyManager::get<AudioInjectorManager>();
if (!(*injectorManager.*injection)(this)) {
success = false;
finishNetworkInjection();
}
}
return success;
}
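A worked example of the offset math in inject(), assuming the 24 kHz network rate: a 1.5 s offset into a stereo sound is floor(24000 * 1.5 * 2) = 72000 samples, or 144000 bytes:
#include <cmath>
#include <cstdint>
// Sketch of the second-offset to byte-offset conversion; 24000 is an assumption.
static int byteOffsetFor(float seconds, bool stereo) {
    const int SAMPLE_RATE = 24000;
    int samples = (int)floorf(SAMPLE_RATE * seconds * (stereo ? 2.0f : 1.0f));
    return samples * (int)sizeof(int16_t);  // 1.5 s stereo -> 144000 bytes
}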
bool AudioInjector::injectLocally() {
bool success = false;
if (_localAudioInterface) {
@@ -202,11 +182,6 @@ bool AudioInjector::injectLocally() {
qCDebug(audio) << "AudioInjector::injectLocally cannot inject locally with no local audio interface present.";
}
if (!success) {
// we never started so we are finished with local injection
finishLocalInjection();
}
return success;
}
@@ -447,7 +422,7 @@ AudioInjector* AudioInjector::playSound(SharedSoundPointer sound, const float vo
QByteArray samples = sound->getByteArray();
if (stretchFactor == 1.0f) {
return playSoundAndDelete(samples, options, nullptr);
return playSoundAndDelete(samples, options);
}
const int standardRate = AudioConstants::SAMPLE_RATE;
@@ -465,11 +440,11 @@ AudioInjector* AudioInjector::playSound(SharedSoundPointer sound, const float vo
nInputFrames);
Q_UNUSED(nOutputFrames);
return playSoundAndDelete(resampled, options, nullptr);
return playSoundAndDelete(resampled, options);
}
AudioInjector* AudioInjector::playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options, AbstractAudioInterface* localInterface) {
AudioInjector* sound = playSound(buffer, options, localInterface);
AudioInjector* AudioInjector::playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options) {
AudioInjector* sound = playSound(buffer, options);
if (sound) {
sound->_state |= AudioInjectorState::PendingDelete;
@@ -479,27 +454,10 @@ AudioInjector* AudioInjector::playSoundAndDelete(const QByteArray& buffer, const
}
AudioInjector* AudioInjector::playSound(const QByteArray& buffer, const AudioInjectorOptions options, AbstractAudioInterface* localInterface) {
AudioInjector* AudioInjector::playSound(const QByteArray& buffer, const AudioInjectorOptions options) {
AudioInjector* injector = new AudioInjector(buffer, options);
injector->setLocalAudioInterface(localInterface);
// grab the AudioInjectorManager
auto injectorManager = DependencyManager::get<AudioInjectorManager>();
// setup parameters required for injection
injector->setupInjection();
// we always inject locally, except when there is no localInterface
injector->injectLocally();
// if localOnly, we are done, just return injector.
if (!options.localOnly) {
// send off to server for everyone else
if (!injectorManager->threadInjector(injector)) {
// we failed to thread the new injector (we are at the max number of injector threads)
qDebug() << "AudioInjector::playSound failed to thread injector";
}
if (!injector->inject(&AudioInjectorManager::threadInjector)) {
qWarning() << "AudioInjector::playSound failed to thread injector";
}
return injector;
}

View file

@@ -48,9 +48,7 @@ AudioInjectorState& operator|= (AudioInjectorState& lhs, AudioInjectorState rhs)
// until it dies.
class AudioInjector : public QObject {
Q_OBJECT
public:
AudioInjector(QObject* parent);
AudioInjector(const Sound& sound, const AudioInjectorOptions& injectorOptions);
AudioInjector(const QByteArray& audioData, const AudioInjectorOptions& injectorOptions);
@@ -66,11 +64,11 @@ public:
float getVolume() const { return _options.volume; }
glm::vec3 getPosition() const { return _options.position; }
bool isStereo() const { return _options.stereo; }
void setLocalAudioInterface(AbstractAudioInterface* localAudioInterface) { _localAudioInterface = localAudioInterface; }
bool stateHas(AudioInjectorState state) const;
static AudioInjector* playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options, AbstractAudioInterface* localInterface);
static AudioInjector* playSound(const QByteArray& buffer, const AudioInjectorOptions options, AbstractAudioInterface* localInterface);
static void setLocalAudioInterface(AbstractAudioInterface* audioInterface) { _localAudioInterface = audioInterface; }
static AudioInjector* playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options);
static AudioInjector* playSound(const QByteArray& buffer, const AudioInjectorOptions options);
static AudioInjector* playSound(SharedSoundPointer sound, const float volume, const float stretchFactor, const glm::vec3 position);
public slots:
@@ -94,20 +92,19 @@ signals:
void restarting();
private:
void setupInjection();
int64_t injectNextFrame();
bool inject(bool(AudioInjectorManager::*injection)(AudioInjector*));
bool injectLocally();
static AbstractAudioInterface* _localAudioInterface;
QByteArray _audioData;
AudioInjectorOptions _options;
AudioInjectorState _state { AudioInjectorState::NotFinished };
bool _hasSentFirstFrame { false };
bool _hasSetup { false };
bool _shouldStop { false };
float _loudness { 0.0f };
int _currentSendOffset { 0 };
std::unique_ptr<NLPacket> _currentPacket { nullptr };
AbstractAudioInterface* _localAudioInterface { nullptr };
AudioInjectorLocalBuffer* _localBuffer { nullptr };
int64_t _nextFrame { 0 };

View file

@@ -157,8 +157,6 @@ bool AudioInjectorManager::threadInjector(AudioInjector* injector) {
// move the injector to the QThread
injector->moveToThread(_thread);
// handle a restart once the injector has finished
// add the injector to the queue with a send timestamp of now
_injectors.emplace(usecTimestampNow(), InjectorQPointer { injector });
@@ -170,13 +168,17 @@ bool AudioInjectorManager::threadInjector(AudioInjector* injector) {
}
bool AudioInjectorManager::restartFinishedInjector(AudioInjector* injector) {
if (!_shouldStop) {
// guard the injectors vector with a mutex
Lock lock(_injectorsMutex);
if (wouldExceedLimits()) {
return false;
}
if (_shouldStop) {
qDebug() << "AudioInjectorManager::threadInjector asked to thread injector but is shutting down.";
return false;
}
// guard the injectors vector with a mutex
Lock lock(_injectorsMutex);
if (wouldExceedLimits()) {
return false;
} else {
// add the injector to the queue with a send timestamp of now
_injectors.emplace(usecTimestampNow(), InjectorQPointer { injector });

View file

@@ -12,9 +12,5 @@
#include "AudioLogging.h"
Q_LOGGING_CATEGORY(audio, "hifi.audio")
Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtWarningMsg)
#if DEV_BUILD || PR_BUILD
Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtDebugMsg)
#else
Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtInfoMsg)
#endif

View file

@@ -48,6 +48,7 @@ public:
quint32 _framesAvailable;
quint16 _framesAvailableAverage;
quint16 _unplayedMs;
quint16 _desiredJitterBufferFrames;
quint32 _starveCount;
quint32 _consecutiveNotMixedCount;

View file

@@ -18,39 +18,46 @@
#include "InboundAudioStream.h"
#include "AudioLogging.h"
const int STARVE_HISTORY_CAPACITY = 50;
const bool InboundAudioStream::DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED = true;
const int InboundAudioStream::DEFAULT_STATIC_JITTER_FRAMES = 1;
const int InboundAudioStream::MAX_FRAMES_OVER_DESIRED = 10;
const int InboundAudioStream::WINDOW_STARVE_THRESHOLD = 3;
const int InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES = 50;
const int InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_REDUCTION = 10;
const bool InboundAudioStream::USE_STDEV_FOR_JITTER = false;
const bool InboundAudioStream::REPETITION_WITH_FADE = true;
InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings) :
static const int STARVE_HISTORY_CAPACITY = 50;
// This is called 1x/s, and we want it to log the last 5s
static const int UNPLAYED_MS_WINDOW_SECS = 5;
// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames.
// The larger this value is, the fewer frames we drop when attempting to reduce the jitter buffer length.
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames when dropping frames,
// which could lead to a starve soon after.
static const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
// this controls the length of the window for stats used in the stats packet (not the stats used in
// _desiredJitterBufferFrames calculation)
static const int STATS_FOR_STATS_PACKET_WINDOW_SECONDS = 30;
// this controls the window size of the time-weighted avg of frames available. Every time the window fills up,
// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
static const quint64 FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND;
InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames) :
_ringBuffer(numFrameSamples, numFramesCapacity),
_lastPopSucceeded(false),
_lastPopOutput(),
_dynamicJitterBuffers(settings._dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(settings._useStDevForJitterCalc),
_desiredJitterBufferFrames(settings._dynamicJitterBuffers ? 1 : settings._staticDesiredJitterBufferFrames),
_maxFramesOverDesired(settings._maxFramesOverDesired),
_isStarved(true),
_hasStarted(false),
_consecutiveNotMixedCount(0),
_starveCount(0),
_silentFramesDropped(0),
_oldFramesDropped(0),
_dynamicJitterBufferEnabled(numStaticJitterFrames == -1),
_staticJitterBufferFrames(std::max(numStaticJitterFrames, DEFAULT_STATIC_JITTER_FRAMES)),
_desiredJitterBufferFrames(_dynamicJitterBufferEnabled ? 1 : _staticJitterBufferFrames),
_incomingSequenceNumberStats(STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
_lastPacketReceivedTime(0),
_timeGapStatsForDesiredCalcOnTooManyStarves(0, settings._windowSecondsForDesiredCalcOnTooManyStarves),
_calculatedJitterBufferFramesUsingMaxGap(0),
_stdevStatsForDesiredCalcOnTooManyStarves(),
_calculatedJitterBufferFramesUsingStDev(0),
_timeGapStatsForDesiredReduction(0, settings._windowSecondsForDesiredReduction),
_starveHistoryWindowSeconds(settings._windowSecondsForDesiredCalcOnTooManyStarves),
_starveHistory(STARVE_HISTORY_CAPACITY),
_starveThreshold(settings._windowStarveThreshold),
_framesAvailableStat(),
_currentJitterBufferFrames(0),
_timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
_repetitionWithFade(settings._repetitionWithFade),
_hasReverb(false)
{
_unplayedMs(0, UNPLAYED_MS_WINDOW_SECS),
_timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS) {}
InboundAudioStream::~InboundAudioStream() {
cleanupCodec();
}
void InboundAudioStream::reset() {
@@ -66,7 +73,7 @@ void InboundAudioStream::reset() {
}
void InboundAudioStream::resetStats() {
if (_dynamicJitterBuffers) {
if (_dynamicJitterBufferEnabled) {
_desiredJitterBufferFrames = 1;
}
_consecutiveNotMixedCount = 0;
@@ -76,12 +83,12 @@ void InboundAudioStream::resetStats() {
_incomingSequenceNumberStats.reset();
_lastPacketReceivedTime = 0;
_timeGapStatsForDesiredCalcOnTooManyStarves.reset();
_stdevStatsForDesiredCalcOnTooManyStarves = StDev();
_timeGapStatsForDesiredReduction.reset();
_starveHistory.clear();
_framesAvailableStat.reset();
_currentJitterBufferFrames = 0;
_timeGapStatsForStatsPacket.reset();
_unplayedMs.reset();
}
void InboundAudioStream::clearBuffer() {
@@ -101,6 +108,7 @@ void InboundAudioStream::perSecondCallbackForUpdatingStats() {
_timeGapStatsForDesiredCalcOnTooManyStarves.currentIntervalComplete();
_timeGapStatsForDesiredReduction.currentIntervalComplete();
_timeGapStatsForStatsPacket.currentIntervalComplete();
_unplayedMs.currentIntervalComplete();
}
int InboundAudioStream::parseData(ReceivedMessage& message) {
@@ -163,11 +171,12 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
int framesAvailable = _ringBuffer.framesAvailable();
// if this stream was starved, check if we're still starved.
if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
qCInfo(audiostream, "Starve ended");
_isStarved = false;
}
// if the ringbuffer exceeds the desired size by more than the threshold specified,
// drop the oldest frames so the ringbuffer is down to the desired size.
if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) {
if (framesAvailable > _desiredJitterBufferFrames + MAX_FRAMES_OVER_DESIRED) {
int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
_ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples());
@@ -176,8 +185,8 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
_oldFramesDropped += framesToDrop;
qCDebug(audiostream, "Dropped %d frames", framesToDrop);
qCDebug(audiostream, "Resetted current jitter frames");
qCInfo(audiostream, "Dropped %d frames", framesToDrop);
qCInfo(audiostream, "Reset current jitter frames");
}
framesAvailableChanged();
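Putting DESIRED_JITTER_BUFFER_FRAMES_PADDING together with the trim above: the buffer is only trimmed once it exceeds desired plus MAX_FRAMES_OVER_DESIRED, and it is trimmed down to desired plus the padding, not to desired exactly. A compact restatement with illustrative numbers:
// With desired = 3, maxOver = 10, padding = 1: 15 frames available exceeds
// 3 + 10, so framesToDrop = 15 - (3 + 1) = 11, leaving 4 frames, one above
// the 3-frame target.
static int framesToDrop(int available, int desired, int maxOver, int padding) {
    return (available > desired + maxOver) ? available - (desired + padding) : 0;
}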
@@ -232,8 +241,8 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
_currentJitterBufferFrames -= numSilentFramesToDrop;
_silentFramesDropped += numSilentFramesToDrop;
qCDebug(audiostream, "Dropped %d silent frames", numSilentFramesToDrop);
qCDebug(audiostream, "Set current jitter frames to %d", _currentJitterBufferFrames);
qCInfo(audiostream, "Dropped %d silent frames", numSilentFramesToDrop);
qCInfo(audiostream, "Set current jitter frames to %d (dropped)", _currentJitterBufferFrames);
_framesAvailableStat.reset();
}
@@ -243,7 +252,7 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
return ret;
}
int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped) {
int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing) {
int samplesPopped = 0;
int samplesAvailable = _ringBuffer.samplesAvailable();
if (_isStarved) {
@@ -261,47 +270,25 @@ int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starv
popSamplesNoCheck(samplesAvailable);
samplesPopped = samplesAvailable;
} else {
// we can't pop any samples. set this stream to starved if needed
if (starveIfNoSamplesPopped) {
setToStarved();
_consecutiveNotMixedCount++;
}
// we can't pop any samples, set this stream to starved
setToStarved();
_consecutiveNotMixedCount++;
_lastPopSucceeded = false;
}
}
return samplesPopped;
}
int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing, bool starveIfNoFramesPopped) {
int framesPopped = 0;
int framesAvailable = _ringBuffer.framesAvailable();
if (_isStarved) {
// we're still refilling; don't pop
_consecutiveNotMixedCount++;
_lastPopSucceeded = false;
} else {
if (framesAvailable >= maxFrames) {
// we have enough frames to pop, so we're good to pop
popSamplesNoCheck(maxFrames * _ringBuffer.getNumFrameSamples());
framesPopped = maxFrames;
} else if (!allOrNothing && framesAvailable > 0) {
// we don't have the requested number of frames, but we do have some
// frames available, so pop all those (except in all-or-nothing mode)
popSamplesNoCheck(framesAvailable * _ringBuffer.getNumFrameSamples());
framesPopped = framesAvailable;
} else {
// we can't pop any frames. set this stream to starved if needed
if (starveIfNoFramesPopped) {
setToStarved();
_consecutiveNotMixedCount = 1;
}
_lastPopSucceeded = false;
}
}
return framesPopped;
int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing) {
int numFrameSamples = _ringBuffer.getNumFrameSamples();
int samplesPopped = popSamples(maxFrames * numFrameSamples, allOrNothing);
return samplesPopped / numFrameSamples;
}
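The popFrames()/popSamples() collapse above leaves exactly one copy of the pop policy, which reads: a starved stream pops nothing while it refills; otherwise pop everything requested, or a partial amount unless all-or-nothing, or starve again when empty. A condensed sketch with illustrative names:
// Returns samples popped; zero means the stream is refilling or has just starved.
static int popPolicy(int requested, int available, bool allOrNothing, bool starved) {
    if (starved) { return 0; }                                 // still refilling
    if (available >= requested) { return requested; }          // full pop
    if (!allOrNothing && available > 0) { return available; }  // partial pop
    return 0;                                                  // empty: starve
}
With 480-sample stereo frames, popFrames(3, false) becomes popSamples(1440, false), and a partial pop of 960 samples reports back as 960 / 480 = 2 frames.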
void InboundAudioStream::popSamplesNoCheck(int samples) {
float unplayedMs = (_ringBuffer.samplesAvailable() / (float)_ringBuffer.getNumFrameSamples()) * AudioConstants::NETWORK_FRAME_MSECS;
_unplayedMs.update(unplayedMs);
_lastPopOutput = _ringBuffer.nextOutput();
_ringBuffer.shiftReadPosition(samples);
framesAvailableChanged();
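The new _unplayedMs sample above converts buffered samples to time on every pop. With 480-sample frames at 10 ms each (both values assumed from the stereo network frame math), 1200 buffered samples record (1200 / 480) * 10 = 25 ms:
// samplesPerFrame = 480 and 10 ms per frame are assumptions for illustration.
static float bufferedMs(int samplesAvailable, int samplesPerFrame = 480) {
    return (samplesAvailable / (float)samplesPerFrame) * 10.0f;  // 1200 -> 25 ms
}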
@@ -315,13 +302,17 @@ void InboundAudioStream::framesAvailableChanged() {
if (_framesAvailableStat.getElapsedUsecs() >= FRAMES_AVAILABLE_STAT_WINDOW_USECS) {
_currentJitterBufferFrames = (int)ceil(_framesAvailableStat.getAverage());
qCDebug(audiostream, "Set current jitter frames to %d", _currentJitterBufferFrames);
qCInfo(audiostream, "Set current jitter frames to %d (changed)", _currentJitterBufferFrames);
_framesAvailableStat.reset();
}
}
void InboundAudioStream::setToStarved() {
if (!_isStarved) {
qCInfo(audiostream, "Starved");
}
_consecutiveNotMixedCount = 0;
_starveCount++;
// if we have more than the desired frames when setToStarved() is called, then we'll immediately
@@ -332,10 +323,10 @@ void InboundAudioStream::setToStarved() {
quint64 now = usecTimestampNow();
_starveHistory.insert(now);
if (_dynamicJitterBuffers) {
if (_dynamicJitterBufferEnabled) {
// dynamic jitter buffers are enabled. check if this starve put us over the window
// starve threshold
quint64 windowEnd = now - _starveHistoryWindowSeconds * USECS_PER_SECOND;
quint64 windowEnd = now - WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES * USECS_PER_SECOND;
RingBufferHistory<quint64>::Iterator starvesIterator = _starveHistory.begin();
RingBufferHistory<quint64>::Iterator end = _starveHistory.end();
int starvesInWindow = 1;
@@ -349,78 +340,47 @@ void InboundAudioStream::setToStarved() {
// this starve put us over the starve threshold. update _desiredJitterBufferFrames to
// value determined by window A.
if (starvesInWindow >= _starveThreshold) {
if (starvesInWindow >= WINDOW_STARVE_THRESHOLD) {
int calculatedJitterBufferFrames;
if (_useStDevForJitterCalc) {
calculatedJitterBufferFrames = _calculatedJitterBufferFramesUsingStDev;
} else {
// we don't know when the next packet will arrive, so it's possible the gap between the last packet and the
// next packet will exceed the max time gap in the window. If the time since the last packet has already exceeded
// the window max gap, then we should use that value to calculate desired frames.
int framesSinceLastPacket = ceilf((float)(now - _lastPacketReceivedTime)
/ (float)AudioConstants::NETWORK_FRAME_USECS);
calculatedJitterBufferFrames = std::max(_calculatedJitterBufferFramesUsingMaxGap, framesSinceLastPacket);
}
// we don't know when the next packet will arrive, so it's possible the gap between the last packet and the
// next packet will exceed the max time gap in the window. If the time since the last packet has already exceeded
// the window max gap, then we should use that value to calculate desired frames.
int framesSinceLastPacket = ceilf((float)(now - _lastPacketReceivedTime)
/ (float)AudioConstants::NETWORK_FRAME_USECS);
calculatedJitterBufferFrames = std::max(_calculatedJitterBufferFrames, framesSinceLastPacket);
// make sure _desiredJitterBufferFrames does not become lower here
if (calculatedJitterBufferFrames >= _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
qCDebug(audiostream, "Set desired jitter frames to %d", _desiredJitterBufferFrames);
qCInfo(audiostream, "Set desired jitter frames to %d (starved)", _desiredJitterBufferFrames);
}
}
}
}
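A condensed sketch of the starve response above now that the stdev branch is gone: starves are counted inside the 50 s window, and reaching WINDOW_STARVE_THRESHOLD raises the target to the larger of the window's worst gap (in frames) and the gap still running since the last packet, never lowering it. The vector stands in for RingBufferHistory and the names are illustrative:
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>
static const int WINDOW_SECS = 50;          // WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES
static const int STARVE_THRESHOLD = 3;      // WINDOW_STARVE_THRESHOLD
static const float FRAME_USECS = 10000.0f;  // assumed AudioConstants::NETWORK_FRAME_USECS
static int desiredOnStarve(const std::vector<uint64_t>& starveTimesUs, uint64_t nowUs,
                           uint64_t lastPacketUs, int windowMaxGapFrames, int desired) {
    uint64_t windowStart = nowUs - (uint64_t)WINDOW_SECS * 1000000ull;
    int starvesInWindow = 1;  // count the starve happening right now
    for (uint64_t t : starveTimesUs) {
        if (t >= windowStart) { starvesInWindow++; }
    }
    if (starvesInWindow < STARVE_THRESHOLD) { return desired; }
    int framesSinceLastPacket = (int)ceilf((nowUs - lastPacketUs) / FRAME_USECS);
    // never lower the target from here; reduction happens in the timing-stats path
    return std::max(desired, std::max(windowMaxGapFrames, framesSinceLastPacket));
}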
void InboundAudioStream::setSettings(const Settings& settings) {
setMaxFramesOverDesired(settings._maxFramesOverDesired);
setDynamicJitterBuffers(settings._dynamicJitterBuffers);
setStaticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames);
setUseStDevForJitterCalc(settings._useStDevForJitterCalc);
setWindowStarveThreshold(settings._windowStarveThreshold);
setWindowSecondsForDesiredCalcOnTooManyStarves(settings._windowSecondsForDesiredCalcOnTooManyStarves);
setWindowSecondsForDesiredReduction(settings._windowSecondsForDesiredReduction);
setRepetitionWithFade(settings._repetitionWithFade);
}
void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
if (!dynamicJitterBuffers) {
_desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
void InboundAudioStream::setDynamicJitterBufferEnabled(bool enable) {
if (!enable) {
_desiredJitterBufferFrames = _staticJitterBufferFrames;
} else {
if (!_dynamicJitterBuffers) {
if (!_dynamicJitterBufferEnabled) {
// if we're enabling dynamic jitter buffer frames, start desired frames at 1
_desiredJitterBufferFrames = 1;
}
}
_dynamicJitterBuffers = dynamicJitterBuffers;
_dynamicJitterBufferEnabled = enable;
}
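The toggle above is asymmetric, which is easy to miss: disabling pins the target to the static setting immediately, while enabling restarts the adaptive estimate at one frame and lets starves grow it from there. A sketch of just that state change:
// Fields mirror the members used above; a sketch, not the shipped class.
struct JitterTarget {
    bool dynamicEnabled { true };
    int staticFrames { 1 };
    int desiredFrames { 1 };
    void setDynamicEnabled(bool enable) {
        if (!enable) {
            desiredFrames = staticFrames;  // pin to the static setting
        } else if (!dynamicEnabled) {
            desiredFrames = 1;             // restart the adaptive estimate low
        }
        dynamicEnabled = enable;
    }
};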
void InboundAudioStream::setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) {
_staticDesiredJitterBufferFrames = staticDesiredJitterBufferFrames;
if (!_dynamicJitterBuffers) {
_desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
void InboundAudioStream::setStaticJitterBufferFrames(int staticJitterBufferFrames) {
_staticJitterBufferFrames = staticJitterBufferFrames;
if (!_dynamicJitterBufferEnabled) {
_desiredJitterBufferFrames = _staticJitterBufferFrames;
}
}
void InboundAudioStream::setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves) {
_timeGapStatsForDesiredCalcOnTooManyStarves.setWindowIntervals(windowSecondsForDesiredCalcOnTooManyStarves);
_starveHistoryWindowSeconds = windowSecondsForDesiredCalcOnTooManyStarves;
}
void InboundAudioStream::setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction) {
_timeGapStatsForDesiredReduction.setWindowIntervals(windowSecondsForDesiredReduction);
}
int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
const int MIN_FRAMES_DESIRED = 0;
const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity();
return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED);
}
void InboundAudioStream::packetReceivedUpdateTimingStats() {
// update our timegap stats and desired jitter buffer frames if necessary
// discard the first few packets we receive since they usually have gaps that aren't representative of normal jitter
const quint32 NUM_INITIAL_PACKETS_DISCARD = 3;
const quint32 NUM_INITIAL_PACKETS_DISCARD = 1000; // 10s
quint64 now = usecTimestampNow();
if (_incomingSequenceNumberStats.getReceived() > NUM_INITIAL_PACKETS_DISCARD) {
quint64 gap = now - _lastPacketReceivedTime;
@@ -428,25 +388,15 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
// update all stats used for desired frames calculations under dynamic jitter buffer mode
_timeGapStatsForDesiredCalcOnTooManyStarves.update(gap);
_stdevStatsForDesiredCalcOnTooManyStarves.addValue(gap);
_timeGapStatsForDesiredReduction.update(gap);
if (_timeGapStatsForDesiredCalcOnTooManyStarves.getNewStatsAvailableFlag()) {
_calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_timeGapStatsForDesiredCalcOnTooManyStarves.getWindowMax()
_calculatedJitterBufferFrames = ceilf((float)_timeGapStatsForDesiredCalcOnTooManyStarves.getWindowMax()
/ (float) AudioConstants::NETWORK_FRAME_USECS);
_timeGapStatsForDesiredCalcOnTooManyStarves.clearNewStatsAvailableFlag();
}
const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;
if (_stdevStatsForDesiredCalcOnTooManyStarves.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
const float NUM_STANDARD_DEVIATIONS = 3.0f;
_calculatedJitterBufferFramesUsingStDev = ceilf(NUM_STANDARD_DEVIATIONS
* _stdevStatsForDesiredCalcOnTooManyStarves.getStDev()
/ (float) AudioConstants::NETWORK_FRAME_USECS);
_stdevStatsForDesiredCalcOnTooManyStarves.reset();
}
if (_dynamicJitterBuffers) {
if (_dynamicJitterBufferEnabled) {
// if the max gap in window B (_timeGapStatsForDesiredReduction) corresponds to a smaller number of frames than _desiredJitterBufferFrames,
// then reduce _desiredJitterBufferFrames to that number of frames.
if (_timeGapStatsForDesiredReduction.getNewStatsAvailableFlag() && _timeGapStatsForDesiredReduction.isWindowFilled()) {
@@ -454,7 +404,7 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
/ (float)AudioConstants::NETWORK_FRAME_USECS);
if (calculatedJitterBufferFrames < _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
qCDebug(audiostream, "Set desired jitter frames to %d", _desiredJitterBufferFrames);
qCInfo(audiostream, "Set desired jitter frames to %d (reduced)", _desiredJitterBufferFrames);
}
_timeGapStatsForDesiredReduction.clearNewStatsAvailableFlag();
}
@@ -465,10 +415,7 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
}
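Both the starve path and the reduction path above share one conversion: a time gap becomes a frame count by a ceiling divide against the network frame length. For example, a 34 ms worst gap over a filled reduction window gives ceil(34000 / 10000) = 4 frames, so a desired value of 6 drops to 4:
#include <cmath>
// NETWORK_FRAME_USECS assumed to be 10000 (10 ms network frames).
static int gapToFrames(float gapUsecs) {
    return (int)ceilf(gapUsecs / 10000.0f);  // 34000 us -> 4 frames
}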
int InboundAudioStream::writeSamplesForDroppedPackets(int networkSamples) {
if (_repetitionWithFade) {
return writeLastFrameRepeatedWithFade(networkSamples);
}
return writeDroppableSilentSamples(networkSamples);
return writeLastFrameRepeatedWithFade(networkSamples);
}
int InboundAudioStream::writeLastFrameRepeatedWithFade(int samples) {
@@ -502,6 +449,7 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
streamStats._framesAvailable = _ringBuffer.framesAvailable();
streamStats._framesAvailableAverage = _framesAvailableStat.getAverage();
streamStats._unplayedMs = (quint16)_unplayedMs.getWindowMax();
streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames;
streamStats._starveCount = _starveCount;
streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount;

View file

@@ -27,86 +27,28 @@
#include "AudioStreamStats.h"
#include "TimeWeightedAvg.h"
// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames.
// The larger this value is, the fewer frames we drop when attempting to reduce the jitter buffer length.
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames when dropping frames,
// which could lead to a starve soon after.
const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
// this controls the length of the window for stats used in the stats packet (not the stats used in
// _desiredJitterBufferFrames calculation)
const int STATS_FOR_STATS_PACKET_WINDOW_SECONDS = 30;
// this controls the window size of the time-weighted avg of frames available. Every time the window fills up,
// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
const quint64 FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND;
// default values for members of the Settings struct
const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10;
const bool DEFAULT_DYNAMIC_JITTER_BUFFERS = true;
const int DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES = 1;
const bool DEFAULT_USE_STDEV_FOR_JITTER_CALC = false;
const int DEFAULT_WINDOW_STARVE_THRESHOLD = 3;
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES = 50;
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION = 10;
const bool DEFAULT_REPETITION_WITH_FADE = true;
// Audio Env bitset
const int HAS_REVERB_BIT = 0; // 1st bit
class InboundAudioStream : public NodeData {
Q_OBJECT
public:
class Settings {
public:
Settings()
: _maxFramesOverDesired(DEFAULT_MAX_FRAMES_OVER_DESIRED),
_dynamicJitterBuffers(DEFAULT_DYNAMIC_JITTER_BUFFERS),
_staticDesiredJitterBufferFrames(DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES),
_useStDevForJitterCalc(DEFAULT_USE_STDEV_FOR_JITTER_CALC),
_windowStarveThreshold(DEFAULT_WINDOW_STARVE_THRESHOLD),
_windowSecondsForDesiredCalcOnTooManyStarves(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES),
_windowSecondsForDesiredReduction(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION),
_repetitionWithFade(DEFAULT_REPETITION_WITH_FADE)
{}
Settings(int maxFramesOverDesired, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
bool useStDevForJitterCalc, int windowStarveThreshold, int windowSecondsForDesiredCalcOnTooManyStarves,
int _windowSecondsForDesiredReduction, bool repetitionWithFade)
: _maxFramesOverDesired(maxFramesOverDesired),
_dynamicJitterBuffers(dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(useStDevForJitterCalc),
_windowStarveThreshold(windowStarveThreshold),
_windowSecondsForDesiredCalcOnTooManyStarves(windowSecondsForDesiredCalcOnTooManyStarves),
_windowSecondsForDesiredReduction(windowSecondsForDesiredCalcOnTooManyStarves),
_repetitionWithFade(repetitionWithFade)
{}
// max number of frames over desired in the ringbuffer.
int _maxFramesOverDesired;
// if false, _desiredJitterBufferFrames will always be _staticDesiredJitterBufferFrames. Otherwise,
// either fred or philip's method will be used to calculate _desiredJitterBufferFrames based on packet timegaps.
bool _dynamicJitterBuffers;
// settings for static jitter buffer mode
int _staticDesiredJitterBufferFrames;
// settings for dynamic jitter buffer mode
bool _useStDevForJitterCalc; // if true, philip's method is used. otherwise, fred's method is used.
int _windowStarveThreshold;
int _windowSecondsForDesiredCalcOnTooManyStarves;
int _windowSecondsForDesiredReduction;
// if true, the prev frame will be repeated (fading to silence) for dropped frames.
// otherwise, silence will be inserted.
bool _repetitionWithFade;
};
public:
InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings);
~InboundAudioStream() { cleanupCodec(); }
// settings
static const bool DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED;
static const int DEFAULT_STATIC_JITTER_FRAMES;
// legacy (now static) settings
static const int MAX_FRAMES_OVER_DESIRED;
static const int WINDOW_STARVE_THRESHOLD;
static const int WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES;
static const int WINDOW_SECONDS_FOR_DESIRED_REDUCTION;
// unused (eradicated) settings
static const bool USE_STDEV_FOR_JITTER;
static const bool REPETITION_WITH_FADE;
InboundAudioStream() = delete;
InboundAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames = -1);
~InboundAudioStream();
void reset();
virtual void resetStats();
@@ -114,8 +56,8 @@ public:
virtual int parseData(ReceivedMessage& packet) override;
int popFrames(int maxFrames, bool allOrNothing, bool starveIfNoFramesPopped = true);
int popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped = true);
int popFrames(int maxFrames, bool allOrNothing);
int popSamples(int maxSamples, bool allOrNothing);
bool lastPopSucceeded() const { return _lastPopSucceeded; };
const AudioRingBuffer::ConstIterator& getLastPopOutput() const { return _lastPopOutput; }
@@ -124,43 +66,23 @@ public:
void setToStarved();
void setSettings(const Settings& settings);
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
void setDynamicJitterBuffers(bool setDynamicJitterBuffers);
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames);
void setUseStDevForJitterCalc(bool useStDevForJitterCalc) { _useStDevForJitterCalc = useStDevForJitterCalc; }
void setWindowStarveThreshold(int windowStarveThreshold) { _starveThreshold = windowStarveThreshold; }
void setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves);
void setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction);
void setRepetitionWithFade(bool repetitionWithFade) { _repetitionWithFade = repetitionWithFade; }
void setDynamicJitterBufferEnabled(bool enable);
void setStaticJitterBufferFrames(int staticJitterBufferFrames);
virtual AudioStreamStats getAudioStreamStats() const;
/// returns the desired number of jitter buffer frames under the dynamic jitter buffers scheme
int getCalculatedJitterBufferFrames() const { return _useStDevForJitterCalc ?
_calculatedJitterBufferFramesUsingStDev : _calculatedJitterBufferFramesUsingMaxGap; };
/// returns the desired number of jitter buffer frames using Philip's method
int getCalculatedJitterBufferFramesUsingStDev() const { return _calculatedJitterBufferFramesUsingStDev; }
/// returns the desired number of jitter buffer frames using Freddy's method
int getCalculatedJitterBufferFramesUsingMaxGap() const { return _calculatedJitterBufferFramesUsingMaxGap; }
int getCalculatedJitterBufferFrames() const { return _calculatedJitterBufferFrames; }
int getWindowSecondsForDesiredReduction() const {
return _timeGapStatsForDesiredReduction.getWindowIntervals(); }
int getWindowSecondsForDesiredCalcOnTooManyStarves() const {
return _timeGapStatsForDesiredCalcOnTooManyStarves.getWindowIntervals(); }
bool getDynamicJitterBuffers() const { return _dynamicJitterBuffers; }
bool getRepetitionWithFade() const { return _repetitionWithFade;}
int getWindowStarveThreshold() const { return _starveThreshold;}
bool getUseStDevForJitterCalc() const { return _useStDevForJitterCalc; }
int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; }
int getMaxFramesOverDesired() const { return _maxFramesOverDesired; }
bool dynamicJitterBufferEnabled() const { return _dynamicJitterBufferEnabled; }
int getStaticJitterBufferFrames() { return _staticJitterBufferFrames; }
int getDesiredJitterBufferFrames() { return _desiredJitterBufferFrames; }
int getNumFrameSamples() const { return _ringBuffer.getNumFrameSamples(); }
int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); }
int getFramesAvailable() const { return _ringBuffer.framesAvailable(); }
double getFramesAvailableAverage() const { return _framesAvailableStat.getAverage(); }
int getSamplesAvailable() const { return _ringBuffer.samplesAvailable(); }
bool isStarved() const { return _isStarved; }
bool hasStarted() const { return _hasStarted; }
@@ -192,7 +114,6 @@ public slots:
private:
void packetReceivedUpdateTimingStats();
int clampDesiredJitterBufferFramesValue(int desired) const;
int writeSamplesForDroppedPackets(int networkSamples);
@@ -224,63 +145,49 @@ protected:
AudioRingBuffer _ringBuffer;
bool _lastPopSucceeded;
bool _lastPopSucceeded { false };
AudioRingBuffer::ConstIterator _lastPopOutput;
bool _dynamicJitterBuffers; // if false, _desiredJitterBufferFrames is locked at 1 (old behavior)
int _staticDesiredJitterBufferFrames;
// if jitter buffer is dynamic, this determines what method of calculating _desiredJitterBufferFrames
// if true, Philip's timegap std dev calculation is used. Otherwise, Freddy's max timegap calculation is used
bool _useStDevForJitterCalc;
bool _dynamicJitterBufferEnabled { DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED };
int _staticJitterBufferFrames { DEFAULT_STATIC_JITTER_FRAMES };
int _desiredJitterBufferFrames;
// if there are more than _desiredJitterBufferFrames + _maxFramesOverDesired frames, old ringbuffer frames
// will be dropped to keep audio delay from building up
int _maxFramesOverDesired;
bool _isStarved;
bool _hasStarted;
bool _isStarved { true };
bool _hasStarted { false };
// stats
int _consecutiveNotMixedCount;
int _starveCount;
int _silentFramesDropped;
int _oldFramesDropped;
int _consecutiveNotMixedCount { 0 };
int _starveCount { 0 };
int _silentFramesDropped { 0 };
int _oldFramesDropped { 0 };
SequenceNumberStats _incomingSequenceNumberStats;
quint64 _lastPacketReceivedTime;
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredCalcOnTooManyStarves; // for Freddy's method
int _calculatedJitterBufferFramesUsingMaxGap;
StDev _stdevStatsForDesiredCalcOnTooManyStarves; // for Philip's method
int _calculatedJitterBufferFramesUsingStDev; // the most recent desired frames calculated by Philip's method
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredReduction;
quint64 _lastPacketReceivedTime { 0 };
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredCalcOnTooManyStarves { 0, WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES };
int _calculatedJitterBufferFrames { 0 };
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredReduction { 0, WINDOW_SECONDS_FOR_DESIRED_REDUCTION };
int _starveHistoryWindowSeconds;
RingBufferHistory<quint64> _starveHistory;
int _starveThreshold;
TimeWeightedAvg<int> _framesAvailableStat;
MovingMinMaxAvg<float> _unplayedMs;
// this value is periodically updated with the time-weighted avg from _framesAvailableStat. It is only used for
// dropping silent frames right now.
int _currentJitterBufferFrames;
int _currentJitterBufferFrames { 0 };
MovingMinMaxAvg<quint64> _timeGapStatsForStatsPacket;
bool _repetitionWithFade;
// Reverb properties
bool _hasReverb;
float _reverbTime;
float _wetLevel;
bool _hasReverb { false };
float _reverbTime { 0.0f };
float _wetLevel { 0.0f };
CodecPluginPointer _codec;
QString _selectedCodecName;
Decoder* _decoder{ nullptr };
Decoder* _decoder { nullptr };
};
float calculateRepeatedFrameFadeFactor(int indexOfRepeat);
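The hunks above collapse the old InboundAudioStream::Settings bundle into a single numStaticJitterFrames parameter, where the default of -1 selects dynamic jitter buffering. A minimal sketch of the new convention (the constants are illustrative, not taken from this diff):
// Sketch of the new constructor convention; FRAME_SAMPLES / CAPACITY_FRAMES are hypothetical values.
static const int FRAME_SAMPLES = 480;
static const int CAPACITY_FRAMES = 10;
MixedAudioStream dynamicStream(FRAME_SAMPLES, CAPACITY_FRAMES); // numStaticJitterFrames defaults to -1 -> dynamic
MixedAudioStream staticStream(FRAME_SAMPLES, CAPACITY_FRAMES, 3); // jitter buffer locked at 3 frames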

View file

@@ -19,14 +19,11 @@
#include "InjectedAudioStream.h"
InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, const bool isStereo, const InboundAudioStream::Settings& settings) :
PositionalAudioStream(PositionalAudioStream::Injector, isStereo, settings),
InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool isStereo, int numStaticJitterFrames) :
PositionalAudioStream(PositionalAudioStream::Injector, isStereo, numStaticJitterFrames),
_streamIdentifier(streamIdentifier),
_radius(0.0f),
_attenuationRatio(0)
{
}
_attenuationRatio(0) {}
const uchar MAX_INJECTOR_VOLUME = 255;

View file

@@ -18,7 +18,7 @@
class InjectedAudioStream : public PositionalAudioStream {
public:
InjectedAudioStream(const QUuid& streamIdentifier, const bool isStereo, const InboundAudioStream::Settings& settings);
InjectedAudioStream(const QUuid& streamIdentifier, bool isStereo, int numStaticJitterFrames = -1);
float getRadius() const { return _radius; }
float getAttenuationRatio() const { return _attenuationRatio; }

View file

@@ -11,7 +11,5 @@
#include "MixedAudioStream.h"
MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
: InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
{
}
MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames) :
InboundAudioStream(numFrameSamples, numFramesCapacity, numStaticJitterFrames) {}

View file

@@ -16,7 +16,7 @@
class MixedAudioStream : public InboundAudioStream {
public:
MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
MixedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames = -1);
float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
};

View file

@@ -10,13 +10,12 @@
//
#include "MixedProcessedAudioStream.h"
#include "AudioLogging.h"
static const int STEREO_FACTOR = 2;
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
: InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
{
}
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames)
: InboundAudioStream(numFrameSamples, numFramesCapacity, numStaticJitterFrames) {}
void MixedProcessedAudioStream::outputFormatChanged(int outputFormatChannelCountTimesSampleRate) {
_outputFormatChannelsTimesSampleRate = outputFormatChannelCountTimesSampleRate;
@@ -56,6 +55,7 @@ int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray&
emit processSamples(decodedBuffer, outputBuffer);
_ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());
qCDebug(audiostream, "Wrote %d samples to buffer (%d available)", outputBuffer.size() / (int)sizeof(int16_t), getSamplesAvailable());
return packetAfterStreamProperties.size();
}

View file

@@ -19,7 +19,7 @@ class AudioClient;
class MixedProcessedAudioStream : public InboundAudioStream {
Q_OBJECT
public:
MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames = -1);
signals:

View file

@@ -21,11 +21,11 @@
#include <udt/PacketHeaders.h>
#include <UUID.h>
PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings) :
PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, int numStaticJitterFrames) :
InboundAudioStream(isStereo
? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
: AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, settings),
AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, numStaticJitterFrames),
_type(type),
_position(0.0f, 0.0f, 0.0f),
_orientation(0.0f, 0.0f, 0.0f, 0.0f),
@@ -36,9 +36,7 @@ PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, b
_lastPopOutputLoudness(0.0f),
_quietestTrailingFrameLoudness(std::numeric_limits<float>::max()),
_quietestFrameLoudness(0.0f),
_frameCounter(0)
{
}
_frameCounter(0) {}
void PositionalAudioStream::resetStats() {
_lastPopOutputTrailingLoudness = 0.0f;

View file

@@ -27,7 +27,7 @@ public:
Injector
};
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings);
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, int numStaticJitterFrames = -1);
const QUuid DEFAULT_STREAM_IDENTIFIER = QUuid();
virtual const QUuid& getStreamIdentifier() const { return DEFAULT_STREAM_IDENTIFIER; }

View file

@@ -665,7 +665,7 @@ Mapping::Pointer UserInputMapper::newMapping(const QString& mappingName) {
if (_mappingsByName.count(mappingName)) {
qCWarning(controllers) << "Refusing to recreate mapping named " << mappingName;
}
qDebug() << "Creating new Mapping " << mappingName;
qCDebug(controllers) << "Creating new Mapping " << mappingName;
auto mapping = std::make_shared<Mapping>(mappingName);
_mappingsByName[mappingName] = mapping;
return mapping;
@@ -1121,15 +1121,15 @@ Mapping::Pointer UserInputMapper::parseMapping(const QString& json) {
QJsonDocument doc = QJsonDocument::fromJson(json.toUtf8(), &error);
// check validity of the document
if (doc.isNull()) {
qDebug() << "Invalid JSON...\n";
qDebug() << error.errorString();
qDebug() << "JSON was:\n" << json << endl;
qCDebug(controllers) << "Invalid JSON...\n";
qCDebug(controllers) << error.errorString();
qCDebug(controllers) << "JSON was:\n" << json << endl;
return Mapping::Pointer();
}
if (!doc.isObject()) {
qWarning() << "Mapping json Document is not an object" << endl;
qDebug() << "JSON was:\n" << json << endl;
qCDebug(controllers) << "JSON was:\n" << json << endl;
return Mapping::Pointer();
}
return parseMapping(doc.object());

View file

@@ -88,6 +88,7 @@ bool HmdDisplayPlugin::internalActivate() {
_monoPreview = clicked;
_container->setBoolSetting("monoPreview", _monoPreview);
}, true, _monoPreview);
#if defined(Q_OS_MAC)
_disablePreview = true;
#else
@@ -137,6 +138,7 @@ ivec4 HmdDisplayPlugin::getViewportForSourceSize(const uvec2& size) const {
float windowAspect = aspect(windowSize);
float sceneAspect = aspect(size);
float aspectRatio = sceneAspect / windowAspect;
uvec2 targetViewportSize = windowSize;
if (aspectRatio < 1.0f) {
targetViewportSize.x *= aspectRatio;
@@ -152,6 +154,23 @@ ivec4 HmdDisplayPlugin::getViewportForSourceSize(const uvec2& size) const {
return ivec4(targetViewportPosition, targetViewportSize);
}
float HmdDisplayPlugin::getLeftCenterPixel() const {
glm::mat4 eyeProjection = _eyeProjections[Left];
glm::mat4 inverseEyeProjection = glm::inverse(eyeProjection);
vec2 eyeRenderTargetSize = { _renderTargetSize.x / 2, _renderTargetSize.y };
vec4 left = vec4(-1, 0, -1, 1);
vec4 right = vec4(1, 0, -1, 1);
vec4 right2 = inverseEyeProjection * right;
vec4 left2 = inverseEyeProjection * left;
left2 /= left2.w;
right2 /= right2.w;
float width = -left2.x + right2.x;
float leftBias = -left2.x / width;
float leftCenterPixel = eyeRenderTargetSize.x * leftBias;
return leftCenterPixel;
}
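For intuition: the function unprojects the clip-space points (-1, 0, -1) and (1, 0, -1) into eye space and takes the fraction of the total width lying left of x = 0 as the bias of the projection center. A minimal standalone sketch with a symmetric frustum, where the bias comes out to exactly 0.5 (GLM assumed; HMD eye projections are typically asymmetric, which is why the real bias deviates from 0.5):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
glm::mat4 proj = glm::perspective(glm::radians(90.0f), 1.0f, 0.1f, 100.0f); // symmetric test frustum
glm::mat4 invProj = glm::inverse(proj);
glm::vec4 l = invProj * glm::vec4(-1, 0, -1, 1); l /= l.w; // left clip edge in eye space
glm::vec4 r = invProj * glm::vec4( 1, 0, -1, 1); r /= r.w; // right clip edge in eye space
float leftBias = -l.x / (r.x - l.x); // 0.5 for this symmetric frustum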
void HmdDisplayPlugin::internalPresent() {
PROFILE_RANGE_EX(__FUNCTION__, 0xff00ff00, (uint64_t)presentCount())
@@ -164,16 +183,67 @@ void HmdDisplayPlugin::internalPresent() {
if (_monoPreview) {
sourceSize.x >>= 1;
}
auto viewport = getViewportForSourceSize(sourceSize);
float shiftLeftBy = getLeftCenterPixel() - (sourceSize.x / 2);
float newWidth = sourceSize.x - shiftLeftBy;
const unsigned int RATIO_Y = 9;
const unsigned int RATIO_X = 16;
glm::uvec2 originalClippedSize { newWidth, newWidth * RATIO_Y / RATIO_X };
glm::ivec4 viewport = getViewportForSourceSize(sourceSize);
glm::ivec4 scissor = viewport;
render([&](gpu::Batch& batch) {
if (_monoPreview) {
auto window = _container->getPrimaryWidget();
float devicePixelRatio = window->devicePixelRatio();
glm::vec2 windowSize = toGlm(window->size());
windowSize *= devicePixelRatio;
float windowAspect = aspect(windowSize); // example: 1920 x 1080 = 1.78
float sceneAspect = aspect(originalClippedSize); // usually: 1512 x 850 = 1.78
bool scaleToWidth = windowAspect < sceneAspect;
float ratio;
int scissorOffset;
if (scaleToWidth) {
ratio = (float)windowSize.x / (float)newWidth;
} else {
ratio = (float)windowSize.y / (float)originalClippedSize.y;
}
float scaledShiftLeftBy = shiftLeftBy * ratio;
int scissorSizeX = originalClippedSize.x * ratio;
int scissorSizeY = originalClippedSize.y * ratio;
int viewportSizeX = sourceSize.x * ratio;
int viewportSizeY = sourceSize.y * ratio;
int viewportOffset = ((int)windowSize.y - viewportSizeY) / 2;
if (scaleToWidth) {
scissorOffset = ((int)windowSize.y - scissorSizeY) / 2;
scissor = ivec4(0, scissorOffset, scissorSizeX, scissorSizeY);
viewport = ivec4(-scaledShiftLeftBy, viewportOffset, viewportSizeX, viewportSizeY);
} else {
scissorOffset = ((int)windowSize.x - scissorSizeX) / 2;
scissor = ivec4(scissorOffset, 0, scissorSizeX, scissorSizeY);
viewport = ivec4(scissorOffset - scaledShiftLeftBy, viewportOffset, viewportSizeX, viewportSizeY);
}
viewport.z *= 2;
}
batch.enableStereo(false);
batch.resetViewTransform();
batch.setFramebuffer(gpu::FramebufferPointer());
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
batch.setStateScissorRect(viewport);
if (_monoPreview) {
viewport.z *= 2;
}
batch.setStateScissorRect(scissor); // was viewport
batch.setViewportTransform(viewport);
batch.setResourceTexture(0, _compositeFramebuffer->getRenderBuffer(0));
batch.setPipeline(_presentPipeline);
@@ -181,7 +251,13 @@
});
swapBuffers();
} else if (_clearPreviewFlag) {
auto image = QImage(PathUtils::resourcesPath() + "images/preview.png");
QImage image;
if (_vsyncEnabled) {
image = QImage(PathUtils::resourcesPath() + "images/preview.png");
} else {
image = QImage(PathUtils::resourcesPath() + "images/preview-disabled.png");
}
image = image.mirrored();
image = image.convertToFormat(QImage::Format_RGBA8888);
if (!_previewTexture) {
@@ -197,6 +273,7 @@ void HmdDisplayPlugin::internalPresent() {
if (getGLBackend()->isTextureReady(_previewTexture)) {
auto viewport = getViewportForSourceSize(uvec2(_previewTexture->getDimensions()));
render([&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.resetViewTransform();
@@ -213,6 +290,17 @@
}
}
postPreview();
// If preview is disabled, we need to check to see if the window size has changed
// and re-render the no-preview message
if (_disablePreview) {
auto window = _container->getPrimaryWidget();
glm::vec2 windowSize = toGlm(window->size());
if (windowSize != _lastWindowSize) {
_clearPreviewFlag = true;
_lastWindowSize = windowSize;
}
}
}
// HMD specific stuff
@@ -277,27 +365,37 @@ void HmdDisplayPlugin::updateFrameData() {
continue;
}
const auto& laserDirection = handLaser.direction;
auto model = _presentHandPoses[i];
auto castDirection = glm::quat_cast(model) * laserDirection;
const vec3& laserDirection = handLaser.direction;
mat4 model = _presentHandPoses[i];
vec3 castStart = vec3(model[3]);
vec3 castDirection = glm::quat_cast(model) * laserDirection;
if (glm::abs(glm::length2(castDirection) - 1.0f) > EPSILON) {
castDirection = glm::normalize(castDirection);
castDirection = glm::inverse(_presentUiModelTransform.getRotation()) * castDirection;
}
// this offset needs to match GRAB_POINT_SPHERE_OFFSET in scripts/system/libraries/controllers.js
static const vec3 GRAB_POINT_SPHERE_OFFSET = vec3(0.1f, 0.04f, -0.32f);
vec3 grabPointOffset = GRAB_POINT_SPHERE_OFFSET;
if (i == 0) {
grabPointOffset.x *= -1.0f; // this changes between left and right hands
}
castStart += glm::quat_cast(model) * grabPointOffset;
// FIXME fetch the actual UI radius from... somewhere?
float uiRadius = 1.0f;
// Find the intersection of the laser with the UI and use it to scale the model matrix
float distance;
if (!glm::intersectRaySphere(vec3(_presentHandPoses[i][3]), castDirection, _presentUiModelTransform.getTranslation(), uiRadius * uiRadius, distance)) {
if (!glm::intersectRaySphere(castStart, castDirection,
_presentUiModelTransform.getTranslation(), uiRadius * uiRadius, distance)) {
continue;
}
_presentHandLaserPoints[i].first = vec3(_presentHandPoses[i][3]);
_presentHandLaserPoints[i].first = castStart;
_presentHandLaserPoints[i].second = _presentHandLaserPoints[i].first + (castDirection * distance);
vec3 intersectionPosition = vec3(_presentHandPoses[i][3]) + (castDirection * distance) - _presentUiModelTransform.getTranslation();
vec3 intersectionPosition = castStart + (castDirection * distance) - _presentUiModelTransform.getTranslation();
intersectionPosition = glm::inverse(_presentUiModelTransform.getRotation()) * intersectionPosition;
// Take the intersection normal and convert it to a texture coordinate

View file

@@ -102,11 +102,13 @@ protected:
bool _disablePreview{ true };
private:
ivec4 getViewportForSourceSize(const uvec2& size) const;
float getLeftCenterPixel() const;
bool _disablePreviewItemAdded { false };
bool _monoPreview { true };
bool _clearPreviewFlag { false };
gpu::TexturePointer _previewTexture;
glm::vec2 _lastWindowSize;
struct OverlayRenderer {
gpu::Stream::FormatPointer format;

View file

@@ -198,15 +198,10 @@ void RenderableWebEntityItem::render(RenderArgs* args) {
#endif
if (!_webSurface) {
#if defined(Q_OS_LINUX)
// these don't seem to work on Linux
return;
#else
if (!buildWebSurface(static_cast<EntityTreeRenderer*>(args->_renderer))) {
return;
}
_fadeStartTime = usecTimestampNow();
#endif
}
_lastRenderTime = usecTimestampNow();

View file

@@ -22,6 +22,7 @@
#include <QtCore/QFileInfo>
#include <shared/NsightHelpers.h>
#include "ModelFormatLogging.h"
template<class T> int streamSize() {
return sizeof(T);
@@ -356,7 +357,7 @@ FBXNode FBXReader::parseFBX(QIODevice* device) {
quint32 fileVersion;
in >> fileVersion;
position += sizeof(fileVersion);
qDebug() << "fileVersion:" << fileVersion;
qCDebug(modelformat) << "fileVersion:" << fileVersion;
bool has64BitPositions = (fileVersion >= VERSION_FBX2016);
// parse the top-level node

View file

@@ -20,6 +20,8 @@
#include <QtGui/QGuiApplication>
#include <GLMHelpers.h>
#include "GLLogging.h"
#ifdef Q_OS_WIN
@@ -111,7 +113,7 @@ void GLAPIENTRY debugMessageCallback(GLenum source, GLenum type, GLuint id, GLen
if (GL_DEBUG_SEVERITY_NOTIFICATION == severity) {
return;
}
qDebug() << "QQQ " << message;
qCDebug(glLogging) << "QQQ " << message;
}
// FIXME build the PFD based on the

View file

@@ -65,7 +65,7 @@ class Context {
QWindow* _window { nullptr };
public:
virtual ~OffscreenContext();
virtual void create();
void create() override;
};
}

View file

@@ -0,0 +1,14 @@
//
// GLLogging.cpp
// libraries/gl/src/gl/
//
// Created by Seth Alves on 2016-9-14.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLLogging.h"
Q_LOGGING_CATEGORY(glLogging, "hifi.glLogging")
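Once the category exists, its output can be toggled at run time with Qt's standard logging rules, e.g. setting the environment variable QT_LOGGING_RULES="hifi.glLogging.debug=false" to silence these messages (standard Qt behavior, not specific to this commit).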

View file

@@ -0,0 +1,19 @@
//
// GLLogging.h
// libraries/gl/src/gl/
//
// Created by Seth Alves on 2016-9-14.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_GLLogging_h
#define hifi_GLLogging_h
#include <QLoggingCategory>
Q_DECLARE_LOGGING_CATEGORY(glLogging)
#endif // hifi_GLLogging_h

View file

@@ -12,6 +12,7 @@
#include <QtGui/QOpenGLContext>
#include "GLHelpers.h"
#include "GLLogging.h"
void GLWindow::createContext(QOpenGLContext* shareContext) {
createContext(getDefaultOpenGLSurfaceFormat(), shareContext);
@@ -41,10 +42,10 @@ bool GLWindow::makeCurrent() {
Q_ASSERT(makeCurrentResult);
std::call_once(_reportOnce, []{
qDebug() << "GL Version: " << QString((const char*) glGetString(GL_VERSION));
qDebug() << "GL Shader Language Version: " << QString((const char*) glGetString(GL_SHADING_LANGUAGE_VERSION));
qDebug() << "GL Vendor: " << QString((const char*) glGetString(GL_VENDOR));
qDebug() << "GL Renderer: " << QString((const char*) glGetString(GL_RENDERER));
qCDebug(glLogging) << "GL Version: " << QString((const char*) glGetString(GL_VERSION));
qCDebug(glLogging) << "GL Shader Language Version: " << QString((const char*) glGetString(GL_SHADING_LANGUAGE_VERSION));
qCDebug(glLogging) << "GL Vendor: " << QString((const char*) glGetString(GL_VENDOR));
qCDebug(glLogging) << "GL Renderer: " << QString((const char*) glGetString(GL_RENDERER));
});
Q_ASSERT(_context == QOpenGLContext::currentContext());

View file

@@ -18,6 +18,8 @@
#include <QtGui/QOpenGLContext>
#include "GLHelpers.h"
#include "GLLogging.h"
OffscreenGLCanvas::OffscreenGLCanvas() : _context(new QOpenGLContext), _offscreenSurface(new QOffscreenSurface) {
}
@@ -56,10 +58,10 @@ bool OffscreenGLCanvas::makeCurrent() {
Q_ASSERT(result);
std::call_once(_reportOnce, [this]{
qDebug() << "GL Version: " << QString((const char*) glGetString(GL_VERSION));
qDebug() << "GL Shader Language Version: " << QString((const char*) glGetString(GL_SHADING_LANGUAGE_VERSION));
qDebug() << "GL Vendor: " << QString((const char*) glGetString(GL_VENDOR));
qDebug() << "GL Renderer: " << QString((const char*) glGetString(GL_RENDERER));
qCDebug(glLogging) << "GL Version: " << QString((const char*) glGetString(GL_VERSION));
qCDebug(glLogging) << "GL Shader Language Version: " << QString((const char*) glGetString(GL_SHADING_LANGUAGE_VERSION));
qCDebug(glLogging) << "GL Vendor: " << QString((const char*) glGetString(GL_VENDOR));
qCDebug(glLogging) << "GL Renderer: " << QString((const char*) glGetString(GL_RENDERER));
});
return result;

View file

@@ -32,6 +32,7 @@
#include "OffscreenGLCanvas.h"
#include "GLEscrow.h"
#include "GLHelpers.h"
#include "GLLogging.h"
QString fixupHifiUrl(const QString& urlString) {
@@ -196,7 +197,7 @@ QEvent* OffscreenQmlRenderThread::Queue::take() {
OffscreenQmlRenderThread::OffscreenQmlRenderThread(OffscreenQmlSurface* surface, QOpenGLContext* shareContext) : _surface(surface) {
_canvas.setObjectName("OffscreenQmlRenderCanvas");
qDebug() << "Building QML Renderer";
qCDebug(glLogging) << "Building QML Renderer";
if (!_canvas.create(shareContext)) {
qWarning("Failed to create OffscreenGLCanvas");
_quit = true;
@@ -223,7 +224,7 @@ OffscreenQmlRenderThread::OffscreenQmlRenderThread(OffscreenQmlSurface* surface,
}
void OffscreenQmlRenderThread::run() {
qDebug() << "Starting QML Renderer thread";
qCDebug(glLogging) << "Starting QML Renderer thread";
while (!_quit) {
QEvent* e = _queue.take();
@@ -282,7 +283,7 @@ QJsonObject OffscreenQmlRenderThread::getGLContextData() {
}
void OffscreenQmlRenderThread::init() {
qDebug() << "Initializing QML Renderer";
qCDebug(glLogging) << "Initializing QML Renderer";
if (!_canvas.makeCurrent()) {
qWarning("Failed to make context current on QML Renderer Thread");
@@ -341,7 +342,7 @@ void OffscreenQmlRenderThread::resize() {
return;
}
qDebug() << "Offscreen UI resizing to " << _newSize.width() << "x" << _newSize.height() << " with pixel ratio " << pixelRatio;
qCDebug(glLogging) << "Offscreen UI resizing to " << _newSize.width() << "x" << _newSize.height() << " with pixel ratio " << pixelRatio;
_size = newOffscreenSize;
}
@@ -426,7 +427,7 @@ OffscreenQmlSurface::~OffscreenQmlSurface() {
QObject::disconnect(&_updateTimer);
QObject::disconnect(qApp);
qDebug() << "Stopping QML Renderer Thread " << _renderer->currentThreadId();
qCDebug(glLogging) << "Stopping QML Renderer Thread " << _renderer->currentThreadId();
_renderer->_queue.add(STOP);
if (!_renderer->wait(MAX_SHUTDOWN_WAIT_SECS * USECS_PER_SECOND)) {
qWarning() << "Failed to shut down the QML Renderer Thread";
@@ -443,7 +444,7 @@ void OffscreenQmlSurface::onAboutToQuit() {
}
void OffscreenQmlSurface::create(QOpenGLContext* shareContext) {
qDebug() << "Building QML surface";
qCDebug(glLogging) << "Building QML surface";
_renderer = new OffscreenQmlRenderThread(this, shareContext);
_renderer->moveToThread(_renderer);

View file

@@ -10,6 +10,7 @@
#include <set>
#include <oglplus/shapes/plane.hpp>
#include <oglplus/shapes/sky_box.hpp>
#include "GLLogging.h"
using namespace oglplus;
using namespace oglplus::shapes;
@@ -190,7 +191,7 @@ public:
const int stacks_) {
// UV mapping source: http://www.mvps.org/directx/articles/spheremap.htm
if (fov >= PI) {
qDebug() << "TexturedHemisphere::buildVBO(): FOV greater or equal than Pi will create issues";
qCDebug(glLogging) << "TexturedHemisphere::buildVBO(): FOV greater or equal than Pi will create issues";
}
int gridSize = std::max(slices_, stacks_);

View file

@@ -39,15 +39,19 @@ static GLBackend* INSTANCE{ nullptr };
static const char* GL_BACKEND_PROPERTY_NAME = "com.highfidelity.gl.backend";
BackendPointer GLBackend::createBackend() {
// The ATI memory info extension only exposes 'free memory' so we want to force it to
// cache the value as early as possible
getDedicatedMemory();
// FIXME provide a mechanism to override the backend for testing
// Where the gpuContext is initialized and where the TRUE Backend is created and assigned
auto version = QOpenGLContextWrapper::currentContextVersion();
std::shared_ptr<GLBackend> result;
if (!disableOpenGL45 && version >= 0x0405) {
qDebug() << "Using OpenGL 4.5 backend";
qCDebug(gpugllogging) << "Using OpenGL 4.5 backend";
result = std::make_shared<gpu::gl45::GL45Backend>();
} else {
qDebug() << "Using OpenGL 4.1 backend";
qCDebug(gpugllogging) << "Using OpenGL 4.1 backend";
result = std::make_shared<gpu::gl41::GL41Backend>();
}
result->initInput();
@@ -589,7 +593,23 @@ void GLBackend::releaseQuery(GLuint id) const {
_queriesTrash.push_back(id);
}
void GLBackend::queueLambda(const std::function<void()> lambda) const {
Lock lock(_trashMutex);
_lambdaQueue.push_back(lambda);
}
void GLBackend::recycle() const {
{
std::list<std::function<void()>> lambdasTrash;
{
Lock lock(_trashMutex);
std::swap(_lambdaQueue, lambdasTrash);
}
for (auto lambda : lambdasTrash) {
lambda();
}
}
{
std::vector<GLuint> ids;
std::list<std::pair<GLuint, Size>> buffersTrash;
@@ -679,6 +699,10 @@ void GLBackend::recycle() const {
glDeleteQueries((GLsizei)ids.size(), ids.data());
}
}
#ifndef THREADED_TEXTURE_TRANSFER
gl::GLTexture::_textureTransferHelper->process();
#endif
}
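The new _lambdaQueue drained at the top of recycle() lets other threads hand arbitrary GL work to the backend. A hedged usage sketch, assuming backend is a GLBackend reference and the object ID is illustrative:
// Sketch: defer deletion of a GL object until the backend's next recycle().
GLuint staleVao = 42; // hypothetical id whose owner was just destroyed
backend.queueLambda([staleVao] {
glDeleteVertexArrays(1, &staleVao);
});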
void GLBackend::setCameraCorrection(const Mat4& correction) {

View file

@@ -175,6 +175,7 @@ public:
virtual void releaseShader(GLuint id) const;
virtual void releaseProgram(GLuint id) const;
virtual void releaseQuery(GLuint id) const;
virtual void queueLambda(const std::function<void()> lambda) const;
protected:
@@ -197,6 +198,7 @@ protected:
mutable std::list<GLuint> _shadersTrash;
mutable std::list<GLuint> _programsTrash;
mutable std::list<GLuint> _queriesTrash;
mutable std::list<std::function<void()>> _lambdaQueue;
void renderPassTransfer(const Batch& batch);
void renderPassDraw(const Batch& batch);
@@ -365,6 +367,7 @@ protected:
typedef void (GLBackend::*CommandCall)(const Batch&, size_t);
static CommandCall _commandCalls[Batch::NUM_COMMANDS];
friend class GLState;
friend class GLTexture;
};
} }

View file

@@ -165,7 +165,7 @@ void GLBackend::do_setUniformBuffer(const Batch& batch, size_t paramOffset) {
_uniform._buffers[slot] = uniformBuffer;
(void) CHECK_GL_ERROR();
} else {
releaseResourceTexture(slot);
releaseUniformBuffer(slot);
return;
}
}

View file

@@ -60,6 +60,32 @@ bool checkGLErrorDebug(const char* name) {
#endif
}
gpu::Size getFreeDedicatedMemory() {
Size result { 0 };
static bool nvidiaMemorySupported { true };
static bool atiMemorySupported { true };
if (nvidiaMemorySupported) {
GLint nvGpuMemory { 0 };
glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &nvGpuMemory);
if (GL_NO_ERROR == glGetError()) {
result = KB_TO_BYTES(nvGpuMemory);
} else {
nvidiaMemorySupported = false;
}
} else if (atiMemorySupported) {
GLint atiGpuMemory[4];
// not really total memory, but close enough if called early enough in the application lifecycle
glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, atiGpuMemory);
if (GL_NO_ERROR == glGetError()) {
result = KB_TO_BYTES(atiGpuMemory[0]);
} else {
atiMemorySupported = false;
}
}
return result;
}
gpu::Size getDedicatedMemory() {
static Size dedicatedMemory { 0 };
static std::once_flag once;

View file

@@ -25,6 +25,7 @@ void serverWait();
void clientWait();
gpu::Size getDedicatedMemory();
gpu::Size getFreeDedicatedMemory();
ComparisonFunction comparisonFuncFromGL(GLenum func);
State::StencilOp stencilOpFromGL(GLenum stencilOp);
State::BlendOp blendOpFromGL(GLenum blendOp);

View file

@@ -12,9 +12,244 @@ using namespace gpu;
using namespace gpu::gl;
GLTexelFormat GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
GLTexelFormat texel = { GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE };
return texel;
GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
GLenum result = GL_RGBA8;
switch (dstFormat.getDimension()) {
case gpu::SCALAR: {
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
case gpu::SRGB:
case gpu::SRGBA:
switch (dstFormat.getType()) {
case gpu::UINT32:
result = GL_R32UI;
break;
case gpu::INT32:
result = GL_R32I;
break;
case gpu::NUINT32:
result = GL_R8;
break;
case gpu::NINT32:
result = GL_R8_SNORM;
break;
case gpu::FLOAT:
result = GL_R32F;
break;
case gpu::UINT16:
result = GL_R16UI;
break;
case gpu::INT16:
result = GL_R16I;
break;
case gpu::NUINT16:
result = GL_R16;
break;
case gpu::NINT16:
result = GL_R16_SNORM;
break;
case gpu::HALF:
result = GL_R16F;
break;
case gpu::UINT8:
result = GL_R8UI;
break;
case gpu::INT8:
result = GL_R8I;
break;
case gpu::NUINT8:
if ((dstFormat.getSemantic() == gpu::SRGB || dstFormat.getSemantic() == gpu::SRGBA)) {
result = GL_SLUMINANCE8;
} else {
result = GL_R8;
}
break;
case gpu::NINT8:
result = GL_R8_SNORM;
break;
default:
Q_UNREACHABLE();
break;
}
break;
case gpu::COMPRESSED_R:
result = GL_COMPRESSED_RED_RGTC1;
break;
case gpu::R11G11B10:
// the type should be float
result = GL_R11F_G11F_B10F;
break;
case gpu::DEPTH:
result = GL_DEPTH_COMPONENT32;
switch (dstFormat.getType()) {
case gpu::UINT32:
case gpu::INT32:
case gpu::NUINT32:
case gpu::NINT32:
result = GL_DEPTH_COMPONENT32;
break;
case gpu::FLOAT:
result = GL_DEPTH_COMPONENT32F;
break;
case gpu::UINT16:
case gpu::INT16:
case gpu::NUINT16:
case gpu::NINT16:
case gpu::HALF:
result = GL_DEPTH_COMPONENT16;
break;
case gpu::UINT8:
case gpu::INT8:
case gpu::NUINT8:
case gpu::NINT8:
result = GL_DEPTH_COMPONENT24;
break;
default:
Q_UNREACHABLE();
break;
}
break;
case gpu::DEPTH_STENCIL:
result = GL_DEPTH24_STENCIL8;
break;
default:
qCDebug(gpugllogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC2: {
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
result = GL_RG8;
break;
default:
qCDebug(gpugllogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC3: {
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
result = GL_RGB8;
break;
case gpu::SRGB:
case gpu::SRGBA:
result = GL_SRGB8; // standard 2.2 gamma correction color
break;
case gpu::COMPRESSED_RGB:
result = GL_COMPRESSED_RGB;
break;
case gpu::COMPRESSED_SRGB:
result = GL_COMPRESSED_SRGB;
break;
default:
qCDebug(gpugllogging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC4: {
switch (dstFormat.getSemantic()) {
case gpu::RGB:
result = GL_RGB8;
break;
case gpu::RGBA:
switch (dstFormat.getType()) {
case gpu::UINT32:
result = GL_RGBA32UI;
break;
case gpu::INT32:
result = GL_RGBA32I;
break;
case gpu::FLOAT:
result = GL_RGBA32F;
break;
case gpu::UINT16:
result = GL_RGBA16UI;
break;
case gpu::INT16:
result = GL_RGBA16I;
break;
case gpu::NUINT16:
result = GL_RGBA16;
break;
case gpu::NINT16:
result = GL_RGBA16_SNORM;
break;
case gpu::HALF:
result = GL_RGBA16F;
break;
case gpu::UINT8:
result = GL_RGBA8UI;
break;
case gpu::INT8:
result = GL_RGBA8I;
break;
case gpu::NUINT8:
result = GL_RGBA8;
break;
case gpu::NINT8:
result = GL_RGBA8_SNORM;
break;
case gpu::NUINT32:
case gpu::NINT32:
case gpu::NUM_TYPES: // quiet compiler
Q_UNREACHABLE();
}
break;
case gpu::SRGB:
result = GL_SRGB8;
break;
case gpu::SRGBA:
result = GL_SRGB8_ALPHA8; // standard 2.2 gamma correction color
break;
case gpu::COMPRESSED_RGBA:
result = GL_COMPRESSED_RGBA;
break;
case gpu::COMPRESSED_SRGBA:
result = GL_COMPRESSED_SRGB_ALPHA;
break;
// FIXME: We will want to support this later
/*
case gpu::COMPRESSED_BC3_RGBA:
result = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
break;
case gpu::COMPRESSED_BC3_SRGBA:
result = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
break;
case gpu::COMPRESSED_BC7_RGBA:
result = GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
break;
case gpu::COMPRESSED_BC7_SRGBA:
result = GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM;
break;
*/
default:
qCDebug(gpugllogging) << "Unknown combination of texel format";
}
break;
}
default:
qCDebug(gpugllogging) << "Unknown combination of texel format";
}
return result;
}
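A hedged usage sketch of the new signature, which returns a bare GLenum internal format rather than a full GLTexelFormat triple (the gpu::Element constructor order of dimension, type, semantic is assumed from its use elsewhere in the repo):
gpu::Element srgbaTexel(gpu::VEC4, gpu::NUINT8, gpu::SRGBA); // assumed constructor order
GLenum internalFormat = gl::GLTexelFormat::evalGLTexelFormatInternal(srgbaTexel);
// internalFormat == GL_SRGB8_ALPHA8, per the VEC4 / SRGBA branch above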
GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const Element& srcFormat) {

View file

@@ -21,7 +21,7 @@ public:
static GLTexelFormat evalGLTexelFormat(const Element& dstFormat) {
return evalGLTexelFormat(dstFormat, dstFormat);
}
static GLTexelFormat evalGLTexelFormatInternal(const Element& dstFormat);
static GLenum evalGLTexelFormatInternal(const Element& dstFormat);
static GLTexelFormat evalGLTexelFormat(const Element& dstFormat, const Element& srcFormat);
};

View file

@@ -17,11 +17,11 @@ using namespace gpu;
using namespace gpu::gl;
std::shared_ptr<GLTextureTransferHelper> GLTexture::_textureTransferHelper;
static std::map<uint16, size_t> _textureCountByMips;
static uint16 _currentMaxMipCount { 0 };
// FIXME placeholder for texture memory over-use
#define DEFAULT_MAX_MEMORY_MB 256
#define MIN_FREE_GPU_MEMORY_PERCENTAGE 0.25f
#define OVER_MEMORY_PRESSURE 2.0f
const GLenum GLTexture::CUBE_FACE_LAYOUT[6] = {
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
@@ -94,6 +94,7 @@ const std::vector<GLenum>& GLTexture::getFaceTargets(GLenum target) {
return faceTargets;
}
float GLTexture::getMemoryPressure() {
// Check for an explicit memory limit
auto availableTextureMemory = Texture::getAllowedGPUMemoryUsage();
@@ -102,15 +103,28 @@ float GLTexture::getMemoryPressure() {
if (!availableTextureMemory) {
auto totalGpuMemory = getDedicatedMemory();
// If no limit has been explicitly set, and the dedicated memory can't be determined,
// just use a fallback fixed value of 256 MB
if (!totalGpuMemory) {
// If we can't query the dedicated memory just use a fallback fixed value of 256 MB
totalGpuMemory = MB_TO_BYTES(DEFAULT_MAX_MEMORY_MB);
} else {
// Check the global free GPU memory
auto freeGpuMemory = getFreeDedicatedMemory();
if (freeGpuMemory) {
static gpu::Size lastFreeGpuMemory = 0;
auto freePercentage = (float)freeGpuMemory / (float)totalGpuMemory;
if (freeGpuMemory != lastFreeGpuMemory) {
lastFreeGpuMemory = freeGpuMemory;
if (freePercentage < MIN_FREE_GPU_MEMORY_PERCENTAGE) {
qDebug() << "Exceeded max GPU memory";
return OVER_MEMORY_PRESSURE;
}
}
}
}
// Allow 75% of all available GPU memory to be consumed by textures
// Allow 50% of all available GPU memory to be consumed by textures
// FIXME overly conservative?
availableTextureMemory = (totalGpuMemory >> 2) * 3;
availableTextureMemory = (totalGpuMemory >> 1);
}
// Return the consumed texture memory divided by the available texture memory.
@@ -118,80 +132,27 @@ float GLTexture::getMemoryPressure() {
return (float)consumedGpuMemory / (float)availableTextureMemory;
}
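Worked example with illustrative numbers: on a 2 GB card with no explicit limit, availableTextureMemory is now 1 GB (totalGpuMemory >> 1), so 1.5 GB of resident textures yields a pressure of 1.5 and flags over-budget, where the old 75% budget of 1.5 GB would have reported exactly 1.0.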
GLTexture::DownsampleSource::DownsampleSource(const std::weak_ptr<GLBackend>& backend, GLTexture* oldTexture) :
_backend(backend),
_size(oldTexture ? oldTexture->_size : 0),
_texture(oldTexture ? oldTexture->takeOwnership() : 0),
_minMip(oldTexture ? oldTexture->_minMip : 0),
_maxMip(oldTexture ? oldTexture->_maxMip : 0)
{
}
GLTexture::DownsampleSource::~DownsampleSource() {
if (_texture) {
auto backend = _backend.lock();
if (backend) {
backend->releaseTexture(_texture, _size);
}
}
}
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const gpu::Texture& texture, GLuint id, GLTexture* originalTexture, bool transferrable) :
GLObject(backend, texture, id),
_storageStamp(texture.getStamp()),
_target(getGLTextureType(texture)),
_maxMip(texture.maxMip()),
_minMip(texture.minMip()),
_virtualSize(texture.evalTotalSize()),
_transferrable(transferrable),
_downsampleSource(backend, originalTexture)
{
if (_transferrable) {
uint16 mipCount = usedMipLevels();
_currentMaxMipCount = std::max(_currentMaxMipCount, mipCount);
if (!_textureCountByMips.count(mipCount)) {
_textureCountByMips[mipCount] = 1;
} else {
++_textureCountByMips[mipCount];
}
}
Backend::incrementTextureGPUCount();
Backend::updateTextureGPUVirtualMemoryUsage(0, _virtualSize);
}
// Create the texture and allocate storage
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id, bool transferrable) :
GLTexture(backend, texture, id, nullptr, transferrable)
GLObject(backend, texture, id),
_source(texture.source()),
_storageStamp(texture.getStamp()),
_target(getGLTextureType(texture)),
_internalFormat(gl::GLTexelFormat::evalGLTexelFormatInternal(texture.getTexelFormat())),
_maxMip(texture.maxMip()),
_minMip(texture.minMip()),
_virtualSize(texture.evalTotalSize()),
_transferrable(transferrable)
{
// FIXME, do during allocation
//Backend::updateTextureGPUMemoryUsage(0, _size);
Backend::setGPUObject(texture, this);
}
// Create the texture and copy from the original higher resolution version
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const gpu::Texture& texture, GLuint id, GLTexture* originalTexture) :
GLTexture(backend, texture, id, originalTexture, originalTexture->_transferrable)
{
Q_ASSERT(_minMip >= originalTexture->_minMip);
// Set the GPU object last because that implicitly destroys the originalTexture object
auto strongBackend = _backend.lock();
strongBackend->recycle();
Backend::incrementTextureGPUCount();
Backend::updateTextureGPUVirtualMemoryUsage(0, _virtualSize);
Backend::setGPUObject(texture, this);
}
GLTexture::~GLTexture() {
if (_transferrable) {
uint16 mipCount = usedMipLevels();
Q_ASSERT(_textureCountByMips.count(mipCount));
auto& numTexturesForMipCount = _textureCountByMips[mipCount];
--numTexturesForMipCount;
if (0 == numTexturesForMipCount) {
_textureCountByMips.erase(mipCount);
if (mipCount == _currentMaxMipCount) {
_currentMaxMipCount = (_textureCountByMips.empty() ? 0 : _textureCountByMips.rbegin()->first);
}
}
}
if (_id) {
auto backend = _backend.lock();
if (backend) {
@@ -210,6 +171,28 @@ void GLTexture::createTexture() {
});
}
void GLTexture::withPreservedTexture(std::function<void()> f) const {
GLint boundTex = -1;
switch (_target) {
case GL_TEXTURE_2D:
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
break;
case GL_TEXTURE_CUBE_MAP:
glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
break;
default:
qFatal("Unsupported texture type");
}
(void)CHECK_GL_ERROR();
glBindTexture(_target, _texture);
f();
glBindTexture(_target, boundTex);
(void)CHECK_GL_ERROR();
}
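A short usage sketch of the hoisted helper (the sampler call is illustrative; GL41Texture::generateMips() later in this commit uses the same pattern):
// Sketch: touch this texture's GL state without disturbing the caller's binding.
withPreservedTexture([&] {
glTexParameteri(_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
});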
void GLTexture::setSize(GLuint size) const {
Backend::updateTextureGPUMemoryUsage(_size, size);
const_cast<GLuint&>(_size) = size;
@@ -223,20 +206,6 @@ bool GLTexture::isOutdated() const {
return GLSyncState::Idle == _syncState && _contentStamp < _gpuObject.getDataStamp();
}
bool GLTexture::isOverMaxMemory() const {
// FIXME switch to using the max mip count used from the previous frame
if (usedMipLevels() < _currentMaxMipCount) {
return false;
}
Q_ASSERT(usedMipLevels() == _currentMaxMipCount);
if (getMemoryPressure() < 1.0f) {
return false;
}
return true;
}
bool GLTexture::isReady() const {
// If we have an invalid texture, we're never ready
if (isInvalid()) {
@@ -257,11 +226,6 @@ void GLTexture::postTransfer() {
setSyncState(GLSyncState::Idle);
++_transferCount;
//// The public gltexture becomes available
//_id = _privateTexture;
_downsampleSource.reset();
// At this point the mip pixels have been loaded, we can notify the gpu texture to abandon it's memory
switch (_gpuObject.getType()) {
case Texture::TEX_2D:

View file

@@ -73,14 +73,7 @@ public:
return nullptr;
}
// Do we need to reduce texture memory usage?
if (object->isOverMaxMemory() && texturePointer->incremementMinMip()) {
// WARNING, this code path will essentially `delete this`,
// so no dereferencing of this instance should be done past this point
object = new GLTextureType(backend.shared_from_this(), texture, object);
_textureTransferHelper->transferTexture(texturePointer);
return nullptr;
}
((GLTexture*)object)->updateMips();
return object;
}
@@ -96,57 +89,38 @@ public:
} else {
object = Backend::getGPUObject<GLTextureType>(*texture);
}
if (!object) {
return 0;
}
GLuint result = object->_id;
if (!shouldSync) {
return object->_id;
}
// Don't return textures that are in transfer state
if (shouldSync) {
if ((object->getSyncState() != GLSyncState::Idle) ||
// Don't return transferrable textures that have never completed transfer
(!object->_transferrable || 0 != object->_transferCount)) {
// Will be either 0 or the original texture being downsampled.
result = object->_downsampleSource._texture;
}
if ((object->getSyncState() != GLSyncState::Idle) ||
// Don't return transferrable textures that have never completed transfer
(!object->_transferrable || 0 != object->_transferCount)) {
return 0;
}
return result;
}
// Used by derived classes and helpers to ensure the actual GL object exceeds the lifetime of `this`
GLuint takeOwnership() {
GLuint result = _id;
const_cast<GLuint&>(_id) = 0;
return result;
return object->_id;
}
~GLTexture();
const GLuint& _texture { _id };
const std::string _source;
const Stamp _storageStamp;
const GLenum _target;
const GLenum _internalFormat;
const uint16 _maxMip;
const uint16 _minMip;
uint16 _minMip;
const GLuint _virtualSize; // theoretical size as expected
Stamp _contentStamp { 0 };
const bool _transferrable;
Size _transferCount { 0 };
struct DownsampleSource {
using Pointer = std::shared_ptr<DownsampleSource>;
DownsampleSource(const std::weak_ptr<gl::GLBackend>& backend) : _backend(backend), _size(0), _texture(0), _minMip(0), _maxMip(0) {}
DownsampleSource(const std::weak_ptr<gl::GLBackend>& backend, GLTexture* originalTexture);
~DownsampleSource();
void reset() const { const_cast<GLuint&>(_texture) = 0; }
const std::weak_ptr<gl::GLBackend>& _backend;
const GLuint _size { 0 };
const GLuint _texture { 0 };
const uint16 _minMip { 0 };
const uint16 _maxMip { 0 };
} _downsampleSource;
GLuint size() const { return _size; }
GLSyncState getSyncState() const { return _syncState; }
@@ -160,9 +134,7 @@ public:
bool isReady() const;
// Execute any post-move operations that must occur only on the main thread
void postTransfer();
bool isOverMaxMemory() const;
virtual void postTransfer();
uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }
@@ -170,33 +142,34 @@ public:
static const GLenum CUBE_FACE_LAYOUT[6];
static const GLFilterMode FILTER_MODES[Sampler::NUM_FILTERS];
static const GLenum WRAP_MODES[Sampler::NUM_WRAP_MODES];
protected:
static const std::vector<GLenum>& getFaceTargets(GLenum textureType);
static GLenum getGLTextureType(const Texture& texture);
// Return a floating point value indicating how much of the allowed
// texture memory we are currently consuming. A value of 0 indicates
// no texture memory usage, while a value of 1 indicates all available / allowed memory
// is consumed. A value above 1 indicates that there is a problem.
static float getMemoryPressure();
protected:
static const std::vector<GLenum>& getFaceTargets(GLenum textureType);
static GLenum getGLTextureType(const Texture& texture);
const GLuint _size { 0 }; // true size as reported by the gl api
std::atomic<GLSyncState> _syncState { GLSyncState::Idle };
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id, bool transferrable);
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id, GLTexture* originalTexture);
void setSyncState(GLSyncState syncState) { _syncState = syncState; }
void createTexture();
virtual void updateMips() {}
virtual void allocateStorage() const = 0;
virtual void updateSize() const = 0;
virtual void syncSampler() const = 0;
virtual void generateMips() const = 0;
virtual void withPreservedTexture(std::function<void()> f) const = 0;
virtual void withPreservedTexture(std::function<void()> f) const;
protected:
void setSize(GLuint size) const;
@@ -207,9 +180,6 @@ protected:
virtual void finishTransfer();
private:
GLTexture(const std::weak_ptr<GLBackend>& backend, const gpu::Texture& gpuTexture, GLuint id, GLTexture* originalTexture, bool transferrable);
friend class GLTextureTransferHelper;
friend class GLBackend;
};

View file

@@ -35,6 +35,8 @@ GLTextureTransferHelper::GLTextureTransferHelper() {
initialize(true, QThread::LowPriority);
// Clean shutdown on UNIX, otherwise _canvas is freed early
connect(qApp, &QCoreApplication::aboutToQuit, [&] { terminate(); });
#else
initialize(false, QThread::LowPriority);
#endif
}
@@ -43,23 +45,18 @@ GLTextureTransferHelper::~GLTextureTransferHelper() {
if (isStillRunning()) {
terminate();
}
#else
terminate();
#endif
}
void GLTextureTransferHelper::transferTexture(const gpu::TexturePointer& texturePointer) {
GLTexture* object = Backend::getGPUObject<GLTexture>(*texturePointer);
#ifdef THREADED_TEXTURE_TRANSFER
Backend::incrementTextureGPUTransferCount();
object->setSyncState(GLSyncState::Pending);
Lock lock(_mutex);
_pendingTextures.push_back(texturePointer);
#else
for (object->startTransfer(); object->continueTransfer(); ) { }
object->finishTransfer();
object->_contentStamp = texturePointer->getDataStamp();
object->setSyncState(GLSyncState::Transferred);
#endif
}
void GLTextureTransferHelper::setup() {
@@ -100,13 +97,28 @@ void GLTextureTransferHelper::shutdown() {
#endif
}
void GLTextureTransferHelper::queueExecution(VoidLambda lambda) {
Lock lock(_mutex);
_pendingCommands.push_back(lambda);
}
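// Cap the number of texture transfers per pass so one long list cannot monopolize the transfer thread (see the loop below).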
#define MAX_TRANSFERS_PER_PASS 2
bool GLTextureTransferHelper::process() {
#ifdef THREADED_TEXTURE_TRANSFER
// Take any new textures off the queue
// Take any new textures or commands off the queue
VoidLambdaList pendingCommands;
TextureList newTransferTextures;
{
Lock lock(_mutex);
newTransferTextures.swap(_pendingTextures);
pendingCommands.swap(_pendingCommands);
}
if (!pendingCommands.empty()) {
for (auto command : pendingCommands) {
command();
}
glFlush();
}
if (!newTransferTextures.empty()) {
@@ -119,11 +131,16 @@ bool GLTextureTransferHelper::process() {
_transferringTextures.push_back(texturePointer);
_textureIterator = _transferringTextures.begin();
}
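// transfer smaller textures first: the list is kept sorted by ascending size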
_transferringTextures.sort([](const gpu::TexturePointer& a, const gpu::TexturePointer& b)->bool {
return a->getSize() < b->getSize();
});
}
// No transfers in progress, sleep
if (_transferringTextures.empty()) {
#ifdef THREADED_TEXTURE_TRANSFER
QThread::usleep(1);
#endif
return true;
}
@@ -135,7 +152,11 @@ bool GLTextureTransferHelper::process() {
qDebug() << "Texture list " << _transferringTextures.size();
}
for (auto _textureIterator = _transferringTextures.begin(); _textureIterator != _transferringTextures.end();) {
size_t transferCount = 0;
for (_textureIterator = _transferringTextures.begin(); _textureIterator != _transferringTextures.end();) {
if (++transferCount > MAX_TRANSFERS_PER_PASS) {
break;
}
auto texture = *_textureIterator;
GLTexture* gltexture = Backend::getGPUObject<GLTexture>(*texture);
if (gltexture->continueTransfer()) {
@@ -144,9 +165,9 @@ bool GLTextureTransferHelper::process() {
}
gltexture->finishTransfer();
glNamedFramebufferTexture(_readFramebuffer, GL_COLOR_ATTACHMENT0, gltexture->_id, 0);
glBlitNamedFramebuffer(_readFramebuffer, _drawFramebuffer, 0, 0, 1, 1, 0, 0, 1, 1, GL_COLOR_BUFFER_BIT, GL_NEAREST);
#ifdef THREADED_TEXTURE_TRANSFER
clientWait();
#endif
gltexture->_contentStamp = gltexture->_gpuObject.getDataStamp();
gltexture->updateSize();
gltexture->setSyncState(gpu::gl::GLSyncState::Transferred);
@@ -159,6 +180,7 @@ bool GLTextureTransferHelper::process() {
_textureIterator = _transferringTextures.erase(_textureIterator);
}
#ifdef THREADED_TEXTURE_TRANSFER
if (!_transferringTextures.empty()) {
// Don't saturate the GPU
clientWait();
@ -166,8 +188,7 @@ bool GLTextureTransferHelper::process() {
// Don't saturate the CPU
QThread::msleep(1);
}
#else
QThread::msleep(1);
#endif
return true;
}

View file

@@ -28,12 +28,14 @@ using TextureListIterator = TextureList::iterator;
class GLTextureTransferHelper : public GenericThread {
public:
using VoidLambda = std::function<void()>;
using VoidLambdaList = std::list<VoidLambda>;
using Pointer = std::shared_ptr<GLTextureTransferHelper>;
GLTextureTransferHelper();
~GLTextureTransferHelper();
void transferTexture(const gpu::TexturePointer& texturePointer);
void queueExecution(VoidLambda lambda);
protected:
void setup() override;
void shutdown() override;
bool process() override;
@@ -41,8 +43,15 @@ protected:
private:
#ifdef THREADED_TEXTURE_TRANSFER
::gl::OffscreenContext _context;
// Framebuffers / renderbuffers for forcing access to the texture on the transfer thread
GLuint _drawRenderbuffer { 0 };
GLuint _drawFramebuffer { 0 };
GLuint _readFramebuffer { 0 };
#endif
// A mutex for protecting items access on the render and transfer threads
Mutex _mutex;
// Commands that have been submitted for execution on the texture transfer thread
VoidLambdaList _pendingCommands;
// Textures that have been submitted for transfer
TextureList _pendingTextures;
// Textures currently in the transfer process
@@ -50,11 +59,6 @@ private:
TextureList _transferringTextures;
TextureListIterator _textureIterator;
// Framebuffers / renderbuffers for forcing access to the texture on the transfer thread
GLuint _drawRenderbuffer { 0 };
GLuint _drawFramebuffer { 0 };
GLuint _readFramebuffer { 0 };
#endif
};
} }

View file

@@ -43,7 +43,6 @@ public:
GLuint allocate();
public:
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer, bool transferrable);
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer, GL41Texture* original);
protected:
void transferMip(uint16_t mipLevel, uint8_t face) const;
@@ -52,7 +51,6 @@ public:
void updateSize() const override;
void syncSampler() const override;
void generateMips() const override;
void withPreservedTexture(std::function<void()> f) const override;
};

View file

@@ -40,30 +40,6 @@ GLTexture* GL41Backend::syncGPUObject(const TexturePointer& texture, bool transf
GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable) : GLTexture(backend, texture, allocate(), transferrable) {}
GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GL41Texture* original) : GLTexture(backend, texture, allocate(), original) {}
void GL41Texture::withPreservedTexture(std::function<void()> f) const {
GLint boundTex = -1;
switch (_target) {
case GL_TEXTURE_2D:
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
break;
case GL_TEXTURE_CUBE_MAP:
glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
break;
default:
qFatal("Unsupported texture type");
}
(void)CHECK_GL_ERROR();
glBindTexture(_target, _texture);
f();
glBindTexture(_target, boundTex);
(void)CHECK_GL_ERROR();
}
void GL41Texture::generateMips() const {
withPreservedTexture([&] {
glGenerateMipmap(_target);
@@ -147,35 +123,12 @@ void GL41Texture::startTransfer() {
glBindTexture(_target, _id);
(void)CHECK_GL_ERROR();
if (_downsampleSource._texture) {
GLuint fbo { 0 };
glGenFramebuffers(1, &fbo);
(void)CHECK_GL_ERROR();
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
(void)CHECK_GL_ERROR();
// Find the distance between the old min mip and the new one
uint16 mipOffset = _minMip - _downsampleSource._minMip;
for (uint16 i = _minMip; i <= _maxMip; ++i) {
uint16 targetMip = i - _minMip;
uint16 sourceMip = targetMip + mipOffset;
Vec3u dimensions = _gpuObject.evalMipDimensions(i);
for (GLenum target : getFaceTargets(_target)) {
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, _downsampleSource._texture, sourceMip);
(void)CHECK_GL_ERROR();
glCopyTexSubImage2D(target, targetMip, 0, 0, 0, 0, dimensions.x, dimensions.y);
(void)CHECK_GL_ERROR();
}
}
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);
} else {
// transfer pixels from each face
uint8_t numFaces = (Texture::TEX_CUBE == _gpuObject.getType()) ? CUBE_NUM_FACES : 1;
for (uint8_t f = 0; f < numFaces; f++) {
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
transferMip(i, f);
}
// transfer pixels from each face
uint8_t numFaces = (Texture::TEX_CUBE == _gpuObject.getType()) ? CUBE_NUM_FACES : 1;
for (uint8_t f = 0; f < numFaces; f++) {
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
transferMip(i, f);
}
}
}

View file

@@ -147,3 +147,8 @@ void GL45Backend::do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOf
_stats._DSNumAPIDrawcalls++;
(void)CHECK_GL_ERROR();
}
void GL45Backend::recycle() const {
Parent::recycle();
derezTextures();
}

View file

@@ -18,29 +18,6 @@ namespace gpu { namespace gl45 {
using namespace gpu::gl;
struct TransferState {
GLTexture& _texture;
GLenum _internalFormat { GL_RGBA8 };
GLTexelFormat _texelFormat;
uint8_t _face { 0 };
uint16_t _mipLevel { 0 };
uint32_t _bytesPerLine { 0 };
uint32_t _bytesPerPixel { 0 };
uint32_t _bytesPerPage { 0 };
GLuint _maxSparseLevel { 0 };
uvec3 _mipDimensions;
uvec3 _mipOffset;
uvec3 _pageSize;
const uint8_t* _srcPointer { nullptr };
uvec3 currentPageSize() const;
void updateSparse();
void updateMip();
void populatePage(std::vector<uint8_t>& dest);
bool increment();
TransferState(GLTexture& texture);
};
class GL45Backend : public GLBackend {
using Parent = GLBackend;
// Context Backend static interface required
@@ -53,14 +30,54 @@ public:
class GL45Texture : public GLTexture {
using Parent = GLTexture;
static GLuint allocate(const Texture& texture);
static const uint32_t DEFAULT_PAGE_DIMENSION = 128;
static const uint32_t DEFAULT_MAX_SPARSE_LEVEL = 0xFFFF;
public:
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable);
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLTexture* original);
~GL45Texture();
void postTransfer() override;
struct SparseInfo {
SparseInfo(GL45Texture& texture);
void maybeMakeSparse();
void update();
uvec3 getPageCounts(const uvec3& dimensions) const;
uint32_t getPageCount(const uvec3& dimensions) const;
GL45Texture& texture;
bool sparse { false };
uvec3 pageDimensions { DEFAULT_PAGE_DIMENSION };
GLuint maxSparseLevel { DEFAULT_MAX_SPARSE_LEVEL };
uint32_t maxPages { 0 };
uint32_t pageBytes { 0 };
GLint pageDimensionsIndex { 0 };
};
struct TransferState {
TransferState(GL45Texture& texture);
uvec3 currentPageSize() const;
void updateMip();
void populatePage(std::vector<uint8_t>& dest);
bool increment();
GL45Texture& texture;
GLTexelFormat texelFormat;
uint8_t face { 0 };
uint16_t mipLevel { 0 };
uint32_t bytesPerLine { 0 };
uint32_t bytesPerPixel { 0 };
uint32_t bytesPerPage { 0 };
uvec3 mipDimensions;
uvec3 mipOffset;
const uint8_t* srcPointer { nullptr };
};
protected:
void updateMips() override;
void stripToMip(uint16_t newMinMip);
void startTransfer() override;
bool continueTransfer() override;
void finishTransfer() override;
void incrementalTransfer(const uvec3& size, const gpu::Texture::PixelsPointer& mip, std::function<void(const ivec3& offset, const uvec3& size)> f) const;
void transferMip(uint16_t mipLevel, uint8_t face = 0) const;
void allocateMip(uint16_t mipLevel, uint8_t face = 0) const;
@@ -69,12 +86,20 @@ public:
void syncSampler() const override;
void generateMips() const override;
void withPreservedTexture(std::function<void()> f) const override;
void derez();
SparseInfo _sparseInfo;
TransferState _transferState;
uint32_t _allocatedPages { 0 };
uint32_t _lastMipAllocatedPages { 0 };
friend class GL45Backend;
};
protected:
void recycle() const override;
void derezTextures() const;
GLuint getFramebufferID(const FramebufferPointer& framebuffer) override;
GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) override;

View file

@@ -17,6 +17,7 @@
#include <glm/gtx/component_wise.hpp>
#include <QtCore/QThread>
#include <QtCore/QProcessEnvironment>
#include "../gl/GLTexelFormat.h"
@@ -24,79 +25,192 @@ using namespace gpu;
using namespace gpu::gl;
using namespace gpu::gl45;
#define SPARSE_TEXTURES 0
#ifdef Q_OS_WIN
static const QString DEBUG_FLAG("HIFI_DISABLE_SPARSE_TEXTURES");
static bool enableSparseTextures = !QProcessEnvironment::systemEnvironment().contains(DEBUG_FLAG);
#else
static bool enableSparseTextures = false;
#endif
// Allocate 1 MB of buffer space for paged transfers
#define DEFAULT_PAGE_BUFFER_SIZE (1024*1024)
#define DEFAULT_GL_PIXEL_ALIGNMENT 4
using GL45Texture = GL45Backend::GL45Texture;
static std::map<uint16_t, std::unordered_set<GL45Texture*>> texturesByMipCounts;
static Mutex texturesByMipCountsMutex;
using TextureTypeFormat = std::pair<GLenum, GLenum>;
std::map<TextureTypeFormat, std::vector<uvec3>> sparsePageDimensionsByFormat;
Mutex sparsePageDimensionsByFormatMutex;
static std::vector<uvec3> getPageDimensionsForFormat(const TextureTypeFormat& typeFormat) {
{
Lock lock(sparsePageDimensionsByFormatMutex);
if (sparsePageDimensionsByFormat.count(typeFormat)) {
return sparsePageDimensionsByFormat[typeFormat];
}
}
GLint count = 0;
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_NUM_VIRTUAL_PAGE_SIZES_ARB, 1, &count);
std::vector<uvec3> result;
if (count > 0) {
std::vector<GLint> x, y, z;
x.resize(count);
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_X_ARB, 1, &x[0]);
y.resize(count);
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_Y_ARB, 1, &y[0]);
z.resize(count);
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_Z_ARB, 1, &z[0]);
result.resize(count);
for (GLint i = 0; i < count; ++i) {
result[i] = uvec3(x[i], y[i], z[i]);
}
qCDebug(gpugl45logging) << "Got " << count << " page sizes";
}
{
Lock lock(sparsePageDimensionsByFormatMutex);
if (0 == sparsePageDimensionsByFormat.count(typeFormat)) {
sparsePageDimensionsByFormat[typeFormat] = result;
}
}
return result;
}
static std::vector<uvec3> getPageDimensionsForFormat(GLenum target, GLenum format) {
return getPageDimensionsForFormat({ target, format });
}
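A hedged usage sketch of the helper (return values are driver-dependent; the ones shown are illustrative):
// Sketch: query the sparse page sizes a driver supports for RGBA8 2D textures.
auto pageSizes = getPageDimensionsForFormat(GL_TEXTURE_2D, GL_RGBA8);
// e.g. { (256,128,1), (128,128,1) } on some drivers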
GLTexture* GL45Backend::syncGPUObject(const TexturePointer& texture, bool transfer) {
return GL45Texture::sync<GL45Texture>(*this, texture, transfer);
}
TransferState::TransferState(GLTexture& texture) : _texture(texture) {
using SparseInfo = GL45Backend::GL45Texture::SparseInfo;
SparseInfo::SparseInfo(GL45Texture& texture)
: texture(texture) {
}
void TransferState::updateSparse() {
glGetTextureParameterIuiv(_texture._id, GL_NUM_SPARSE_LEVELS_ARB, &_maxSparseLevel);
_internalFormat = gl::GLTexelFormat::evalGLTexelFormat(_texture._gpuObject.getTexelFormat(), _texture._gpuObject.getTexelFormat()).internalFormat;
ivec3 pageSize;
glGetInternalformativ(_texture._target, _internalFormat, GL_VIRTUAL_PAGE_SIZE_X_ARB, 1, &pageSize.x);
glGetInternalformativ(_texture._target, _internalFormat, GL_VIRTUAL_PAGE_SIZE_Y_ARB, 1, &pageSize.y);
glGetInternalformativ(_texture._target, _internalFormat, GL_VIRTUAL_PAGE_SIZE_Z_ARB, 1, &pageSize.z);
_pageSize = uvec3(pageSize);
}
void TransferState::updateMip() {
_mipDimensions = _texture._gpuObject.evalMipDimensions(_mipLevel);
_mipOffset = uvec3();
if (!_texture._gpuObject.isStoredMipFaceAvailable(_mipLevel, _face)) {
_srcPointer = nullptr;
void SparseInfo::maybeMakeSparse() {
// Don't enable sparse for objects with explicitly managed mip levels
if (!texture._gpuObject.isAutogenerateMips()) {
qCDebug(gpugl45logging) << "Don't enable sparse texture for explicitly generated mipmaps on texture " << texture._source.c_str();
return;
}
auto mip = _texture._gpuObject.accessStoredMipFace(_mipLevel, _face);
_texelFormat = gl::GLTexelFormat::evalGLTexelFormat(_texture._gpuObject.getTexelFormat(), mip->getFormat());
_srcPointer = mip->readData();
_bytesPerLine = (uint32_t)mip->getSize() / _mipDimensions.y;
_bytesPerPixel = _bytesPerLine / _mipDimensions.x;
const uvec3 dimensions = texture._gpuObject.getDimensions();
auto allowedPageDimensions = getPageDimensionsForFormat(texture._target, texture._internalFormat);
// In order to enable sparse the texture size must be an integer multiple of the page size
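    // e.g. a 1024x1024 texture is an exact fit for a (hypothetical) 256x128x1 page layout
    // (4x8 pages), while a 1000x1000 texture would match no page size and stay non-sparse.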
for (size_t i = 0; i < allowedPageDimensions.size(); ++i) {
pageDimensionsIndex = (uint32_t) i;
pageDimensions = allowedPageDimensions[i];
// Is this texture an integer multiple of page dimensions?
if (uvec3(0) == (dimensions % pageDimensions)) {
qCDebug(gpugl45logging) << "Enabling sparse for texture " << texture._source.c_str();
sparse = true;
break;
}
}
if (sparse) {
glTextureParameteri(texture._id, GL_TEXTURE_SPARSE_ARB, GL_TRUE);
glTextureParameteri(texture._id, GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, pageDimensionsIndex);
} else {
qCDebug(gpugl45logging) << "Size " << dimensions.x << " x " << dimensions.y <<
" is not supported by any sparse page size for texture" << texture._source.c_str();
}
}
// This can only be called after we've established our storage size
void SparseInfo::update() {
if (!sparse) {
return;
}
glGetTextureParameterIuiv(texture._id, GL_NUM_SPARSE_LEVELS_ARB, &maxSparseLevel);
pageBytes = texture._gpuObject.getTexelFormat().getSize();
pageBytes *= pageDimensions.x * pageDimensions.y * pageDimensions.z;
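    // pageBytes = texel size * page volume; e.g. 4-byte RGBA8 texels in an illustrative,
    // driver-dependent 256x128x1 page give 128 KiB per committed page.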
for (uint16_t mipLevel = 0; mipLevel <= maxSparseLevel; ++mipLevel) {
auto mipDimensions = texture._gpuObject.evalMipDimensions(mipLevel);
auto mipPageCount = getPageCount(mipDimensions);
maxPages += mipPageCount;
}
if (texture._target == GL_TEXTURE_CUBE_MAP) {
maxPages *= GLTexture::CUBE_NUM_FACES;
}
}
uvec3 SparseInfo::getPageCounts(const uvec3& dimensions) const {
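    // Component-wise ceil(dimensions / pageDimensions): the clamp term adds one page
    // along any axis with a partial remainder.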
auto result = (dimensions / pageDimensions) +
glm::clamp(dimensions % pageDimensions, glm::uvec3(0), glm::uvec3(1));
return result;
}
uint32_t SparseInfo::getPageCount(const uvec3& dimensions) const {
auto pageCounts = getPageCounts(dimensions);
return pageCounts.x * pageCounts.y * pageCounts.z;
}
using TransferState = GL45Backend::GL45Texture::TransferState;
TransferState::TransferState(GL45Texture& texture) : texture(texture) {
}
void TransferState::updateMip() {
mipDimensions = texture._gpuObject.evalMipDimensions(mipLevel);
mipOffset = uvec3();
if (!texture._gpuObject.isStoredMipFaceAvailable(mipLevel, face)) {
srcPointer = nullptr;
return;
}
auto mip = texture._gpuObject.accessStoredMipFace(mipLevel, face);
texelFormat = gl::GLTexelFormat::evalGLTexelFormat(texture._gpuObject.getTexelFormat(), mip->getFormat());
srcPointer = mip->readData();
bytesPerLine = (uint32_t)mip->getSize() / mipDimensions.y;
bytesPerPixel = bytesPerLine / mipDimensions.x;
}
bool TransferState::increment() {
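    // Advance the transfer cursor one page at a time: x fastest, then y, then z,
    // then the next mip level, then the next cube face. Returns false once every
    // face of every used mip has been visited.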
if ((_mipOffset.x + _pageSize.x) < _mipDimensions.x) {
_mipOffset.x += _pageSize.x;
const SparseInfo& sparse = texture._sparseInfo;
if ((mipOffset.x + sparse.pageDimensions.x) < mipDimensions.x) {
mipOffset.x += sparse.pageDimensions.x;
return true;
}
if ((_mipOffset.y + _pageSize.y) < _mipDimensions.y) {
_mipOffset.x = 0;
_mipOffset.y += _pageSize.y;
if ((mipOffset.y + sparse.pageDimensions.y) < mipDimensions.y) {
mipOffset.x = 0;
mipOffset.y += sparse.pageDimensions.y;
return true;
}
if (_mipOffset.z + _pageSize.z < _mipDimensions.z) {
_mipOffset.x = 0;
_mipOffset.y = 0;
++_mipOffset.z;
if (mipOffset.z + sparse.pageDimensions.z < mipDimensions.z) {
mipOffset.x = 0;
mipOffset.y = 0;
++mipOffset.z;
return true;
}
// Done with this mip? Move on to the next mip
if (_mipLevel + 1 < _texture.usedMipLevels()) {
_mipOffset = uvec3(0);
++_mipLevel;
if (mipLevel + 1 < texture.usedMipLevels()) {
mipOffset = uvec3(0);
++mipLevel;
updateMip();
return true;
}
uint8_t maxFace = (uint8_t)((_texture._target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
uint8_t nextFace = _face + 1;
uint8_t maxFace = (uint8_t)((texture._target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
uint8_t nextFace = face + 1;
// Done with this face? Move on to the next
if (nextFace < maxFace) {
++_face;
_mipOffset = uvec3(0);
_mipLevel = 0;
++face;
mipOffset = uvec3(0);
mipLevel = 0;
updateMip();
return true;
}
@ -104,10 +218,9 @@ bool TransferState::increment() {
return false;
}
#define DEFAULT_GL_PIXEL_ALIGNMENT 4
void TransferState::populatePage(std::vector<uint8_t>& buffer) {
uvec3 pageSize = currentPageSize();
auto bytesPerPageLine = _bytesPerPixel * pageSize.x;
auto bytesPerPageLine = bytesPerPixel * pageSize.x;
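    // Pad each row up to the default GL unpack alignment (4 bytes); glTextureSubImage2D
    // reads rows at this stride unless GL_UNPACK_ALIGNMENT says otherwise.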
if (0 != (bytesPerPageLine % DEFAULT_GL_PIXEL_ALIGNMENT)) {
bytesPerPageLine += DEFAULT_GL_PIXEL_ALIGNMENT - (bytesPerPageLine % DEFAULT_GL_PIXEL_ALIGNMENT);
assert(0 == (bytesPerPageLine % DEFAULT_GL_PIXEL_ALIGNMENT));
@ -118,14 +231,14 @@ void TransferState::populatePage(std::vector<uint8_t>& buffer) {
}
uint8_t* dst = &buffer[0];
for (uint32_t y = 0; y < pageSize.y; ++y) {
uint32_t srcOffset = (_bytesPerLine * (_mipOffset.y + y)) + (_bytesPerPixel * _mipOffset.x);
uint32_t srcOffset = (bytesPerLine * (mipOffset.y + y)) + (bytesPerPixel * mipOffset.x);
uint32_t dstOffset = bytesPerPageLine * y;
memcpy(dst + dstOffset, _srcPointer + srcOffset, pageSize.x * _bytesPerPixel);
memcpy(dst + dstOffset, srcPointer + srcOffset, pageSize.x * bytesPerPixel);
}
}
uvec3 TransferState::currentPageSize() const {
return glm::clamp(_mipDimensions - _mipOffset, uvec3(1), _pageSize);
return glm::clamp(mipDimensions - mipOffset, uvec3(1), texture._sparseInfo.pageDimensions);
}
GLuint GL45Texture::allocate(const Texture& texture) {
@ -139,25 +252,64 @@ GLuint GL45Backend::getTextureID(const TexturePointer& texture, bool transfer) {
}
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable)
: GLTexture(backend, texture, allocate(texture), transferrable), _transferState(*this) {
: GLTexture(backend, texture, allocate(texture), transferrable), _sparseInfo(*this), _transferState(*this) {
#if SPARSE_TEXTURES
if (transferrable) {
glTextureParameteri(_id, GL_TEXTURE_SPARSE_ARB, GL_TRUE);
if (enableSparseTextures && _transferrable) {
_sparseInfo.maybeMakeSparse();
}
#endif
}
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLTexture* original)
: GLTexture(backend, texture, allocate(texture), original), _transferState(*this) { }
GL45Texture::~GL45Texture() {
// FIXME do we need to explicitly deallocate the virtual memory here?
//if (_transferrable) {
// for (uint16_t mipLevel = 0; mipLevel < usedMipLevels(); ++i) {
// glTexturePageCommitmentEXT(_id, mipLevel, offset.x, offset.y, offset.z, size.x, size.y, size.z, GL_TRUE);
// }
//}
qCDebug(gpugl45logging) << "Destroying texture " << _id << " from source " << _source.c_str();
if (_sparseInfo.sparse) {
// Remove this texture from the candidate list of derezzable textures
{
auto mipLevels = usedMipLevels();
Lock lock(texturesByMipCountsMutex);
if (texturesByMipCounts.count(mipLevels)) {
auto& textures = texturesByMipCounts[mipLevels];
textures.erase(this);
if (textures.empty()) {
texturesByMipCounts.erase(mipLevels);
}
}
}
// Experimentation suggests that allocating sparse textures on one context/thread and deallocating
// them on another is buggy. So for sparse textures we need to queue a lambda with the deallocation
// calls to the transfer thread
auto id = _id;
// Set the class _id to 0 so we don't try to double delete
const_cast<GLuint&>(_id) = 0;
std::list<std::function<void()>> destructionFunctions;
uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
auto maxSparseMip = std::min<uint16_t>(_maxMip, _sparseInfo.maxSparseLevel);
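        // Mip levels past maxSparseLevel form the packed mip tail, which is always
        // resident and cannot be decommitted.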
for (uint16_t mipLevel = _minMip; mipLevel <= maxSparseMip; ++mipLevel) {
auto mipDimensions = _gpuObject.evalMipDimensions(mipLevel);
destructionFunctions.push_back([id, maxFace, mipLevel, mipDimensions] {
glTexturePageCommitmentEXT(id, mipLevel, 0, 0, 0, mipDimensions.x, mipDimensions.y, maxFace, GL_FALSE);
});
auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions) * maxFace;
assert(deallocatedPages <= _allocatedPages);
_allocatedPages -= deallocatedPages;
}
if (0 != _allocatedPages) {
qCWarning(gpugl45logging) << "Allocated pages remaining " << _id << " " << _allocatedPages;
}
auto size = _size;
_textureTransferHelper->queueExecution([id, size, destructionFunctions] {
for (auto function : destructionFunctions) {
function();
}
glDeleteTextures(1, &id);
Backend::decrementTextureGPUCount();
Backend::updateTextureGPUMemoryUsage(size, 0);
});
}
}
void GL45Texture::withPreservedTexture(std::function<void()> f) const {
@ -165,37 +317,38 @@ void GL45Texture::withPreservedTexture(std::function<void()> f) const {
}
void GL45Texture::generateMips() const {
qCDebug(gpugl45logging) << "Generating mipmaps for " << _source.c_str();
glGenerateTextureMipmap(_id);
(void)CHECK_GL_ERROR();
}
void GL45Texture::allocateStorage() const {
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat());
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, 0);
glTextureParameteri(_id, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
if (_gpuObject.getTexelFormat().isCompressed()) {
qFatal("Compressed textures not yet supported");
}
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, 0);
glTextureParameteri(_id, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
// Get the dimensions, accounting for the downgrade level
Vec3u dimensions = _gpuObject.evalMipDimensions(_minMip);
glTextureStorage2D(_id, usedMipLevels(), texelFormat.internalFormat, dimensions.x, dimensions.y);
glTextureStorage2D(_id, usedMipLevels(), _internalFormat, dimensions.x, dimensions.y);
(void)CHECK_GL_ERROR();
}
void GL45Texture::updateSize() const {
setSize(_virtualSize);
if (!_id) {
return;
}
if (_gpuObject.getTexelFormat().isCompressed()) {
qFatal("Compressed textures not yet supported");
}
if (_transferrable) {
setSize(_allocatedPages * _sparseInfo.pageBytes);
} else {
setSize(_virtualSize);
}
}
void GL45Texture::startTransfer() {
Parent::startTransfer();
_transferState.updateSparse();
_sparseInfo.update();
_transferState.updateMip();
}
@ -204,48 +357,65 @@ bool GL45Texture::continueTransfer() {
if (buffer.empty()) {
buffer.resize(DEFAULT_PAGE_BUFFER_SIZE);
}
uvec3 pageSize = _transferState.currentPageSize();
uvec3 offset = _transferState._mipOffset;
const uvec3 pageSize = _transferState.currentPageSize();
const uvec3& offset = _transferState.mipOffset;
#if SPARSE_TEXTURES
if (_transferState._mipLevel <= _transferState._maxSparseLevel) {
glTexturePageCommitmentEXT(_id, _transferState._mipLevel,
offset.x, offset.y, _transferState._face,
if (_sparseInfo.sparse && _transferState.mipLevel <= _sparseInfo.maxSparseLevel) {
if (_allocatedPages > _sparseInfo.maxPages) {
qCWarning(gpugl45logging) << "Exceeded max page allocation!";
}
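        // Commit physical backing for this page before any texels are uploaded into it.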
glTexturePageCommitmentEXT(_id, _transferState.mipLevel,
offset.x, offset.y, _transferState.face,
pageSize.x, pageSize.y, pageSize.z,
GL_TRUE);
++_allocatedPages;
}
#endif
if (_transferState._srcPointer) {
if (_transferState.srcPointer) {
// Transfer the mip data
_transferState.populatePage(buffer);
if (GL_TEXTURE_2D == _target) {
glTextureSubImage2D(_id, _transferState._mipLevel,
glTextureSubImage2D(_id, _transferState.mipLevel,
offset.x, offset.y,
pageSize.x, pageSize.y,
_transferState._texelFormat.format, _transferState._texelFormat.type, &buffer[0]);
_transferState.texelFormat.format, _transferState.texelFormat.type, &buffer[0]);
} else if (GL_TEXTURE_CUBE_MAP == _target) {
auto target = CUBE_FACE_LAYOUT[_transferState._face];
auto target = CUBE_FACE_LAYOUT[_transferState.face];
// DSA ARB does not work on AMD, so use EXT
// glTextureSubImage3D(_id, mipLevel, 0, 0, face, size.x, size.y, 1, texelFormat.format, texelFormat.type, mip->readData());
glTextureSubImage2DEXT(_id, target, _transferState._mipLevel,
glTextureSubImage2DEXT(_id, target, _transferState.mipLevel,
offset.x, offset.y,
pageSize.x, pageSize.y,
_transferState._texelFormat.format, _transferState._texelFormat.type, &buffer[0]);
_transferState.texelFormat.format, _transferState.texelFormat.type, &buffer[0]);
}
}
serverWait();
return _transferState.increment();
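    // When a mip boundary is crossed, sanity-check that the pages committed for the
    // finished mip match the expected page count for its dimensions.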
auto currentMip = _transferState.mipLevel;
auto result = _transferState.increment();
if (_sparseInfo.sparse && _transferState.mipLevel != currentMip && currentMip <= _sparseInfo.maxSparseLevel) {
auto mipDimensions = _gpuObject.evalMipDimensions(currentMip);
auto mipExpectedPages = _sparseInfo.getPageCount(mipDimensions);
auto newPages = _allocatedPages - _lastMipAllocatedPages;
if (newPages != mipExpectedPages) {
qCWarning(gpugl45logging) << "Unexpected page allocation size... " << newPages << " " << mipExpectedPages;
}
_lastMipAllocatedPages = _allocatedPages;
}
return result;
}
void GL45Backend::GL45Texture::syncSampler() const {
void GL45Texture::finishTransfer() {
Parent::finishTransfer();
}
void GL45Texture::syncSampler() const {
const Sampler& sampler = _gpuObject.getSampler();
const auto& fm = FILTER_MODES[sampler.getFilter()];
glTextureParameteri(_id, GL_TEXTURE_MIN_FILTER, fm.minFilter);
glTextureParameteri(_id, GL_TEXTURE_MAG_FILTER, fm.magFilter);
if (sampler.doComparison()) {
glTextureParameteri(_id, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
glTextureParameteri(_id, GL_TEXTURE_COMPARE_FUNC, COMPARISON_TO_GL[sampler.getComparisonFunction()]);
@ -257,9 +427,131 @@ void GL45Backend::GL45Texture::syncSampler() const {
glTextureParameteri(_id, GL_TEXTURE_WRAP_T, WRAP_MODES[sampler.getWrapModeV()]);
glTextureParameteri(_id, GL_TEXTURE_WRAP_R, WRAP_MODES[sampler.getWrapModeW()]);
glTextureParameterfv(_id, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, (uint16)sampler.getMipOffset());
auto baseMip = std::max<uint16_t>(sampler.getMipOffset(), _minMip);
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, baseMip);
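    // Clamping the base level to _minMip keeps the sampler from addressing mips that
    // stripToMip() has already decommitted.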
glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
glTextureParameterf(_id, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
}
void GL45Texture::postTransfer() {
Parent::postTransfer();
if (_sparseInfo.sparse) {
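        // Register as a candidate for derezzing: texturesByMipCounts buckets textures by
        // resident mip count so derezTextures() can strip the most detailed ones first.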
auto mipLevels = usedMipLevels();
if (mipLevels > 1 && _minMip < _sparseInfo.maxSparseLevel) {
Lock lock(texturesByMipCountsMutex);
texturesByMipCounts[mipLevels].insert(this);
}
}
}
void GL45Texture::stripToMip(uint16_t newMinMip) {
if (!_sparseInfo.sparse) {
return;
}
if (newMinMip < _minMip) {
qCWarning(gpugl45logging) << "Cannot decrease the min mip";
return;
}
if (newMinMip > _sparseInfo.maxSparseLevel) {
qCWarning(gpugl45logging) << "Cannot increase the min mip into the mip tail";
return;
}
auto mipLevels = usedMipLevels();
{
Lock lock(texturesByMipCountsMutex);
assert(0 != texturesByMipCounts.count(mipLevels));
assert(0 != texturesByMipCounts[mipLevels].count(this));
texturesByMipCounts[mipLevels].erase(this);
if (texturesByMipCounts[mipLevels].empty()) {
texturesByMipCounts.erase(mipLevels);
}
}
// If we weren't generating mips before, we need to do so now that we're stripping down mip levels.
if (!_gpuObject.isAutogenerateMips()) {
qCDebug(gpugl45logging) << "Force mip generation for texture";
glGenerateTextureMipmap(_id);
}
uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
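    // Decommit each stripped level's pages on the transfer thread, mirroring the
    // cross-thread deallocation constraint noted in the destructor.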
for (uint16_t mip = _minMip; mip < newMinMip; ++mip) {
auto id = _id;
auto mipDimensions = _gpuObject.evalMipDimensions(mip);
_textureTransferHelper->queueExecution([id, mip, mipDimensions, maxFace] {
glTexturePageCommitmentEXT(id, mip, 0, 0, 0, mipDimensions.x, mipDimensions.y, maxFace, GL_FALSE);
});
auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions) * maxFace;
assert(deallocatedPages < _allocatedPages);
_allocatedPages -= deallocatedPages;
}
_minMip = newMinMip;
// Re-sync the sampler to force access to the new mip level
syncSampler();
size_t oldSize = _size;
updateSize();
Q_ASSERT(_size < oldSize); // stripping mips must shrink the committed size
// Re-insert into the texture-by-mips map if appropriate
mipLevels = usedMipLevels();
if (_sparseInfo.sparse && mipLevels > 1 && _minMip < _sparseInfo.maxSparseLevel) {
Lock lock(texturesByMipCountsMutex);
texturesByMipCounts[mipLevels].insert(this);
}
}
void GL45Texture::updateMips() {
if (!_sparseInfo.sparse) {
return;
}
auto newMinMip = std::min<uint16_t>(_gpuObject.minMip(), _sparseInfo.maxSparseLevel);
if (_minMip < newMinMip) {
stripToMip(newMinMip);
}
}
void GL45Texture::derez() {
assert(_sparseInfo.sparse);
assert(_minMip < _sparseInfo.maxSparseLevel);
assert(_minMip < _maxMip);
assert(_transferrable);
stripToMip(_minMip + 1);
}
void GL45Backend::derezTextures() const {
if (GLTexture::getMemoryPressure() < 1.0f) {
return;
}
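    // Under memory pressure, pick one texture from the bucket with the most resident mip
    // levels and strip a single level from it; repeated calls ratchet usage down gradually.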
Lock lock(texturesByMipCountsMutex);
if (texturesByMipCounts.empty()) {
qCDebug(gpugl45logging) << "No available textures to derez";
return;
}
auto mipLevel = texturesByMipCounts.rbegin()->first;
if (mipLevel <= 1) {
qCDebug(gpugl45logging) << "Max mip levels " << mipLevel;
return;
}
qCDebug(gpugl45logging) << "Allowed texture memory " << Texture::getAllowedGPUMemoryUsage();
qCDebug(gpugl45logging) << "Used texture memory " << Context::getTextureGPUMemoryUsage();
GL45Texture* targetTexture = nullptr;
{
auto& textures = texturesByMipCounts[mipLevel];
assert(!textures.empty());
targetTexture = *textures.begin();
}
lock.unlock();
targetTexture->derez();
qCDebug(gpugl45logging) << "New Used texture memory " << Context::getTextureGPUMemoryUsage();
}

View file

@ -10,6 +10,7 @@
//
#include "Context.h"
#include "Frame.h"
#include "GPULogging.h"
using namespace gpu;
Context::CreateBackend Context::_createBackendCallback = nullptr;
@ -161,8 +162,9 @@ Backend::TransformCamera Backend::TransformCamera::getEyeCamera(int eye, const S
}
// Counters for Buffer and Texture usage in GPU/Context
std::atomic<uint32_t> Context::_bufferGPUCount{ 0 };
std::atomic<Buffer::Size> Context::_bufferGPUMemoryUsage{ 0 };
std::atomic<uint32_t> Context::_fenceCount { 0 };
std::atomic<uint32_t> Context::_bufferGPUCount { 0 };
std::atomic<Buffer::Size> Context::_bufferGPUMemoryUsage { 0 };
std::atomic<uint32_t> Context::_textureGPUCount{ 0 };
std::atomic<Texture::Size> Context::_textureGPUMemoryUsage{ 0 };
@ -170,10 +172,15 @@ std::atomic<Texture::Size> Context::_textureGPUVirtualMemoryUsage{ 0 };
std::atomic<uint32_t> Context::_textureGPUTransferCount{ 0 };
void Context::incrementBufferGPUCount() {
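    // Track a high-water mark and log whenever the live count reaches a new maximum;
    // the same pattern repeats below for fences, textures, and transfer counts.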
_bufferGPUCount++;
static std::atomic<uint32_t> max { 0 };
auto total = ++_bufferGPUCount;
if (total > max.load()) {
max = total;
qCDebug(gpulogging) << "New max GPU buffers " << total;
}
}
void Context::decrementBufferGPUCount() {
_bufferGPUCount--;
--_bufferGPUCount;
}
void Context::updateBufferGPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {
@ -186,12 +193,30 @@ void Context::updateBufferGPUMemoryUsage(Size prevObjectSize, Size newObjectSize
}
}
void Context::incrementFenceCount() {
static std::atomic<uint32_t> max { 0 };
auto total = ++_fenceCount;
if (total > max.load()) {
max = total;
qCDebug(gpulogging) << "New max Fences " << total;
}
}
void Context::decrementFenceCount() {
--_fenceCount;
}
void Context::incrementTextureGPUCount() {
_textureGPUCount++;
static std::atomic<uint32_t> max { 0 };
auto total = ++_textureGPUCount;
if (total > max.load()) {
max = total;
qCDebug(gpulogging) << "New max GPU textures " << total;
}
}
void Context::decrementTextureGPUCount() {
_textureGPUCount--;
--_textureGPUCount;
}
void Context::updateTextureGPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {
return;
@ -215,10 +240,15 @@ void Context::updateTextureGPUVirtualMemoryUsage(Size prevObjectSize, Size newOb
}
void Context::incrementTextureGPUTransferCount() {
_textureGPUTransferCount++;
static std::atomic<uint32_t> max { 0 };
auto total = ++_textureGPUTransferCount;
if (total > max.load()) {
max = total;
qCDebug(gpulogging) << "New max GPU textures transfers" << total;
}
}
void Context::decrementTextureGPUTransferCount() {
_textureGPUTransferCount--;
--_textureGPUTransferCount;
}
uint32_t Context::getBufferGPUCount() {

View file

@ -230,6 +230,9 @@ protected:
static void decrementBufferGPUCount();
static void updateBufferGPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
static void incrementFenceCount();
static void decrementFenceCount();
static void incrementTextureGPUCount();
static void decrementTextureGPUCount();
static void updateTextureGPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
@ -237,7 +240,9 @@ protected:
static void incrementTextureGPUTransferCount();
static void decrementTextureGPUTransferCount();
// Buffer and Texture Counters
// Buffer, Texture and Fence Counters
static std::atomic<uint32_t> _fenceCount;
static std::atomic<uint32_t> _bufferGPUCount;
static std::atomic<Size> _bufferGPUMemoryUsage;

View file

@ -32,6 +32,7 @@ Framebuffer* Framebuffer::create( const Format& colorBufferFormat, uint16 width,
auto framebuffer = Framebuffer::create();
auto colorTexture = TexturePointer(Texture::create2D(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
colorTexture->setSource("Framebuffer::colorTexture");
framebuffer->setRenderBuffer(0, colorTexture);
@ -42,8 +43,9 @@ Framebuffer* Framebuffer::create( const Format& colorBufferFormat, const Format&
auto framebuffer = Framebuffer::create();
auto colorTexture = TexturePointer(Texture::create2D(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
colorTexture->setSource("Framebuffer::colorTexture");
auto depthTexture = TexturePointer(Texture::create2D(depthStencilBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
depthTexture->setSource("Framebuffer::depthTexture");
framebuffer->setRenderBuffer(0, colorTexture);
framebuffer->setDepthStencilBuffer(depthTexture, depthStencilBufferFormat);
@ -55,7 +57,7 @@ Framebuffer* Framebuffer::createShadowmap(uint16 width) {
auto depthFormat = Element(gpu::SCALAR, gpu::FLOAT, gpu::DEPTH); // Depth32 texel format
auto depthTexture = TexturePointer(Texture::create2D(depthFormat, width, width));
depthTexture->setSource("Framebuffer::shadowMap");
Sampler::Desc samplerDesc;
samplerDesc._borderColor = glm::vec4(1.0f);
samplerDesc._wrapModeU = Sampler::WRAP_BORDER;
@ -113,30 +115,6 @@ void Framebuffer::updateSize(const TexturePointer& texture) {
}
}
void Framebuffer::resize(uint16 width, uint16 height, uint16 numSamples) {
if (width && height && numSamples && !isEmpty() && !isSwapchain()) {
if ((width != _width) || (height != _height) || (numSamples != _numSamples)) {
for (uint32 i = 0; i < _renderBuffers.size(); ++i) {
if (_renderBuffers[i]) {
_renderBuffers[i]._texture->resize2D(width, height, numSamples);
_numSamples = _renderBuffers[i]._texture->getNumSamples();
++_colorStamps[i];
}
}
if (_depthStencilBuffer) {
_depthStencilBuffer._texture->resize2D(width, height, numSamples);
_numSamples = _depthStencilBuffer._texture->getNumSamples();
++_depthStamp;
}
_width = width;
_height = height;
// _numSamples = numSamples;
}
}
}
uint16 Framebuffer::getWidth() const {
if (isSwapchain()) {
return getSwapchain()->getWidth();

View file

@ -130,9 +130,6 @@ public:
float getAspectRatio() const { return getWidth() / (float) getHeight(); }
// If not a swapchain canvas, resize can resize all the render buffers and depth stencil attached in one call
void resize( uint16 width, uint16 height, uint16 samples = 1 );
static const uint32 MAX_NUM_RENDER_BUFFERS = 8;
static uint32 getMaxNumRenderBuffers() { return MAX_NUM_RENDER_BUFFERS; }

View file

@ -389,6 +389,8 @@ public:
uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }
const std::string& source() const { return _source; }
void setSource(const std::string& source) { _source = source; }
bool setMinMip(uint16 newMinMip);
bool incremementMinMip(uint16 count = 1);
@ -450,6 +452,8 @@ public:
const GPUObjectPointer gpuObject {};
protected:
// Not strictly necessary, but incredibly useful for debugging
std::string _source;
std::unique_ptr< Storage > _storage;
Stamp _stamp = 0;

View file

@ -35,8 +35,7 @@
#include "ModelNetworkingLogging.h"
TextureCache::TextureCache() {
const qint64 TEXTURE_DEFAULT_UNUSED_MAX_SIZE = DEFAULT_UNUSED_MAX_SIZE;
setUnusedResourceCacheSize(TEXTURE_DEFAULT_UNUSED_MAX_SIZE);
setUnusedResourceCacheSize(0);
setObjectName("TextureCache");
// Expose enum Type to JS/QML via properties
@ -118,6 +117,7 @@ const unsigned char OPAQUE_BLACK[] = { 0x00, 0x00, 0x00, 0xFF };
const gpu::TexturePointer& TextureCache::getWhiteTexture() {
if (!_whiteTexture) {
_whiteTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
_whiteTexture->setSource("TextureCache::_whiteTexture");
_whiteTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_WHITE), OPAQUE_WHITE);
}
return _whiteTexture;
@ -126,6 +126,7 @@ const gpu::TexturePointer& TextureCache::getWhiteTexture() {
const gpu::TexturePointer& TextureCache::getGrayTexture() {
if (!_grayTexture) {
_grayTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
_grayTexture->setSource("TextureCache::_grayTexture");
_grayTexture->assignStoredMip(0, _grayTexture->getTexelFormat(), sizeof(OPAQUE_GRAY), OPAQUE_GRAY);
}
return _grayTexture;
@ -134,6 +135,7 @@ const gpu::TexturePointer& TextureCache::getGrayTexture() {
const gpu::TexturePointer& TextureCache::getBlueTexture() {
if (!_blueTexture) {
_blueTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
_blueTexture->setSource("TextureCache::_blueTexture");
_blueTexture->assignStoredMip(0, _blueTexture->getTexelFormat(), sizeof(OPAQUE_BLUE), OPAQUE_BLUE);
}
return _blueTexture;
@ -142,6 +144,7 @@ const gpu::TexturePointer& TextureCache::getBlueTexture() {
const gpu::TexturePointer& TextureCache::getBlackTexture() {
if (!_blackTexture) {
_blackTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
_blackTexture->setSource("TextureCache::_blackTexture");
_blackTexture->assignStoredMip(0, _blackTexture->getTexelFormat(), sizeof(OPAQUE_BLACK), OPAQUE_BLACK);
}
return _blackTexture;

View file

@ -176,7 +176,7 @@ void generateFaceMips(gpu::Texture* texture, QImage& image, gpu::Element formatM
#endif
}
gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImage, bool isLinear, bool doCompress, bool generateMips) {
gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips) {
bool validAlpha = false;
bool alphaAsMask = true;
QImage image = process2DImageColor(srcImage, validAlpha, alphaAsMask);
@ -189,7 +189,7 @@ gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImag
defineColorTexelFormats(formatGPU, formatMip, image, isLinear, doCompress);
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
auto usage = gpu::Texture::Usage::Builder().withColor();
if (validAlpha) {
usage.withAlpha();
@ -210,20 +210,20 @@ gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImag
}
gpu::Texture* TextureUsage::create2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, false, false, true);
return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true);
}
gpu::Texture* TextureUsage::createAlbedoTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, false, true, true);
return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}
gpu::Texture* TextureUsage::createEmissiveTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, false, true, true);
return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}
gpu::Texture* TextureUsage::createLightmapTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, false, true, true);
return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}
@ -241,6 +241,7 @@ gpu::Texture* TextureUsage::createNormalTextureFromNormalImage(const QImage& src
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB);
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
generateMips(theTexture, image, formatMip);
}
@ -324,6 +325,7 @@ gpu::Texture* TextureUsage::createNormalTextureFromBumpImage(const QImage& srcIm
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB);
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
generateMips(theTexture, image, formatMip);
}
@ -355,6 +357,7 @@ gpu::Texture* TextureUsage::createRoughnessTextureFromImage(const QImage& srcIma
gpu::Element formatMip = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::RGB);
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
generateMips(theTexture, image, formatMip);
@ -392,6 +395,7 @@ gpu::Texture* TextureUsage::createRoughnessTextureFromGlossImage(const QImage& s
gpu::Element formatMip = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::RGB);
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
generateMips(theTexture, image, formatMip);
@ -426,6 +430,7 @@ gpu::Texture* TextureUsage::createMetallicTextureFromImage(const QImage& srcImag
gpu::Element formatMip = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::RGB);
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
generateMips(theTexture, image, formatMip);
@ -737,6 +742,7 @@ gpu::Texture* TextureUsage::processCubeTextureColorFromImage(const QImage& srcIm
// If the 6 faces have been created go on and define the true Texture
if (faces.size() == gpu::Texture::NUM_FACES_PER_TYPE[gpu::Texture::TEX_CUBE]) {
theTexture = gpu::Texture::createCube(formatGPU, faces[0].width(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
theTexture->setSource(srcImageName);
int f = 0;
for (auto& face : faces) {
theTexture->assignStoredMipFace(0, formatMip, face.byteCount(), face.constBits(), f);

View file

@ -47,7 +47,7 @@ public:
static const QImage process2DImageColor(const QImage& srcImage, bool& validAlpha, bool& alphaAsMask);
static void defineColorTexelFormats(gpu::Element& formatGPU, gpu::Element& formatMip,
const QImage& srcImage, bool isLinear, bool doCompress);
static gpu::Texture* process2DTextureColorFromImage(const QImage& srcImage, bool isLinear, bool doCompress, bool generateMips);
static gpu::Texture* process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips);
static gpu::Texture* processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool generateIrradiance);
};

View file

@ -765,7 +765,7 @@ void AddressManager::handleShareableNameAPIResponse(QNetworkReply& requestReply)
}
if (shareableNameChanged) {
qDebug() << "AddressManager shareable name changed to" << _shareablePlaceName;
qCDebug(networking) << "AddressManager shareable name changed to" << _shareablePlaceName;
}
}
}

View file

@ -60,7 +60,7 @@ void AssetClient::init() {
cache->setMaximumCacheSize(MAXIMUM_CACHE_SIZE);
cache->setCacheDirectory(cachePath);
networkAccessManager.setCache(cache);
qDebug() << "ResourceManager disk cache setup at" << cachePath
qInfo() << "ResourceManager disk cache setup at" << cachePath
<< "(size:" << MAXIMUM_CACHE_SIZE / BYTES_PER_GIGABYTES << "GB)";
}
}
@ -91,7 +91,7 @@ void AssetClient::clearCache() {
}
if (auto cache = NetworkAccessManager::getInstance().cache()) {
qDebug() << "AssetClient::clearCache(): Clearing disk cache.";
qInfo() << "AssetClient::clearCache(): Clearing disk cache.";
cache->clear();
} else {
qCWarning(asset_client) << "No disk cache to clear.";

Some files were not shown because too many files have changed in this diff.