Merge pull request #11718 from kencooke/audio-louder-mastergain
Adjustable master avatar volume
Commit 0678a7c9df
9 changed files with 81 additions and 58 deletions
@@ -29,6 +29,7 @@
 #include <UUID.h>
 #include <CPUDetect.h>

+#include "AudioLogging.h"
 #include "AudioHelpers.h"
 #include "AudioRingBuffer.h"
 #include "AudioMixerClientData.h"
@@ -130,7 +131,7 @@ void AudioMixer::queueReplicatedAudioPacket(QSharedPointer<ReceivedMessage> mess
     PacketType rewrittenType = PacketTypeEnum::getReplicatedPacketMapping().key(message->getType());

     if (rewrittenType == PacketType::Unknown) {
-        qDebug() << "Cannot unwrap replicated packet type not present in REPLICATED_PACKET_WRAPPING";
+        qCDebug(audio) << "Cannot unwrap replicated packet type not present in REPLICATED_PACKET_WRAPPING";
     }

     auto replicatedMessage = QSharedPointer<ReceivedMessage>::create(audioData, rewrittenType,
@@ -345,7 +346,7 @@ void AudioMixer::sendStatsPacket() {

 void AudioMixer::run() {

-    qDebug() << "Waiting for connection to domain to request settings from domain-server.";
+    qCDebug(audio) << "Waiting for connection to domain to request settings from domain-server.";

     // wait until we have the domain-server settings, otherwise we bail
     DomainHandler& domainHandler = DependencyManager::get<NodeList>()->getDomainHandler();
@@ -502,14 +503,14 @@ void AudioMixer::throttle(std::chrono::microseconds duration, int frame) {
             int proportionalTerm = 1 + (_trailingMixRatio - TARGET) / 0.1f;
             _throttlingRatio += THROTTLE_RATE * proportionalTerm;
             _throttlingRatio = std::min(_throttlingRatio, 1.0f);
-            qDebug("audio-mixer is struggling (%f mix/sleep) - throttling %f of streams",
-                    (double)_trailingMixRatio, (double)_throttlingRatio);
+            qCDebug(audio) << "audio-mixer is struggling (" << _trailingMixRatio << "mix/sleep) - throttling"
+                << _throttlingRatio << "of streams";
         } else if (_throttlingRatio > 0.0f && _trailingMixRatio <= BACKOFF_TARGET) {
             int proportionalTerm = 1 + (TARGET - _trailingMixRatio) / 0.2f;
             _throttlingRatio -= BACKOFF_RATE * proportionalTerm;
             _throttlingRatio = std::max(_throttlingRatio, 0.0f);
-            qDebug("audio-mixer is recovering (%f mix/sleep) - throttling %f of streams",
-                    (double)_trailingMixRatio, (double)_throttlingRatio);
+            qCDebug(audio) << "audio-mixer is recovering (" << _trailingMixRatio << "mix/sleep) - throttling"
+                << _throttlingRatio << "of streams";
         }
     }
 }
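As an aside on the throttle code in this hunk (only the logging changes here), the adjustment is a simple proportional controller: the further the trailing mix ratio sits from the target, the larger the step applied to the throttling ratio. A minimal standalone sketch of that step, with TARGET, BACKOFF_TARGET, THROTTLE_RATE and BACKOFF_RATE as assumed placeholder values (the real constants are defined elsewhere in AudioMixer.cpp):

    #include <algorithm>
    #include <cstdio>

    // Assumed placeholder values; the real constants live elsewhere in AudioMixer.cpp.
    constexpr float TARGET = 0.9f;
    constexpr float BACKOFF_TARGET = 0.44f;
    constexpr float THROTTLE_RATE = 0.05f;
    constexpr float BACKOFF_RATE = 0.025f;

    // Mirrors the proportional step above: a larger error produces a larger step.
    float stepThrottle(float throttlingRatio, float trailingMixRatio) {
        if (trailingMixRatio > TARGET) {
            int proportionalTerm = 1 + (trailingMixRatio - TARGET) / 0.1f;
            throttlingRatio = std::min(throttlingRatio + THROTTLE_RATE * proportionalTerm, 1.0f);
        } else if (throttlingRatio > 0.0f && trailingMixRatio <= BACKOFF_TARGET) {
            int proportionalTerm = 1 + (TARGET - trailingMixRatio) / 0.2f;
            throttlingRatio = std::max(throttlingRatio - BACKOFF_RATE * proportionalTerm, 0.0f);
        }
        return throttlingRatio;
    }

    int main() {
        // e.g. mix ratio 1.1 (well over target): proportionalTerm = 3, so the ratio jumps by 0.15
        std::printf("%f\n", stepThrottle(0.0f, 1.1f));
    }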
@@ -534,7 +535,7 @@ void AudioMixer::clearDomainSettings() {
 }

 void AudioMixer::parseSettingsObject(const QJsonObject& settingsObject) {
-    qDebug() << "AVX2 Support:" << (cpuSupportsAVX2() ? "enabled" : "disabled");
+    qCDebug(audio) << "AVX2 Support:" << (cpuSupportsAVX2() ? "enabled" : "disabled");

     if (settingsObject.contains(AUDIO_THREADING_GROUP_KEY)) {
         QJsonObject audioThreadingGroupObject = settingsObject[AUDIO_THREADING_GROUP_KEY].toObject();
@@ -557,7 +558,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject& settingsObject) {
         const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "dynamic_jitter_buffer";
         bool enableDynamicJitterBuffer = audioBufferGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
         if (enableDynamicJitterBuffer) {
-            qDebug() << "Enabling dynamic jitter buffers.";
+            qCDebug(audio) << "Enabling dynamic jitter buffers.";

            bool ok;
            const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "static_desired_jitter_buffer_frames";
@@ -565,9 +566,9 @@ void AudioMixer::parseSettingsObject(const QJsonObject& settingsObject) {
             if (!ok) {
                 _numStaticJitterFrames = InboundAudioStream::DEFAULT_STATIC_JITTER_FRAMES;
             }
-            qDebug() << "Static desired jitter buffer frames:" << _numStaticJitterFrames;
+            qCDebug(audio) << "Static desired jitter buffer frames:" << _numStaticJitterFrames;
         } else {
-            qDebug() << "Disabling dynamic jitter buffers.";
+            qCDebug(audio) << "Disabling dynamic jitter buffers.";
             _numStaticJitterFrames = DISABLE_STATIC_JITTER_FRAMES;
         }

@@ -621,7 +622,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject& settingsObject) {
         if (audioEnvGroupObject[CODEC_PREFERENCE_ORDER].isString()) {
             QString codecPreferenceOrder = audioEnvGroupObject[CODEC_PREFERENCE_ORDER].toString();
             _codecPreferenceOrder = codecPreferenceOrder.split(",");
-            qDebug() << "Codec preference order changed to" << _codecPreferenceOrder;
+            qCDebug(audio) << "Codec preference order changed to" << _codecPreferenceOrder;
         }

         const QString ATTENATION_PER_DOULING_IN_DISTANCE = "attenuation_per_doubling_in_distance";
@@ -630,7 +631,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject& settingsObject) {
         float attenuation = audioEnvGroupObject[ATTENATION_PER_DOULING_IN_DISTANCE].toString().toFloat(&ok);
         if (ok) {
             _attenuationPerDoublingInDistance = attenuation;
-            qDebug() << "Attenuation per doubling in distance changed to" << _attenuationPerDoublingInDistance;
+            qCDebug(audio) << "Attenuation per doubling in distance changed to" << _attenuationPerDoublingInDistance;
         }
     }

@@ -640,7 +641,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject& settingsObject) {
         float noiseMutingThreshold = audioEnvGroupObject[NOISE_MUTING_THRESHOLD].toString().toFloat(&ok);
         if (ok) {
             _noiseMutingThreshold = noiseMutingThreshold;
-            qDebug() << "Noise muting threshold changed to" << _noiseMutingThreshold;
+            qCDebug(audio) << "Noise muting threshold changed to" << _noiseMutingThreshold;
         }
     }

@@ -680,8 +681,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject& settingsObject) {
                 glm::vec3 dimensions(xMax - xMin, yMax - yMin, zMax - zMin);
                 AABox zoneAABox(corner, dimensions);
                 _audioZones.insert(zone, zoneAABox);
-                qDebug() << "Added zone:" << zone << "(corner:" << corner
-                    << ", dimensions:" << dimensions << ")";
+                qCDebug(audio) << "Added zone:" << zone << "(corner:" << corner << ", dimensions:" << dimensions << ")";
             }
         }
     }
@@ -712,7 +712,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject& settingsObject) {
                 _audioZones.contains(settings.source) && _audioZones.contains(settings.listener)) {

                 _zoneSettings.push_back(settings);
-                qDebug() << "Added Coefficient:" << settings.source << settings.listener << settings.coefficient;
+                qCDebug(audio) << "Added Coefficient:" << settings.source << settings.listener << settings.coefficient;
             }
         }
     }
@@ -745,7 +745,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject& settingsObject) {

             _zoneReverbSettings.push_back(settings);

-            qDebug() << "Added Reverb:" << zone << reverbTime << wetLevel;
+            qCDebug(audio) << "Added Reverb:" << zone << reverbTime << wetLevel;
         }
     }
 }

@@ -19,6 +19,7 @@

 #include "InjectedAudioStream.h"

+#include "AudioLogging.h"
 #include "AudioHelpers.h"
 #include "AudioMixer.h"
 #include "AudioMixerClientData.h"
@@ -132,7 +133,7 @@ void AudioMixerClientData::optionallyReplicatePacket(ReceivedMessage& message, c
         if (PacketTypeEnum::getReplicatedPacketMapping().key(message.getType()) != PacketType::Unknown) {
             mirroredType = message.getType();
         } else {
-            qDebug() << "Packet passed to optionallyReplicatePacket was not a replicatable type - returning";
+            qCDebug(audio) << "Packet passed to optionallyReplicatePacket was not a replicatable type - returning";
             return;
         }
     }
@@ -189,8 +190,16 @@ void AudioMixerClientData::parsePerAvatarGainSet(ReceivedMessage& message, const
     uint8_t packedGain;
     message.readPrimitive(&packedGain);
     float gain = unpackFloatGainFromByte(packedGain);
-    hrtfForStream(avatarUuid, QUuid()).setGainAdjustment(gain);
-    qDebug() << "Setting gain adjustment for hrtf[" << uuid << "][" << avatarUuid << "] to " << gain;
+
+    if (avatarUuid.isNull()) {
+        // set the MASTER avatar gain
+        setMasterAvatarGain(gain);
+        qCDebug(audio) << "Setting MASTER avatar gain for " << uuid << " to " << gain;
+    } else {
+        // set the per-source avatar gain
+        hrtfForStream(avatarUuid, QUuid()).setGainAdjustment(gain);
+        qCDebug(audio) << "Setting avatar gain adjustment for hrtf[" << uuid << "][" << avatarUuid << "] to " << gain;
+    }
 }

 void AudioMixerClientData::parseNodeIgnoreRequest(QSharedPointer<ReceivedMessage> message, const SharedNodePointer& node) {
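This hunk is the heart of the feature on the mixer side: a gain packet whose avatar UUID is null is now interpreted as the listener's master avatar gain, while a non-null UUID still adjusts only the matching per-avatar HRTF stream. A minimal standalone sketch of that dispatch, with a simplified stand-in class (GainState, applyGainPacket and gainFor are illustrative names, not the real AudioMixerClientData members):

    #include <QHash>
    #include <QUuid>

    // Simplified stand-in for the mixer's per-listener gain handling (not the real class).
    class GainState {
    public:
        void applyGainPacket(const QUuid& avatarUuid, float gain) {
            if (avatarUuid.isNull()) {
                _masterAvatarGain = gain;           // null UUID -> master gain for all avatar streams
            } else {
                _perAvatarGain[avatarUuid] = gain;  // otherwise -> per-source adjustment only
            }
        }
        float gainFor(const QUuid& avatarUuid) const {
            // the master gain multiplies whatever per-avatar adjustment exists
            return _masterAvatarGain * _perAvatarGain.value(avatarUuid, 1.0f);
        }
    private:
        float _masterAvatarGain { 1.0f };
        QHash<QUuid, float> _perAvatarGain;
    };

In the real code the per-avatar value lives in the stream's HRTF gain adjustment and the master value in the new _masterAvatarGain member, but the net effect is the same multiplicative combination per avatar stream.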
@@ -276,7 +285,7 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {

            auto avatarAudioStream = new AvatarAudioStream(isStereo, AudioMixer::getStaticJitterFrames());
            avatarAudioStream->setupCodec(_codec, _selectedCodecName, AudioConstants::MONO);
-           qDebug() << "creating new AvatarAudioStream... codec:" << _selectedCodecName;
+           qCDebug(audio) << "creating new AvatarAudioStream... codec:" << _selectedCodecName;

            connect(avatarAudioStream, &InboundAudioStream::mismatchedAudioCodec,
                    this, &AudioMixerClientData::handleMismatchAudioFormat);
@@ -315,7 +324,7 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {

 #if INJECTORS_SUPPORT_CODECS
            injectorStream->setupCodec(_codec, _selectedCodecName, isStereo ? AudioConstants::STEREO : AudioConstants::MONO);
-           qDebug() << "creating new injectorStream... codec:" << _selectedCodecName;
+           qCDebug(audio) << "creating new injectorStream... codec:" << _selectedCodecName;
 #endif

            auto emplaced = _audioStreams.emplace(
@@ -339,8 +348,8 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
        auto parseResult = matchingStream->parseData(message);

        if (matchingStream->getOverflowCount() > overflowBefore) {
-           qDebug() << "Just overflowed on stream from" << message.getSourceID() << "at" << message.getSenderSockAddr();
-           qDebug() << "This stream is for" << (isMicStream ? "microphone audio" : "injected audio");
+           qCDebug(audio) << "Just overflowed on stream from" << message.getSourceID() << "at" << message.getSenderSockAddr();
+           qCDebug(audio) << "This stream is for" << (isMicStream ? "microphone audio" : "injected audio");
        }

        return parseResult;
@@ -689,7 +698,7 @@ void AudioMixerClientData::setupCodecForReplicatedAgent(QSharedPointer<ReceivedM
    auto codecString = message->readString();

    if (codecString != _selectedCodecName) {
-       qDebug() << "Manually setting codec for replicated agent" << uuidStringWithoutCurlyBraces(getNodeID())
+       qCDebug(audio) << "Manually setting codec for replicated agent" << uuidStringWithoutCurlyBraces(getNodeID())
            << "-" << codecString;

        const std::pair<QString, CodecPluginPointer> codec = AudioMixer::negotiateCodec({ codecString });

@@ -83,6 +83,9 @@ public:
     // uses randomization to have the AudioMixer send a stats packet to this node around every second
     bool shouldSendStats(int frameNumber);

+    float getMasterAvatarGain() const { return _masterAvatarGain; }
+    void setMasterAvatarGain(float gain) { _masterAvatarGain = gain; }
+
     AudioLimiter audioLimiter;

     void setupCodec(CodecPluginPointer codec, const QString& codecName);
@@ -175,6 +178,8 @@ private:

     int _frameToSendStats { 0 };

+    float _masterAvatarGain { 1.0f }; // per-listener mixing gain, applied only to avatars
+
     CodecPluginPointer _codec;
     QString _selectedCodecName;
     Encoder* _encoder{ nullptr }; // for outbound mixed stream

@@ -48,8 +48,8 @@ void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData&
 // mix helpers
 inline float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
         const glm::vec3& relativePosition);
-inline float computeGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
-        const glm::vec3& relativePosition, bool isEcho);
+inline float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
+        const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho);
 inline float computeAzimuth(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
         const glm::vec3& relativePosition);

@@ -266,7 +266,7 @@ void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, const QU
    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    float distance = glm::max(glm::length(relativePosition), EPSILON);
-   float gain = computeGain(listeningNodeStream, streamToAdd, relativePosition, isEcho);
+   float gain = computeGain(listenerNodeData, listeningNodeStream, streamToAdd, relativePosition, isEcho);
    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);
    const int HRTF_DATASET_INDEX = 1;

@@ -484,10 +484,12 @@ float approximateGain(const AvatarAudioStream& listeningNodeStream, const Positi
     // when throttling, as close streams are expected to be heard by a user
     float distance = glm::length(relativePosition);
     return gain / distance;
+
+    // avatar: skip master gain - it is constant for all streams
 }

-float computeGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
-        const glm::vec3& relativePosition, bool isEcho) {
+float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
+        const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho) {
     float gain = 1.0f;

     // injector: apply attenuation
@@ -507,6 +509,9 @@ float computeGain(const AvatarAudioStream& listeningNodeStream, const Positional
         float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));

         gain *= offAxisCoefficient;
+
+        // apply master gain, only to avatars
+        gain *= listenerNodeData.getMasterAvatarGain();
     }

     auto& audioZones = AudioMixer::getAudioZones();

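Putting the two hunks above together: the gain the mixer applies to an avatar stream is the product of several independent factors, and the new master avatar gain is just one more per-listener multiplier, applied only inside the avatar branch and never to injected streams. A rough sketch of that composition (illustrative names, not the literal computeGain body):

    // Rough sketch of how an avatar stream's gain now composes; not the literal computeGain body.
    float sketchAvatarStreamGain(float offAxisCoefficient,   // source directivity factor
                                 float distanceAttenuation,  // whatever the zone/distance rules produce
                                 float masterAvatarGain) {   // new per-listener factor, 1.0f by default
        float gain = 1.0f;
        gain *= offAxisCoefficient;
        gain *= masterAvatarGain;      // applied exactly once per avatar stream, never to injectors
        gain *= distanceAttenuation;
        return gain;
    }

Because the master gain is the same for every avatar stream heard by a listener, approximateGain (used only to rank streams when throttling) can skip it without changing the ordering, which is what the new comment in the hunk points out.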
@@ -442,7 +442,7 @@ Item {
     Rectangle {
         id: nameCardVUMeter
         // Size
-        width: isMyCard ? myDisplayName.width - 20 : ((gainSlider.value - gainSlider.minimumValue)/(gainSlider.maximumValue - gainSlider.minimumValue)) * (gainSlider.width);
+        width: ((gainSlider.value - gainSlider.minimumValue)/(gainSlider.maximumValue - gainSlider.minimumValue)) * (gainSlider.width);
         height: 8
         // Anchors
         anchors.bottom: isMyCard ? avatarImage.bottom : parent.bottom;
@@ -526,16 +526,14 @@ Item {
         anchors.verticalCenter: nameCardVUMeter.verticalCenter;
         anchors.left: nameCardVUMeter.left;
         // Properties
-        visible: !isMyCard && selected && pal.activeTab == "nearbyTab" && isPresent;
+        visible: (isMyCard || (selected && pal.activeTab == "nearbyTab")) && isPresent;
         value: Users.getAvatarGain(uuid)
         minimumValue: -60.0
         maximumValue: 20.0
         stepSize: 5
         updateValueWhileDragging: true
         onValueChanged: {
-            if (uuid !== "") {
-                updateGainFromQML(uuid, value, false);
-            }
+            updateGainFromQML(uuid, value, false);
         }
         onPressedChanged: {
             if (!pressed) {
@@ -575,7 +573,19 @@ Item {
                 implicitHeight: 16
             }
         }
     }
+    RalewayRegular {
+        // The slider for my card is special, it controls the master gain
+        id: gainSliderText;
+        visible: isMyCard;
+        text: "master volume";
+        size: hifi.fontSizes.tabularData;
+        anchors.left: parent.right;
+        anchors.leftMargin: 8;
+        color: hifi.colors.baseGrayHighlight;
+        horizontalAlignment: Text.AlignLeft;
+        verticalAlignment: Text.AlignTop;
+    }
 }

 function updateGainFromQML(avatarUuid, sliderValue, isReleased) {
     Users.setAvatarGain(avatarUuid, sliderValue);

@@ -673,8 +673,8 @@ static void crossfade_4x2(float* src, float* dst, const float* win, int numFrame
 // linear interpolation with gain
 static void interpolate(float* dst, const float* src0, const float* src1, float frac, float gain) {

-    float f0 = HRTF_GAIN * gain * (1.0f - frac);
-    float f1 = HRTF_GAIN * gain * frac;
+    float f0 = gain * (1.0f - frac);
+    float f1 = gain * frac;

     for (int k = 0; k < HRTF_TAPS; k++) {
         dst[k] = f0 * src0[k] + f1 * src1[k];

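With the HRTF_GAIN factor removed from interpolate(), the level of the interpolated impulse response depends only on the gain supplied by the caller (the fixed HRTF_GAIN factor is presumably applied elsewhere in the gain path, which this diff does not show); the two interpolation weights still sum to that gain for any crossfade fraction. A quick standalone check of that identity, not the real HRTF code:

    #include <cassert>
    #include <cmath>

    int main() {
        // f0 + f1 == gain for any frac in [0, 1], so the caller-supplied gain
        // fully determines the output level of the interpolated filter.
        for (float frac = 0.0f; frac <= 1.0f; frac += 0.25f) {
            float gain = 0.5f;
            float f0 = gain * (1.0f - frac);
            float f1 = gain * frac;
            assert(std::fabs((f0 + f1) - gain) < 1e-6f);
        }
        return 0;
    }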
@@ -9,7 +9,7 @@
 //  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //

-#if defined(__AVX512F__)
+#ifdef __AVX512F__

 #include <assert.h>
 #include <immintrin.h>
@@ -87,15 +87,4 @@ void FIR_1x4_AVX512(float* src, float* dst0, float* dst1, float* dst2, float* ds
     _mm256_zeroupper();
 }

-// FIXME: this fallback can be removed, once we require VS2017
-#elif defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__x86_64__)
-
-#include "../AudioHRTF.h"
-
-void FIR_1x4_AVX2(float* src, float* dst0, float* dst1, float* dst2, float* dst3, float coef[4][HRTF_TAPS], int numFrames);
-
-void FIR_1x4_AVX512(float* src, float* dst0, float* dst1, float* dst2, float* dst3, float coef[4][HRTF_TAPS], int numFrames) {
-    FIR_1x4_AVX2(src, dst0, dst1, dst2, dst3, coef, numFrames);
-}
-
 #endif

@@ -979,8 +979,8 @@ void NodeList::maybeSendIgnoreSetToNode(SharedNodePointer newNode) {
 }

 void NodeList::setAvatarGain(const QUuid& nodeID, float gain) {
-    // cannot set gain of yourself or nobody
-    if (!nodeID.isNull() && _sessionUUID != nodeID) {
+    // cannot set gain of yourself
+    if (_sessionUUID != nodeID) {
         auto audioMixer = soloNodeOfType(NodeType::AudioMixer);
         if (audioMixer) {
             // setup the packet
@@ -988,10 +988,15 @@ void NodeList::setAvatarGain(const QUuid& nodeID, float gain) {

             // write the node ID to the packet
             setAvatarGainPacket->write(nodeID.toRfc4122());
-            // We need to convert the gain in dB (from the script) to an amplitude before packing it.
-            setAvatarGainPacket->writePrimitive(packFloatGainToByte(fastExp2f(gain / 6.0206f)));

-            qCDebug(networking) << "Sending Set Avatar Gain packet UUID: " << uuidStringWithoutCurlyBraces(nodeID) << "Gain:" << gain;
+            // We need to convert the gain in dB (from the script) to an amplitude before packing it.
+            setAvatarGainPacket->writePrimitive(packFloatGainToByte(fastExp2f(gain / 6.02059991f)));
+
+            if (nodeID.isNull()) {
+                qCDebug(networking) << "Sending Set MASTER Avatar Gain packet with Gain:" << gain;
+            } else {
+                qCDebug(networking) << "Sending Set Avatar Gain packet with UUID: " << uuidStringWithoutCurlyBraces(nodeID) << "Gain:" << gain;
+            }

             sendPacket(std::move(setAvatarGainPacket), *audioMixer);
             QWriteLocker{ &_avatarGainMapLock };
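The conversion in this hunk packs the slider's dB value as a linear amplitude: 2^(dB / 6.02059991) equals 10^(dB / 20), because 20·log10(2) ≈ 6.02059991 dB corresponds to one doubling of amplitude. A small worked check of the same expression (std::exp2 stands in for the repo's fastExp2f; packFloatGainToByte, the byte-packing helper, is not reproduced here):

    #include <cmath>
    #include <cstdio>

    // dB -> linear amplitude, matching the expression used above (std::exp2 in place of fastExp2f).
    float dbToAmplitude(float gainDb) {
        return std::exp2(gainDb / 6.02059991f);  // same as powf(10.0f, gainDb / 20.0f)
    }

    int main() {
        std::printf("%.3f\n", dbToAmplitude(0.0f));    // 1.000  (unity)
        std::printf("%.3f\n", dbToAmplitude(-6.02f));  // ~0.500 (half amplitude)
        std::printf("%.3f\n", dbToAmplitude(20.0f));   // ~10.0  (slider maximum, +20 dB)
        std::printf("%.3f\n", dbToAmplitude(-60.0f));  // ~0.001 (slider minimum, -60 dB)
    }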
@@ -1001,7 +1006,7 @@ void NodeList::setAvatarGain(const QUuid& nodeID, float gain) {
             qWarning() << "Couldn't find audio mixer to send set gain request";
         }
     } else {
-        qWarning() << "NodeList::setAvatarGain called with an invalid ID or an ID which matches the current session ID:" << nodeID;
+        qWarning() << "NodeList::setAvatarGain called with an ID which matches the current session ID:" << nodeID;
     }
 }

@@ -65,7 +65,7 @@ public slots:
     * Sets an avatar's gain for you and you only.
     * Units are Decibels (dB)
     * @function Users.setAvatarGain
-    * @param {nodeID} nodeID The node or session ID of the user whose gain you want to modify.
+    * @param {nodeID} nodeID The node or session ID of the user whose gain you want to modify, or null to set the master gain.
     * @param {float} gain The gain of the avatar you'd like to set. Units are dB.
     */
    void setAvatarGain(const QUuid& nodeID, float gain);
@@ -73,7 +73,7 @@ public slots:
    /**jsdoc
     * Gets an avatar's gain for you and you only.
     * @function Users.getAvatarGain
-    * @param {nodeID} nodeID The node or session ID of the user whose gain you want to get.
+    * @param {nodeID} nodeID The node or session ID of the user whose gain you want to get, or null to get the master gain.
     * @return {float} gain (in dB)
     */
    float getAvatarGain(const QUuid& nodeID);
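Per the updated documentation, a null node ID now selects the master gain on both the set and get paths; the slots themselves keep their existing signatures. A hedged C++ usage sketch of those two slots (scripts would call Users.setAvatarGain / Users.getAvatarGain; the header include path and the `users` reference are assumptions for the example):

    #include <QUuid>
    #include "UsersScriptingInterface.h"  // include path assumed for the sketch

    // `users` is assumed to be an available reference to the scripting interface declaring these slots.
    void exampleGainCalls(UsersScriptingInterface& users, const QUuid& someAvatarId) {
        // Null UUID -> master avatar volume for this listener (units are dB).
        users.setAvatarGain(QUuid(), -10.0f);
        float master = users.getAvatarGain(QUuid());

        // Non-null UUID -> per-avatar gain, as before.
        users.setAvatarGain(someAvatarId, 5.0f);
        float perAvatar = users.getAvatarGain(someAvatarId);

        (void)master; (void)perAvatar;
    }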